1 /* bnx2x_main.c: Broadcom Everest network driver.
3 * Copyright (c) 2007-2009 Broadcom Corporation
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation.
9 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
10 * Written by: Eliezer Tamir
11 * Based on code from Michael Chan's bnx2 driver
12 * UDP CSUM errata workaround by Arik Gendelman
13 * Slowpath and fastpath rework by Vladislav Zolotarov
14 * Statistics and Link management by Yitchak Gertner
18 #include <linux/module.h>
19 #include <linux/moduleparam.h>
20 #include <linux/kernel.h>
21 #include <linux/device.h> /* for dev_info() */
22 #include <linux/timer.h>
23 #include <linux/errno.h>
24 #include <linux/ioport.h>
25 #include <linux/slab.h>
26 #include <linux/vmalloc.h>
27 #include <linux/interrupt.h>
28 #include <linux/pci.h>
29 #include <linux/init.h>
30 #include <linux/netdevice.h>
31 #include <linux/etherdevice.h>
32 #include <linux/skbuff.h>
33 #include <linux/dma-mapping.h>
34 #include <linux/bitops.h>
35 #include <linux/irq.h>
36 #include <linux/delay.h>
37 #include <asm/byteorder.h>
38 #include <linux/time.h>
39 #include <linux/ethtool.h>
40 #include <linux/mii.h>
41 #include <linux/if_vlan.h>
44 #include <net/checksum.h>
45 #include <net/ip6_checksum.h>
46 #include <linux/workqueue.h>
47 #include <linux/crc32.h>
48 #include <linux/crc32c.h>
49 #include <linux/prefetch.h>
50 #include <linux/zlib.h>
55 #include "bnx2x_init.h"
56 #include "bnx2x_init_ops.h"
57 #include "bnx2x_dump.h"
59 #define DRV_MODULE_VERSION "1.48.114-1"
60 #define DRV_MODULE_RELDATE "2009/07/29"
61 #define BNX2X_BC_VER 0x040200
63 #include <linux/firmware.h>
64 #include "bnx2x_fw_file_hdr.h"
66 #define FW_FILE_PREFIX_E1 "bnx2x-e1-"
67 #define FW_FILE_PREFIX_E1H "bnx2x-e1h-"
69 /* Time in jiffies before concluding the transmitter is hung */
70 #define TX_TIMEOUT (5*HZ)
72 static char version[] __devinitdata =
73 "Broadcom NetXtreme II 5771x 10Gigabit Ethernet Driver "
74 DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
76 MODULE_AUTHOR("Eliezer Tamir");
77 MODULE_DESCRIPTION("Broadcom NetXtreme II BCM57710/57711/57711E Driver");
78 MODULE_LICENSE("GPL");
79 MODULE_VERSION(DRV_MODULE_VERSION);
81 static int multi_mode = 1;
82 module_param(multi_mode, int, 0);
83 MODULE_PARM_DESC(multi_mode, " Multi queue mode "
84 "(0 Disable; 1 Enable (default))");
86 static int num_rx_queues;
87 module_param(num_rx_queues, int, 0);
88 MODULE_PARM_DESC(num_rx_queues, " Number of Rx queues for multi_mode=1"
89 " (default is half number of CPUs)");
91 static int num_tx_queues;
92 module_param(num_tx_queues, int, 0);
93 MODULE_PARM_DESC(num_tx_queues, " Number of Tx queues for multi_mode=1"
94 " (default is half number of CPUs)");
96 static int disable_tpa;
97 module_param(disable_tpa, int, 0);
98 MODULE_PARM_DESC(disable_tpa, " Disable the TPA (LRO) feature");
101 module_param(int_mode, int, 0);
102 MODULE_PARM_DESC(int_mode, " Force interrupt mode (1 INT#x; 2 MSI)");
105 module_param(poll, int, 0);
106 MODULE_PARM_DESC(poll, " Use polling (for debug)");
108 static int mrrs = -1;
109 module_param(mrrs, int, 0);
110 MODULE_PARM_DESC(mrrs, " Force Max Read Req Size (0..3) (for debug)");
113 module_param(debug, int, 0);
114 MODULE_PARM_DESC(debug, " Default debug msglevel");
116 static int load_count[3]; /* 0-common, 1-port0, 2-port1 */
118 static struct workqueue_struct *bnx2x_wq;
120 enum bnx2x_board_type {
126 /* indexed by board_type, above */
129 } board_info[] __devinitdata = {
130 { "Broadcom NetXtreme II BCM57710 XGb" },
131 { "Broadcom NetXtreme II BCM57711 XGb" },
132 { "Broadcom NetXtreme II BCM57711E XGb" }
136 static const struct pci_device_id bnx2x_pci_tbl[] = {
137 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57710,
138 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57710 },
139 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57711,
140 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57711 },
141 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57711E,
142 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57711E },
146 MODULE_DEVICE_TABLE(pci, bnx2x_pci_tbl);
148 /****************************************************************************
149 * General service functions
150 ****************************************************************************/
153 /* used only at init; locking is done by the MCP */
155 static void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val)
157 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
158 pci_write_config_dword(bp->pdev, PCICFG_GRC_DATA, val);
159 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
160 PCICFG_VENDOR_ID_OFFSET);
163 static u32 bnx2x_reg_rd_ind(struct bnx2x *bp, u32 addr)
167 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
168 pci_read_config_dword(bp->pdev, PCICFG_GRC_DATA, &val);
169 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
170 PCICFG_VENDOR_ID_OFFSET);
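/*
 * Editorial note: the two helpers above implement indirect GRC access
 * through the PCI config window -- write the target address to
 * PCICFG_GRC_ADDRESS, move the data through PCICFG_GRC_DATA, then park
 * the window back on PCICFG_VENDOR_ID_OFFSET so a stray config read
 * cannot hit device registers.  A hedged usage sketch (the register name
 * is illustrative only):
 *
 *	u32 val = bnx2x_reg_rd_ind(bp, some_grc_reg);
 *	bnx2x_reg_wr_ind(bp, some_grc_reg, val | some_bit);
 */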
175 static const u32 dmae_reg_go_c[] = {
176 DMAE_REG_GO_C0, DMAE_REG_GO_C1, DMAE_REG_GO_C2, DMAE_REG_GO_C3,
177 DMAE_REG_GO_C4, DMAE_REG_GO_C5, DMAE_REG_GO_C6, DMAE_REG_GO_C7,
178 DMAE_REG_GO_C8, DMAE_REG_GO_C9, DMAE_REG_GO_C10, DMAE_REG_GO_C11,
179 DMAE_REG_GO_C12, DMAE_REG_GO_C13, DMAE_REG_GO_C14, DMAE_REG_GO_C15
182 /* copy command into DMAE command memory and set DMAE command go */
183 static void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae,
189 cmd_offset = (DMAE_REG_CMD_MEM + sizeof(struct dmae_command) * idx);
190 for (i = 0; i < (sizeof(struct dmae_command)/4); i++) {
191 REG_WR(bp, cmd_offset + i*4, *(((u32 *)dmae) + i));
193 DP(BNX2X_MSG_OFF, "DMAE cmd[%d].%d (0x%08x) : 0x%08x\n",
194 idx, i, cmd_offset + i*4, *(((u32 *)dmae) + i));
196 REG_WR(bp, dmae_reg_go_c[idx], 1);
199 void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
202 struct dmae_command *dmae = &bp->init_dmae;
203 u32 *wb_comp = bnx2x_sp(bp, wb_comp);
206 if (!bp->dmae_ready) {
207 u32 *data = bnx2x_sp(bp, wb_data[0]);
209 DP(BNX2X_MSG_OFF, "DMAE is not ready (dst_addr %08x len32 %d)"
210 " using indirect\n", dst_addr, len32);
211 bnx2x_init_ind_wr(bp, dst_addr, data, len32);
215 mutex_lock(&bp->dmae_mutex);
217 memset(dmae, 0, sizeof(struct dmae_command));
219 dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
220 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
221 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
223 DMAE_CMD_ENDIANITY_B_DW_SWAP |
225 DMAE_CMD_ENDIANITY_DW_SWAP |
227 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
228 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
229 dmae->src_addr_lo = U64_LO(dma_addr);
230 dmae->src_addr_hi = U64_HI(dma_addr);
231 dmae->dst_addr_lo = dst_addr >> 2;
232 dmae->dst_addr_hi = 0;
234 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
235 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
236 dmae->comp_val = DMAE_COMP_VAL;
238 DP(BNX2X_MSG_OFF, "DMAE: opcode 0x%08x\n"
239 DP_LEVEL "src_addr [%x:%08x] len [%d *4] "
240 "dst_addr [%x:%08x (%08x)]\n"
241 DP_LEVEL "comp_addr [%x:%08x] comp_val 0x%08x\n",
242 dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
243 dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo, dst_addr,
244 dmae->comp_addr_hi, dmae->comp_addr_lo, dmae->comp_val);
245 DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
246 bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
247 bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);
251 bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
255 while (*wb_comp != DMAE_COMP_VAL) {
256 DP(BNX2X_MSG_OFF, "wb_comp 0x%08x\n", *wb_comp);
259 BNX2X_ERR("DMAE timeout!\n");
263 /* adjust delay for emulation/FPGA */
264 if (CHIP_REV_IS_SLOW(bp))
270 mutex_unlock(&bp->dmae_mutex);
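/*
 * Usage sketch (editorial, not from the original source): DMA two dwords
 * of slowpath data into internal RAM.  dst_grc_addr is a placeholder;
 * real callers pass storm-internal offsets, as the wb_* helpers below do.
 *
 *	bp->slowpath->wb_data[0] = lo;
 *	bp->slowpath->wb_data[1] = hi;
 *	bnx2x_write_dmae(bp, bnx2x_sp_mapping(bp, wb_data),
 *			 dst_grc_addr, 2);
 */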
273 void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
275 struct dmae_command *dmae = &bp->init_dmae;
276 u32 *wb_comp = bnx2x_sp(bp, wb_comp);
279 if (!bp->dmae_ready) {
280 u32 *data = bnx2x_sp(bp, wb_data[0]);
283 DP(BNX2X_MSG_OFF, "DMAE is not ready (src_addr %08x len32 %d)"
284 " using indirect\n", src_addr, len32);
285 for (i = 0; i < len32; i++)
286 data[i] = bnx2x_reg_rd_ind(bp, src_addr + i*4);
290 mutex_lock(&bp->dmae_mutex);
292 memset(bnx2x_sp(bp, wb_data[0]), 0, sizeof(u32) * 4);
293 memset(dmae, 0, sizeof(struct dmae_command));
295 dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
296 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
297 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
299 DMAE_CMD_ENDIANITY_B_DW_SWAP |
301 DMAE_CMD_ENDIANITY_DW_SWAP |
303 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
304 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
305 dmae->src_addr_lo = src_addr >> 2;
306 dmae->src_addr_hi = 0;
307 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_data));
308 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_data));
310 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
311 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
312 dmae->comp_val = DMAE_COMP_VAL;
314 DP(BNX2X_MSG_OFF, "DMAE: opcode 0x%08x\n"
315 DP_LEVEL "src_addr [%x:%08x] len [%d *4] "
316 "dst_addr [%x:%08x (%08x)]\n"
317 DP_LEVEL "comp_addr [%x:%08x] comp_val 0x%08x\n",
318 dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
319 dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo, src_addr,
320 dmae->comp_addr_hi, dmae->comp_addr_lo, dmae->comp_val);
324 bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
328 while (*wb_comp != DMAE_COMP_VAL) {
331 BNX2X_ERR("DMAE timeout!\n");
335 /* adjust delay for emulation/FPGA */
336 if (CHIP_REV_IS_SLOW(bp))
341 DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
342 bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
343 bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);
345 mutex_unlock(&bp->dmae_mutex);
348 /* used only for slowpath so not inlined */
349 static void bnx2x_wb_wr(struct bnx2x *bp, int reg, u32 val_hi, u32 val_lo)
353 wb_write[0] = val_hi;
354 wb_write[1] = val_lo;
355 REG_WR_DMAE(bp, reg, wb_write, 2);
359 static u64 bnx2x_wb_rd(struct bnx2x *bp, int reg)
363 REG_RD_DMAE(bp, reg, wb_data, 2);
365 return HILO_U64(wb_data[0], wb_data[1]);
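/*
 * Editorial note: wide registers are transferred as two dwords and glued
 * back together; HILO_U64(hi, lo) is assumed to expand to roughly
 * (((u64)(hi) << 32) | (lo)), so the sketch below is equivalent:
 *
 *	u32 wb[2];
 *	REG_RD_DMAE(bp, reg, wb, 2);
 *	u64 val = ((u64)wb[0] << 32) | wb[1];
 */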
369 static int bnx2x_mc_assert(struct bnx2x *bp)
373 u32 row0, row1, row2, row3;
376 last_idx = REG_RD8(bp, BAR_XSTRORM_INTMEM +
377 XSTORM_ASSERT_LIST_INDEX_OFFSET);
379 BNX2X_ERR("XSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
381 /* print the asserts */
382 for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
384 row0 = REG_RD(bp, BAR_XSTRORM_INTMEM +
385 XSTORM_ASSERT_LIST_OFFSET(i));
386 row1 = REG_RD(bp, BAR_XSTRORM_INTMEM +
387 XSTORM_ASSERT_LIST_OFFSET(i) + 4);
388 row2 = REG_RD(bp, BAR_XSTRORM_INTMEM +
389 XSTORM_ASSERT_LIST_OFFSET(i) + 8);
390 row3 = REG_RD(bp, BAR_XSTRORM_INTMEM +
391 XSTORM_ASSERT_LIST_OFFSET(i) + 12);
393 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
394 BNX2X_ERR("XSTORM_ASSERT_INDEX 0x%x = 0x%08x"
395 " 0x%08x 0x%08x 0x%08x\n",
396 i, row3, row2, row1, row0);
404 last_idx = REG_RD8(bp, BAR_TSTRORM_INTMEM +
405 TSTORM_ASSERT_LIST_INDEX_OFFSET);
407 BNX2X_ERR("TSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
409 /* print the asserts */
410 for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
412 row0 = REG_RD(bp, BAR_TSTRORM_INTMEM +
413 TSTORM_ASSERT_LIST_OFFSET(i));
414 row1 = REG_RD(bp, BAR_TSTRORM_INTMEM +
415 TSTORM_ASSERT_LIST_OFFSET(i) + 4);
416 row2 = REG_RD(bp, BAR_TSTRORM_INTMEM +
417 TSTORM_ASSERT_LIST_OFFSET(i) + 8);
418 row3 = REG_RD(bp, BAR_TSTRORM_INTMEM +
419 TSTORM_ASSERT_LIST_OFFSET(i) + 12);
421 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
422 BNX2X_ERR("TSTORM_ASSERT_INDEX 0x%x = 0x%08x"
423 " 0x%08x 0x%08x 0x%08x\n",
424 i, row3, row2, row1, row0);
432 last_idx = REG_RD8(bp, BAR_CSTRORM_INTMEM +
433 CSTORM_ASSERT_LIST_INDEX_OFFSET);
435 BNX2X_ERR("CSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
437 /* print the asserts */
438 for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
440 row0 = REG_RD(bp, BAR_CSTRORM_INTMEM +
441 CSTORM_ASSERT_LIST_OFFSET(i));
442 row1 = REG_RD(bp, BAR_CSTRORM_INTMEM +
443 CSTORM_ASSERT_LIST_OFFSET(i) + 4);
444 row2 = REG_RD(bp, BAR_CSTRORM_INTMEM +
445 CSTORM_ASSERT_LIST_OFFSET(i) + 8);
446 row3 = REG_RD(bp, BAR_CSTRORM_INTMEM +
447 CSTORM_ASSERT_LIST_OFFSET(i) + 12);
449 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
450 BNX2X_ERR("CSTORM_ASSERT_INDEX 0x%x = 0x%08x"
451 " 0x%08x 0x%08x 0x%08x\n",
452 i, row3, row2, row1, row0);
460 last_idx = REG_RD8(bp, BAR_USTRORM_INTMEM +
461 USTORM_ASSERT_LIST_INDEX_OFFSET);
463 BNX2X_ERR("USTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
465 /* print the asserts */
466 for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
468 row0 = REG_RD(bp, BAR_USTRORM_INTMEM +
469 USTORM_ASSERT_LIST_OFFSET(i));
470 row1 = REG_RD(bp, BAR_USTRORM_INTMEM +
471 USTORM_ASSERT_LIST_OFFSET(i) + 4);
472 row2 = REG_RD(bp, BAR_USTRORM_INTMEM +
473 USTORM_ASSERT_LIST_OFFSET(i) + 8);
474 row3 = REG_RD(bp, BAR_USTRORM_INTMEM +
475 USTORM_ASSERT_LIST_OFFSET(i) + 12);
477 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
478 BNX2X_ERR("USTORM_ASSERT_INDEX 0x%x = 0x%08x"
479 " 0x%08x 0x%08x 0x%08x\n",
480 i, row3, row2, row1, row0);
490 static void bnx2x_fw_dump(struct bnx2x *bp)
496 mark = REG_RD(bp, MCP_REG_MCPR_SCRATCH + 0xf104);
497 mark = ((mark + 0x3) & ~0x3);
498 printk(KERN_ERR PFX "begin fw dump (mark 0x%x)\n", mark);
500 printk(KERN_ERR PFX);
501 for (offset = mark - 0x08000000; offset <= 0xF900; offset += 0x8*4) {
502 for (word = 0; word < 8; word++)
503 data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
506 printk(KERN_CONT "%s", (char *)data);
508 for (offset = 0xF108; offset <= mark - 0x08000000; offset += 0x8*4) {
509 for (word = 0; word < 8; word++)
510 data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
513 printk(KERN_CONT "%s", (char *)data);
515 printk(KERN_ERR PFX "end of fw dump\n");
518 static void bnx2x_panic_dump(struct bnx2x *bp)
523 bp->stats_state = STATS_STATE_DISABLED;
524 DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");
526 BNX2X_ERR("begin crash dump -----------------\n");
530 BNX2X_ERR("def_c_idx(%u) def_u_idx(%u) def_x_idx(%u)"
531 " def_t_idx(%u) def_att_idx(%u) attn_state(%u)"
532 " spq_prod_idx(%u)\n",
533 bp->def_c_idx, bp->def_u_idx, bp->def_x_idx, bp->def_t_idx,
534 bp->def_att_idx, bp->attn_state, bp->spq_prod_idx);
537 for_each_rx_queue(bp, i) {
538 struct bnx2x_fastpath *fp = &bp->fp[i];
540 BNX2X_ERR("fp%d: rx_bd_prod(%x) rx_bd_cons(%x)"
541 " *rx_bd_cons_sb(%x) rx_comp_prod(%x)"
542 " rx_comp_cons(%x) *rx_cons_sb(%x)\n",
543 i, fp->rx_bd_prod, fp->rx_bd_cons,
544 le16_to_cpu(*fp->rx_bd_cons_sb), fp->rx_comp_prod,
545 fp->rx_comp_cons, le16_to_cpu(*fp->rx_cons_sb));
546 BNX2X_ERR(" rx_sge_prod(%x) last_max_sge(%x)"
547 " fp_u_idx(%x) *sb_u_idx(%x)\n",
548 fp->rx_sge_prod, fp->last_max_sge,
549 le16_to_cpu(fp->fp_u_idx),
550 fp->status_blk->u_status_block.status_block_index);
554 for_each_tx_queue(bp, i) {
555 struct bnx2x_fastpath *fp = &bp->fp[i];
557 BNX2X_ERR("fp%d: tx_pkt_prod(%x) tx_pkt_cons(%x)"
558 " tx_bd_prod(%x) tx_bd_cons(%x) *tx_cons_sb(%x)\n",
559 i, fp->tx_pkt_prod, fp->tx_pkt_cons, fp->tx_bd_prod,
560 fp->tx_bd_cons, le16_to_cpu(*fp->tx_cons_sb));
561 BNX2X_ERR(" fp_c_idx(%x) *sb_c_idx(%x)"
562 " tx_db_prod(%x)\n", le16_to_cpu(fp->fp_c_idx),
563 fp->status_blk->c_status_block.status_block_index,
564 fp->tx_db.data.prod);
569 for_each_rx_queue(bp, i) {
570 struct bnx2x_fastpath *fp = &bp->fp[i];
572 start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10);
573 end = RX_BD(le16_to_cpu(*fp->rx_cons_sb) + 503);
574 for (j = start; j != end; j = RX_BD(j + 1)) {
575 u32 *rx_bd = (u32 *)&fp->rx_desc_ring[j];
576 struct sw_rx_bd *sw_bd = &fp->rx_buf_ring[j];
578 BNX2X_ERR("fp%d: rx_bd[%x]=[%x:%x] sw_bd=[%p]\n",
579 i, j, rx_bd[1], rx_bd[0], sw_bd->skb);
582 start = RX_SGE(fp->rx_sge_prod);
583 end = RX_SGE(fp->last_max_sge);
584 for (j = start; j != end; j = RX_SGE(j + 1)) {
585 u32 *rx_sge = (u32 *)&fp->rx_sge_ring[j];
586 struct sw_rx_page *sw_page = &fp->rx_page_ring[j];
588 BNX2X_ERR("fp%d: rx_sge[%x]=[%x:%x] sw_page=[%p]\n",
589 i, j, rx_sge[1], rx_sge[0], sw_page->page);
592 start = RCQ_BD(fp->rx_comp_cons - 10);
593 end = RCQ_BD(fp->rx_comp_cons + 503);
594 for (j = start; j != end; j = RCQ_BD(j + 1)) {
595 u32 *cqe = (u32 *)&fp->rx_comp_ring[j];
597 BNX2X_ERR("fp%d: cqe[%x]=[%x:%x:%x:%x]\n",
598 i, j, cqe[0], cqe[1], cqe[2], cqe[3]);
603 for_each_tx_queue(bp, i) {
604 struct bnx2x_fastpath *fp = &bp->fp[i];
606 start = TX_BD(le16_to_cpu(*fp->tx_cons_sb) - 10);
607 end = TX_BD(le16_to_cpu(*fp->tx_cons_sb) + 245);
608 for (j = start; j != end; j = TX_BD(j + 1)) {
609 struct sw_tx_bd *sw_bd = &fp->tx_buf_ring[j];
611 BNX2X_ERR("fp%d: packet[%x]=[%p,%x]\n",
612 i, j, sw_bd->skb, sw_bd->first_bd);
615 start = TX_BD(fp->tx_bd_cons - 10);
616 end = TX_BD(fp->tx_bd_cons + 254);
617 for (j = start; j != end; j = TX_BD(j + 1)) {
618 u32 *tx_bd = (u32 *)&fp->tx_desc_ring[j];
620 BNX2X_ERR("fp%d: tx_bd[%x]=[%x:%x:%x:%x]\n",
621 i, j, tx_bd[0], tx_bd[1], tx_bd[2], tx_bd[3]);
627 BNX2X_ERR("end crash dump -----------------\n");
630 static void bnx2x_int_enable(struct bnx2x *bp)
632 int port = BP_PORT(bp);
633 u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
634 u32 val = REG_RD(bp, addr);
635 int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
636 int msi = (bp->flags & USING_MSI_FLAG) ? 1 : 0;
639 val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
640 HC_CONFIG_0_REG_INT_LINE_EN_0);
641 val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
642 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
644 val &= ~HC_CONFIG_0_REG_INT_LINE_EN_0;
645 val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
646 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
647 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
649 val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
650 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
651 HC_CONFIG_0_REG_INT_LINE_EN_0 |
652 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
654 DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
657 REG_WR(bp, addr, val);
659 val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0;
662 DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x) mode %s\n",
663 val, port, addr, (msix ? "MSI-X" : (msi ? "MSI" : "INTx")));
665 REG_WR(bp, addr, val);
667 * Ensure that HC_CONFIG is written before leading/trailing edge config
672 if (CHIP_IS_E1H(bp)) {
673 /* init leading/trailing edge */
675 val = (0xee0f | (1 << (BP_E1HVN(bp) + 4)));
677 /* enable nig and gpio3 attention */
682 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
683 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
686 /* Make sure that interrupts are indeed enabled from here on */
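/*
 * Worked example (editorial): for E1H virtual node 2 the leading/trailing
 * edge mask above becomes 0xee0f | (1 << (2 + 4)) = 0xee4f, i.e. the base
 * attention mask plus this function's own attention bit.
 */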
690 static void bnx2x_int_disable(struct bnx2x *bp)
692 int port = BP_PORT(bp);
693 u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
694 u32 val = REG_RD(bp, addr);
696 val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
697 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
698 HC_CONFIG_0_REG_INT_LINE_EN_0 |
699 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
701 DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
704 /* flush all outstanding writes */
707 REG_WR(bp, addr, val);
708 if (REG_RD(bp, addr) != val)
709 BNX2X_ERR("BUG! proper val not read from IGU!\n");
713 static void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw)
715 int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
718 /* disable interrupt handling */
719 atomic_inc(&bp->intr_sem);
720 smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */
723 /* prevent the HW from sending interrupts */
724 bnx2x_int_disable(bp);
726 /* make sure all ISRs are done */
728 synchronize_irq(bp->msix_table[0].vector);
730 for_each_queue(bp, i)
731 synchronize_irq(bp->msix_table[i + offset].vector);
733 synchronize_irq(bp->pdev->irq);
735 /* make sure sp_task is not running */
736 cancel_delayed_work(&bp->sp_task);
737 flush_workqueue(bnx2x_wq);
743 * General service functions
746 static inline void bnx2x_ack_sb(struct bnx2x *bp, u8 sb_id,
747 u8 storm, u16 index, u8 op, u8 update)
749 u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
750 COMMAND_REG_INT_ACK);
751 struct igu_ack_register igu_ack;
753 igu_ack.status_block_index = index;
754 igu_ack.sb_id_and_flags =
755 ((sb_id << IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT) |
756 (storm << IGU_ACK_REGISTER_STORM_ID_SHIFT) |
757 (update << IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT) |
758 (op << IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT));
760 DP(BNX2X_MSG_OFF, "write 0x%08x to HC addr 0x%x\n",
761 (*(u32 *)&igu_ack), hc_addr);
762 REG_WR(bp, hc_addr, (*(u32 *)&igu_ack));
764 /* Make sure that ACK is written */
769 static inline u16 bnx2x_update_fpsb_idx(struct bnx2x_fastpath *fp)
771 struct host_status_block *fpsb = fp->status_blk;
774 barrier(); /* status block is written to by the chip */
775 if (fp->fp_c_idx != fpsb->c_status_block.status_block_index) {
776 fp->fp_c_idx = fpsb->c_status_block.status_block_index;
779 if (fp->fp_u_idx != fpsb->u_status_block.status_block_index) {
780 fp->fp_u_idx = fpsb->u_status_block.status_block_index;
786 static u16 bnx2x_ack_int(struct bnx2x *bp)
788 u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
789 COMMAND_REG_SIMD_MASK);
790 u32 result = REG_RD(bp, hc_addr);
792 DP(BNX2X_MSG_OFF, "read 0x%08x from HC addr 0x%x\n",
800 * fast path service functions
803 static inline int bnx2x_has_tx_work_unload(struct bnx2x_fastpath *fp)
805 /* Tell compiler that consumer and producer can change */
807 return (fp->tx_pkt_prod != fp->tx_pkt_cons);
810 /* free skb in the packet ring at pos idx
811 * return idx of last bd freed
813 static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fastpath *fp,
816 struct sw_tx_bd *tx_buf = &fp->tx_buf_ring[idx];
817 struct eth_tx_start_bd *tx_start_bd;
818 struct eth_tx_bd *tx_data_bd;
819 struct sk_buff *skb = tx_buf->skb;
820 u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
823 DP(BNX2X_MSG_OFF, "pkt_idx %d buff @(%p)->skb %p\n",
827 DP(BNX2X_MSG_OFF, "free bd_idx %d\n", bd_idx);
828 tx_start_bd = &fp->tx_desc_ring[bd_idx].start_bd;
829 pci_unmap_single(bp->pdev, BD_UNMAP_ADDR(tx_start_bd),
830 BD_UNMAP_LEN(tx_start_bd), PCI_DMA_TODEVICE);
832 nbd = le16_to_cpu(tx_start_bd->nbd) - 1;
833 #ifdef BNX2X_STOP_ON_ERROR
834 if ((nbd - 1) > (MAX_SKB_FRAGS + 2)) {
835 BNX2X_ERR("BAD nbd!\n");
839 new_cons = nbd + tx_buf->first_bd;
841 /* Get the next bd */
842 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
844 /* Skip a parse bd... */
846 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
848 /* ...and the TSO split header bd since they have no mapping */
849 if (tx_buf->flags & BNX2X_TSO_SPLIT_BD) {
851 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
857 DP(BNX2X_MSG_OFF, "free frag bd_idx %d\n", bd_idx);
858 tx_data_bd = &fp->tx_desc_ring[bd_idx].reg_bd;
859 pci_unmap_page(bp->pdev, BD_UNMAP_ADDR(tx_data_bd),
860 BD_UNMAP_LEN(tx_data_bd), PCI_DMA_TODEVICE);
862 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
867 dev_kfree_skb_any(skb);
868 tx_buf->first_bd = 0;
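/*
 * Editorial sketch: a typical TSO packet occupies a start BD, a parse BD,
 * optionally a split-header BD and one BD per page fragment.  Only the
 * start BD and the fragment BDs carry DMA mappings, which is why the
 * parse and split-header BDs are skipped above without an unmap; the new
 * consumer is simply first_bd advanced by the packet's BD count.
 */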
874 static inline u16 bnx2x_tx_avail(struct bnx2x_fastpath *fp)
880 barrier(); /* Tell compiler that prod and cons can change */
881 prod = fp->tx_bd_prod;
882 cons = fp->tx_bd_cons;
884 /* NUM_TX_RINGS = number of "next-page" entries
885 It will be used as a threshold */
886 used = SUB_S16(prod, cons) + (s16)NUM_TX_RINGS;
888 #ifdef BNX2X_STOP_ON_ERROR
890 WARN_ON(used > fp->bp->tx_ring_size);
891 WARN_ON((fp->bp->tx_ring_size - used) > MAX_TX_AVAIL);
894 return (s16)(fp->bp->tx_ring_size) - used;
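/*
 * Worked example (editorial): with 16-bit indices the subtraction is done
 * modulo 2^16, so a wrapped producer still yields the right occupancy,
 * e.g. prod = 0x0010, cons = 0xfff0 gives SUB_S16() = 0x20 = 32 BDs in
 * flight.  NUM_TX_RINGS is added because each ring page donates one
 * unusable "next page" BD.
 */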
897 static void bnx2x_tx_int(struct bnx2x_fastpath *fp)
899 struct bnx2x *bp = fp->bp;
900 struct netdev_queue *txq;
901 u16 hw_cons, sw_cons, bd_cons = fp->tx_bd_cons;
904 #ifdef BNX2X_STOP_ON_ERROR
905 if (unlikely(bp->panic))
909 txq = netdev_get_tx_queue(bp->dev, fp->index - bp->num_rx_queues);
910 hw_cons = le16_to_cpu(*fp->tx_cons_sb);
911 sw_cons = fp->tx_pkt_cons;
913 while (sw_cons != hw_cons) {
916 pkt_cons = TX_BD(sw_cons);
918 /* prefetch(bp->tx_buf_ring[pkt_cons].skb); */
920 DP(NETIF_MSG_TX_DONE, "hw_cons %u sw_cons %u pkt_cons %u\n",
921 hw_cons, sw_cons, pkt_cons);
923 /* if (NEXT_TX_IDX(sw_cons) != hw_cons) {
925 prefetch(fp->tx_buf_ring[NEXT_TX_IDX(sw_cons)].skb);
928 bd_cons = bnx2x_free_tx_pkt(bp, fp, pkt_cons);
933 fp->tx_pkt_cons = sw_cons;
934 fp->tx_bd_cons = bd_cons;
936 /* TBD need a thresh? */
937 if (unlikely(netif_tx_queue_stopped(txq))) {
939 /* Need to make the tx_bd_cons update visible to start_xmit()
940 * before checking for netif_tx_queue_stopped(). Without the
941 * memory barrier, there is a small possibility that
942 * start_xmit() will miss it and cause the queue to be stopped
947 if ((netif_tx_queue_stopped(txq)) &&
948 (bp->state == BNX2X_STATE_OPEN) &&
949 (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3))
950 netif_tx_wake_queue(txq);
955 static void bnx2x_sp_event(struct bnx2x_fastpath *fp,
956 union eth_rx_cqe *rr_cqe)
958 struct bnx2x *bp = fp->bp;
959 int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data);
960 int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data);
963 "fp %d cid %d got ramrod #%d state is %x type is %d\n",
964 fp->index, cid, command, bp->state,
965 rr_cqe->ramrod_cqe.ramrod_type);
970 switch (command | fp->state) {
971 case (RAMROD_CMD_ID_ETH_CLIENT_SETUP |
972 BNX2X_FP_STATE_OPENING):
973 DP(NETIF_MSG_IFUP, "got MULTI[%d] setup ramrod\n",
975 fp->state = BNX2X_FP_STATE_OPEN;
978 case (RAMROD_CMD_ID_ETH_HALT | BNX2X_FP_STATE_HALTING):
979 DP(NETIF_MSG_IFDOWN, "got MULTI[%d] halt ramrod\n",
981 fp->state = BNX2X_FP_STATE_HALTED;
985 BNX2X_ERR("unexpected MC reply (%d) "
986 "fp->state is %x\n", command, fp->state);
989 mb(); /* force bnx2x_wait_ramrod() to see the change */
993 switch (command | bp->state) {
994 case (RAMROD_CMD_ID_ETH_PORT_SETUP | BNX2X_STATE_OPENING_WAIT4_PORT):
995 DP(NETIF_MSG_IFUP, "got setup ramrod\n");
996 bp->state = BNX2X_STATE_OPEN;
999 case (RAMROD_CMD_ID_ETH_HALT | BNX2X_STATE_CLOSING_WAIT4_HALT):
1000 DP(NETIF_MSG_IFDOWN, "got halt ramrod\n");
1001 bp->state = BNX2X_STATE_CLOSING_WAIT4_DELETE;
1002 fp->state = BNX2X_FP_STATE_HALTED;
1005 case (RAMROD_CMD_ID_ETH_CFC_DEL | BNX2X_STATE_CLOSING_WAIT4_HALT):
1006 DP(NETIF_MSG_IFDOWN, "got delete ramrod for MULTI[%d]\n", cid);
1007 bnx2x_fp(bp, cid, state) = BNX2X_FP_STATE_CLOSED;
1011 case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_OPEN):
1012 case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_DIAG):
1013 DP(NETIF_MSG_IFUP, "got set mac ramrod\n");
1014 bp->set_mac_pending = 0;
1017 case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_CLOSING_WAIT4_HALT):
1018 case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_DISABLED):
1019 DP(NETIF_MSG_IFDOWN, "got (un)set mac ramrod\n");
1023 BNX2X_ERR("unexpected MC reply (%d) bp->state is %x\n",
1024 command, bp->state);
1027 mb(); /* force bnx2x_wait_ramrod() to see the change */
1030 static inline void bnx2x_free_rx_sge(struct bnx2x *bp,
1031 struct bnx2x_fastpath *fp, u16 index)
1033 struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
1034 struct page *page = sw_buf->page;
1035 struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
1037 /* Skip "next page" elements */
1041 pci_unmap_page(bp->pdev, pci_unmap_addr(sw_buf, mapping),
1042 SGE_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);
1043 __free_pages(page, PAGES_PER_SGE_SHIFT);
1045 sw_buf->page = NULL;
1050 static inline void bnx2x_free_rx_sge_range(struct bnx2x *bp,
1051 struct bnx2x_fastpath *fp, int last)
1055 for (i = 0; i < last; i++)
1056 bnx2x_free_rx_sge(bp, fp, i);
1059 static inline int bnx2x_alloc_rx_sge(struct bnx2x *bp,
1060 struct bnx2x_fastpath *fp, u16 index)
1062 struct page *page = alloc_pages(GFP_ATOMIC, PAGES_PER_SGE_SHIFT);
1063 struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
1064 struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
1067 if (unlikely(page == NULL))
1070 mapping = pci_map_page(bp->pdev, page, 0, SGE_PAGE_SIZE*PAGES_PER_SGE,
1071 PCI_DMA_FROMDEVICE);
1072 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
1073 __free_pages(page, PAGES_PER_SGE_SHIFT);
1077 sw_buf->page = page;
1078 pci_unmap_addr_set(sw_buf, mapping, mapping);
1080 sge->addr_hi = cpu_to_le32(U64_HI(mapping));
1081 sge->addr_lo = cpu_to_le32(U64_LO(mapping));
1086 static inline int bnx2x_alloc_rx_skb(struct bnx2x *bp,
1087 struct bnx2x_fastpath *fp, u16 index)
1089 struct sk_buff *skb;
1090 struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
1091 struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
1094 skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
1095 if (unlikely(skb == NULL))
1098 mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_size,
1099 PCI_DMA_FROMDEVICE);
1100 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
1106 pci_unmap_addr_set(rx_buf, mapping, mapping);
1108 rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
1109 rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
1114 /* note that we are not allocating a new skb,
1115 * we are just moving one from cons to prod;
1116 * we are not creating a new mapping,
1117 * so there is no need to check for dma_mapping_error().
1119 static void bnx2x_reuse_rx_skb(struct bnx2x_fastpath *fp,
1120 struct sk_buff *skb, u16 cons, u16 prod)
1122 struct bnx2x *bp = fp->bp;
1123 struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
1124 struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
1125 struct eth_rx_bd *cons_bd = &fp->rx_desc_ring[cons];
1126 struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
1128 pci_dma_sync_single_for_device(bp->pdev,
1129 pci_unmap_addr(cons_rx_buf, mapping),
1130 RX_COPY_THRESH, PCI_DMA_FROMDEVICE);
1132 prod_rx_buf->skb = cons_rx_buf->skb;
1133 pci_unmap_addr_set(prod_rx_buf, mapping,
1134 pci_unmap_addr(cons_rx_buf, mapping));
1135 *prod_bd = *cons_bd;
1138 static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
1141 u16 last_max = fp->last_max_sge;
1143 if (SUB_S16(idx, last_max) > 0)
1144 fp->last_max_sge = idx;
1147 static void bnx2x_clear_sge_mask_next_elems(struct bnx2x_fastpath *fp)
1151 for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
1152 int idx = RX_SGE_CNT * i - 1;
1154 for (j = 0; j < 2; j++) {
1155 SGE_MASK_CLEAR_BIT(fp, idx);
1161 static void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
1162 struct eth_fast_path_rx_cqe *fp_cqe)
1164 struct bnx2x *bp = fp->bp;
1165 u16 sge_len = SGE_PAGE_ALIGN(le16_to_cpu(fp_cqe->pkt_len) -
1166 le16_to_cpu(fp_cqe->len_on_bd)) >>
1168 u16 last_max, last_elem, first_elem;
1175 /* First mark all used pages */
1176 for (i = 0; i < sge_len; i++)
1177 SGE_MASK_CLEAR_BIT(fp, RX_SGE(le16_to_cpu(fp_cqe->sgl[i])));
1179 DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
1180 sge_len - 1, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));
1182 /* Here we assume that the last SGE index is the biggest */
1183 prefetch((void *)(fp->sge_mask));
1184 bnx2x_update_last_max_sge(fp, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));
1186 last_max = RX_SGE(fp->last_max_sge);
1187 last_elem = last_max >> RX_SGE_MASK_ELEM_SHIFT;
1188 first_elem = RX_SGE(fp->rx_sge_prod) >> RX_SGE_MASK_ELEM_SHIFT;
1190 /* If ring is not full */
1191 if (last_elem + 1 != first_elem)
1194 /* Now update the prod */
1195 for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
1196 if (likely(fp->sge_mask[i]))
1199 fp->sge_mask[i] = RX_SGE_MASK_ELEM_ONE_MASK;
1200 delta += RX_SGE_MASK_ELEM_SZ;
1204 fp->rx_sge_prod += delta;
1205 /* clear page-end entries */
1206 bnx2x_clear_sge_mask_next_elems(fp);
1209 DP(NETIF_MSG_RX_STATUS,
1210 "fp->last_max_sge = %d fp->rx_sge_prod = %d\n",
1211 fp->last_max_sge, fp->rx_sge_prod);
1214 static inline void bnx2x_init_sge_ring_bit_mask(struct bnx2x_fastpath *fp)
1216 /* Set the mask to all 1-s: it's faster to compare to 0 than to 0xf-s */
1217 memset(fp->sge_mask, 0xff,
1218 (NUM_RX_SGE >> RX_SGE_MASK_ELEM_SHIFT)*sizeof(u64));
1220 /* Clear the two last indices in each page:
1221 these are the indices that correspond to the "next" element,
1222 hence will never be indicated by the FW and should be removed from
1223 the calculations. */
1224 bnx2x_clear_sge_mask_next_elems(fp);
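/*
 * Editorial sketch (assuming RX_SGE_MASK_ELEM_SHIFT == 6, i.e. u64 mask
 * words): SGE i lives in sge_mask[i >> 6] at bit (i & 63).  A set bit
 * marks an entry the FW has not yet consumed; bnx2x_update_sge_prod()
 * only advances the producer across mask words that have dropped to
 * zero, re-arming each such word to all ones.  The two "next page" bits
 * per page are pre-cleared here so they can never block that advance.
 */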
1227 static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
1228 struct sk_buff *skb, u16 cons, u16 prod)
1230 struct bnx2x *bp = fp->bp;
1231 struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
1232 struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
1233 struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
1236 /* move empty skb from pool to prod and map it */
1237 prod_rx_buf->skb = fp->tpa_pool[queue].skb;
1238 mapping = pci_map_single(bp->pdev, fp->tpa_pool[queue].skb->data,
1239 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
1240 pci_unmap_addr_set(prod_rx_buf, mapping, mapping);
1242 /* move partial skb from cons to pool (don't unmap yet) */
1243 fp->tpa_pool[queue] = *cons_rx_buf;
1245 /* mark bin state as start - print error if current state != stop */
1246 if (fp->tpa_state[queue] != BNX2X_TPA_STOP)
1247 BNX2X_ERR("start of bin not in stop [%d]\n", queue);
1249 fp->tpa_state[queue] = BNX2X_TPA_START;
1251 /* point prod_bd to new skb */
1252 prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
1253 prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
1255 #ifdef BNX2X_STOP_ON_ERROR
1256 fp->tpa_queue_used |= (1 << queue);
1257 #ifdef __powerpc64__
1258 DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n",
1260 DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
1262 fp->tpa_queue_used);
1266 static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
1267 struct sk_buff *skb,
1268 struct eth_fast_path_rx_cqe *fp_cqe,
1271 struct sw_rx_page *rx_pg, old_rx_pg;
1272 u16 len_on_bd = le16_to_cpu(fp_cqe->len_on_bd);
1273 u32 i, frag_len, frag_size, pages;
1277 frag_size = le16_to_cpu(fp_cqe->pkt_len) - len_on_bd;
1278 pages = SGE_PAGE_ALIGN(frag_size) >> SGE_PAGE_SHIFT;
1280 /* This is needed in order to enable forwarding support */
1282 skb_shinfo(skb)->gso_size = min((u32)SGE_PAGE_SIZE,
1283 max(frag_size, (u32)len_on_bd));
1285 #ifdef BNX2X_STOP_ON_ERROR
1287 min((u32)8, (u32)MAX_SKB_FRAGS) * SGE_PAGE_SIZE * PAGES_PER_SGE) {
1288 BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
1290 BNX2X_ERR("fp_cqe->pkt_len = %d fp_cqe->len_on_bd = %d\n",
1291 fp_cqe->pkt_len, len_on_bd);
1297 /* Run through the SGL and compose the fragmented skb */
1298 for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
1299 u16 sge_idx = RX_SGE(le16_to_cpu(fp_cqe->sgl[j]));
1301 /* FW gives the indices of the SGE as if the ring is an array
1302 (meaning that "next" element will consume 2 indices) */
1303 frag_len = min(frag_size, (u32)(SGE_PAGE_SIZE*PAGES_PER_SGE));
1304 rx_pg = &fp->rx_page_ring[sge_idx];
1307 /* If we fail to allocate a substitute page, we simply stop
1308 where we are and drop the whole packet */
1309 err = bnx2x_alloc_rx_sge(bp, fp, sge_idx);
1310 if (unlikely(err)) {
1311 fp->eth_q_stats.rx_skb_alloc_failed++;
1315 /* Unmap the page as we are going to pass it to the stack */
1316 pci_unmap_page(bp->pdev, pci_unmap_addr(&old_rx_pg, mapping),
1317 SGE_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);
1319 /* Add one frag and update the appropriate fields in the skb */
1320 skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);
1322 skb->data_len += frag_len;
1323 skb->truesize += frag_len;
1324 skb->len += frag_len;
1326 frag_size -= frag_len;
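/*
 * Worked example (editorial, assuming 4K SGE pages): for an aggregated
 * frame with pkt_len = 9000 and len_on_bd = 1448, frag_size = 7552 and
 * gso_size = min(4096, max(7552, 1448)) = 4096, so the stack can
 * re-segment the aggregated skb on page-sized chunks when forwarding.
 */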
1332 static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
1333 u16 queue, int pad, int len, union eth_rx_cqe *cqe,
1336 struct sw_rx_bd *rx_buf = &fp->tpa_pool[queue];
1337 struct sk_buff *skb = rx_buf->skb;
1339 struct sk_buff *new_skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
1341 /* Unmap skb in the pool anyway, as we are going to change
1342 pool entry status to BNX2X_TPA_STOP even if new skb allocation fails */
1344 pci_unmap_single(bp->pdev, pci_unmap_addr(rx_buf, mapping),
1345 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
1347 if (likely(new_skb)) {
1348 /* fix ip xsum and give it to the stack */
1349 /* (no need to map the new skb) */
1352 (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
1353 PARSING_FLAGS_VLAN);
1354 int is_not_hwaccel_vlan_cqe =
1355 (is_vlan_cqe && (!(bp->flags & HW_VLAN_RX_FLAG)));
1359 prefetch(((char *)(skb)) + 128);
1361 #ifdef BNX2X_STOP_ON_ERROR
1362 if (pad + len > bp->rx_buf_size) {
1363 BNX2X_ERR("skb_put is about to fail... "
1364 "pad %d len %d rx_buf_size %d\n",
1365 pad, len, bp->rx_buf_size);
1371 skb_reserve(skb, pad);
1374 skb->protocol = eth_type_trans(skb, bp->dev);
1375 skb->ip_summed = CHECKSUM_UNNECESSARY;
1380 iph = (struct iphdr *)skb->data;
1382 /* If there is no Rx VLAN offloading -
1383 take VLAN tag into an account */
1384 if (unlikely(is_not_hwaccel_vlan_cqe))
1385 iph = (struct iphdr *)((u8 *)iph + VLAN_HLEN);
1388 iph->check = ip_fast_csum((u8 *)iph, iph->ihl);
1391 if (!bnx2x_fill_frag_skb(bp, fp, skb,
1392 &cqe->fast_path_cqe, cqe_idx)) {
1394 if ((bp->vlgrp != NULL) && is_vlan_cqe &&
1395 (!is_not_hwaccel_vlan_cqe))
1396 vlan_hwaccel_receive_skb(skb, bp->vlgrp,
1397 le16_to_cpu(cqe->fast_path_cqe.
1401 netif_receive_skb(skb);
1403 DP(NETIF_MSG_RX_STATUS, "Failed to allocate new pages"
1404 " - dropping packet!\n");
1409 /* put new skb in bin */
1410 fp->tpa_pool[queue].skb = new_skb;
1413 /* else drop the packet and keep the buffer in the bin */
1414 DP(NETIF_MSG_RX_STATUS,
1415 "Failed to allocate new skb - dropping packet!\n");
1416 fp->eth_q_stats.rx_skb_alloc_failed++;
1419 fp->tpa_state[queue] = BNX2X_TPA_STOP;
1422 static inline void bnx2x_update_rx_prod(struct bnx2x *bp,
1423 struct bnx2x_fastpath *fp,
1424 u16 bd_prod, u16 rx_comp_prod,
1427 struct ustorm_eth_rx_producers rx_prods = {0};
1430 /* Update producers */
1431 rx_prods.bd_prod = bd_prod;
1432 rx_prods.cqe_prod = rx_comp_prod;
1433 rx_prods.sge_prod = rx_sge_prod;
1436 * Make sure that the BD and SGE data is updated before updating the
1437 * producers since FW might read the BD/SGE right after the producer
1439 * This is only applicable for weak-ordered memory model archs such
1440 * as IA-64. The following barrier is also mandatory since FW will
1441 * assume BDs must have buffers.
1445 for (i = 0; i < sizeof(struct ustorm_eth_rx_producers)/4; i++)
1446 REG_WR(bp, BAR_USTRORM_INTMEM +
1447 USTORM_RX_PRODS_OFFSET(BP_PORT(bp), fp->cl_id) + i*4,
1448 ((u32 *)&rx_prods)[i]);
1450 mmiowb(); /* keep prod updates ordered */
1452 DP(NETIF_MSG_RX_STATUS,
1453 "queue[%d]: wrote bd_prod %u cqe_prod %u sge_prod %u\n",
1454 fp->index, bd_prod, rx_comp_prod, rx_sge_prod);
1457 static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
1459 struct bnx2x *bp = fp->bp;
1460 u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
1461 u16 hw_comp_cons, sw_comp_cons, sw_comp_prod;
1464 #ifdef BNX2X_STOP_ON_ERROR
1465 if (unlikely(bp->panic))
1469 /* CQ "next element" is of the size of the regular element,
1470 that's why it's ok here */
1471 hw_comp_cons = le16_to_cpu(*fp->rx_cons_sb);
1472 if ((hw_comp_cons & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
1475 bd_cons = fp->rx_bd_cons;
1476 bd_prod = fp->rx_bd_prod;
1477 bd_prod_fw = bd_prod;
1478 sw_comp_cons = fp->rx_comp_cons;
1479 sw_comp_prod = fp->rx_comp_prod;
1481 /* Memory barrier necessary as speculative reads of the rx
1482 * buffer can be ahead of the index in the status block
1486 DP(NETIF_MSG_RX_STATUS,
1487 "queue[%d]: hw_comp_cons %u sw_comp_cons %u\n",
1488 fp->index, hw_comp_cons, sw_comp_cons);
1490 while (sw_comp_cons != hw_comp_cons) {
1491 struct sw_rx_bd *rx_buf = NULL;
1492 struct sk_buff *skb;
1493 union eth_rx_cqe *cqe;
1497 comp_ring_cons = RCQ_BD(sw_comp_cons);
1498 bd_prod = RX_BD(bd_prod);
1499 bd_cons = RX_BD(bd_cons);
1501 cqe = &fp->rx_comp_ring[comp_ring_cons];
1502 cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
1504 DP(NETIF_MSG_RX_STATUS, "CQE type %x err %x status %x"
1505 " queue %x vlan %x len %u\n", CQE_TYPE(cqe_fp_flags),
1506 cqe_fp_flags, cqe->fast_path_cqe.status_flags,
1507 le32_to_cpu(cqe->fast_path_cqe.rss_hash_result),
1508 le16_to_cpu(cqe->fast_path_cqe.vlan_tag),
1509 le16_to_cpu(cqe->fast_path_cqe.pkt_len));
1511 /* is this a slowpath msg? */
1512 if (unlikely(CQE_TYPE(cqe_fp_flags))) {
1513 bnx2x_sp_event(fp, cqe);
1516 /* this is an rx packet */
1518 rx_buf = &fp->rx_buf_ring[bd_cons];
1520 len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
1521 pad = cqe->fast_path_cqe.placement_offset;
1523 /* If CQE is marked both TPA_START and TPA_END
1524 it is a non-TPA CQE */
1525 if ((!fp->disable_tpa) &&
1526 (TPA_TYPE(cqe_fp_flags) !=
1527 (TPA_TYPE_START | TPA_TYPE_END))) {
1528 u16 queue = cqe->fast_path_cqe.queue_index;
1530 if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_START) {
1531 DP(NETIF_MSG_RX_STATUS,
1532 "calling tpa_start on queue %d\n",
1535 bnx2x_tpa_start(fp, queue, skb,
1540 if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_END) {
1541 DP(NETIF_MSG_RX_STATUS,
1542 "calling tpa_stop on queue %d\n",
1545 if (!BNX2X_RX_SUM_FIX(cqe))
1546 BNX2X_ERR("STOP on non-TCP "
1549 /* This is the size of the linear data on this skb */
1551 len = le16_to_cpu(cqe->fast_path_cqe.
1553 bnx2x_tpa_stop(bp, fp, queue, pad,
1554 len, cqe, comp_ring_cons);
1555 #ifdef BNX2X_STOP_ON_ERROR
1560 bnx2x_update_sge_prod(fp,
1561 &cqe->fast_path_cqe);
1566 pci_dma_sync_single_for_device(bp->pdev,
1567 pci_unmap_addr(rx_buf, mapping),
1568 pad + RX_COPY_THRESH,
1569 PCI_DMA_FROMDEVICE);
1571 prefetch(((char *)(skb)) + 128);
1573 /* is this an error packet? */
1574 if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
1575 DP(NETIF_MSG_RX_ERR,
1576 "ERROR flags %x rx packet %u\n",
1577 cqe_fp_flags, sw_comp_cons);
1578 fp->eth_q_stats.rx_err_discard_pkt++;
1582 /* Since we don't have a jumbo ring
1583 * copy small packets if mtu > 1500
1585 if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
1586 (len <= RX_COPY_THRESH)) {
1587 struct sk_buff *new_skb;
1589 new_skb = netdev_alloc_skb(bp->dev,
1591 if (new_skb == NULL) {
1592 DP(NETIF_MSG_RX_ERR,
1593 "ERROR packet dropped "
1594 "because of alloc failure\n");
1595 fp->eth_q_stats.rx_skb_alloc_failed++;
1600 skb_copy_from_linear_data_offset(skb, pad,
1601 new_skb->data + pad, len);
1602 skb_reserve(new_skb, pad);
1603 skb_put(new_skb, len);
1605 bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
1609 } else if (bnx2x_alloc_rx_skb(bp, fp, bd_prod) == 0) {
1610 pci_unmap_single(bp->pdev,
1611 pci_unmap_addr(rx_buf, mapping),
1613 PCI_DMA_FROMDEVICE);
1614 skb_reserve(skb, pad);
1618 DP(NETIF_MSG_RX_ERR,
1619 "ERROR packet dropped because "
1620 "of alloc failure\n");
1621 fp->eth_q_stats.rx_skb_alloc_failed++;
1623 bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
1627 skb->protocol = eth_type_trans(skb, bp->dev);
1629 skb->ip_summed = CHECKSUM_NONE;
1631 if (likely(BNX2X_RX_CSUM_OK(cqe)))
1632 skb->ip_summed = CHECKSUM_UNNECESSARY;
1634 fp->eth_q_stats.hw_csum_err++;
1638 skb_record_rx_queue(skb, fp->index);
1640 if ((bp->vlgrp != NULL) && (bp->flags & HW_VLAN_RX_FLAG) &&
1641 (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
1642 PARSING_FLAGS_VLAN))
1643 vlan_hwaccel_receive_skb(skb, bp->vlgrp,
1644 le16_to_cpu(cqe->fast_path_cqe.vlan_tag));
1647 netif_receive_skb(skb);
1653 bd_cons = NEXT_RX_IDX(bd_cons);
1654 bd_prod = NEXT_RX_IDX(bd_prod);
1655 bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
1658 sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
1659 sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);
1661 if (rx_pkt == budget)
1665 fp->rx_bd_cons = bd_cons;
1666 fp->rx_bd_prod = bd_prod_fw;
1667 fp->rx_comp_cons = sw_comp_cons;
1668 fp->rx_comp_prod = sw_comp_prod;
1670 /* Update producers */
1671 bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
1674 fp->rx_pkt += rx_pkt;
1680 static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
1682 struct bnx2x_fastpath *fp = fp_cookie;
1683 struct bnx2x *bp = fp->bp;
1685 /* Return here if interrupt is disabled */
1686 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
1687 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
1691 DP(BNX2X_MSG_FP, "got an MSI-X interrupt on IDX:SB [%d:%d]\n",
1692 fp->index, fp->sb_id);
1693 bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);
1695 #ifdef BNX2X_STOP_ON_ERROR
1696 if (unlikely(bp->panic))
1699 /* Handle Rx or Tx according to MSI-X vector */
1700 if (fp->is_rx_queue) {
1701 prefetch(fp->rx_cons_sb);
1702 prefetch(&fp->status_blk->u_status_block.status_block_index);
1704 napi_schedule(&bnx2x_fp(bp, fp->index, napi));
1707 prefetch(fp->tx_cons_sb);
1708 prefetch(&fp->status_blk->c_status_block.status_block_index);
1710 bnx2x_update_fpsb_idx(fp);
1714 /* Re-enable interrupts */
1715 bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID,
1716 le16_to_cpu(fp->fp_u_idx), IGU_INT_NOP, 1);
1717 bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID,
1718 le16_to_cpu(fp->fp_c_idx), IGU_INT_ENABLE, 1);
1724 static irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
1726 struct bnx2x *bp = netdev_priv(dev_instance);
1727 u16 status = bnx2x_ack_int(bp);
1731 /* Return here if interrupt is shared and it's not for us */
1732 if (unlikely(status == 0)) {
1733 DP(NETIF_MSG_INTR, "not our interrupt!\n");
1736 DP(NETIF_MSG_INTR, "got an interrupt status 0x%x\n", status);
1738 /* Return here if interrupt is disabled */
1739 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
1740 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
1744 #ifdef BNX2X_STOP_ON_ERROR
1745 if (unlikely(bp->panic))
1749 for (i = 0; i < BNX2X_NUM_QUEUES(bp); i++) {
1750 struct bnx2x_fastpath *fp = &bp->fp[i];
1752 mask = 0x2 << fp->sb_id;
1753 if (status & mask) {
1754 /* Handle Rx or Tx according to SB id */
1755 if (fp->is_rx_queue) {
1756 prefetch(fp->rx_cons_sb);
1757 prefetch(&fp->status_blk->u_status_block.
1758 status_block_index);
1760 napi_schedule(&bnx2x_fp(bp, fp->index, napi));
1763 prefetch(fp->tx_cons_sb);
1764 prefetch(&fp->status_blk->c_status_block.
1765 status_block_index);
1767 bnx2x_update_fpsb_idx(fp);
1771 /* Re-enable interrupts */
1772 bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID,
1773 le16_to_cpu(fp->fp_u_idx),
1775 bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID,
1776 le16_to_cpu(fp->fp_c_idx),
1784 if (unlikely(status & 0x1)) {
1785 queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
1793 DP(NETIF_MSG_INTR, "got an unknown interrupt! (status %u)\n",
1799 /* end of fast path */
1801 static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event);
1806 * General service functions
1809 static int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource)
1812 u32 resource_bit = (1 << resource);
1813 int func = BP_FUNC(bp);
1814 u32 hw_lock_control_reg;
1817 /* Validating that the resource is within range */
1818 if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1820 "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1821 resource, HW_LOCK_MAX_RESOURCE_VALUE);
1826 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1828 hw_lock_control_reg =
1829 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1832 /* Validating that the resource is not already taken */
1833 lock_status = REG_RD(bp, hw_lock_control_reg);
1834 if (lock_status & resource_bit) {
1835 DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n",
1836 lock_status, resource_bit);
1840 /* Try for 5 seconds, every 5 ms */
1841 for (cnt = 0; cnt < 1000; cnt++) {
1842 /* Try to acquire the lock */
1843 REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
1844 lock_status = REG_RD(bp, hw_lock_control_reg);
1845 if (lock_status & resource_bit)
1850 DP(NETIF_MSG_HW, "Timeout\n");
1854 static int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource)
1857 u32 resource_bit = (1 << resource);
1858 int func = BP_FUNC(bp);
1859 u32 hw_lock_control_reg;
1861 /* Validating that the resource is within range */
1862 if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1864 "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1865 resource, HW_LOCK_MAX_RESOURCE_VALUE);
1870 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1872 hw_lock_control_reg =
1873 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1876 /* Validating that the resource is currently taken */
1877 lock_status = REG_RD(bp, hw_lock_control_reg);
1878 if (!(lock_status & resource_bit)) {
1879 DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n",
1880 lock_status, resource_bit);
1884 REG_WR(bp, hw_lock_control_reg, resource_bit);
1888 /* HW Lock for shared dual port PHYs */
1889 static void bnx2x_acquire_phy_lock(struct bnx2x *bp)
1891 mutex_lock(&bp->port.phy_mutex);
1893 if (bp->port.need_hw_lock)
1894 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
1897 static void bnx2x_release_phy_lock(struct bnx2x *bp)
1899 if (bp->port.need_hw_lock)
1900 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
1902 mutex_unlock(&bp->port.phy_mutex);
1905 int bnx2x_get_gpio(struct bnx2x *bp, int gpio_num, u8 port)
1907 /* The GPIO should be swapped if swap register is set and active */
1908 int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
1909 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
1910 int gpio_shift = gpio_num +
1911 (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
1912 u32 gpio_mask = (1 << gpio_shift);
1916 if (gpio_num > MISC_REGISTERS_GPIO_3) {
1917 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
1921 /* read GPIO value */
1922 gpio_reg = REG_RD(bp, MISC_REG_GPIO);
1924 /* get the requested pin value */
1925 if ((gpio_reg & gpio_mask) == gpio_mask)
1930 DP(NETIF_MSG_LINK, "pin %d value 0x%x\n", gpio_num, value);
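/*
 * Worked example (editorial): with the port-swap strap active, port 1
 * maps to gpio_port 0, so GPIO 2 is read at shift 2; without the swap,
 * port 1 would use shift 2 + MISC_REGISTERS_GPIO_PORT_SHIFT.
 */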
1935 int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
1937 /* The GPIO should be swapped if swap register is set and active */
1938 int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
1939 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
1940 int gpio_shift = gpio_num +
1941 (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
1942 u32 gpio_mask = (1 << gpio_shift);
1945 if (gpio_num > MISC_REGISTERS_GPIO_3) {
1946 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
1950 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
1951 /* read GPIO and mask out all but the float bits */
1952 gpio_reg = (REG_RD(bp, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT);
1955 case MISC_REGISTERS_GPIO_OUTPUT_LOW:
1956 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output low\n",
1957 gpio_num, gpio_shift);
1958 /* clear FLOAT and set CLR */
1959 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1960 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS);
1963 case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
1964 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output high\n",
1965 gpio_num, gpio_shift);
1966 /* clear FLOAT and set SET */
1967 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1968 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_SET_POS);
1971 case MISC_REGISTERS_GPIO_INPUT_HI_Z:
1972 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> input\n",
1973 gpio_num, gpio_shift);
1975 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1982 REG_WR(bp, MISC_REG_GPIO, gpio_reg);
1983 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
1988 int bnx2x_set_gpio_int(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
1990 /* The GPIO should be swapped if swap register is set and active */
1991 int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
1992 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
1993 int gpio_shift = gpio_num +
1994 (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
1995 u32 gpio_mask = (1 << gpio_shift);
1998 if (gpio_num > MISC_REGISTERS_GPIO_3) {
1999 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
2003 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
2005 gpio_reg = REG_RD(bp, MISC_REG_GPIO_INT);
2008 case MISC_REGISTERS_GPIO_INT_OUTPUT_CLR:
2009 DP(NETIF_MSG_LINK, "Clear GPIO INT %d (shift %d) -> "
2010 "output low\n", gpio_num, gpio_shift);
2011 /* clear SET and set CLR */
2012 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
2013 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
2016 case MISC_REGISTERS_GPIO_INT_OUTPUT_SET:
2017 DP(NETIF_MSG_LINK, "Set GPIO INT %d (shift %d) -> "
2018 "output high\n", gpio_num, gpio_shift);
2019 /* clear CLR and set SET */
2020 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
2021 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
2028 REG_WR(bp, MISC_REG_GPIO_INT, gpio_reg);
2029 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
2034 static int bnx2x_set_spio(struct bnx2x *bp, int spio_num, u32 mode)
2036 u32 spio_mask = (1 << spio_num);
2039 if ((spio_num < MISC_REGISTERS_SPIO_4) ||
2040 (spio_num > MISC_REGISTERS_SPIO_7)) {
2041 BNX2X_ERR("Invalid SPIO %d\n", spio_num);
2045 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
2046 /* read SPIO and mask out all but the float bits */
2047 spio_reg = (REG_RD(bp, MISC_REG_SPIO) & MISC_REGISTERS_SPIO_FLOAT);
2050 case MISC_REGISTERS_SPIO_OUTPUT_LOW:
2051 DP(NETIF_MSG_LINK, "Set SPIO %d -> output low\n", spio_num);
2052 /* clear FLOAT and set CLR */
2053 spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
2054 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_CLR_POS);
2057 case MISC_REGISTERS_SPIO_OUTPUT_HIGH:
2058 DP(NETIF_MSG_LINK, "Set SPIO %d -> output high\n", spio_num);
2059 /* clear FLOAT and set SET */
2060 spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
2061 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_SET_POS);
2064 case MISC_REGISTERS_SPIO_INPUT_HI_Z:
2065 DP(NETIF_MSG_LINK, "Set SPIO %d -> input\n", spio_num);
2067 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
2074 REG_WR(bp, MISC_REG_SPIO, spio_reg);
2075 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
2080 static void bnx2x_calc_fc_adv(struct bnx2x *bp)
2082 switch (bp->link_vars.ieee_fc &
2083 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK) {
2084 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE:
2085 bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
2089 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH:
2090 bp->port.advertising |= (ADVERTISED_Asym_Pause |
2094 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC:
2095 bp->port.advertising |= ADVERTISED_Asym_Pause;
2099 bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
2105 static void bnx2x_link_report(struct bnx2x *bp)
2107 if (bp->state == BNX2X_STATE_DISABLED) {
2108 netif_carrier_off(bp->dev);
2109 printk(KERN_ERR PFX "%s NIC Link is Down\n", bp->dev->name);
2113 if (bp->link_vars.link_up) {
2114 if (bp->state == BNX2X_STATE_OPEN)
2115 netif_carrier_on(bp->dev);
2116 printk(KERN_INFO PFX "%s NIC Link is Up, ", bp->dev->name);
2118 printk("%d Mbps ", bp->link_vars.line_speed);
2120 if (bp->link_vars.duplex == DUPLEX_FULL)
2121 printk("full duplex");
2123 printk("half duplex");
2125 if (bp->link_vars.flow_ctrl != BNX2X_FLOW_CTRL_NONE) {
2126 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) {
2127 printk(", receive ");
2128 if (bp->link_vars.flow_ctrl &
2130 printk("& transmit ");
2132 printk(", transmit ");
2134 printk("flow control ON");
2138 } else { /* link_down */
2139 netif_carrier_off(bp->dev);
2140 printk(KERN_ERR PFX "%s NIC Link is Down\n", bp->dev->name);
2144 static u8 bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode)
2146 if (!BP_NOMCP(bp)) {
2149 /* Initialize link parameters structure variables */
2150 /* It is recommended to turn off RX FC for jumbo frames
2151 for better performance */
2152 if (bp->dev->mtu > 5000)
2153 bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_TX;
2155 bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;
2157 bnx2x_acquire_phy_lock(bp);
2159 if (load_mode == LOAD_DIAG)
2160 bp->link_params.loopback_mode = LOOPBACK_XGXS_10;
2162 rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars);
2164 bnx2x_release_phy_lock(bp);
2166 bnx2x_calc_fc_adv(bp);
2168 if (CHIP_REV_IS_SLOW(bp) && bp->link_vars.link_up) {
2169 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2170 bnx2x_link_report(bp);
2175 BNX2X_ERR("Bootcode is missing - can not initialize link\n");
2179 static void bnx2x_link_set(struct bnx2x *bp)
2181 if (!BP_NOMCP(bp)) {
2182 bnx2x_acquire_phy_lock(bp);
2183 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
2184 bnx2x_release_phy_lock(bp);
2186 bnx2x_calc_fc_adv(bp);
2188 BNX2X_ERR("Bootcode is missing - can not set link\n");
2191 static void bnx2x__link_reset(struct bnx2x *bp)
2193 if (!BP_NOMCP(bp)) {
2194 bnx2x_acquire_phy_lock(bp);
2195 bnx2x_link_reset(&bp->link_params, &bp->link_vars, 1);
2196 bnx2x_release_phy_lock(bp);
2198 BNX2X_ERR("Bootcode is missing - can not reset link\n");
2201 static u8 bnx2x_link_test(struct bnx2x *bp)
2205 bnx2x_acquire_phy_lock(bp);
2206 rc = bnx2x_test_link(&bp->link_params, &bp->link_vars);
2207 bnx2x_release_phy_lock(bp);
2212 static void bnx2x_init_port_minmax(struct bnx2x *bp)
2214 u32 r_param = bp->link_vars.line_speed / 8;
2215 u32 fair_periodic_timeout_usec;
2218 memset(&(bp->cmng.rs_vars), 0,
2219 sizeof(struct rate_shaping_vars_per_port));
2220 memset(&(bp->cmng.fair_vars), 0, sizeof(struct fairness_vars_per_port));
2222 /* 100 usec in SDM ticks = 25 since each tick is 4 usec */
2223 bp->cmng.rs_vars.rs_periodic_timeout = RS_PERIODIC_TIMEOUT_USEC / 4;
2225 /* this is the threshold below which no timer arming will occur
2226 1.25 coefficient is for the threshold to be a little bigger
2227 than the real time, to compensate for timer inaccuracy */
2228 bp->cmng.rs_vars.rs_threshold =
2229 (RS_PERIODIC_TIMEOUT_USEC * r_param * 5) / 4;
2231 /* resolution of fairness timer */
2232 fair_periodic_timeout_usec = QM_ARB_BYTES / r_param;
2233 /* for 10G it is 1000 usec; for 1G it is 10000 usec */
2234 t_fair = T_FAIR_COEF / bp->link_vars.line_speed;
2236 /* this is the threshold below which we won't arm the timer anymore */
2237 bp->cmng.fair_vars.fair_threshold = QM_ARB_BYTES;
2239 /* we multiply by 1e3/8 to get bytes/msec.
2240 We don't want the credits to exceed
2241 t_fair*FAIR_MEM (the algorithm's resolution) */
2242 bp->cmng.fair_vars.upper_bound = r_param * t_fair * FAIR_MEM;
2243 /* since each tick is 4 usec */
2244 bp->cmng.fair_vars.fairness_timeout = fair_periodic_timeout_usec / 4;
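/* Worked example (illustrative only, using the figures from the comments
   above): at 10G, line_speed = 10000 Mbps, so r_param = 10000/8 = 1250
   bytes/usec and t_fair = T_FAIR_COEF / 10000 = 1000 usec.  With the
   100 usec rate-shaping period this gives rs_threshold =
   (100 * 1250 * 5) / 4 = 156250 bytes, i.e. 1.25 times the traffic that
   can pass in one period.  The /4 divisions convert usec into the
   4 usec SDM ticks noted above */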
2247 /* Calculates the sum of vn_min_rates.
2248 It's needed for further normalizing of the min_rates.
2250 Returns: sum of vn_min_rates, or
2252 0 - if all the min_rates are 0.
2253 In the latter case the fairness algorithm should be deactivated.
2254 If not all min_rates are zero then those that are zero will be set to 1.
2256 static void bnx2x_calc_vn_weight_sum(struct bnx2x *bp)
2259 int port = BP_PORT(bp);
2262 bp->vn_weight_sum = 0;
2263 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
2264 int func = 2*vn + port;
2265 u32 vn_cfg = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
2266 u32 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
2267 FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
2269 /* Skip hidden vns */
2270 if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE)
2273 /* If min rate is zero - set it to 1 */
2275 vn_min_rate = DEF_MIN_RATE;
2279 bp->vn_weight_sum += vn_min_rate;
2282 /* ... only if all min rates are zeros - disable fairness */
2284 bp->vn_weight_sum = 0;
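/* Example (hypothetical config value): a FUNC_MF_CFG_MIN_BW field of 25
   decodes to vn_min_rate = 25 * 100 = 2500, i.e. the field is presumably
   expressed in units of 100 Mbps.  Hidden vns contribute nothing, and a
   configured-zero vn is counted as DEF_MIN_RATE so that no fairness
   weight ends up zero */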
2287 static void bnx2x_init_vn_minmax(struct bnx2x *bp, int func)
2289 struct rate_shaping_vars_per_vn m_rs_vn;
2290 struct fairness_vars_per_vn m_fair_vn;
2291 u32 vn_cfg = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
2292 u16 vn_min_rate, vn_max_rate;
2295 /* If function is hidden - set min and max to zeroes */
2296 if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) {
2301 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
2302 FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
2303 /* If fairness is enabled (not all min rates are zeroes) and
2304 if current min rate is zero - set it to 1.
2305 This is a requirement of the algorithm. */
2306 if (bp->vn_weight_sum && (vn_min_rate == 0))
2307 vn_min_rate = DEF_MIN_RATE;
2308 vn_max_rate = ((vn_cfg & FUNC_MF_CFG_MAX_BW_MASK) >>
2309 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
2313 "func %d: vn_min_rate=%d vn_max_rate=%d vn_weight_sum=%d\n",
2314 func, vn_min_rate, vn_max_rate, bp->vn_weight_sum);
2316 memset(&m_rs_vn, 0, sizeof(struct rate_shaping_vars_per_vn));
2317 memset(&m_fair_vn, 0, sizeof(struct fairness_vars_per_vn));
2319 /* global vn counter - maximal Mbps for this vn */
2320 m_rs_vn.vn_counter.rate = vn_max_rate;
2322 /* quota - number of bytes transmitted in this period */
2323 m_rs_vn.vn_counter.quota =
2324 (vn_max_rate * RS_PERIODIC_TIMEOUT_USEC) / 8;
2326 if (bp->vn_weight_sum) {
2327 /* credit for each period of the fairness algorithm:
2328 number of bytes in T_FAIR (the vn share the port rate).
2329 vn_weight_sum should not be larger than 10000, thus
2330 T_FAIR_COEF / (8 * vn_weight_sum) will always be greater
2331 than zero */
2332 m_fair_vn.vn_credit_delta =
2333 max((u32)(vn_min_rate * (T_FAIR_COEF /
2334 (8 * bp->vn_weight_sum))),
2335 (u32)(bp->cmng.fair_vars.fair_threshold * 2));
2336 DP(NETIF_MSG_IFUP, "m_fair_vn.vn_credit_delta=%d\n",
2337 m_fair_vn.vn_credit_delta);
2340 /* Store it to internal memory */
2341 for (i = 0; i < sizeof(struct rate_shaping_vars_per_vn)/4; i++)
2342 REG_WR(bp, BAR_XSTRORM_INTMEM +
2343 XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func) + i * 4,
2344 ((u32 *)(&m_rs_vn))[i]);
2346 for (i = 0; i < sizeof(struct fairness_vars_per_vn)/4; i++)
2347 REG_WR(bp, BAR_XSTRORM_INTMEM +
2348 XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func) + i * 4,
2349 ((u32 *)(&m_fair_vn))[i]);
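/* Worked example (illustrative): with vn_max_rate = 10000 (10G) and the
   100 usec period noted earlier, quota = (10000 * 100) / 8 = 125000
   bytes per period, exactly 10 Gbps worth of traffic; a rate in Mbps is
   also a rate in bits/usec, so dividing by 8 yields bytes/usec */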
2353 /* This function is called upon link interrupt */
2354 static void bnx2x_link_attn(struct bnx2x *bp)
2356 /* Make sure that we are synced with the current statistics */
2357 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2359 bnx2x_link_update(&bp->link_params, &bp->link_vars);
2361 if (bp->link_vars.link_up) {
2363 /* dropless flow control */
2364 if (CHIP_IS_E1H(bp)) {
2365 int port = BP_PORT(bp);
2366 u32 pause_enabled = 0;
2368 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
2371 REG_WR(bp, BAR_USTRORM_INTMEM +
2372 USTORM_ETH_PAUSE_ENABLED_OFFSET(port),
2376 if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
2377 struct host_port_stats *pstats;
2379 pstats = bnx2x_sp(bp, port_stats);
2380 /* reset old bmac stats */
2381 memset(&(pstats->mac_stx[0]), 0,
2382 sizeof(struct mac_stx));
2384 if ((bp->state == BNX2X_STATE_OPEN) ||
2385 (bp->state == BNX2X_STATE_DISABLED))
2386 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2389 /* indicate link status */
2390 bnx2x_link_report(bp);
2393 int port = BP_PORT(bp);
2397 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
2398 if (vn == BP_E1HVN(bp))
2401 func = ((vn << 1) | port);
2403 /* Set the attention towards other drivers
2404 on the same port */
2405 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
2406 (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
2409 if (bp->link_vars.link_up) {
2412 /* Init rate shaping and fairness contexts */
2413 bnx2x_init_port_minmax(bp);
2415 for (vn = VN_0; vn < E1HVN_MAX; vn++)
2416 bnx2x_init_vn_minmax(bp, 2*vn + port);
2418 /* Store it to internal memory */
2420 i < sizeof(struct cmng_struct_per_port) / 4; i++)
2421 REG_WR(bp, BAR_XSTRORM_INTMEM +
2422 XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i*4,
2423 ((u32 *)(&bp->cmng))[i]);
2428 static void bnx2x__link_status_update(struct bnx2x *bp)
2430 int func = BP_FUNC(bp);
2432 if (bp->state != BNX2X_STATE_OPEN)
2435 bnx2x_link_status_update(&bp->link_params, &bp->link_vars);
2437 if (bp->link_vars.link_up)
2438 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2440 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2442 bp->mf_config = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
2443 bnx2x_calc_vn_weight_sum(bp);
2445 /* indicate link status */
2446 bnx2x_link_report(bp);
2449 static void bnx2x_pmf_update(struct bnx2x *bp)
2451 int port = BP_PORT(bp);
2455 DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
2457 /* enable nig attention */
2458 val = (0xff0f | (1 << (BP_E1HVN(bp) + 4)));
2459 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
2460 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
2462 bnx2x_stats_handle(bp, STATS_EVENT_PMF);
2470 * General service functions
2473 /* send the MCP a request, block until there is a reply */
2474 u32 bnx2x_fw_command(struct bnx2x *bp, u32 command)
2476 int func = BP_FUNC(bp);
2477 u32 seq = ++bp->fw_seq;
2480 u8 delay = CHIP_REV_IS_SLOW(bp) ? 100 : 10;
2482 SHMEM_WR(bp, func_mb[func].drv_mb_header, (command | seq));
2483 DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB\n", (command | seq));
2486 /* let the FW do its magic ... */
2489 rc = SHMEM_RD(bp, func_mb[func].fw_mb_header);
2491 /* Give the FW up to 2 seconds (200*10ms) */
2492 } while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 200));
2494 DP(BNX2X_MSG_MCP, "[after %d ms] read (%x) seq is (%x) from FW MB\n",
2495 cnt*delay, rc, seq);
2497 /* is this a reply to our command? */
2498 if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK))
2499 rc &= FW_MSG_CODE_MASK;
2502 BNX2X_ERR("FW failed to respond!\n");
2510 static void bnx2x_set_storm_rx_mode(struct bnx2x *bp);
2511 static void bnx2x_set_mac_addr_e1h(struct bnx2x *bp, int set);
2512 static void bnx2x_set_rx_mode(struct net_device *dev);
2514 static void bnx2x_e1h_disable(struct bnx2x *bp)
2516 int port = BP_PORT(bp);
2519 bp->rx_mode = BNX2X_RX_MODE_NONE;
2520 bnx2x_set_storm_rx_mode(bp);
2522 netif_tx_disable(bp->dev);
2523 bp->dev->trans_start = jiffies; /* prevent tx timeout */
2525 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
2527 bnx2x_set_mac_addr_e1h(bp, 0);
2529 for (i = 0; i < MC_HASH_SIZE; i++)
2530 REG_WR(bp, MC_HASH_OFFSET(bp, i), 0);
2532 netif_carrier_off(bp->dev);
2535 static void bnx2x_e1h_enable(struct bnx2x *bp)
2537 int port = BP_PORT(bp);
2539 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
2541 bnx2x_set_mac_addr_e1h(bp, 1);
2543 /* Only the Tx queues need to be re-enabled */
2544 netif_tx_wake_all_queues(bp->dev);
2546 /* Initialize the receive filter. */
2547 bnx2x_set_rx_mode(bp->dev);
2550 static void bnx2x_update_min_max(struct bnx2x *bp)
2552 int port = BP_PORT(bp);
2555 /* Init rate shaping and fairness contexts */
2556 bnx2x_init_port_minmax(bp);
2558 bnx2x_calc_vn_weight_sum(bp);
2560 for (vn = VN_0; vn < E1HVN_MAX; vn++)
2561 bnx2x_init_vn_minmax(bp, 2*vn + port);
2566 /* Set the attention towards other drivers on the same port */
2567 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
2568 if (vn == BP_E1HVN(bp))
2571 func = ((vn << 1) | port);
2572 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
2573 (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
2576 /* Store it to internal memory */
2577 for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++)
2578 REG_WR(bp, BAR_XSTRORM_INTMEM +
2579 XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i*4,
2580 ((u32 *)(&bp->cmng))[i]);
2584 static void bnx2x_dcc_event(struct bnx2x *bp, u32 dcc_event)
2586 int func = BP_FUNC(bp);
2588 DP(BNX2X_MSG_MCP, "dcc_event 0x%x\n", dcc_event);
2589 bp->mf_config = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
2591 if (dcc_event & DRV_STATUS_DCC_DISABLE_ENABLE_PF) {
2593 if (bp->mf_config & FUNC_MF_CFG_FUNC_DISABLED) {
2594 DP(NETIF_MSG_IFDOWN, "mf_cfg function disabled\n");
2595 bp->state = BNX2X_STATE_DISABLED;
2597 bnx2x_e1h_disable(bp);
2599 DP(NETIF_MSG_IFUP, "mf_cfg function enabled\n");
2600 bp->state = BNX2X_STATE_OPEN;
2602 bnx2x_e1h_enable(bp);
2604 dcc_event &= ~DRV_STATUS_DCC_DISABLE_ENABLE_PF;
2606 if (dcc_event & DRV_STATUS_DCC_BANDWIDTH_ALLOCATION) {
2608 bnx2x_update_min_max(bp);
2609 dcc_event &= ~DRV_STATUS_DCC_BANDWIDTH_ALLOCATION;
2612 /* Report results to MCP */
2613 if (dcc_event)
2614 bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_FAILURE);
2615 else
2616 bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_OK);
2619 /* the slow path queue is odd since completions arrive on the fastpath ring */
2620 static int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
2621 u32 data_hi, u32 data_lo, int common)
2623 int func = BP_FUNC(bp);
2625 DP(BNX2X_MSG_SP/*NETIF_MSG_TIMER*/,
2626 "SPQE (%x:%x) command %d hw_cid %x data (%x:%x) left %x\n",
2627 (u32)U64_HI(bp->spq_mapping), (u32)(U64_LO(bp->spq_mapping) +
2628 (void *)bp->spq_prod_bd - (void *)bp->spq), command,
2629 HW_CID(bp, cid), data_hi, data_lo, bp->spq_left);
2631 #ifdef BNX2X_STOP_ON_ERROR
2632 if (unlikely(bp->panic))
2636 spin_lock_bh(&bp->spq_lock);
2638 if (!bp->spq_left) {
2639 BNX2X_ERR("BUG! SPQ ring full!\n");
2640 spin_unlock_bh(&bp->spq_lock);
2645 /* CID needs port number to be encoded in it */
2646 bp->spq_prod_bd->hdr.conn_and_cmd_data =
2647 cpu_to_le32(((command << SPE_HDR_CMD_ID_SHIFT) |
2649 bp->spq_prod_bd->hdr.type = cpu_to_le16(ETH_CONNECTION_TYPE);
2651 bp->spq_prod_bd->hdr.type |=
2652 cpu_to_le16((1 << SPE_HDR_COMMON_RAMROD_SHIFT));
2654 bp->spq_prod_bd->data.mac_config_addr.hi = cpu_to_le32(data_hi);
2655 bp->spq_prod_bd->data.mac_config_addr.lo = cpu_to_le32(data_lo);
2659 if (bp->spq_prod_bd == bp->spq_last_bd) {
2660 bp->spq_prod_bd = bp->spq;
2661 bp->spq_prod_idx = 0;
2662 DP(NETIF_MSG_TIMER, "end of spq\n");
2669 /* Make sure that BD data is updated before writing the producer */
2672 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func),
2677 spin_unlock_bh(&bp->spq_lock);
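/* Usage sketch (as done by the statistics code below): a ramrod's
   parameters are split into high/low 32-bit halves, e.g.

	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_STAT_QUERY, 0,
		      ((u32 *)&ramrod_data)[1], ((u32 *)&ramrod_data)[0], 0);

   'common' selects the common-ramrod bit in the BD header as set above */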
2681 /* acquire split MCP access lock register */
2682 static int bnx2x_acquire_alr(struct bnx2x *bp)
2689 for (j = 0; j < i*10; j++) {
2691 REG_WR(bp, GRCBASE_MCP + 0x9c, val);
2692 val = REG_RD(bp, GRCBASE_MCP + 0x9c);
2693 if (val & (1L << 31))
2698 if (!(val & (1L << 31))) {
2699 BNX2X_ERR("Cannot acquire MCP access lock register\n");
2706 /* release split MCP access lock register */
2707 static void bnx2x_release_alr(struct bnx2x *bp)
2711 REG_WR(bp, GRCBASE_MCP + 0x9c, val);
2714 static inline u16 bnx2x_update_dsb_idx(struct bnx2x *bp)
2716 struct host_def_status_block *def_sb = bp->def_status_blk;
2719 barrier(); /* status block is written to by the chip */
2720 if (bp->def_att_idx != def_sb->atten_status_block.attn_bits_index) {
2721 bp->def_att_idx = def_sb->atten_status_block.attn_bits_index;
2724 if (bp->def_c_idx != def_sb->c_def_status_block.status_block_index) {
2725 bp->def_c_idx = def_sb->c_def_status_block.status_block_index;
2728 if (bp->def_u_idx != def_sb->u_def_status_block.status_block_index) {
2729 bp->def_u_idx = def_sb->u_def_status_block.status_block_index;
2732 if (bp->def_x_idx != def_sb->x_def_status_block.status_block_index) {
2733 bp->def_x_idx = def_sb->x_def_status_block.status_block_index;
2736 if (bp->def_t_idx != def_sb->t_def_status_block.status_block_index) {
2737 bp->def_t_idx = def_sb->t_def_status_block.status_block_index;
2744 * slow path service functions
2747 static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
2749 int port = BP_PORT(bp);
2750 u32 hc_addr = (HC_REG_COMMAND_REG + port*32 +
2751 COMMAND_REG_ATTN_BITS_SET);
2752 u32 aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
2753 MISC_REG_AEU_MASK_ATTN_FUNC_0;
2754 u32 nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 :
2755 NIG_REG_MASK_INTERRUPT_PORT0;
2759 if (bp->attn_state & asserted)
2760 BNX2X_ERR("IGU ERROR\n");
2762 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2763 aeu_mask = REG_RD(bp, aeu_addr);
2765 DP(NETIF_MSG_HW, "aeu_mask %x newly asserted %x\n",
2766 aeu_mask, asserted);
2767 aeu_mask &= ~(asserted & 0xff);
2768 DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
2770 REG_WR(bp, aeu_addr, aeu_mask);
2771 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2773 DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
2774 bp->attn_state |= asserted;
2775 DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
2777 if (asserted & ATTN_HARD_WIRED_MASK) {
2778 if (asserted & ATTN_NIG_FOR_FUNC) {
2780 bnx2x_acquire_phy_lock(bp);
2782 /* save nig interrupt mask */
2783 nig_mask = REG_RD(bp, nig_int_mask_addr);
2784 REG_WR(bp, nig_int_mask_addr, 0);
2786 bnx2x_link_attn(bp);
2788 /* handle unicore attn? */
2790 if (asserted & ATTN_SW_TIMER_4_FUNC)
2791 DP(NETIF_MSG_HW, "ATTN_SW_TIMER_4_FUNC!\n");
2793 if (asserted & GPIO_2_FUNC)
2794 DP(NETIF_MSG_HW, "GPIO_2_FUNC!\n");
2796 if (asserted & GPIO_3_FUNC)
2797 DP(NETIF_MSG_HW, "GPIO_3_FUNC!\n");
2799 if (asserted & GPIO_4_FUNC)
2800 DP(NETIF_MSG_HW, "GPIO_4_FUNC!\n");
2803 if (asserted & ATTN_GENERAL_ATTN_1) {
2804 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_1!\n");
2805 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_1, 0x0);
2807 if (asserted & ATTN_GENERAL_ATTN_2) {
2808 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_2!\n");
2809 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_2, 0x0);
2811 if (asserted & ATTN_GENERAL_ATTN_3) {
2812 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_3!\n");
2813 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_3, 0x0);
2816 if (asserted & ATTN_GENERAL_ATTN_4) {
2817 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_4!\n");
2818 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_4, 0x0);
2820 if (asserted & ATTN_GENERAL_ATTN_5) {
2821 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_5!\n");
2822 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_5, 0x0);
2824 if (asserted & ATTN_GENERAL_ATTN_6) {
2825 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_6!\n");
2826 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_6, 0x0);
2830 } /* if hardwired */
2832 DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
2834 REG_WR(bp, hc_addr, asserted);
2836 /* now set back the mask */
2837 if (asserted & ATTN_NIG_FOR_FUNC) {
2838 REG_WR(bp, nig_int_mask_addr, nig_mask);
2839 bnx2x_release_phy_lock(bp);
2843 static inline void bnx2x_fan_failure(struct bnx2x *bp)
2845 int port = BP_PORT(bp);
2847 /* mark the failure */
2848 bp->link_params.ext_phy_config &= ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
2849 bp->link_params.ext_phy_config |= PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE;
2850 SHMEM_WR(bp, dev_info.port_hw_config[port].external_phy_config,
2851 bp->link_params.ext_phy_config);
2853 /* log the failure */
2854 printk(KERN_ERR PFX "Fan Failure on Network Controller %s has caused"
2855 " the driver to shut down the card to prevent permanent"
2856 " damage. Please contact Dell Support for assistance\n",
2857 bp->dev->name);
2859 static inline void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
2861 int port = BP_PORT(bp);
2863 u32 val, swap_val, swap_override;
2865 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
2866 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
2868 if (attn & AEU_INPUTS_ATTN_BITS_SPIO5) {
2870 val = REG_RD(bp, reg_offset);
2871 val &= ~AEU_INPUTS_ATTN_BITS_SPIO5;
2872 REG_WR(bp, reg_offset, val);
2874 BNX2X_ERR("SPIO5 hw attention\n");
2876 /* Fan failure attention */
2877 switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
2878 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
2879 /* Low power mode is controlled by GPIO 2 */
2880 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
2881 MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
2882 /* The PHY reset is controlled by GPIO 1 */
2883 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
2884 MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
2887 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
2888 /* The PHY reset is controlled by GPIO 1 */
2889 /* fake the port number to cancel the swap done in
2890 set_gpio() */
2891 swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
2892 swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
2893 port = (swap_val && swap_override) ^ 1;
2894 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
2895 MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
2901 bnx2x_fan_failure(bp);
2904 if (attn & (AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 |
2905 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1)) {
2906 bnx2x_acquire_phy_lock(bp);
2907 bnx2x_handle_module_detect_int(&bp->link_params);
2908 bnx2x_release_phy_lock(bp);
2911 if (attn & HW_INTERRUT_ASSERT_SET_0) {
2913 val = REG_RD(bp, reg_offset);
2914 val &= ~(attn & HW_INTERRUT_ASSERT_SET_0);
2915 REG_WR(bp, reg_offset, val);
2917 BNX2X_ERR("FATAL HW block attention set0 0x%x\n",
2918 (attn & HW_INTERRUT_ASSERT_SET_0));
2923 static inline void bnx2x_attn_int_deasserted1(struct bnx2x *bp, u32 attn)
2927 if (attn & AEU_INPUTS_ATTN_BITS_DOORBELLQ_HW_INTERRUPT) {
2929 val = REG_RD(bp, DORQ_REG_DORQ_INT_STS_CLR);
2930 BNX2X_ERR("DB hw attention 0x%x\n", val);
2931 /* DORQ discard attention */
2933 BNX2X_ERR("FATAL error from DORQ\n");
2936 if (attn & HW_INTERRUT_ASSERT_SET_1) {
2938 int port = BP_PORT(bp);
2941 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 :
2942 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1);
2944 val = REG_RD(bp, reg_offset);
2945 val &= ~(attn & HW_INTERRUT_ASSERT_SET_1);
2946 REG_WR(bp, reg_offset, val);
2948 BNX2X_ERR("FATAL HW block attention set1 0x%x\n",
2949 (attn & HW_INTERRUT_ASSERT_SET_1));
2954 static inline void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn)
2958 if (attn & AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) {
2960 val = REG_RD(bp, CFC_REG_CFC_INT_STS_CLR);
2961 BNX2X_ERR("CFC hw attention 0x%x\n", val);
2962 /* CFC error attention */
2964 BNX2X_ERR("FATAL error from CFC\n");
2967 if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) {
2969 val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_0);
2970 BNX2X_ERR("PXP hw attention 0x%x\n", val);
2971 /* RQ_USDMDP_FIFO_OVERFLOW */
2973 BNX2X_ERR("FATAL error from PXP\n");
2976 if (attn & HW_INTERRUT_ASSERT_SET_2) {
2978 int port = BP_PORT(bp);
2981 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 :
2982 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2);
2984 val = REG_RD(bp, reg_offset);
2985 val &= ~(attn & HW_INTERRUT_ASSERT_SET_2);
2986 REG_WR(bp, reg_offset, val);
2988 BNX2X_ERR("FATAL HW block attention set2 0x%x\n",
2989 (attn & HW_INTERRUT_ASSERT_SET_2));
2994 static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
2998 if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) {
3000 if (attn & BNX2X_PMF_LINK_ASSERT) {
3001 int func = BP_FUNC(bp);
3003 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
3004 val = SHMEM_RD(bp, func_mb[func].drv_status);
3005 if (val & DRV_STATUS_DCC_EVENT_MASK)
3007 (val & DRV_STATUS_DCC_EVENT_MASK));
3008 bnx2x__link_status_update(bp);
3009 if ((bp->port.pmf == 0) && (val & DRV_STATUS_PMF))
3010 bnx2x_pmf_update(bp);
3012 } else if (attn & BNX2X_MC_ASSERT_BITS) {
3014 BNX2X_ERR("MC assert!\n");
3015 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_10, 0);
3016 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_9, 0);
3017 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_8, 0);
3018 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_7, 0);
3021 } else if (attn & BNX2X_MCP_ASSERT) {
3023 BNX2X_ERR("MCP assert!\n");
3024 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_11, 0);
3028 BNX2X_ERR("Unknown HW assert! (attn 0x%x)\n", attn);
3031 if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) {
3032 BNX2X_ERR("LATCHED attention 0x%08x (masked)\n", attn);
3033 if (attn & BNX2X_GRC_TIMEOUT) {
3034 val = CHIP_IS_E1H(bp) ?
3035 REG_RD(bp, MISC_REG_GRC_TIMEOUT_ATTN) : 0;
3036 BNX2X_ERR("GRC time-out 0x%08x\n", val);
3038 if (attn & BNX2X_GRC_RSV) {
3039 val = CHIP_IS_E1H(bp) ?
3040 REG_RD(bp, MISC_REG_GRC_RSV_ATTN) : 0;
3041 BNX2X_ERR("GRC reserved 0x%08x\n", val);
3043 REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff);
3047 static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
3049 struct attn_route attn;
3050 struct attn_route group_mask;
3051 int port = BP_PORT(bp);
3057 /* need to take HW lock because MCP or other port might also
3058 try to handle this event */
3059 bnx2x_acquire_alr(bp);
3061 attn.sig[0] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
3062 attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
3063 attn.sig[2] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
3064 attn.sig[3] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
3065 DP(NETIF_MSG_HW, "attn: %08x %08x %08x %08x\n",
3066 attn.sig[0], attn.sig[1], attn.sig[2], attn.sig[3]);
3068 for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
3069 if (deasserted & (1 << index)) {
3070 group_mask = bp->attn_group[index];
3072 DP(NETIF_MSG_HW, "group[%d]: %08x %08x %08x %08x\n",
3073 index, group_mask.sig[0], group_mask.sig[1],
3074 group_mask.sig[2], group_mask.sig[3]);
3076 bnx2x_attn_int_deasserted3(bp,
3077 attn.sig[3] & group_mask.sig[3]);
3078 bnx2x_attn_int_deasserted1(bp,
3079 attn.sig[1] & group_mask.sig[1]);
3080 bnx2x_attn_int_deasserted2(bp,
3081 attn.sig[2] & group_mask.sig[2]);
3082 bnx2x_attn_int_deasserted0(bp,
3083 attn.sig[0] & group_mask.sig[0]);
3085 if ((attn.sig[0] & group_mask.sig[0] &
3086 HW_PRTY_ASSERT_SET_0) ||
3087 (attn.sig[1] & group_mask.sig[1] &
3088 HW_PRTY_ASSERT_SET_1) ||
3089 (attn.sig[2] & group_mask.sig[2] &
3090 HW_PRTY_ASSERT_SET_2))
3091 BNX2X_ERR("FATAL HW block parity attention\n");
3095 bnx2x_release_alr(bp);
3097 reg_addr = (HC_REG_COMMAND_REG + port*32 + COMMAND_REG_ATTN_BITS_CLR);
3100 DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
3102 REG_WR(bp, reg_addr, val);
3104 if (~bp->attn_state & deasserted)
3105 BNX2X_ERR("IGU ERROR\n");
3107 reg_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
3108 MISC_REG_AEU_MASK_ATTN_FUNC_0;
3110 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
3111 aeu_mask = REG_RD(bp, reg_addr);
3113 DP(NETIF_MSG_HW, "aeu_mask %x newly deasserted %x\n",
3114 aeu_mask, deasserted);
3115 aeu_mask |= (deasserted & 0xff);
3116 DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
3118 REG_WR(bp, reg_addr, aeu_mask);
3119 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
3121 DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
3122 bp->attn_state &= ~deasserted;
3123 DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
3126 static void bnx2x_attn_int(struct bnx2x *bp)
3128 /* read local copy of bits */
3129 u32 attn_bits = le32_to_cpu(bp->def_status_blk->atten_status_block.
3131 u32 attn_ack = le32_to_cpu(bp->def_status_blk->atten_status_block.
3133 u32 attn_state = bp->attn_state;
3135 /* look for changed bits */
3136 u32 asserted = attn_bits & ~attn_ack & ~attn_state;
3137 u32 deasserted = ~attn_bits & attn_ack & attn_state;
3140 "attn_bits %x attn_ack %x asserted %x deasserted %x\n",
3141 attn_bits, attn_ack, asserted, deasserted);
3143 if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state))
3144 BNX2X_ERR("BAD attention state\n");
3146 /* handle bits that were raised */
3148 bnx2x_attn_int_asserted(bp, asserted);
3151 bnx2x_attn_int_deasserted(bp, deasserted);
3154 static void bnx2x_sp_task(struct work_struct *work)
3156 struct bnx2x *bp = container_of(work, struct bnx2x, sp_task.work);
3160 /* Return here if interrupt is disabled */
3161 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
3162 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
3166 status = bnx2x_update_dsb_idx(bp);
3167 /* if (status == 0) */
3168 /* BNX2X_ERR("spurious slowpath interrupt!\n"); */
3170 DP(NETIF_MSG_INTR, "got a slowpath interrupt (updated %x)\n", status);
3176 bnx2x_ack_sb(bp, DEF_SB_ID, ATTENTION_ID, le16_to_cpu(bp->def_att_idx),
3178 bnx2x_ack_sb(bp, DEF_SB_ID, USTORM_ID, le16_to_cpu(bp->def_u_idx),
3180 bnx2x_ack_sb(bp, DEF_SB_ID, CSTORM_ID, le16_to_cpu(bp->def_c_idx),
3182 bnx2x_ack_sb(bp, DEF_SB_ID, XSTORM_ID, le16_to_cpu(bp->def_x_idx),
3184 bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, le16_to_cpu(bp->def_t_idx),
3189 static irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
3191 struct net_device *dev = dev_instance;
3192 struct bnx2x *bp = netdev_priv(dev);
3194 /* Return here if interrupt is disabled */
3195 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
3196 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
3200 bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, 0, IGU_INT_DISABLE, 0);
3202 #ifdef BNX2X_STOP_ON_ERROR
3203 if (unlikely(bp->panic))
3207 queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
3212 /* end of slow path */
3216 /****************************************************************************
3217 * Macros
3218 ****************************************************************************/
3220 /* sum[hi:lo] += add[hi:lo] */
3221 #define ADD_64(s_hi, a_hi, s_lo, a_lo) \
3223 s_lo += a_lo; \
3224 s_hi += a_hi + ((s_lo < a_lo) ? 1 : 0); \
3227 /* difference = minuend - subtrahend */
3228 #define DIFF_64(d_hi, m_hi, s_hi, d_lo, m_lo, s_lo) \
3230 if (m_lo < s_lo) { \
3232 d_hi = m_hi - s_hi; \
3234 /* we can 'borrow' 1 */ \
3236 d_lo = m_lo + (UINT_MAX - s_lo) + 1; \
3238 /* m_hi <= s_hi */ \
3243 /* m_lo >= s_lo */ \
3244 if (m_hi < s_hi) { \
3248 /* m_hi >= s_hi */ \
3249 d_hi = m_hi - s_hi; \
3250 d_lo = m_lo - s_lo; \
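/* Example (hypothetical values): DIFF_64 borrows across the 32-bit halves
   like schoolbook subtraction, so 0x1:0x00000000 minus 0x0:0x00000001
   yields 0x0:0xffffffff, while a true underflow saturates to 0:0 instead
   of wrapping.  ADD_64 carries the other way: 0x0:0xffffffff plus
   0x0:0x00000001 yields 0x1:0x00000000 */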
3255 #define UPDATE_STAT64(s, t) \
3257 DIFF_64(diff.hi, new->s##_hi, pstats->mac_stx[0].t##_hi, \
3258 diff.lo, new->s##_lo, pstats->mac_stx[0].t##_lo); \
3259 pstats->mac_stx[0].t##_hi = new->s##_hi; \
3260 pstats->mac_stx[0].t##_lo = new->s##_lo; \
3261 ADD_64(pstats->mac_stx[1].t##_hi, diff.hi, \
3262 pstats->mac_stx[1].t##_lo, diff.lo); \
3265 #define UPDATE_STAT64_NIG(s, t) \
3267 DIFF_64(diff.hi, new->s##_hi, old->s##_hi, \
3268 diff.lo, new->s##_lo, old->s##_lo); \
3269 ADD_64(estats->t##_hi, diff.hi, \
3270 estats->t##_lo, diff.lo); \
3273 /* sum[hi:lo] += add */
3274 #define ADD_EXTEND_64(s_hi, s_lo, a) \
3276 s_lo += a; \
3277 s_hi += (s_lo < a) ? 1 : 0; \
3280 #define UPDATE_EXTEND_STAT(s) \
3282 ADD_EXTEND_64(pstats->mac_stx[1].s##_hi, \
3283 pstats->mac_stx[1].s##_lo, \
3287 #define UPDATE_EXTEND_TSTAT(s, t) \
3289 diff = le32_to_cpu(tclient->s) - le32_to_cpu(old_tclient->s); \
3290 old_tclient->s = tclient->s; \
3291 ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3294 #define UPDATE_EXTEND_USTAT(s, t) \
3296 diff = le32_to_cpu(uclient->s) - le32_to_cpu(old_uclient->s); \
3297 old_uclient->s = uclient->s; \
3298 ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3301 #define UPDATE_EXTEND_XSTAT(s, t) \
3303 diff = le32_to_cpu(xclient->s) - le32_to_cpu(old_xclient->s); \
3304 old_xclient->s = xclient->s; \
3305 ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
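/* The UPDATE_EXTEND_* macros above turn free-running 32-bit storm
   counters into 64-bit statistics: the unsigned subtraction new - old
   yields the correct delta even across a single 32-bit wrap, and
   ADD_EXTEND_64 folds that delta into the hi/lo pair */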
3308 /* minuend -= subtrahend */
3309 #define SUB_64(m_hi, s_hi, m_lo, s_lo) \
3311 DIFF_64(m_hi, m_hi, s_hi, m_lo, m_lo, s_lo); \
3314 /* minuend[hi:lo] -= subtrahend */
3315 #define SUB_EXTEND_64(m_hi, m_lo, s) \
3317 SUB_64(m_hi, 0, m_lo, s); \
3320 #define SUB_EXTEND_USTAT(s, t) \
3322 diff = le32_to_cpu(uclient->s) - le32_to_cpu(old_uclient->s); \
3323 SUB_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3327 * General service functions
3330 static inline long bnx2x_hilo(u32 *hiref)
3332 u32 lo = *(hiref + 1);
3333 #if (BITS_PER_LONG == 64)
3336 return HILO_U64(hi, lo);
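/* Note: lo is read from hiref + 1, i.e. this relies on the _hi and _lo
   struct members being laid out back to back; on 64-bit builds HILO_U64
   is assumed to compose ((u64)hi << 32) + lo into a single long */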
3343 * Init service functions
3346 static void bnx2x_storm_stats_post(struct bnx2x *bp)
3348 if (!bp->stats_pending) {
3349 struct eth_query_ramrod_data ramrod_data = {0};
3352 ramrod_data.drv_counter = bp->stats_counter++;
3353 ramrod_data.collect_port = bp->port.pmf ? 1 : 0;
3354 for_each_queue(bp, i)
3355 ramrod_data.ctr_id_vector |= (1 << bp->fp[i].cl_id);
3357 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_STAT_QUERY, 0,
3358 ((u32 *)&ramrod_data)[1],
3359 ((u32 *)&ramrod_data)[0], 0);
3361 /* stats ramrod has its own slot on the spq */
3363 bp->stats_pending = 1;
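/* Example (hypothetical client ids): with four queues whose cl_id values
   are 0..3, ctr_id_vector becomes 0x0f, telling the storms which
   per-client statistics blocks to fill for this query */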
3368 static void bnx2x_hw_stats_post(struct bnx2x *bp)
3370 struct dmae_command *dmae = &bp->stats_dmae;
3371 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3373 *stats_comp = DMAE_COMP_VAL;
3374 if (CHIP_REV_IS_SLOW(bp))
3378 if (bp->executer_idx) {
3379 int loader_idx = PMF_DMAE_C(bp);
3381 memset(dmae, 0, sizeof(struct dmae_command));
3383 dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3384 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3385 DMAE_CMD_DST_RESET |
3387 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3389 DMAE_CMD_ENDIANITY_DW_SWAP |
3391 (BP_PORT(bp) ? DMAE_CMD_PORT_1 :
3393 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3394 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, dmae[0]));
3395 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, dmae[0]));
3396 dmae->dst_addr_lo = (DMAE_REG_CMD_MEM +
3397 sizeof(struct dmae_command) *
3398 (loader_idx + 1)) >> 2;
3399 dmae->dst_addr_hi = 0;
3400 dmae->len = sizeof(struct dmae_command) >> 2;
3403 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx + 1] >> 2;
3404 dmae->comp_addr_hi = 0;
3408 bnx2x_post_dmae(bp, dmae, loader_idx);
3410 } else if (bp->func_stx) {
3412 bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
3416 static int bnx2x_stats_comp(struct bnx2x *bp)
3418 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3422 while (*stats_comp != DMAE_COMP_VAL) {
3424 BNX2X_ERR("timeout waiting for stats finished\n");
3434 * Statistics service functions
3437 static void bnx2x_stats_pmf_update(struct bnx2x *bp)
3439 struct dmae_command *dmae;
3441 int loader_idx = PMF_DMAE_C(bp);
3442 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3445 if (!IS_E1HMF(bp) || !bp->port.pmf || !bp->port.port_stx) {
3446 BNX2X_ERR("BUG!\n");
3450 bp->executer_idx = 0;
3452 opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3454 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3456 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3458 DMAE_CMD_ENDIANITY_DW_SWAP |
3460 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3461 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3463 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3464 dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
3465 dmae->src_addr_lo = bp->port.port_stx >> 2;
3466 dmae->src_addr_hi = 0;
3467 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3468 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3469 dmae->len = DMAE_LEN32_RD_MAX;
3470 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3471 dmae->comp_addr_hi = 0;
3474 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3475 dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
3476 dmae->src_addr_lo = (bp->port.port_stx >> 2) + DMAE_LEN32_RD_MAX;
3477 dmae->src_addr_hi = 0;
3478 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats) +
3479 DMAE_LEN32_RD_MAX * 4);
3480 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats) +
3481 DMAE_LEN32_RD_MAX * 4);
3482 dmae->len = (sizeof(struct host_port_stats) >> 2) - DMAE_LEN32_RD_MAX;
3483 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3484 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3485 dmae->comp_val = DMAE_COMP_VAL;
3488 bnx2x_hw_stats_post(bp);
3489 bnx2x_stats_comp(bp);
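/* The port stats block is wider than one DMAE read allows, so it is
   fetched in two legs: the first reads DMAE_LEN32_RD_MAX dwords and
   completes into the GRC loader, the second reads the remainder and
   completes to stats_comp, which bnx2x_stats_comp() polls for
   DMAE_COMP_VAL */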
3492 static void bnx2x_port_stats_init(struct bnx2x *bp)
3494 struct dmae_command *dmae;
3495 int port = BP_PORT(bp);
3496 int vn = BP_E1HVN(bp);
3498 int loader_idx = PMF_DMAE_C(bp);
3500 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3503 if (!bp->link_vars.link_up || !bp->port.pmf) {
3504 BNX2X_ERR("BUG!\n");
3508 bp->executer_idx = 0;
3511 opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3512 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3513 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3515 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3517 DMAE_CMD_ENDIANITY_DW_SWAP |
3519 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3520 (vn << DMAE_CMD_E1HVN_SHIFT));
3522 if (bp->port.port_stx) {
3524 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3525 dmae->opcode = opcode;
3526 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3527 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3528 dmae->dst_addr_lo = bp->port.port_stx >> 2;
3529 dmae->dst_addr_hi = 0;
3530 dmae->len = sizeof(struct host_port_stats) >> 2;
3531 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3532 dmae->comp_addr_hi = 0;
3538 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3539 dmae->opcode = opcode;
3540 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
3541 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
3542 dmae->dst_addr_lo = bp->func_stx >> 2;
3543 dmae->dst_addr_hi = 0;
3544 dmae->len = sizeof(struct host_func_stats) >> 2;
3545 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3546 dmae->comp_addr_hi = 0;
3551 opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3552 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3553 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3555 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3557 DMAE_CMD_ENDIANITY_DW_SWAP |
3559 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3560 (vn << DMAE_CMD_E1HVN_SHIFT));
3562 if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
3564 mac_addr = (port ? NIG_REG_INGRESS_BMAC1_MEM :
3565 NIG_REG_INGRESS_BMAC0_MEM);
3567 /* BIGMAC_REGISTER_TX_STAT_GTPKT ..
3568 BIGMAC_REGISTER_TX_STAT_GTBYT */
3569 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3570 dmae->opcode = opcode;
3571 dmae->src_addr_lo = (mac_addr +
3572 BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
3573 dmae->src_addr_hi = 0;
3574 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
3575 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
3576 dmae->len = (8 + BIGMAC_REGISTER_TX_STAT_GTBYT -
3577 BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
3578 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3579 dmae->comp_addr_hi = 0;
3582 /* BIGMAC_REGISTER_RX_STAT_GR64 ..
3583 BIGMAC_REGISTER_RX_STAT_GRIPJ */
3584 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3585 dmae->opcode = opcode;
3586 dmae->src_addr_lo = (mac_addr +
3587 BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
3588 dmae->src_addr_hi = 0;
3589 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
3590 offsetof(struct bmac_stats, rx_stat_gr64_lo));
3591 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
3592 offsetof(struct bmac_stats, rx_stat_gr64_lo));
3593 dmae->len = (8 + BIGMAC_REGISTER_RX_STAT_GRIPJ -
3594 BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
3595 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3596 dmae->comp_addr_hi = 0;
3599 } else if (bp->link_vars.mac_type == MAC_TYPE_EMAC) {
3601 mac_addr = (port ? GRCBASE_EMAC1 : GRCBASE_EMAC0);
3603 /* EMAC_REG_EMAC_RX_STAT_AC (EMAC_REG_EMAC_RX_STAT_AC_COUNT)*/
3604 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3605 dmae->opcode = opcode;
3606 dmae->src_addr_lo = (mac_addr +
3607 EMAC_REG_EMAC_RX_STAT_AC) >> 2;
3608 dmae->src_addr_hi = 0;
3609 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
3610 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
3611 dmae->len = EMAC_REG_EMAC_RX_STAT_AC_COUNT;
3612 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3613 dmae->comp_addr_hi = 0;
3616 /* EMAC_REG_EMAC_RX_STAT_AC_28 */
3617 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3618 dmae->opcode = opcode;
3619 dmae->src_addr_lo = (mac_addr +
3620 EMAC_REG_EMAC_RX_STAT_AC_28) >> 2;
3621 dmae->src_addr_hi = 0;
3622 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
3623 offsetof(struct emac_stats, rx_stat_falsecarriererrors));
3624 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
3625 offsetof(struct emac_stats, rx_stat_falsecarriererrors));
3627 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3628 dmae->comp_addr_hi = 0;
3631 /* EMAC_REG_EMAC_TX_STAT_AC (EMAC_REG_EMAC_TX_STAT_AC_COUNT)*/
3632 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3633 dmae->opcode = opcode;
3634 dmae->src_addr_lo = (mac_addr +
3635 EMAC_REG_EMAC_TX_STAT_AC) >> 2;
3636 dmae->src_addr_hi = 0;
3637 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
3638 offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
3639 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
3640 offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
3641 dmae->len = EMAC_REG_EMAC_TX_STAT_AC_COUNT;
3642 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3643 dmae->comp_addr_hi = 0;
3648 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3649 dmae->opcode = opcode;
3650 dmae->src_addr_lo = (port ? NIG_REG_STAT1_BRB_DISCARD :
3651 NIG_REG_STAT0_BRB_DISCARD) >> 2;
3652 dmae->src_addr_hi = 0;
3653 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats));
3654 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats));
3655 dmae->len = (sizeof(struct nig_stats) - 4*sizeof(u32)) >> 2;
3656 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3657 dmae->comp_addr_hi = 0;
3660 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3661 dmae->opcode = opcode;
3662 dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT0 :
3663 NIG_REG_STAT0_EGRESS_MAC_PKT0) >> 2;
3664 dmae->src_addr_hi = 0;
3665 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
3666 offsetof(struct nig_stats, egress_mac_pkt0_lo));
3667 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
3668 offsetof(struct nig_stats, egress_mac_pkt0_lo));
3669 dmae->len = (2*sizeof(u32)) >> 2;
3670 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3671 dmae->comp_addr_hi = 0;
3674 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3675 dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3676 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
3677 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3679 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3681 DMAE_CMD_ENDIANITY_DW_SWAP |
3683 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3684 (vn << DMAE_CMD_E1HVN_SHIFT));
3685 dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT1 :
3686 NIG_REG_STAT0_EGRESS_MAC_PKT1) >> 2;
3687 dmae->src_addr_hi = 0;
3688 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
3689 offsetof(struct nig_stats, egress_mac_pkt1_lo));
3690 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
3691 offsetof(struct nig_stats, egress_mac_pkt1_lo));
3692 dmae->len = (2*sizeof(u32)) >> 2;
3693 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3694 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3695 dmae->comp_val = DMAE_COMP_VAL;
3700 static void bnx2x_func_stats_init(struct bnx2x *bp)
3702 struct dmae_command *dmae = &bp->stats_dmae;
3703 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3706 if (!bp->func_stx) {
3707 BNX2X_ERR("BUG!\n");
3711 bp->executer_idx = 0;
3712 memset(dmae, 0, sizeof(struct dmae_command));
3714 dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3715 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
3716 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3718 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3720 DMAE_CMD_ENDIANITY_DW_SWAP |
3722 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3723 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3724 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
3725 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
3726 dmae->dst_addr_lo = bp->func_stx >> 2;
3727 dmae->dst_addr_hi = 0;
3728 dmae->len = sizeof(struct host_func_stats) >> 2;
3729 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3730 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3731 dmae->comp_val = DMAE_COMP_VAL;
3736 static void bnx2x_stats_start(struct bnx2x *bp)
3739 bnx2x_port_stats_init(bp);
3741 else if (bp->func_stx)
3742 bnx2x_func_stats_init(bp);
3744 bnx2x_hw_stats_post(bp);
3745 bnx2x_storm_stats_post(bp);
3748 static void bnx2x_stats_pmf_start(struct bnx2x *bp)
3750 bnx2x_stats_comp(bp);
3751 bnx2x_stats_pmf_update(bp);
3752 bnx2x_stats_start(bp);
3755 static void bnx2x_stats_restart(struct bnx2x *bp)
3757 bnx2x_stats_comp(bp);
3758 bnx2x_stats_start(bp);
3761 static void bnx2x_bmac_stats_update(struct bnx2x *bp)
3763 struct bmac_stats *new = bnx2x_sp(bp, mac_stats.bmac_stats);
3764 struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3765 struct bnx2x_eth_stats *estats = &bp->eth_stats;
3771 UPDATE_STAT64(rx_stat_grerb, rx_stat_ifhcinbadoctets);
3772 UPDATE_STAT64(rx_stat_grfcs, rx_stat_dot3statsfcserrors);
3773 UPDATE_STAT64(rx_stat_grund, rx_stat_etherstatsundersizepkts);
3774 UPDATE_STAT64(rx_stat_grovr, rx_stat_dot3statsframestoolong);
3775 UPDATE_STAT64(rx_stat_grfrg, rx_stat_etherstatsfragments);
3776 UPDATE_STAT64(rx_stat_grjbr, rx_stat_etherstatsjabbers);
3777 UPDATE_STAT64(rx_stat_grxcf, rx_stat_maccontrolframesreceived);
3778 UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffstateentered);
3779 UPDATE_STAT64(rx_stat_grxpf, rx_stat_bmac_xpf);
3780 UPDATE_STAT64(tx_stat_gtxpf, tx_stat_outxoffsent);
3781 UPDATE_STAT64(tx_stat_gtxpf, tx_stat_flowcontroldone);
3782 UPDATE_STAT64(tx_stat_gt64, tx_stat_etherstatspkts64octets);
3783 UPDATE_STAT64(tx_stat_gt127,
3784 tx_stat_etherstatspkts65octetsto127octets);
3785 UPDATE_STAT64(tx_stat_gt255,
3786 tx_stat_etherstatspkts128octetsto255octets);
3787 UPDATE_STAT64(tx_stat_gt511,
3788 tx_stat_etherstatspkts256octetsto511octets);
3789 UPDATE_STAT64(tx_stat_gt1023,
3790 tx_stat_etherstatspkts512octetsto1023octets);
3791 UPDATE_STAT64(tx_stat_gt1518,
3792 tx_stat_etherstatspkts1024octetsto1522octets);
3793 UPDATE_STAT64(tx_stat_gt2047, tx_stat_bmac_2047);
3794 UPDATE_STAT64(tx_stat_gt4095, tx_stat_bmac_4095);
3795 UPDATE_STAT64(tx_stat_gt9216, tx_stat_bmac_9216);
3796 UPDATE_STAT64(tx_stat_gt16383, tx_stat_bmac_16383);
3797 UPDATE_STAT64(tx_stat_gterr,
3798 tx_stat_dot3statsinternalmactransmiterrors);
3799 UPDATE_STAT64(tx_stat_gtufl, tx_stat_bmac_ufl);
3801 estats->pause_frames_received_hi =
3802 pstats->mac_stx[1].rx_stat_bmac_xpf_hi;
3803 estats->pause_frames_received_lo =
3804 pstats->mac_stx[1].rx_stat_bmac_xpf_lo;
3806 estats->pause_frames_sent_hi =
3807 pstats->mac_stx[1].tx_stat_outxoffsent_hi;
3808 estats->pause_frames_sent_lo =
3809 pstats->mac_stx[1].tx_stat_outxoffsent_lo;
3812 static void bnx2x_emac_stats_update(struct bnx2x *bp)
3814 struct emac_stats *new = bnx2x_sp(bp, mac_stats.emac_stats);
3815 struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3816 struct bnx2x_eth_stats *estats = &bp->eth_stats;
3818 UPDATE_EXTEND_STAT(rx_stat_ifhcinbadoctets);
3819 UPDATE_EXTEND_STAT(tx_stat_ifhcoutbadoctets);
3820 UPDATE_EXTEND_STAT(rx_stat_dot3statsfcserrors);
3821 UPDATE_EXTEND_STAT(rx_stat_dot3statsalignmenterrors);
3822 UPDATE_EXTEND_STAT(rx_stat_dot3statscarriersenseerrors);
3823 UPDATE_EXTEND_STAT(rx_stat_falsecarriererrors);
3824 UPDATE_EXTEND_STAT(rx_stat_etherstatsundersizepkts);
3825 UPDATE_EXTEND_STAT(rx_stat_dot3statsframestoolong);
3826 UPDATE_EXTEND_STAT(rx_stat_etherstatsfragments);
3827 UPDATE_EXTEND_STAT(rx_stat_etherstatsjabbers);
3828 UPDATE_EXTEND_STAT(rx_stat_maccontrolframesreceived);
3829 UPDATE_EXTEND_STAT(rx_stat_xoffstateentered);
3830 UPDATE_EXTEND_STAT(rx_stat_xonpauseframesreceived);
3831 UPDATE_EXTEND_STAT(rx_stat_xoffpauseframesreceived);
3832 UPDATE_EXTEND_STAT(tx_stat_outxonsent);
3833 UPDATE_EXTEND_STAT(tx_stat_outxoffsent);
3834 UPDATE_EXTEND_STAT(tx_stat_flowcontroldone);
3835 UPDATE_EXTEND_STAT(tx_stat_etherstatscollisions);
3836 UPDATE_EXTEND_STAT(tx_stat_dot3statssinglecollisionframes);
3837 UPDATE_EXTEND_STAT(tx_stat_dot3statsmultiplecollisionframes);
3838 UPDATE_EXTEND_STAT(tx_stat_dot3statsdeferredtransmissions);
3839 UPDATE_EXTEND_STAT(tx_stat_dot3statsexcessivecollisions);
3840 UPDATE_EXTEND_STAT(tx_stat_dot3statslatecollisions);
3841 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts64octets);
3842 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts65octetsto127octets);
3843 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts128octetsto255octets);
3844 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts256octetsto511octets);
3845 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts512octetsto1023octets);
3846 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts1024octetsto1522octets);
3847 UPDATE_EXTEND_STAT(tx_stat_etherstatspktsover1522octets);
3848 UPDATE_EXTEND_STAT(tx_stat_dot3statsinternalmactransmiterrors);
3850 estats->pause_frames_received_hi =
3851 pstats->mac_stx[1].rx_stat_xonpauseframesreceived_hi;
3852 estats->pause_frames_received_lo =
3853 pstats->mac_stx[1].rx_stat_xonpauseframesreceived_lo;
3854 ADD_64(estats->pause_frames_received_hi,
3855 pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_hi,
3856 estats->pause_frames_received_lo,
3857 pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_lo);
3859 estats->pause_frames_sent_hi =
3860 pstats->mac_stx[1].tx_stat_outxonsent_hi;
3861 estats->pause_frames_sent_lo =
3862 pstats->mac_stx[1].tx_stat_outxonsent_lo;
3863 ADD_64(estats->pause_frames_sent_hi,
3864 pstats->mac_stx[1].tx_stat_outxoffsent_hi,
3865 estats->pause_frames_sent_lo,
3866 pstats->mac_stx[1].tx_stat_outxoffsent_lo);
3869 static int bnx2x_hw_stats_update(struct bnx2x *bp)
3871 struct nig_stats *new = bnx2x_sp(bp, nig_stats);
3872 struct nig_stats *old = &(bp->port.old_nig_stats);
3873 struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3874 struct bnx2x_eth_stats *estats = &bp->eth_stats;
3881 if (bp->link_vars.mac_type == MAC_TYPE_BMAC)
3882 bnx2x_bmac_stats_update(bp);
3884 else if (bp->link_vars.mac_type == MAC_TYPE_EMAC)
3885 bnx2x_emac_stats_update(bp);
3887 else { /* unreached */
3888 BNX2X_ERR("stats updated by DMAE but no MAC active\n");
3892 ADD_EXTEND_64(pstats->brb_drop_hi, pstats->brb_drop_lo,
3893 new->brb_discard - old->brb_discard);
3894 ADD_EXTEND_64(estats->brb_truncate_hi, estats->brb_truncate_lo,
3895 new->brb_truncate - old->brb_truncate);
3897 UPDATE_STAT64_NIG(egress_mac_pkt0,
3898 etherstatspkts1024octetsto1522octets);
3899 UPDATE_STAT64_NIG(egress_mac_pkt1, etherstatspktsover1522octets);
3901 memcpy(old, new, sizeof(struct nig_stats));
3903 memcpy(&(estats->rx_stat_ifhcinbadoctets_hi), &(pstats->mac_stx[1]),
3904 sizeof(struct mac_stx));
3905 estats->brb_drop_hi = pstats->brb_drop_hi;
3906 estats->brb_drop_lo = pstats->brb_drop_lo;
3908 pstats->host_port_stats_start = ++pstats->host_port_stats_end;
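/* Writing the same incremented value to host_port_stats_start and _end
   (first and last fields of the block) presumably lets a consumer of the
   DMAE'd copy detect a torn snapshot when the two disagree, much like a
   seqlock */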
3910 nig_timer_max = SHMEM_RD(bp, port_mb[BP_PORT(bp)].stat_nig_timer);
3911 if (nig_timer_max != estats->nig_timer_max) {
3912 estats->nig_timer_max = nig_timer_max;
3913 BNX2X_ERR("NIG timer max (%u)\n", estats->nig_timer_max);
3919 static int bnx2x_storm_stats_update(struct bnx2x *bp)
3921 struct eth_stats_query *stats = bnx2x_sp(bp, fw_stats);
3922 struct tstorm_per_port_stats *tport =
3923 &stats->tstorm_common.port_statistics;
3924 struct host_func_stats *fstats = bnx2x_sp(bp, func_stats);
3925 struct bnx2x_eth_stats *estats = &bp->eth_stats;
3928 memcpy(&(fstats->total_bytes_received_hi),
3929 &(bnx2x_sp(bp, func_stats_base)->total_bytes_received_hi),
3930 sizeof(struct host_func_stats) - 2*sizeof(u32));
3931 estats->error_bytes_received_hi = 0;
3932 estats->error_bytes_received_lo = 0;
3933 estats->etherstatsoverrsizepkts_hi = 0;
3934 estats->etherstatsoverrsizepkts_lo = 0;
3935 estats->no_buff_discard_hi = 0;
3936 estats->no_buff_discard_lo = 0;
3938 for_each_rx_queue(bp, i) {
3939 struct bnx2x_fastpath *fp = &bp->fp[i];
3940 int cl_id = fp->cl_id;
3941 struct tstorm_per_client_stats *tclient =
3942 &stats->tstorm_common.client_statistics[cl_id];
3943 struct tstorm_per_client_stats *old_tclient = &fp->old_tclient;
3944 struct ustorm_per_client_stats *uclient =
3945 &stats->ustorm_common.client_statistics[cl_id];
3946 struct ustorm_per_client_stats *old_uclient = &fp->old_uclient;
3947 struct xstorm_per_client_stats *xclient =
3948 &stats->xstorm_common.client_statistics[cl_id];
3949 struct xstorm_per_client_stats *old_xclient = &fp->old_xclient;
3950 struct bnx2x_eth_q_stats *qstats = &fp->eth_q_stats;
3953 /* are storm stats valid? */
3954 if ((u16)(le16_to_cpu(xclient->stats_counter) + 1) !=
3955 bp->stats_counter) {
3956 DP(BNX2X_MSG_STATS, "[%d] stats not updated by xstorm"
3957 " xstorm counter (%d) != stats_counter (%d)\n",
3958 i, xclient->stats_counter, bp->stats_counter);
3961 if ((u16)(le16_to_cpu(tclient->stats_counter) + 1) !=
3962 bp->stats_counter) {
3963 DP(BNX2X_MSG_STATS, "[%d] stats not updated by tstorm"
3964 " tstorm counter (%d) != stats_counter (%d)\n",
3965 i, tclient->stats_counter, bp->stats_counter);
3968 if ((u16)(le16_to_cpu(uclient->stats_counter) + 1) !=
3969 bp->stats_counter) {
3970 DP(BNX2X_MSG_STATS, "[%d] stats not updated by ustorm"
3971 " ustorm counter (%d) != stats_counter (%d)\n",
3972 i, uclient->stats_counter, bp->stats_counter);
3976 qstats->total_bytes_received_hi =
3977 le32_to_cpu(tclient->rcv_broadcast_bytes.hi);
3978 qstats->total_bytes_received_lo =
3979 le32_to_cpu(tclient->rcv_broadcast_bytes.lo);
3981 ADD_64(qstats->total_bytes_received_hi,
3982 le32_to_cpu(tclient->rcv_multicast_bytes.hi),
3983 qstats->total_bytes_received_lo,
3984 le32_to_cpu(tclient->rcv_multicast_bytes.lo));
3986 ADD_64(qstats->total_bytes_received_hi,
3987 le32_to_cpu(tclient->rcv_unicast_bytes.hi),
3988 qstats->total_bytes_received_lo,
3989 le32_to_cpu(tclient->rcv_unicast_bytes.lo));
3991 qstats->valid_bytes_received_hi =
3992 qstats->total_bytes_received_hi;
3993 qstats->valid_bytes_received_lo =
3994 qstats->total_bytes_received_lo;
3996 qstats->error_bytes_received_hi =
3997 le32_to_cpu(tclient->rcv_error_bytes.hi);
3998 qstats->error_bytes_received_lo =
3999 le32_to_cpu(tclient->rcv_error_bytes.lo);
4001 ADD_64(qstats->total_bytes_received_hi,
4002 qstats->error_bytes_received_hi,
4003 qstats->total_bytes_received_lo,
4004 qstats->error_bytes_received_lo);
4006 UPDATE_EXTEND_TSTAT(rcv_unicast_pkts,
4007 total_unicast_packets_received);
4008 UPDATE_EXTEND_TSTAT(rcv_multicast_pkts,
4009 total_multicast_packets_received);
4010 UPDATE_EXTEND_TSTAT(rcv_broadcast_pkts,
4011 total_broadcast_packets_received);
4012 UPDATE_EXTEND_TSTAT(packets_too_big_discard,
4013 etherstatsoverrsizepkts);
4014 UPDATE_EXTEND_TSTAT(no_buff_discard, no_buff_discard);
4016 SUB_EXTEND_USTAT(ucast_no_buff_pkts,
4017 total_unicast_packets_received);
4018 SUB_EXTEND_USTAT(mcast_no_buff_pkts,
4019 total_multicast_packets_received);
4020 SUB_EXTEND_USTAT(bcast_no_buff_pkts,
4021 total_broadcast_packets_received);
4022 UPDATE_EXTEND_USTAT(ucast_no_buff_pkts, no_buff_discard);
4023 UPDATE_EXTEND_USTAT(mcast_no_buff_pkts, no_buff_discard);
4024 UPDATE_EXTEND_USTAT(bcast_no_buff_pkts, no_buff_discard);
4026 qstats->total_bytes_transmitted_hi =
4027 le32_to_cpu(xclient->unicast_bytes_sent.hi);
4028 qstats->total_bytes_transmitted_lo =
4029 le32_to_cpu(xclient->unicast_bytes_sent.lo);
4031 ADD_64(qstats->total_bytes_transmitted_hi,
4032 le32_to_cpu(xclient->multicast_bytes_sent.hi),
4033 qstats->total_bytes_transmitted_lo,
4034 le32_to_cpu(xclient->multicast_bytes_sent.lo));
4036 ADD_64(qstats->total_bytes_transmitted_hi,
4037 le32_to_cpu(xclient->broadcast_bytes_sent.hi),
4038 qstats->total_bytes_transmitted_lo,
4039 le32_to_cpu(xclient->broadcast_bytes_sent.lo));
4041 UPDATE_EXTEND_XSTAT(unicast_pkts_sent,
4042 total_unicast_packets_transmitted);
4043 UPDATE_EXTEND_XSTAT(multicast_pkts_sent,
4044 total_multicast_packets_transmitted);
4045 UPDATE_EXTEND_XSTAT(broadcast_pkts_sent,
4046 total_broadcast_packets_transmitted);
4048 old_tclient->checksum_discard = tclient->checksum_discard;
4049 old_tclient->ttl0_discard = tclient->ttl0_discard;
4051 ADD_64(fstats->total_bytes_received_hi,
4052 qstats->total_bytes_received_hi,
4053 fstats->total_bytes_received_lo,
4054 qstats->total_bytes_received_lo);
4055 ADD_64(fstats->total_bytes_transmitted_hi,
4056 qstats->total_bytes_transmitted_hi,
4057 fstats->total_bytes_transmitted_lo,
4058 qstats->total_bytes_transmitted_lo);
4059 ADD_64(fstats->total_unicast_packets_received_hi,
4060 qstats->total_unicast_packets_received_hi,
4061 fstats->total_unicast_packets_received_lo,
4062 qstats->total_unicast_packets_received_lo);
4063 ADD_64(fstats->total_multicast_packets_received_hi,
4064 qstats->total_multicast_packets_received_hi,
4065 fstats->total_multicast_packets_received_lo,
4066 qstats->total_multicast_packets_received_lo);
4067 ADD_64(fstats->total_broadcast_packets_received_hi,
4068 qstats->total_broadcast_packets_received_hi,
4069 fstats->total_broadcast_packets_received_lo,
4070 qstats->total_broadcast_packets_received_lo);
4071 ADD_64(fstats->total_unicast_packets_transmitted_hi,
4072 qstats->total_unicast_packets_transmitted_hi,
4073 fstats->total_unicast_packets_transmitted_lo,
4074 qstats->total_unicast_packets_transmitted_lo);
4075 ADD_64(fstats->total_multicast_packets_transmitted_hi,
4076 qstats->total_multicast_packets_transmitted_hi,
4077 fstats->total_multicast_packets_transmitted_lo,
4078 qstats->total_multicast_packets_transmitted_lo);
4079 ADD_64(fstats->total_broadcast_packets_transmitted_hi,
4080 qstats->total_broadcast_packets_transmitted_hi,
4081 fstats->total_broadcast_packets_transmitted_lo,
4082 qstats->total_broadcast_packets_transmitted_lo);
4083 ADD_64(fstats->valid_bytes_received_hi,
4084 qstats->valid_bytes_received_hi,
4085 fstats->valid_bytes_received_lo,
4086 qstats->valid_bytes_received_lo);
4088 ADD_64(estats->error_bytes_received_hi,
4089 qstats->error_bytes_received_hi,
4090 estats->error_bytes_received_lo,
4091 qstats->error_bytes_received_lo);
4092 ADD_64(estats->etherstatsoverrsizepkts_hi,
4093 qstats->etherstatsoverrsizepkts_hi,
4094 estats->etherstatsoverrsizepkts_lo,
4095 qstats->etherstatsoverrsizepkts_lo);
4096 ADD_64(estats->no_buff_discard_hi, qstats->no_buff_discard_hi,
4097 estats->no_buff_discard_lo, qstats->no_buff_discard_lo);
4100 ADD_64(fstats->total_bytes_received_hi,
4101 estats->rx_stat_ifhcinbadoctets_hi,
4102 fstats->total_bytes_received_lo,
4103 estats->rx_stat_ifhcinbadoctets_lo);
4105 memcpy(estats, &(fstats->total_bytes_received_hi),
4106 sizeof(struct host_func_stats) - 2*sizeof(u32));
4108 ADD_64(estats->etherstatsoverrsizepkts_hi,
4109 estats->rx_stat_dot3statsframestoolong_hi,
4110 estats->etherstatsoverrsizepkts_lo,
4111 estats->rx_stat_dot3statsframestoolong_lo);
4112 ADD_64(estats->error_bytes_received_hi,
4113 estats->rx_stat_ifhcinbadoctets_hi,
4114 estats->error_bytes_received_lo,
4115 estats->rx_stat_ifhcinbadoctets_lo);
4118 estats->mac_filter_discard =
4119 le32_to_cpu(tport->mac_filter_discard);
4120 estats->xxoverflow_discard =
4121 le32_to_cpu(tport->xxoverflow_discard);
4122 estats->brb_truncate_discard =
4123 le32_to_cpu(tport->brb_truncate_discard);
4124 estats->mac_discard = le32_to_cpu(tport->mac_discard);
4127 fstats->host_func_stats_start = ++fstats->host_func_stats_end;
4129 bp->stats_pending = 0;
4134 static void bnx2x_net_stats_update(struct bnx2x *bp)
4136 struct bnx2x_eth_stats *estats = &bp->eth_stats;
4137 struct net_device_stats *nstats = &bp->dev->stats;
4140 nstats->rx_packets =
4141 bnx2x_hilo(&estats->total_unicast_packets_received_hi) +
4142 bnx2x_hilo(&estats->total_multicast_packets_received_hi) +
4143 bnx2x_hilo(&estats->total_broadcast_packets_received_hi);
4145 nstats->tx_packets =
4146 bnx2x_hilo(&estats->total_unicast_packets_transmitted_hi) +
4147 bnx2x_hilo(&estats->total_multicast_packets_transmitted_hi) +
4148 bnx2x_hilo(&estats->total_broadcast_packets_transmitted_hi);
4150 nstats->rx_bytes = bnx2x_hilo(&estats->total_bytes_received_hi);
4152 nstats->tx_bytes = bnx2x_hilo(&estats->total_bytes_transmitted_hi);
4154 nstats->rx_dropped = estats->mac_discard;
4155 for_each_rx_queue(bp, i)
4156 nstats->rx_dropped +=
4157 le32_to_cpu(bp->fp[i].old_tclient.checksum_discard);
4159 nstats->tx_dropped = 0;
4162 bnx2x_hilo(&estats->total_multicast_packets_received_hi);
4164 nstats->collisions =
4165 bnx2x_hilo(&estats->tx_stat_etherstatscollisions_hi);
4167 nstats->rx_length_errors =
4168 bnx2x_hilo(&estats->rx_stat_etherstatsundersizepkts_hi) +
4169 bnx2x_hilo(&estats->etherstatsoverrsizepkts_hi);
4170 nstats->rx_over_errors = bnx2x_hilo(&estats->brb_drop_hi) +
4171 bnx2x_hilo(&estats->brb_truncate_hi);
4172 nstats->rx_crc_errors =
4173 bnx2x_hilo(&estats->rx_stat_dot3statsfcserrors_hi);
4174 nstats->rx_frame_errors =
4175 bnx2x_hilo(&estats->rx_stat_dot3statsalignmenterrors_hi);
4176 nstats->rx_fifo_errors = bnx2x_hilo(&estats->no_buff_discard_hi);
4177 nstats->rx_missed_errors = estats->xxoverflow_discard;
4179 nstats->rx_errors = nstats->rx_length_errors +
4180 nstats->rx_over_errors +
4181 nstats->rx_crc_errors +
4182 nstats->rx_frame_errors +
4183 nstats->rx_fifo_errors +
4184 nstats->rx_missed_errors;
4186 nstats->tx_aborted_errors =
4187 bnx2x_hilo(&estats->tx_stat_dot3statslatecollisions_hi) +
4188 bnx2x_hilo(&estats->tx_stat_dot3statsexcessivecollisions_hi);
4189 nstats->tx_carrier_errors =
4190 bnx2x_hilo(&estats->rx_stat_dot3statscarriersenseerrors_hi);
4191 nstats->tx_fifo_errors = 0;
4192 nstats->tx_heartbeat_errors = 0;
4193 nstats->tx_window_errors = 0;
4195 nstats->tx_errors = nstats->tx_aborted_errors +
4196 nstats->tx_carrier_errors +
4197 bnx2x_hilo(&estats->tx_stat_dot3statsinternalmactransmiterrors_hi);
4198 }
4200 static void bnx2x_drv_stats_update(struct bnx2x *bp)
4201 {
4202 struct bnx2x_eth_stats *estats = &bp->eth_stats;
4203 int i;
4205 estats->driver_xoff = 0;
4206 estats->rx_err_discard_pkt = 0;
4207 estats->rx_skb_alloc_failed = 0;
4208 estats->hw_csum_err = 0;
4209 for_each_rx_queue(bp, i) {
4210 struct bnx2x_eth_q_stats *qstats = &bp->fp[i].eth_q_stats;
4212 estats->driver_xoff += qstats->driver_xoff;
4213 estats->rx_err_discard_pkt += qstats->rx_err_discard_pkt;
4214 estats->rx_skb_alloc_failed += qstats->rx_skb_alloc_failed;
4215 estats->hw_csum_err += qstats->hw_csum_err;
4216 }
4217 }
4219 static void bnx2x_stats_update(struct bnx2x *bp)
4220 {
4221 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
4223 if (*stats_comp != DMAE_COMP_VAL)
4224 return;
4226 if (bp->port.pmf)
4227 bnx2x_hw_stats_update(bp);
4229 if (bnx2x_storm_stats_update(bp) && (bp->stats_pending++ == 3)) {
4230 BNX2X_ERR("storm stats were not updated for 3 times\n");
4231 bnx2x_panic();
4232 return;
4233 }
4235 bnx2x_net_stats_update(bp);
4236 bnx2x_drv_stats_update(bp);
4238 if (bp->msglevel & NETIF_MSG_TIMER) {
4239 struct bnx2x_fastpath *fp0_rx = bp->fp;
4240 struct bnx2x_fastpath *fp0_tx = &(bp->fp[bp->num_rx_queues]);
4241 struct tstorm_per_client_stats *old_tclient =
4242 &bp->fp->old_tclient;
4243 struct bnx2x_eth_q_stats *qstats = &bp->fp->eth_q_stats;
4244 struct bnx2x_eth_stats *estats = &bp->eth_stats;
4245 struct net_device_stats *nstats = &bp->dev->stats;
4246 int i;
4248 printk(KERN_DEBUG "%s:\n", bp->dev->name);
4249 printk(KERN_DEBUG " tx avail (%4x) tx hc idx (%x)"
4250 " tx pkt (%lx)\n",
4251 bnx2x_tx_avail(fp0_tx),
4252 le16_to_cpu(*fp0_tx->tx_cons_sb), nstats->tx_packets);
4253 printk(KERN_DEBUG " rx usage (%4x) rx hc idx (%x)"
4254 " rx pkt (%lx)\n",
4255 (u16)(le16_to_cpu(*fp0_rx->rx_cons_sb) -
4256 fp0_rx->rx_comp_cons),
4257 le16_to_cpu(*fp0_rx->rx_cons_sb), nstats->rx_packets);
4258 printk(KERN_DEBUG " %s (Xoff events %u) brb drops %u "
4259 "brb truncate %u\n",
4260 (netif_queue_stopped(bp->dev) ? "Xoff" : "Xon"),
4261 qstats->driver_xoff,
4262 estats->brb_drop_lo, estats->brb_truncate_lo);
4263 printk(KERN_DEBUG "tstats: checksum_discard %u "
4264 "packets_too_big_discard %lu no_buff_discard %lu "
4265 "mac_discard %u mac_filter_discard %u "
4266 "xxovrflow_discard %u brb_truncate_discard %u "
4267 "ttl0_discard %u\n",
4268 le32_to_cpu(old_tclient->checksum_discard),
4269 bnx2x_hilo(&qstats->etherstatsoverrsizepkts_hi),
4270 bnx2x_hilo(&qstats->no_buff_discard_hi),
4271 estats->mac_discard, estats->mac_filter_discard,
4272 estats->xxoverflow_discard, estats->brb_truncate_discard,
4273 le32_to_cpu(old_tclient->ttl0_discard));
4275 for_each_queue(bp, i) {
4276 printk(KERN_DEBUG "[%d]: %lu\t%lu\t%lu\n", i,
4277 bnx2x_fp(bp, i, tx_pkt),
4278 bnx2x_fp(bp, i, rx_pkt),
4279 bnx2x_fp(bp, i, rx_calls));
4280 }
4281 }
4283 bnx2x_hw_stats_post(bp);
4284 bnx2x_storm_stats_post(bp);
4285 }
4287 static void bnx2x_port_stats_stop(struct bnx2x *bp)
4288 {
4289 struct dmae_command *dmae;
4290 u32 opcode;
4291 int loader_idx = PMF_DMAE_C(bp);
4292 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
4294 bp->executer_idx = 0;
4296 opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
4297 DMAE_CMD_C_ENABLE |
4298 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
4299 #ifdef __BIG_ENDIAN
4300 DMAE_CMD_ENDIANITY_B_DW_SWAP |
4301 #else
4302 DMAE_CMD_ENDIANITY_DW_SWAP |
4303 #endif
4304 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
4305 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
4307 if (bp->port.port_stx) {
4309 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4310 if (bp->func_stx)
4311 dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
4312 else
4313 dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
4314 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
4315 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
4316 dmae->dst_addr_lo = bp->port.port_stx >> 2;
4317 dmae->dst_addr_hi = 0;
4318 dmae->len = sizeof(struct host_port_stats) >> 2;
4319 if (bp->func_stx) {
4320 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
4321 dmae->comp_addr_hi = 0;
4322 dmae->comp_val = 1;
4323 } else {
4324 dmae->comp_addr_lo =
4325 U64_LO(bnx2x_sp_mapping(bp, stats_comp));
4326 dmae->comp_addr_hi =
4327 U64_HI(bnx2x_sp_mapping(bp, stats_comp));
4328 dmae->comp_val = DMAE_COMP_VAL;
4330 *stats_comp = 0;
4331 }
4332 }
4334 if (bp->func_stx) {
4336 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4337 dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
4338 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
4339 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
4340 dmae->dst_addr_lo = bp->func_stx >> 2;
4341 dmae->dst_addr_hi = 0;
4342 dmae->len = sizeof(struct host_func_stats) >> 2;
4343 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
4344 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
4345 dmae->comp_val = DMAE_COMP_VAL;
4346 *stats_comp = 0;
4347 }
4348 }
4351 static void bnx2x_stats_stop(struct bnx2x *bp)
4352 {
4353 int update = 0;
4355 bnx2x_stats_comp(bp);
4357 if (bp->port.pmf)
4358 update = (bnx2x_hw_stats_update(bp) == 0);
4360 update |= (bnx2x_storm_stats_update(bp) == 0);
4362 if (update) {
4363 bnx2x_net_stats_update(bp);
4365 if (bp->port.pmf)
4366 bnx2x_port_stats_stop(bp);
4368 bnx2x_hw_stats_post(bp);
4369 bnx2x_stats_comp(bp);
4370 }
4371 }
4373 static void bnx2x_stats_do_nothing(struct bnx2x *bp)
4374 {
4375 }
4377 static const struct {
4378 void (*action)(struct bnx2x *bp);
4379 enum bnx2x_stats_state next_state;
4380 } bnx2x_stats_stm[STATS_STATE_MAX][STATS_EVENT_MAX] = {
4381 /* state	event	*/
4382 {
4383 /* DISABLED PMF */ {bnx2x_stats_pmf_update, STATS_STATE_DISABLED},
4384 /* LINK_UP */ {bnx2x_stats_start, STATS_STATE_ENABLED},
4385 /* UPDATE */ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED},
4386 /* STOP */ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED}
4387 },
4388 {
4389 /* ENABLED PMF */ {bnx2x_stats_pmf_start, STATS_STATE_ENABLED},
4390 /* LINK_UP */ {bnx2x_stats_restart, STATS_STATE_ENABLED},
4391 /* UPDATE */ {bnx2x_stats_update, STATS_STATE_ENABLED},
4392 /* STOP */ {bnx2x_stats_stop, STATS_STATE_DISABLED}
4393 }
4394 };
4396 static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event)
4397 {
4398 enum bnx2x_stats_state state = bp->stats_state;
4400 bnx2x_stats_stm[state][event].action(bp);
4401 bp->stats_state = bnx2x_stats_stm[state][event].next_state;
4403 if ((event != STATS_EVENT_UPDATE) || (bp->msglevel & NETIF_MSG_TIMER))
4404 DP(BNX2X_MSG_STATS, "state %d -> event %d -> state %d\n",
4405 state, event, bp->stats_state);
4406 }
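/* A typical trip through this table: while the interface is up
 * (STATS_STATE_ENABLED) the periodic timer posts STATS_EVENT_UPDATE,
 * which dispatches bnx2x_stats_update() and stays in ENABLED; a
 * STATS_EVENT_STOP on unload runs bnx2x_stats_stop() and returns the
 * machine to DISABLED.
 */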
4408 static void bnx2x_port_stats_base_init(struct bnx2x *bp)
4409 {
4410 struct dmae_command *dmae;
4411 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
4413 /* sanity */
4414 if (!bp->port.pmf || !bp->port.port_stx) {
4415 BNX2X_ERR("BUG!\n");
4416 return;
4417 }
4419 bp->executer_idx = 0;
4421 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4422 dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
4423 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
4424 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
4426 DMAE_CMD_ENDIANITY_B_DW_SWAP |
4428 DMAE_CMD_ENDIANITY_DW_SWAP |
4430 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
4431 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
4432 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
4433 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
4434 dmae->dst_addr_lo = bp->port.port_stx >> 2;
4435 dmae->dst_addr_hi = 0;
4436 dmae->len = sizeof(struct host_port_stats) >> 2;
4437 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
4438 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
4439 dmae->comp_val = DMAE_COMP_VAL;
4441 *stats_comp = 0;
4442 bnx2x_hw_stats_post(bp);
4443 bnx2x_stats_comp(bp);
4444 }
4446 static void bnx2x_func_stats_base_init(struct bnx2x *bp)
4447 {
4448 int vn, vn_max = IS_E1HMF(bp) ? E1HVN_MAX : E1VN_MAX;
4449 int port = BP_PORT(bp);
4450 int func;
4451 u32 func_stx;
4453 /* sanity */
4454 if (!bp->port.pmf || !bp->func_stx) {
4455 BNX2X_ERR("BUG!\n");
4456 return;
4457 }
4459 /* save our func_stx */
4460 func_stx = bp->func_stx;
4462 for (vn = VN_0; vn < vn_max; vn++) {
4463 func = 2*vn + port;
4465 bp->func_stx = SHMEM_RD(bp, func_mb[func].fw_mb_param);
4466 bnx2x_func_stats_init(bp);
4467 bnx2x_hw_stats_post(bp);
4468 bnx2x_stats_comp(bp);
4469 }
4471 /* restore our func_stx */
4472 bp->func_stx = func_stx;
4473 }
4475 static void bnx2x_func_stats_base_update(struct bnx2x *bp)
4476 {
4477 struct dmae_command *dmae = &bp->stats_dmae;
4478 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
4480 /* sanity */
4481 if (!bp->func_stx) {
4482 BNX2X_ERR("BUG!\n");
4483 return;
4484 }
4486 bp->executer_idx = 0;
4487 memset(dmae, 0, sizeof(struct dmae_command));
4489 dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
4490 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
4491 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
4492 #ifdef __BIG_ENDIAN
4493 DMAE_CMD_ENDIANITY_B_DW_SWAP |
4494 #else
4495 DMAE_CMD_ENDIANITY_DW_SWAP |
4496 #endif
4497 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
4498 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
4499 dmae->src_addr_lo = bp->func_stx >> 2;
4500 dmae->src_addr_hi = 0;
4501 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats_base));
4502 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats_base));
4503 dmae->len = sizeof(struct host_func_stats) >> 2;
4504 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
4505 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
4506 dmae->comp_val = DMAE_COMP_VAL;
4508 *stats_comp = 0;
4509 bnx2x_hw_stats_post(bp);
4510 bnx2x_stats_comp(bp);
4511 }
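/* Note the DMAE direction here: unlike the init paths above, which copy
 * statistics from PCI memory out to GRC, this helper uses
 * DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI to read the function statistics
 * block back into func_stats_base.
 */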
4513 static void bnx2x_stats_init(struct bnx2x *bp)
4514 {
4515 int port = BP_PORT(bp);
4516 int func = BP_FUNC(bp);
4517 int i;
4519 bp->stats_pending = 0;
4520 bp->executer_idx = 0;
4521 bp->stats_counter = 0;
4523 /* port and func stats for management */
4524 if (!BP_NOMCP(bp)) {
4525 bp->port.port_stx = SHMEM_RD(bp, port_mb[port].port_stx);
4526 bp->func_stx = SHMEM_RD(bp, func_mb[func].fw_mb_param);
4528 } else {
4529 bp->port.port_stx = 0;
4530 bp->func_stx = 0;
4531 }
4532 DP(BNX2X_MSG_STATS, "port_stx 0x%x func_stx 0x%x\n",
4533 bp->port.port_stx, bp->func_stx);
4535 /* port stats */
4536 memset(&(bp->port.old_nig_stats), 0, sizeof(struct nig_stats));
4537 bp->port.old_nig_stats.brb_discard =
4538 REG_RD(bp, NIG_REG_STAT0_BRB_DISCARD + port*0x38);
4539 bp->port.old_nig_stats.brb_truncate =
4540 REG_RD(bp, NIG_REG_STAT0_BRB_TRUNCATE + port*0x38);
4541 REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT0 + port*0x50,
4542 &(bp->port.old_nig_stats.egress_mac_pkt0_lo), 2);
4543 REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT1 + port*0x50,
4544 &(bp->port.old_nig_stats.egress_mac_pkt1_lo), 2);
4546 /* function stats */
4547 for_each_queue(bp, i) {
4548 struct bnx2x_fastpath *fp = &bp->fp[i];
4550 memset(&fp->old_tclient, 0,
4551 sizeof(struct tstorm_per_client_stats));
4552 memset(&fp->old_uclient, 0,
4553 sizeof(struct ustorm_per_client_stats));
4554 memset(&fp->old_xclient, 0,
4555 sizeof(struct xstorm_per_client_stats));
4556 memset(&fp->eth_q_stats, 0, sizeof(struct bnx2x_eth_q_stats));
4557 }
4559 memset(&bp->dev->stats, 0, sizeof(struct net_device_stats));
4560 memset(&bp->eth_stats, 0, sizeof(struct bnx2x_eth_stats));
4562 bp->stats_state = STATS_STATE_DISABLED;
4564 if (bp->port.pmf) {
4565 if (bp->port.port_stx)
4566 bnx2x_port_stats_base_init(bp);
4568 if (bp->func_stx)
4569 bnx2x_func_stats_base_init(bp);
4571 } else if (bp->func_stx)
4572 bnx2x_func_stats_base_update(bp);
4573 }
4575 static void bnx2x_timer(unsigned long data)
4576 {
4577 struct bnx2x *bp = (struct bnx2x *) data;
4579 if (!netif_running(bp->dev))
4580 return;
4582 if (atomic_read(&bp->intr_sem) != 0)
4583 goto timer_restart;
4585 if (poll) {
4586 struct bnx2x_fastpath *fp = &bp->fp[0];
4587 int rc;
4589 bnx2x_tx_int(fp);
4590 rc = bnx2x_rx_int(fp, 1000);
4591 }
4593 if (!BP_NOMCP(bp)) {
4594 int func = BP_FUNC(bp);
4595 u32 drv_pulse;
4596 u32 mcp_pulse;
4598 ++bp->fw_drv_pulse_wr_seq;
4599 bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK;
4600 /* TBD - add SYSTEM_TIME */
4601 drv_pulse = bp->fw_drv_pulse_wr_seq;
4602 SHMEM_WR(bp, func_mb[func].drv_pulse_mb, drv_pulse);
4604 mcp_pulse = (SHMEM_RD(bp, func_mb[func].mcp_pulse_mb) &
4605 MCP_PULSE_SEQ_MASK);
4606 /* The delta between driver pulse and mcp response
4607 * should be 1 (before mcp response) or 0 (after mcp response)
4608 */
4609 if ((drv_pulse != mcp_pulse) &&
4610 (drv_pulse != ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK))) {
4611 /* someone lost a heartbeat... */
4612 BNX2X_ERR("drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
4613 drv_pulse, mcp_pulse);
4614 }
4615 }
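/* Worked example of the check above, assuming the usual 15-bit
 * DRV_PULSE_SEQ_MASK: a driver pulse of 0x0003 is healthy against an MCP
 * echo of 0x0003 (delta 0) or 0x0002 (delta 1); any other delta means a
 * missed heartbeat and is only logged.
 */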
4617 if ((bp->state == BNX2X_STATE_OPEN) ||
4618 (bp->state == BNX2X_STATE_DISABLED))
4619 bnx2x_stats_handle(bp, STATS_EVENT_UPDATE);
4621 timer_restart:
4622 mod_timer(&bp->timer, jiffies + bp->current_interval);
4623 }
4625 /* end of Statistics */
4629 /*
4630 * nic init service functions
4631 */
4633 static void bnx2x_zero_sb(struct bnx2x *bp, int sb_id)
4634 {
4635 int port = BP_PORT(bp);
4638 bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
4639 CSTORM_SB_HOST_STATUS_BLOCK_U_OFFSET(port, sb_id), 0,
4640 CSTORM_SB_STATUS_BLOCK_U_SIZE / 4);
4641 bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
4642 CSTORM_SB_HOST_STATUS_BLOCK_C_OFFSET(port, sb_id), 0,
4643 CSTORM_SB_STATUS_BLOCK_C_SIZE / 4);
4646 static void bnx2x_init_sb(struct bnx2x *bp, struct host_status_block *sb,
4647 dma_addr_t mapping, int sb_id)
4648 {
4649 int port = BP_PORT(bp);
4650 int func = BP_FUNC(bp);
4651 int index;
4652 u64 section;
4654 /* USTORM */
4655 section = ((u64)mapping) + offsetof(struct host_status_block,
4656 u_status_block);
4657 sb->u_status_block.status_block_id = sb_id;
4659 REG_WR(bp, BAR_CSTRORM_INTMEM +
4660 CSTORM_SB_HOST_SB_ADDR_U_OFFSET(port, sb_id), U64_LO(section));
4661 REG_WR(bp, BAR_CSTRORM_INTMEM +
4662 ((CSTORM_SB_HOST_SB_ADDR_U_OFFSET(port, sb_id)) + 4),
4663 U64_HI(section));
4664 REG_WR8(bp, BAR_CSTRORM_INTMEM + FP_USB_FUNC_OFF +
4665 CSTORM_SB_HOST_STATUS_BLOCK_U_OFFSET(port, sb_id), func);
4667 for (index = 0; index < HC_USTORM_SB_NUM_INDICES; index++)
4668 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4669 CSTORM_SB_HC_DISABLE_U_OFFSET(port, sb_id, index), 1);
4671 /* CSTORM */
4672 section = ((u64)mapping) + offsetof(struct host_status_block,
4673 c_status_block);
4674 sb->c_status_block.status_block_id = sb_id;
4676 REG_WR(bp, BAR_CSTRORM_INTMEM +
4677 CSTORM_SB_HOST_SB_ADDR_C_OFFSET(port, sb_id), U64_LO(section));
4678 REG_WR(bp, BAR_CSTRORM_INTMEM +
4679 ((CSTORM_SB_HOST_SB_ADDR_C_OFFSET(port, sb_id)) + 4),
4680 U64_HI(section));
4681 REG_WR8(bp, BAR_CSTRORM_INTMEM + FP_CSB_FUNC_OFF +
4682 CSTORM_SB_HOST_STATUS_BLOCK_C_OFFSET(port, sb_id), func);
4684 for (index = 0; index < HC_CSTORM_SB_NUM_INDICES; index++)
4685 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4686 CSTORM_SB_HC_DISABLE_C_OFFSET(port, sb_id, index), 1);
4688 bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
4689 }
4691 static void bnx2x_zero_def_sb(struct bnx2x *bp)
4692 {
4693 int func = BP_FUNC(bp);
4695 bnx2x_init_fill(bp, TSEM_REG_FAST_MEMORY +
4696 TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4697 sizeof(struct tstorm_def_status_block)/4);
4698 bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
4699 CSTORM_DEF_SB_HOST_STATUS_BLOCK_U_OFFSET(func), 0,
4700 sizeof(struct cstorm_def_status_block_u)/4);
4701 bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
4702 CSTORM_DEF_SB_HOST_STATUS_BLOCK_C_OFFSET(func), 0,
4703 sizeof(struct cstorm_def_status_block_c)/4);
4704 bnx2x_init_fill(bp, XSEM_REG_FAST_MEMORY +
4705 XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4706 sizeof(struct xstorm_def_status_block)/4);
4707 }
4709 static void bnx2x_init_def_sb(struct bnx2x *bp,
4710 struct host_def_status_block *def_sb,
4711 dma_addr_t mapping, int sb_id)
4712 {
4713 int port = BP_PORT(bp);
4714 int func = BP_FUNC(bp);
4715 int index, val, reg_offset;
4716 u64 section;
4718 /* ATTN */
4719 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4720 atten_status_block);
4721 def_sb->atten_status_block.status_block_id = sb_id;
4723 bp->attn_state = 0;
4725 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
4726 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
4728 for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
4729 bp->attn_group[index].sig[0] = REG_RD(bp,
4730 reg_offset + 0x10*index);
4731 bp->attn_group[index].sig[1] = REG_RD(bp,
4732 reg_offset + 0x4 + 0x10*index);
4733 bp->attn_group[index].sig[2] = REG_RD(bp,
4734 reg_offset + 0x8 + 0x10*index);
4735 bp->attn_group[index].sig[3] = REG_RD(bp,
4736 reg_offset + 0xc + 0x10*index);
4737 }
4739 reg_offset = (port ? HC_REG_ATTN_MSG1_ADDR_L :
4740 HC_REG_ATTN_MSG0_ADDR_L);
4742 REG_WR(bp, reg_offset, U64_LO(section));
4743 REG_WR(bp, reg_offset + 4, U64_HI(section));
4745 reg_offset = (port ? HC_REG_ATTN_NUM_P1 : HC_REG_ATTN_NUM_P0);
4747 val = REG_RD(bp, reg_offset);
4748 val |= sb_id;
4749 REG_WR(bp, reg_offset, val);
4752 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4753 u_def_status_block);
4754 def_sb->u_def_status_block.status_block_id = sb_id;
4756 REG_WR(bp, BAR_CSTRORM_INTMEM +
4757 CSTORM_DEF_SB_HOST_SB_ADDR_U_OFFSET(func), U64_LO(section));
4758 REG_WR(bp, BAR_CSTRORM_INTMEM +
4759 ((CSTORM_DEF_SB_HOST_SB_ADDR_U_OFFSET(func)) + 4),
4760 U64_HI(section));
4761 REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_USB_FUNC_OFF +
4762 CSTORM_DEF_SB_HOST_STATUS_BLOCK_U_OFFSET(func), func);
4764 for (index = 0; index < HC_USTORM_DEF_SB_NUM_INDICES; index++)
4765 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4766 CSTORM_DEF_SB_HC_DISABLE_U_OFFSET(func, index), 1);
4769 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4770 c_def_status_block);
4771 def_sb->c_def_status_block.status_block_id = sb_id;
4773 REG_WR(bp, BAR_CSTRORM_INTMEM +
4774 CSTORM_DEF_SB_HOST_SB_ADDR_C_OFFSET(func), U64_LO(section));
4775 REG_WR(bp, BAR_CSTRORM_INTMEM +
4776 ((CSTORM_DEF_SB_HOST_SB_ADDR_C_OFFSET(func)) + 4),
4777 U64_HI(section));
4778 REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_CSB_FUNC_OFF +
4779 CSTORM_DEF_SB_HOST_STATUS_BLOCK_C_OFFSET(func), func);
4781 for (index = 0; index < HC_CSTORM_DEF_SB_NUM_INDICES; index++)
4782 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4783 CSTORM_DEF_SB_HC_DISABLE_C_OFFSET(func, index), 1);
4786 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4787 t_def_status_block);
4788 def_sb->t_def_status_block.status_block_id = sb_id;
4790 REG_WR(bp, BAR_TSTRORM_INTMEM +
4791 TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
4792 REG_WR(bp, BAR_TSTRORM_INTMEM +
4793 ((TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
4794 U64_HI(section));
4795 REG_WR8(bp, BAR_TSTRORM_INTMEM + DEF_TSB_FUNC_OFF +
4796 TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4798 for (index = 0; index < HC_TSTORM_DEF_SB_NUM_INDICES; index++)
4799 REG_WR16(bp, BAR_TSTRORM_INTMEM +
4800 TSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
4803 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4804 x_def_status_block);
4805 def_sb->x_def_status_block.status_block_id = sb_id;
4807 REG_WR(bp, BAR_XSTRORM_INTMEM +
4808 XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
4809 REG_WR(bp, BAR_XSTRORM_INTMEM +
4810 ((XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
4811 U64_HI(section));
4812 REG_WR8(bp, BAR_XSTRORM_INTMEM + DEF_XSB_FUNC_OFF +
4813 XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4815 for (index = 0; index < HC_XSTORM_DEF_SB_NUM_INDICES; index++)
4816 REG_WR16(bp, BAR_XSTRORM_INTMEM +
4817 XSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
4819 bp->stats_pending = 0;
4820 bp->set_mac_pending = 0;
4822 bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
4823 }
4825 static void bnx2x_update_coalesce(struct bnx2x *bp)
4826 {
4827 int port = BP_PORT(bp);
4828 int i;
4830 for_each_queue(bp, i) {
4831 int sb_id = bp->fp[i].sb_id;
4833 /* HC_INDEX_U_ETH_RX_CQ_CONS */
4834 REG_WR8(bp, BAR_CSTRORM_INTMEM +
4835 CSTORM_SB_HC_TIMEOUT_U_OFFSET(port, sb_id,
4836 U_SB_ETH_RX_CQ_INDEX),
4837 bp->rx_ticks/12);
4838 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4839 CSTORM_SB_HC_DISABLE_U_OFFSET(port, sb_id,
4840 U_SB_ETH_RX_CQ_INDEX),
4841 (bp->rx_ticks/12) ? 0 : 1);
4843 /* HC_INDEX_C_ETH_TX_CQ_CONS */
4844 REG_WR8(bp, BAR_CSTRORM_INTMEM +
4845 CSTORM_SB_HC_TIMEOUT_C_OFFSET(port, sb_id,
4846 C_SB_ETH_TX_CQ_INDEX),
4847 bp->tx_ticks/12);
4848 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4849 CSTORM_SB_HC_DISABLE_C_OFFSET(port, sb_id,
4850 C_SB_ETH_TX_CQ_INDEX),
4851 (bp->tx_ticks/12) ? 0 : 1);
4852 }
4853 }
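/* The /12 above reflects the timeout resolution: rx_ticks/tx_ticks are
 * kept in microseconds while the HC timeout registers appear to count in
 * 12us units, so e.g. 48us becomes 4; a resulting value of 0 instead
 * disables coalescing for that index via the HC_DISABLE write.
 */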
4855 static inline void bnx2x_free_tpa_pool(struct bnx2x *bp,
4856 struct bnx2x_fastpath *fp, int last)
4857 {
4858 int i;
4860 for (i = 0; i < last; i++) {
4861 struct sw_rx_bd *rx_buf = &(fp->tpa_pool[i]);
4862 struct sk_buff *skb = rx_buf->skb;
4864 if (skb == NULL) {
4865 DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i);
4866 continue;
4867 }
4869 if (fp->tpa_state[i] == BNX2X_TPA_START)
4870 pci_unmap_single(bp->pdev,
4871 pci_unmap_addr(rx_buf, mapping),
4872 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
4874 dev_kfree_skb(skb);
4875 rx_buf->skb = NULL;
4876 }
4877 }
4879 static void bnx2x_init_rx_rings(struct bnx2x *bp)
4880 {
4881 int func = BP_FUNC(bp);
4882 int max_agg_queues = CHIP_IS_E1(bp) ? ETH_MAX_AGGREGATION_QUEUES_E1 :
4883 ETH_MAX_AGGREGATION_QUEUES_E1H;
4884 u16 ring_prod, cqe_ring_prod;
4885 int i, j;
4887 bp->rx_buf_size = bp->dev->mtu + ETH_OVREHEAD + BNX2X_RX_ALIGN;
4888 DP(NETIF_MSG_IFUP,
4889 "mtu %d rx_buf_size %d\n", bp->dev->mtu, bp->rx_buf_size);
4891 if (bp->flags & TPA_ENABLE_FLAG) {
4893 for_each_rx_queue(bp, j) {
4894 struct bnx2x_fastpath *fp = &bp->fp[j];
4896 for (i = 0; i < max_agg_queues; i++) {
4897 fp->tpa_pool[i].skb =
4898 netdev_alloc_skb(bp->dev, bp->rx_buf_size);
4899 if (!fp->tpa_pool[i].skb) {
4900 BNX2X_ERR("Failed to allocate TPA "
4901 "skb pool for queue[%d] - "
4902 "disabling TPA on this "
4903 "queue!\n", j);
4904 bnx2x_free_tpa_pool(bp, fp, i);
4905 fp->disable_tpa = 1;
4906 break;
4907 }
4908 pci_unmap_addr_set((struct sw_rx_bd *)
4909 &bp->fp->tpa_pool[i],
4910 mapping, 0);
4911 fp->tpa_state[i] = BNX2X_TPA_STOP;
4912 }
4913 }
4914 }
4916 for_each_rx_queue(bp, j) {
4917 struct bnx2x_fastpath *fp = &bp->fp[j];
4919 fp->rx_bd_cons = 0;
4920 fp->rx_cons_sb = BNX2X_RX_SB_INDEX;
4921 fp->rx_bd_cons_sb = BNX2X_RX_SB_BD_INDEX;
4923 /* Mark queue as Rx */
4924 fp->is_rx_queue = 1;
4926 /* "next page" elements initialization */
4928 for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
4929 struct eth_rx_sge *sge;
4931 sge = &fp->rx_sge_ring[RX_SGE_CNT * i - 2];
4932 sge->addr_hi =
4933 cpu_to_le32(U64_HI(fp->rx_sge_mapping +
4934 BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
4935 sge->addr_lo =
4936 cpu_to_le32(U64_LO(fp->rx_sge_mapping +
4937 BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
4938 }
4940 bnx2x_init_sge_ring_bit_mask(fp);
4942 /* RX BD ring */
4943 for (i = 1; i <= NUM_RX_RINGS; i++) {
4944 struct eth_rx_bd *rx_bd;
4946 rx_bd = &fp->rx_desc_ring[RX_DESC_CNT * i - 2];
4947 rx_bd->addr_hi =
4948 cpu_to_le32(U64_HI(fp->rx_desc_mapping +
4949 BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
4950 rx_bd->addr_lo =
4951 cpu_to_le32(U64_LO(fp->rx_desc_mapping +
4952 BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
4953 }
4955 /* CQ ring */
4956 for (i = 1; i <= NUM_RCQ_RINGS; i++) {
4957 struct eth_rx_cqe_next_page *nextpg;
4959 nextpg = (struct eth_rx_cqe_next_page *)
4960 &fp->rx_comp_ring[RCQ_DESC_CNT * i - 1];
4961 nextpg->addr_hi =
4962 cpu_to_le32(U64_HI(fp->rx_comp_mapping +
4963 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
4964 nextpg->addr_lo =
4965 cpu_to_le32(U64_LO(fp->rx_comp_mapping +
4966 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
4967 }
4969 /* Allocate SGEs and initialize the ring elements */
4970 for (i = 0, ring_prod = 0;
4971 i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {
4973 if (bnx2x_alloc_rx_sge(bp, fp, ring_prod) < 0) {
4974 BNX2X_ERR("was only able to allocate "
4975 "%d rx sges\n", i);
4976 BNX2X_ERR("disabling TPA for queue[%d]\n", j);
4977 /* Cleanup already allocated elements */
4978 bnx2x_free_rx_sge_range(bp, fp, ring_prod);
4979 bnx2x_free_tpa_pool(bp, fp, max_agg_queues);
4980 fp->disable_tpa = 1;
4981 ring_prod = 0;
4982 break;
4983 }
4984 ring_prod = NEXT_SGE_IDX(ring_prod);
4985 }
4986 fp->rx_sge_prod = ring_prod;
4988 /* Allocate BDs and initialize BD ring */
4989 fp->rx_comp_cons = 0;
4990 cqe_ring_prod = ring_prod = 0;
4991 for (i = 0; i < bp->rx_ring_size; i++) {
4992 if (bnx2x_alloc_rx_skb(bp, fp, ring_prod) < 0) {
4993 BNX2X_ERR("was only able to allocate "
4994 "%d rx skbs on queue[%d]\n", i, j);
4995 fp->eth_q_stats.rx_skb_alloc_failed++;
4996 break;
4997 }
4998 ring_prod = NEXT_RX_IDX(ring_prod);
4999 cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
5000 WARN_ON(ring_prod <= i);
5001 }
5003 fp->rx_bd_prod = ring_prod;
5004 /* must not have more available CQEs than BDs */
5005 fp->rx_comp_prod = min((u16)(NUM_RCQ_RINGS*RCQ_DESC_CNT),
5006 cqe_ring_prod);
5007 fp->rx_pkt = fp->rx_calls = 0;
5009 /* Warning!
5010 * this will generate an interrupt (to the TSTORM)
5011 * must only be done after chip is initialized
5012 */
5013 bnx2x_update_rx_prod(bp, fp, ring_prod, fp->rx_comp_prod,
5014 fp->rx_sge_prod);
5016 if (j != 0)
5017 continue;
5018 REG_WR(bp, BAR_USTRORM_INTMEM +
5019 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
5020 U64_LO(fp->rx_comp_mapping));
5021 REG_WR(bp, BAR_USTRORM_INTMEM +
5022 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
5023 U64_HI(fp->rx_comp_mapping));
5024 }
5025 }
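/* The rings are chained page to page: the final descriptor slot(s) of
 * every BD/SGE/RCQ page are reserved to hold the DMA address of the
 * following page - hence the RX_DESC_CNT * i - 2 and
 * RCQ_DESC_CNT * i - 1 indexing above - rather than describing a buffer.
 */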
5027 static void bnx2x_init_tx_ring(struct bnx2x *bp)
5028 {
5029 int i, j;
5031 for_each_tx_queue(bp, j) {
5032 struct bnx2x_fastpath *fp = &bp->fp[j];
5034 for (i = 1; i <= NUM_TX_RINGS; i++) {
5035 struct eth_tx_next_bd *tx_next_bd =
5036 &fp->tx_desc_ring[TX_DESC_CNT * i - 1].next_bd;
5038 tx_next_bd->addr_hi =
5039 cpu_to_le32(U64_HI(fp->tx_desc_mapping +
5040 BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
5041 tx_next_bd->addr_lo =
5042 cpu_to_le32(U64_LO(fp->tx_desc_mapping +
5043 BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
5044 }
5046 fp->tx_db.data.header.header = DOORBELL_HDR_DB_TYPE;
5047 fp->tx_db.data.zero_fill1 = 0;
5048 fp->tx_db.data.prod = 0;
5050 fp->tx_pkt_prod = 0;
5051 fp->tx_pkt_cons = 0;
5052 fp->tx_bd_prod = 0;
5053 fp->tx_bd_cons = 0;
5054 fp->tx_cons_sb = BNX2X_TX_SB_INDEX;
5055 fp->tx_pkt = 0;
5056 }
5058 /* clean tx statistics */
5059 for_each_rx_queue(bp, i)
5060 bnx2x_fp(bp, i, tx_pkt) = 0;
5061 }
5063 static void bnx2x_init_sp_ring(struct bnx2x *bp)
5064 {
5065 int func = BP_FUNC(bp);
5067 spin_lock_init(&bp->spq_lock);
5069 bp->spq_left = MAX_SPQ_PENDING;
5070 bp->spq_prod_idx = 0;
5071 bp->dsb_sp_prod = BNX2X_SP_DSB_INDEX;
5072 bp->spq_prod_bd = bp->spq;
5073 bp->spq_last_bd = bp->spq_prod_bd + MAX_SP_DESC_CNT;
5075 REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func),
5076 U64_LO(bp->spq_mapping));
5077 REG_WR(bp,
5078 XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func) + 4,
5079 U64_HI(bp->spq_mapping));
5081 REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PROD_OFFSET(func),
5082 bp->spq_prod_idx);
5083 }
5085 static void bnx2x_init_context(struct bnx2x *bp)
5086 {
5087 int i;
5089 for_each_rx_queue(bp, i) {
5090 struct eth_context *context = bnx2x_sp(bp, context[i].eth);
5091 struct bnx2x_fastpath *fp = &bp->fp[i];
5092 u8 cl_id = fp->cl_id;
5094 context->ustorm_st_context.common.sb_index_numbers =
5095 BNX2X_RX_SB_INDEX_NUM;
5096 context->ustorm_st_context.common.clientId = cl_id;
5097 context->ustorm_st_context.common.status_block_id = fp->sb_id;
5098 context->ustorm_st_context.common.flags =
5099 (USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_MC_ALIGNMENT |
5100 USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_STATISTICS);
5101 context->ustorm_st_context.common.statistics_counter_id =
5102 cl_id;
5103 context->ustorm_st_context.common.mc_alignment_log_size =
5104 BNX2X_RX_ALIGN_SHIFT;
5105 context->ustorm_st_context.common.bd_buff_size =
5106 bp->rx_buf_size;
5107 context->ustorm_st_context.common.bd_page_base_hi =
5108 U64_HI(fp->rx_desc_mapping);
5109 context->ustorm_st_context.common.bd_page_base_lo =
5110 U64_LO(fp->rx_desc_mapping);
5111 if (!fp->disable_tpa) {
5112 context->ustorm_st_context.common.flags |=
5113 USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_TPA;
5114 context->ustorm_st_context.common.sge_buff_size =
5115 (u16)min((u32)SGE_PAGE_SIZE*PAGES_PER_SGE,
5116 (u32)0xffff);
5117 context->ustorm_st_context.common.sge_page_base_hi =
5118 U64_HI(fp->rx_sge_mapping);
5119 context->ustorm_st_context.common.sge_page_base_lo =
5120 U64_LO(fp->rx_sge_mapping);
5122 context->ustorm_st_context.common.max_sges_for_packet =
5123 SGE_PAGE_ALIGN(bp->dev->mtu) >> SGE_PAGE_SHIFT;
5124 context->ustorm_st_context.common.max_sges_for_packet =
5125 ((context->ustorm_st_context.common.
5126 max_sges_for_packet + PAGES_PER_SGE - 1) &
5127 (~(PAGES_PER_SGE - 1))) >> PAGES_PER_SGE_SHIFT;
5128 }
5130 context->ustorm_ag_context.cdu_usage =
5131 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
5132 CDU_REGION_NUMBER_UCM_AG,
5133 ETH_CONNECTION_TYPE);
5135 context->xstorm_ag_context.cdu_reserved =
5136 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
5137 CDU_REGION_NUMBER_XCM_AG,
5138 ETH_CONNECTION_TYPE);
5139 }
5141 for_each_tx_queue(bp, i) {
5142 struct bnx2x_fastpath *fp = &bp->fp[i];
5143 struct eth_context *context =
5144 bnx2x_sp(bp, context[i - bp->num_rx_queues].eth);
5146 context->cstorm_st_context.sb_index_number =
5147 C_SB_ETH_TX_CQ_INDEX;
5148 context->cstorm_st_context.status_block_id = fp->sb_id;
5150 context->xstorm_st_context.tx_bd_page_base_hi =
5151 U64_HI(fp->tx_desc_mapping);
5152 context->xstorm_st_context.tx_bd_page_base_lo =
5153 U64_LO(fp->tx_desc_mapping);
5154 context->xstorm_st_context.statistics_data = (fp->cl_id |
5155 XSTORM_ETH_ST_CONTEXT_STATISTICS_ENABLE);
5156 }
5157 }
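/* Context slots are laid out Rx-first: Tx queue i uses
 * context[i - bp->num_rx_queues] and shares its cl_id/sb_id with the Rx
 * queue it is paired with (see the matching cl_id adjustment in
 * bnx2x_nic_init() below).
 */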
5159 static void bnx2x_init_ind_table(struct bnx2x *bp)
5160 {
5161 int func = BP_FUNC(bp);
5162 int i;
5164 if (bp->multi_mode == ETH_RSS_MODE_DISABLED)
5165 return;
5167 DP(NETIF_MSG_IFUP,
5168 "Initializing indirection table multi_mode %d\n", bp->multi_mode);
5169 for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++)
5170 REG_WR8(bp, BAR_TSTRORM_INTMEM +
5171 TSTORM_INDIRECTION_TABLE_OFFSET(func) + i,
5172 bp->fp->cl_id + (i % bp->num_rx_queues));
5173 }
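/* Example: with four Rx queues and a leading cl_id of 0 the table is
 * filled 0,1,2,3,0,1,2,3,... so RSS hash buckets are spread round-robin
 * across the Rx clients.
 */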
5175 static void bnx2x_set_client_config(struct bnx2x *bp)
5176 {
5177 struct tstorm_eth_client_config tstorm_client = {0};
5178 int port = BP_PORT(bp);
5179 int i;
5181 tstorm_client.mtu = bp->dev->mtu;
5182 tstorm_client.config_flags =
5183 (TSTORM_ETH_CLIENT_CONFIG_STATSITICS_ENABLE |
5184 TSTORM_ETH_CLIENT_CONFIG_E1HOV_REM_ENABLE);
5185 #ifdef BCM_VLAN
5186 if (bp->rx_mode && bp->vlgrp && (bp->flags & HW_VLAN_RX_FLAG)) {
5187 tstorm_client.config_flags |=
5188 TSTORM_ETH_CLIENT_CONFIG_VLAN_REM_ENABLE;
5189 DP(NETIF_MSG_IFUP, "vlan removal enabled\n");
5190 }
5191 #endif
5193 for_each_queue(bp, i) {
5194 tstorm_client.statistics_counter_id = bp->fp[i].cl_id;
5196 REG_WR(bp, BAR_TSTRORM_INTMEM +
5197 TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id),
5198 ((u32 *)&tstorm_client)[0]);
5199 REG_WR(bp, BAR_TSTRORM_INTMEM +
5200 TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id) + 4,
5201 ((u32 *)&tstorm_client)[1]);
5202 }
5204 DP(BNX2X_MSG_OFF, "tstorm_client: 0x%08x 0x%08x\n",
5205 ((u32 *)&tstorm_client)[0], ((u32 *)&tstorm_client)[1]);
5206 }
5208 static void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
5209 {
5210 struct tstorm_eth_mac_filter_config tstorm_mac_filter = {0};
5211 int mode = bp->rx_mode;
5212 int mask = (1 << BP_L_ID(bp));
5213 int func = BP_FUNC(bp);
5214 int port = BP_PORT(bp);
5215 int i;
5216 /* All but management unicast packets should pass to the host as well */
5217 u32 llh_mask =
5218 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_BRCST |
5219 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_MLCST |
5220 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_VLAN |
5221 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_NO_VLAN;
5223 DP(NETIF_MSG_IFUP, "rx mode %d mask 0x%x\n", mode, mask);
5225 switch (mode) {
5226 case BNX2X_RX_MODE_NONE: /* no Rx */
5227 tstorm_mac_filter.ucast_drop_all = mask;
5228 tstorm_mac_filter.mcast_drop_all = mask;
5229 tstorm_mac_filter.bcast_drop_all = mask;
5230 break;
5232 case BNX2X_RX_MODE_NORMAL:
5233 tstorm_mac_filter.bcast_accept_all = mask;
5234 break;
5236 case BNX2X_RX_MODE_ALLMULTI:
5237 tstorm_mac_filter.mcast_accept_all = mask;
5238 tstorm_mac_filter.bcast_accept_all = mask;
5239 break;
5241 case BNX2X_RX_MODE_PROMISC:
5242 tstorm_mac_filter.ucast_accept_all = mask;
5243 tstorm_mac_filter.mcast_accept_all = mask;
5244 tstorm_mac_filter.bcast_accept_all = mask;
5245 /* pass management unicast packets as well */
5246 llh_mask |= NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_UNCST;
5247 break;
5249 default:
5250 BNX2X_ERR("BAD rx mode (%d)\n", mode);
5251 break;
5252 }
5254 REG_WR(bp,
5255 (port ? NIG_REG_LLH1_BRB1_DRV_MASK : NIG_REG_LLH0_BRB1_DRV_MASK),
5256 llh_mask);
5258 for (i = 0; i < sizeof(struct tstorm_eth_mac_filter_config)/4; i++) {
5259 REG_WR(bp, BAR_TSTRORM_INTMEM +
5260 TSTORM_MAC_FILTER_CONFIG_OFFSET(func) + i * 4,
5261 ((u32 *)&tstorm_mac_filter)[i]);
5263 /* DP(NETIF_MSG_IFUP, "tstorm_mac_filter[%d]: 0x%08x\n", i,
5264 ((u32 *)&tstorm_mac_filter)[i]); */
5265 }
5267 if (mode != BNX2X_RX_MODE_NONE)
5268 bnx2x_set_client_config(bp);
5269 }
5271 static void bnx2x_init_internal_common(struct bnx2x *bp)
5272 {
5273 int i;
5275 /* Zero this manually as its initialization is
5276 currently missing in the initTool */
5277 for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++)
5278 REG_WR(bp, BAR_USTRORM_INTMEM +
5279 USTORM_AGG_DATA_OFFSET + i * 4, 0);
5280 }
5282 static void bnx2x_init_internal_port(struct bnx2x *bp)
5283 {
5284 int port = BP_PORT(bp);
5286 REG_WR(bp,
5287 BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_U_OFFSET(port), BNX2X_BTR);
5288 REG_WR(bp,
5289 BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_C_OFFSET(port), BNX2X_BTR);
5290 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
5291 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
5292 }
5294 static void bnx2x_init_internal_func(struct bnx2x *bp)
5295 {
5296 struct tstorm_eth_function_common_config tstorm_config = {0};
5297 struct stats_indication_flags stats_flags = {0};
5298 int port = BP_PORT(bp);
5299 int func = BP_FUNC(bp);
5300 int i, j;
5301 u32 offset;
5302 u16 max_agg_size;
5304 if (is_multi(bp)) {
5305 tstorm_config.config_flags = MULTI_FLAGS(bp);
5306 tstorm_config.rss_result_mask = MULTI_MASK;
5307 }
5309 /* Enable TPA if needed */
5310 if (bp->flags & TPA_ENABLE_FLAG)
5311 tstorm_config.config_flags |=
5312 TSTORM_ETH_FUNCTION_COMMON_CONFIG_ENABLE_TPA;
5314 if (IS_E1HMF(bp))
5315 tstorm_config.config_flags |=
5316 TSTORM_ETH_FUNCTION_COMMON_CONFIG_E1HOV_IN_CAM;
5318 tstorm_config.leading_client_id = BP_L_ID(bp);
5320 REG_WR(bp, BAR_TSTRORM_INTMEM +
5321 TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(func),
5322 (*(u32 *)&tstorm_config));
5324 bp->rx_mode = BNX2X_RX_MODE_NONE; /* no rx until link is up */
5325 bnx2x_set_storm_rx_mode(bp);
5327 for_each_queue(bp, i) {
5328 u8 cl_id = bp->fp[i].cl_id;
5330 /* reset xstorm per client statistics */
5331 offset = BAR_XSTRORM_INTMEM +
5332 XSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
5333 for (j = 0;
5334 j < sizeof(struct xstorm_per_client_stats) / 4; j++)
5335 REG_WR(bp, offset + j*4, 0);
5337 /* reset tstorm per client statistics */
5338 offset = BAR_TSTRORM_INTMEM +
5339 TSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
5340 for (j = 0;
5341 j < sizeof(struct tstorm_per_client_stats) / 4; j++)
5342 REG_WR(bp, offset + j*4, 0);
5344 /* reset ustorm per client statistics */
5345 offset = BAR_USTRORM_INTMEM +
5346 USTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
5347 for (j = 0;
5348 j < sizeof(struct ustorm_per_client_stats) / 4; j++)
5349 REG_WR(bp, offset + j*4, 0);
5350 }
5352 /* Init statistics related context */
5353 stats_flags.collect_eth = 1;
5355 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func),
5356 ((u32 *)&stats_flags)[0]);
5357 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func) + 4,
5358 ((u32 *)&stats_flags)[1]);
5360 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func),
5361 ((u32 *)&stats_flags)[0]);
5362 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func) + 4,
5363 ((u32 *)&stats_flags)[1]);
5365 REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(func),
5366 ((u32 *)&stats_flags)[0]);
5367 REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(func) + 4,
5368 ((u32 *)&stats_flags)[1]);
5370 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func),
5371 ((u32 *)&stats_flags)[0]);
5372 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func) + 4,
5373 ((u32 *)&stats_flags)[1]);
5375 REG_WR(bp, BAR_XSTRORM_INTMEM +
5376 XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
5377 U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
5378 REG_WR(bp, BAR_XSTRORM_INTMEM +
5379 XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
5380 U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
5382 REG_WR(bp, BAR_TSTRORM_INTMEM +
5383 TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
5384 U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
5385 REG_WR(bp, BAR_TSTRORM_INTMEM +
5386 TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
5387 U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
5389 REG_WR(bp, BAR_USTRORM_INTMEM +
5390 USTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
5391 U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
5392 REG_WR(bp, BAR_USTRORM_INTMEM +
5393 USTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
5394 U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
5396 if (CHIP_IS_E1H(bp)) {
5397 REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNCTION_MODE_OFFSET,
5398 IS_E1HMF(bp));
5399 REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNCTION_MODE_OFFSET,
5400 IS_E1HMF(bp));
5401 REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNCTION_MODE_OFFSET,
5402 IS_E1HMF(bp));
5403 REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNCTION_MODE_OFFSET,
5404 IS_E1HMF(bp));
5406 REG_WR16(bp, BAR_XSTRORM_INTMEM + XSTORM_E1HOV_OFFSET(func),
5407 bp->e1hov);
5408 }
5410 /* Init CQ ring mapping and aggregation size, the FW limit is 8 frags */
5411 max_agg_size =
5412 min((u32)(min((u32)8, (u32)MAX_SKB_FRAGS) *
5413 SGE_PAGE_SIZE * PAGES_PER_SGE),
5414 (u32)0xffff);
5415 for_each_rx_queue(bp, i) {
5416 struct bnx2x_fastpath *fp = &bp->fp[i];
5418 REG_WR(bp, BAR_USTRORM_INTMEM +
5419 USTORM_CQE_PAGE_BASE_OFFSET(port, fp->cl_id),
5420 U64_LO(fp->rx_comp_mapping));
5421 REG_WR(bp, BAR_USTRORM_INTMEM +
5422 USTORM_CQE_PAGE_BASE_OFFSET(port, fp->cl_id) + 4,
5423 U64_HI(fp->rx_comp_mapping));
5425 /* Next page */
5426 REG_WR(bp, BAR_USTRORM_INTMEM +
5427 USTORM_CQE_PAGE_NEXT_OFFSET(port, fp->cl_id),
5428 U64_LO(fp->rx_comp_mapping + BCM_PAGE_SIZE));
5429 REG_WR(bp, BAR_USTRORM_INTMEM +
5430 USTORM_CQE_PAGE_NEXT_OFFSET(port, fp->cl_id) + 4,
5431 U64_HI(fp->rx_comp_mapping + BCM_PAGE_SIZE));
5433 REG_WR16(bp, BAR_USTRORM_INTMEM +
5434 USTORM_MAX_AGG_SIZE_OFFSET(port, fp->cl_id),
5435 max_agg_size);
5436 }
5438 /* dropless flow control */
5439 if (CHIP_IS_E1H(bp)) {
5440 struct ustorm_eth_rx_pause_data_e1h rx_pause = {0};
5442 rx_pause.bd_thr_low = 250;
5443 rx_pause.cqe_thr_low = 250;
5445 rx_pause.sge_thr_low = 0;
5446 rx_pause.bd_thr_high = 350;
5447 rx_pause.cqe_thr_high = 350;
5448 rx_pause.sge_thr_high = 0;
5450 for_each_rx_queue(bp, i) {
5451 struct bnx2x_fastpath *fp = &bp->fp[i];
5453 if (!fp->disable_tpa) {
5454 rx_pause.sge_thr_low = 150;
5455 rx_pause.sge_thr_high = 250;
5456 }
5459 offset = BAR_USTRORM_INTMEM +
5460 USTORM_ETH_RING_PAUSE_DATA_OFFSET(port,
5461 fp->cl_id);
5462 for (j = 0;
5463 j < sizeof(struct ustorm_eth_rx_pause_data_e1h)/4;
5464 j++)
5465 REG_WR(bp, offset + j*4,
5466 ((u32 *)&rx_pause)[j]);
5467 }
5468 }
5470 memset(&(bp->cmng), 0, sizeof(struct cmng_struct_per_port));
5472 /* Init rate shaping and fairness contexts */
5473 if (IS_E1HMF(bp)) {
5474 int vn;
5476 /* During init there is no active link
5477 Until link is up, set link rate to 10Gbps */
5478 bp->link_vars.line_speed = SPEED_10000;
5479 bnx2x_init_port_minmax(bp);
5481 bnx2x_calc_vn_weight_sum(bp);
5483 for (vn = VN_0; vn < E1HVN_MAX; vn++)
5484 bnx2x_init_vn_minmax(bp, 2*vn + port);
5486 /* Enable rate shaping and fairness */
5487 bp->cmng.flags.cmng_enables =
5488 CMNG_FLAGS_PER_PORT_RATE_SHAPING_VN;
5489 if (bp->vn_weight_sum)
5490 bp->cmng.flags.cmng_enables |=
5491 CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
5492 else
5493 DP(NETIF_MSG_IFUP, "All MIN values are zeroes"
5494 " fairness will be disabled\n");
5495 } else {
5496 /* rate shaping and fairness are disabled */
5497 DP(NETIF_MSG_IFUP,
5498 "single function mode minmax will be disabled\n");
5499 }
5502 /* Store it to internal memory */
5503 if (bp->port.pmf)
5504 for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++)
5505 REG_WR(bp, BAR_XSTRORM_INTMEM +
5506 XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i * 4,
5507 ((u32 *)(&bp->cmng))[i]);
5508 }
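/* Fairness needs a non-zero weight sum: if every VN in the
 * multi-function configuration has a MIN rate of 0, vn_weight_sum stays
 * 0 and only the rate-shaping enable bit is set above.
 */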
5510 static void bnx2x_init_internal(struct bnx2x *bp, u32 load_code)
5511 {
5512 switch (load_code) {
5513 case FW_MSG_CODE_DRV_LOAD_COMMON:
5514 bnx2x_init_internal_common(bp);
5515 /* no break */
5517 case FW_MSG_CODE_DRV_LOAD_PORT:
5518 bnx2x_init_internal_port(bp);
5519 /* no break */
5521 case FW_MSG_CODE_DRV_LOAD_FUNCTION:
5522 bnx2x_init_internal_func(bp);
5523 break;
5525 default:
5526 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
5527 break;
5528 }
5529 }
5531 static void bnx2x_nic_init(struct bnx2x *bp, u32 load_code)
5532 {
5533 int i;
5535 for_each_queue(bp, i) {
5536 struct bnx2x_fastpath *fp = &bp->fp[i];
5538 fp->bp = bp;
5539 fp->state = BNX2X_FP_STATE_CLOSED;
5540 fp->index = i;
5541 fp->cl_id = BP_L_ID(bp) + i;
5542 fp->sb_id = fp->cl_id;
5543 /* Suitable Rx and Tx SBs are served by the same client */
5544 if (i >= bp->num_rx_queues)
5545 fp->cl_id -= bp->num_rx_queues;
5546 DP(NETIF_MSG_IFUP,
5547 "queue[%d]: bnx2x_init_sb(%p,%p) cl_id %d sb %d\n",
5548 i, bp, fp->status_blk, fp->cl_id, fp->sb_id);
5549 bnx2x_init_sb(bp, fp->status_blk, fp->status_blk_mapping,
5550 fp->sb_id);
5551 bnx2x_update_fpsb_idx(fp);
5552 }
5554 /* ensure status block indices were read */
5555 rmb();
5558 bnx2x_init_def_sb(bp, bp->def_status_blk, bp->def_status_blk_mapping,
5559 DEF_SB_ID);
5560 bnx2x_update_dsb_idx(bp);
5561 bnx2x_update_coalesce(bp);
5562 bnx2x_init_rx_rings(bp);
5563 bnx2x_init_tx_ring(bp);
5564 bnx2x_init_sp_ring(bp);
5565 bnx2x_init_context(bp);
5566 bnx2x_init_internal(bp, load_code);
5567 bnx2x_init_ind_table(bp);
5568 bnx2x_stats_init(bp);
5570 /* At this point, we are ready for interrupts */
5571 atomic_set(&bp->intr_sem, 0);
5573 /* flush all before enabling interrupts */
5574 mb();
5575 mmiowb();
5577 bnx2x_int_enable(bp);
5579 /* Check for SPIO5 */
5580 bnx2x_attn_int_deasserted0(bp,
5581 REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + BP_PORT(bp)*4) &
5582 AEU_INPUTS_ATTN_BITS_SPIO5);
5583 }
5585 /* end of nic init */
5587 /*
5588 * gzip service functions
5589 */
5591 static int bnx2x_gunzip_init(struct bnx2x *bp)
5592 {
5593 bp->gunzip_buf = pci_alloc_consistent(bp->pdev, FW_BUF_SIZE,
5594 &bp->gunzip_mapping);
5595 if (bp->gunzip_buf == NULL)
5596 goto gunzip_nomem1;
5598 bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL);
5599 if (bp->strm == NULL)
5600 goto gunzip_nomem2;
5602 bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(),
5603 GFP_KERNEL);
5604 if (bp->strm->workspace == NULL)
5605 goto gunzip_nomem3;
5607 return 0;
5609 gunzip_nomem3:
5610 kfree(bp->strm);
5611 bp->strm = NULL;
5613 gunzip_nomem2:
5614 pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
5615 bp->gunzip_mapping);
5616 bp->gunzip_buf = NULL;
5618 gunzip_nomem1:
5619 printk(KERN_ERR PFX "%s: Cannot allocate firmware buffer for"
5620 " un-compression\n", bp->dev->name);
5621 return -ENOMEM;
5622 }
5624 static void bnx2x_gunzip_end(struct bnx2x *bp)
5625 {
5626 kfree(bp->strm->workspace);
5628 kfree(bp->strm);
5629 bp->strm = NULL;
5631 if (bp->gunzip_buf) {
5632 pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
5633 bp->gunzip_mapping);
5634 bp->gunzip_buf = NULL;
5635 }
5636 }
5638 static int bnx2x_gunzip(struct bnx2x *bp, const u8 *zbuf, int len)
5639 {
5640 int n, rc;
5642 /* check gzip header */
5643 if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED)) {
5644 BNX2X_ERR("Bad gzip header\n");
5645 return -EINVAL;
5646 }
5648 n = 10;
5650 #define FNAME 0x8
5652 if (zbuf[3] & FNAME)
5653 while ((zbuf[n++] != 0) && (n < len));
5655 bp->strm->next_in = (typeof(bp->strm->next_in))zbuf + n;
5656 bp->strm->avail_in = len - n;
5657 bp->strm->next_out = bp->gunzip_buf;
5658 bp->strm->avail_out = FW_BUF_SIZE;
5660 rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
5661 if (rc != Z_OK)
5662 return rc;
5664 rc = zlib_inflate(bp->strm, Z_FINISH);
5665 if ((rc != Z_OK) && (rc != Z_STREAM_END))
5666 printk(KERN_ERR PFX "%s: Firmware decompression error: %s\n",
5667 bp->dev->name, bp->strm->msg);
5669 bp->gunzip_outlen = (FW_BUF_SIZE - bp->strm->avail_out);
5670 if (bp->gunzip_outlen & 0x3)
5671 printk(KERN_ERR PFX "%s: Firmware decompression error:"
5672 " gunzip_outlen (%d) not aligned\n",
5673 bp->dev->name, bp->gunzip_outlen);
5674 bp->gunzip_outlen >>= 2;
5676 zlib_inflateEnd(bp->strm);
5678 if (rc == Z_STREAM_END)
5679 return 0;
5681 return rc;
5682 }
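/* The header check above matches the gzip magic (0x1f 0x8b) plus
 * compression method 8 (deflate); FNAME is FLG bit 3, and when it is set
 * the NUL-terminated file name following the 10-byte fixed header is
 * skipped. Passing -MAX_WBITS to zlib_inflateInit2() selects raw deflate
 * since the gzip wrapper has already been consumed by hand.
 */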
5684 /* nic load/unload */
5686 /*
5687 * General service functions
5688 */
5690 /* send a NIG loopback debug packet */
5691 static void bnx2x_lb_pckt(struct bnx2x *bp)
5692 {
5693 u32 wb_write[3];
5695 /* Ethernet source and destination addresses */
5696 wb_write[0] = 0x55555555;
5697 wb_write[1] = 0x55555555;
5698 wb_write[2] = 0x20; /* SOP */
5699 REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
5701 /* NON-IP protocol */
5702 wb_write[0] = 0x09000000;
5703 wb_write[1] = 0x55555555;
5704 wb_write[2] = 0x10; /* EOP, eop_bvalid = 0 */
5705 REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
5706 }
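/* Each REG_WR_DMAE above pushes one packet fragment: two data dwords
 * plus a control dword (0x20 marks SOP, 0x10 EOP), so one call injects a
 * single 0x10-byte packet. The memory self test below counts these bytes
 * through NIG_REG_STAT2_BRB_OCTET (0x10 per packet, 0xb0 for eleven).
 */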
5708 /* some of the internal memories
5709 * are not directly readable from the driver
5710 * to test them we send debug packets
5711 */
5712 static int bnx2x_int_mem_test(struct bnx2x *bp)
5713 {
5714 int factor;
5715 int count, i;
5716 u32 val = 0;
5718 if (CHIP_REV_IS_FPGA(bp))
5719 factor = 120;
5720 else if (CHIP_REV_IS_EMUL(bp))
5721 factor = 200;
5722 else
5723 factor = 1;
5725 DP(NETIF_MSG_HW, "start part1\n");
5727 /* Disable inputs of parser neighbor blocks */
5728 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
5729 REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
5730 REG_WR(bp, CFC_REG_DEBUG0, 0x1);
5731 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
5733 /* Write 0 to parser credits for CFC search request */
5734 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
5736 /* send Ethernet packet */
5737 bnx2x_lb_pckt(bp);
5739 /* TODO do i reset NIG statistic? */
5740 /* Wait until NIG register shows 1 packet of size 0x10 */
5741 count = 1000 * factor;
5742 while (count) {
5744 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5745 val = *bnx2x_sp(bp, wb_data[0]);
5746 if (val == 0x10)
5747 break;
5749 msleep(10);
5750 count--;
5751 }
5752 if (val != 0x10) {
5753 BNX2X_ERR("NIG timeout val = 0x%x\n", val);
5754 return -1;
5755 }
5757 /* Wait until PRS register shows 1 packet */
5758 count = 1000 * factor;
5759 while (count) {
5760 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5761 if (val == 1)
5762 break;
5764 msleep(10);
5765 count--;
5766 }
5767 if (val != 0x1) {
5768 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
5769 return -2;
5770 }
5772 /* Reset and init BRB, PRS */
5773 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
5774 msleep(50);
5775 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
5776 msleep(50);
5777 bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
5778 bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
5780 DP(NETIF_MSG_HW, "part2\n");
5782 /* Disable inputs of parser neighbor blocks */
5783 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
5784 REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
5785 REG_WR(bp, CFC_REG_DEBUG0, 0x1);
5786 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
5788 /* Write 0 to parser credits for CFC search request */
5789 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
5791 /* send 10 Ethernet packets */
5792 for (i = 0; i < 10; i++)
5793 bnx2x_lb_pckt(bp);
5795 /* Wait until NIG register shows 10 + 1
5796 packets of size 11*0x10 = 0xb0 */
5797 count = 1000 * factor;
5798 while (count) {
5800 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5801 val = *bnx2x_sp(bp, wb_data[0]);
5802 if (val == 0xb0)
5803 break;
5805 msleep(10);
5806 count--;
5807 }
5808 if (val != 0xb0) {
5809 BNX2X_ERR("NIG timeout val = 0x%x\n", val);
5810 return -3;
5811 }
5813 /* Wait until PRS register shows 2 packets */
5814 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5815 if (val != 2)
5816 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
5818 /* Write 1 to parser credits for CFC search request */
5819 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x1);
5821 /* Wait until PRS register shows 3 packets */
5822 msleep(10 * factor);
5823 /* Wait until NIG register shows 1 packet of size 0x10 */
5824 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5825 if (val != 3)
5826 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
5828 /* clear NIG EOP FIFO */
5829 for (i = 0; i < 11; i++)
5830 REG_RD(bp, NIG_REG_INGRESS_EOP_LB_FIFO);
5831 val = REG_RD(bp, NIG_REG_INGRESS_EOP_LB_EMPTY);
5832 if (val != 1) {
5833 BNX2X_ERR("clear of NIG failed\n");
5834 return -4;
5835 }
5837 /* Reset and init BRB, PRS, NIG */
5838 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
5839 msleep(50);
5840 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
5841 msleep(50);
5842 bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
5843 bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
5844 #ifndef BCM_ISCSI
5845 /* set NIC mode */
5846 REG_WR(bp, PRS_REG_NIC_MODE, 1);
5847 #endif
5849 /* Enable inputs of parser neighbor blocks */
5850 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x7fffffff);
5851 REG_WR(bp, TCM_REG_PRS_IFEN, 0x1);
5852 REG_WR(bp, CFC_REG_DEBUG0, 0x0);
5853 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x1);
5855 DP(NETIF_MSG_HW, "done\n");
5857 return 0; /* OK */
5858 }
5860 static void enable_blocks_attention(struct bnx2x *bp)
5861 {
5862 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
5863 REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0);
5864 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
5865 REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
5866 REG_WR(bp, QM_REG_QM_INT_MASK, 0);
5867 REG_WR(bp, TM_REG_TM_INT_MASK, 0);
5868 REG_WR(bp, XSDM_REG_XSDM_INT_MASK_0, 0);
5869 REG_WR(bp, XSDM_REG_XSDM_INT_MASK_1, 0);
5870 REG_WR(bp, XCM_REG_XCM_INT_MASK, 0);
5871 /* REG_WR(bp, XSEM_REG_XSEM_INT_MASK_0, 0); */
5872 /* REG_WR(bp, XSEM_REG_XSEM_INT_MASK_1, 0); */
5873 REG_WR(bp, USDM_REG_USDM_INT_MASK_0, 0);
5874 REG_WR(bp, USDM_REG_USDM_INT_MASK_1, 0);
5875 REG_WR(bp, UCM_REG_UCM_INT_MASK, 0);
5876 /* REG_WR(bp, USEM_REG_USEM_INT_MASK_0, 0); */
5877 /* REG_WR(bp, USEM_REG_USEM_INT_MASK_1, 0); */
5878 REG_WR(bp, GRCBASE_UPB + PB_REG_PB_INT_MASK, 0);
5879 REG_WR(bp, CSDM_REG_CSDM_INT_MASK_0, 0);
5880 REG_WR(bp, CSDM_REG_CSDM_INT_MASK_1, 0);
5881 REG_WR(bp, CCM_REG_CCM_INT_MASK, 0);
5882 /* REG_WR(bp, CSEM_REG_CSEM_INT_MASK_0, 0); */
5883 /* REG_WR(bp, CSEM_REG_CSEM_INT_MASK_1, 0); */
5884 if (CHIP_REV_IS_FPGA(bp))
5885 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x580000);
5887 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x480000);
5888 REG_WR(bp, TSDM_REG_TSDM_INT_MASK_0, 0);
5889 REG_WR(bp, TSDM_REG_TSDM_INT_MASK_1, 0);
5890 REG_WR(bp, TCM_REG_TCM_INT_MASK, 0);
5891 /* REG_WR(bp, TSEM_REG_TSEM_INT_MASK_0, 0); */
5892 /* REG_WR(bp, TSEM_REG_TSEM_INT_MASK_1, 0); */
5893 REG_WR(bp, CDU_REG_CDU_INT_MASK, 0);
5894 REG_WR(bp, DMAE_REG_DMAE_INT_MASK, 0);
5895 /* REG_WR(bp, MISC_REG_MISC_INT_MASK, 0); */
5896 REG_WR(bp, PBF_REG_PBF_INT_MASK, 0X18); /* bit 3,4 masked */
5897 }
5900 static void bnx2x_setup_fan_failure_detection(struct bnx2x *bp)
5901 {
5902 int is_required = 0;
5903 u32 val;
5904 int port;
5915 val = SHMEM_RD(bp, dev_info.shared_hw_config.config2) &
5916 SHARED_HW_CFG_FAN_FAILURE_MASK;
5918 if (val == SHARED_HW_CFG_FAN_FAILURE_ENABLED)
5919 is_required = 1;
5921 /*
5922 * The fan failure mechanism is usually related to the PHY type since
5923 * the power consumption of the board is affected by the PHY. Currently,
5924 * fan is required for most designs with SFX7101, BCM8727 and BCM8481.
5925 */
5926 else if (val == SHARED_HW_CFG_FAN_FAILURE_PHY_TYPE)
5927 for (port = PORT_0; port < PORT_MAX; port++) {
5928 u32 phy_type =
5929 SHMEM_RD(bp, dev_info.port_hw_config[port].
5930 external_phy_config) &
5931 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
5932 is_required |=
5933 ((phy_type ==
5934 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101) ||
5935 (phy_type ==
5936 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727) ||
5937 (phy_type ==
5938 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481));
5939 }
5941 DP(NETIF_MSG_HW, "fan detection setting: %d\n", is_required);
5943 if (is_required == 0)
5944 return;
5946 /* Fan failure is indicated by SPIO 5 */
5947 bnx2x_set_spio(bp, MISC_REGISTERS_SPIO_5,
5948 MISC_REGISTERS_SPIO_INPUT_HI_Z);
5950 /* set to active low mode */
5951 val = REG_RD(bp, MISC_REG_SPIO_INT);
5952 val |= ((1 << MISC_REGISTERS_SPIO_5) <<
5953 MISC_REGISTERS_SPIO_INT_OLD_SET_POS);
5954 REG_WR(bp, MISC_REG_SPIO_INT, val);
5956 /* enable interrupt to signal the IGU */
5957 val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN);
5958 val |= (1 << MISC_REGISTERS_SPIO_5);
5959 REG_WR(bp, MISC_REG_SPIO_EVENT_EN, val);
5960 }
5962 static int bnx2x_init_common(struct bnx2x *bp)
5963 {
5964 u32 val, i;
5966 DP(BNX2X_MSG_MCP, "starting common init func %d\n", BP_FUNC(bp));
5968 bnx2x_reset_common(bp);
5969 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0xffffffff);
5970 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, 0xfffc);
5972 bnx2x_init_block(bp, MISC_BLOCK, COMMON_STAGE);
5973 if (CHIP_IS_E1H(bp))
5974 REG_WR(bp, MISC_REG_E1HMF_MODE, IS_E1HMF(bp));
5976 REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x100);
5977 msleep(30);
5978 REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x0);
5980 bnx2x_init_block(bp, PXP_BLOCK, COMMON_STAGE);
5981 if (CHIP_IS_E1(bp)) {
5982 /* enable HW interrupt from PXP on USDM overflow
5983 bit 16 on INT_MASK_0 */
5984 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
5985 }
5987 bnx2x_init_block(bp, PXP2_BLOCK, COMMON_STAGE);
5988 bnx2x_init_pxp(bp);
5990 #ifdef __BIG_ENDIAN
5991 REG_WR(bp, PXP2_REG_RQ_QM_ENDIAN_M, 1);
5992 REG_WR(bp, PXP2_REG_RQ_TM_ENDIAN_M, 1);
5993 REG_WR(bp, PXP2_REG_RQ_SRC_ENDIAN_M, 1);
5994 REG_WR(bp, PXP2_REG_RQ_CDU_ENDIAN_M, 1);
5995 REG_WR(bp, PXP2_REG_RQ_DBG_ENDIAN_M, 1);
5996 /* make sure this value is 0 */
5997 REG_WR(bp, PXP2_REG_RQ_HC_ENDIAN_M, 0);
5999 /* REG_WR(bp, PXP2_REG_RD_PBF_SWAP_MODE, 1); */
6000 REG_WR(bp, PXP2_REG_RD_QM_SWAP_MODE, 1);
6001 REG_WR(bp, PXP2_REG_RD_TM_SWAP_MODE, 1);
6002 REG_WR(bp, PXP2_REG_RD_SRC_SWAP_MODE, 1);
6003 REG_WR(bp, PXP2_REG_RD_CDURD_SWAP_MODE, 1);
6004 #endif
6006 REG_WR(bp, PXP2_REG_RQ_CDU_P_SIZE, 2);
6007 #ifdef BCM_ISCSI
6008 REG_WR(bp, PXP2_REG_RQ_TM_P_SIZE, 5);
6009 REG_WR(bp, PXP2_REG_RQ_QM_P_SIZE, 5);
6010 REG_WR(bp, PXP2_REG_RQ_SRC_P_SIZE, 5);
6011 #endif
6013 if (CHIP_REV_IS_FPGA(bp) && CHIP_IS_E1H(bp))
6014 REG_WR(bp, PXP2_REG_PGL_TAGS_LIMIT, 0x1);
6016 /* let the HW do it's magic ... */
6017 msleep(100);
6018 /* finish PXP init */
6019 val = REG_RD(bp, PXP2_REG_RQ_CFG_DONE);
6020 if (val != 1) {
6021 BNX2X_ERR("PXP2 CFG failed\n");
6022 return -EBUSY;
6023 }
6024 val = REG_RD(bp, PXP2_REG_RD_INIT_DONE);
6025 if (val != 1) {
6026 BNX2X_ERR("PXP2 RD_INIT failed\n");
6027 return -EBUSY;
6028 }
6030 REG_WR(bp, PXP2_REG_RQ_DISABLE_INPUTS, 0);
6031 REG_WR(bp, PXP2_REG_RD_DISABLE_INPUTS, 0);
6033 bnx2x_init_block(bp, DMAE_BLOCK, COMMON_STAGE);
6035 /* clean the DMAE memory */
6036 bp->dmae_ready = 0;
6037 bnx2x_init_fill(bp, TSEM_REG_PRAM, 0, 8);
6039 bnx2x_init_block(bp, TCM_BLOCK, COMMON_STAGE);
6040 bnx2x_init_block(bp, UCM_BLOCK, COMMON_STAGE);
6041 bnx2x_init_block(bp, CCM_BLOCK, COMMON_STAGE);
6042 bnx2x_init_block(bp, XCM_BLOCK, COMMON_STAGE);
6044 bnx2x_read_dmae(bp, XSEM_REG_PASSIVE_BUFFER, 3);
6045 bnx2x_read_dmae(bp, CSEM_REG_PASSIVE_BUFFER, 3);
6046 bnx2x_read_dmae(bp, TSEM_REG_PASSIVE_BUFFER, 3);
6047 bnx2x_read_dmae(bp, USEM_REG_PASSIVE_BUFFER, 3);
6049 bnx2x_init_block(bp, QM_BLOCK, COMMON_STAGE);
6050 /* soft reset pulse */
6051 REG_WR(bp, QM_REG_SOFT_RESET, 1);
6052 REG_WR(bp, QM_REG_SOFT_RESET, 0);
6054 #ifdef BCM_ISCSI
6055 bnx2x_init_block(bp, TIMERS_BLOCK, COMMON_STAGE);
6056 #endif
6058 bnx2x_init_block(bp, DQ_BLOCK, COMMON_STAGE);
6059 REG_WR(bp, DORQ_REG_DPM_CID_OFST, BCM_PAGE_SHIFT);
6060 if (!CHIP_REV_IS_SLOW(bp)) {
6061 /* enable hw interrupt from doorbell Q */
6062 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
6063 }
6065 bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
6066 bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
6067 REG_WR(bp, PRS_REG_A_PRSU_20, 0xf);
6068 /* set NIC mode */
6069 REG_WR(bp, PRS_REG_NIC_MODE, 1);
6070 if (CHIP_IS_E1H(bp))
6071 REG_WR(bp, PRS_REG_E1HOV_MODE, IS_E1HMF(bp));
6073 bnx2x_init_block(bp, TSDM_BLOCK, COMMON_STAGE);
6074 bnx2x_init_block(bp, CSDM_BLOCK, COMMON_STAGE);
6075 bnx2x_init_block(bp, USDM_BLOCK, COMMON_STAGE);
6076 bnx2x_init_block(bp, XSDM_BLOCK, COMMON_STAGE);
6078 bnx2x_init_fill(bp, TSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
6079 bnx2x_init_fill(bp, USEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
6080 bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
6081 bnx2x_init_fill(bp, XSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
6083 bnx2x_init_block(bp, TSEM_BLOCK, COMMON_STAGE);
6084 bnx2x_init_block(bp, USEM_BLOCK, COMMON_STAGE);
6085 bnx2x_init_block(bp, CSEM_BLOCK, COMMON_STAGE);
6086 bnx2x_init_block(bp, XSEM_BLOCK, COMMON_STAGE);
6088 /* sync semi rtc */
6089 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
6090 0x80000000);
6091 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
6092 0x80000000);
6094 bnx2x_init_block(bp, UPB_BLOCK, COMMON_STAGE);
6095 bnx2x_init_block(bp, XPB_BLOCK, COMMON_STAGE);
6096 bnx2x_init_block(bp, PBF_BLOCK, COMMON_STAGE);
6098 REG_WR(bp, SRC_REG_SOFT_RST, 1);
6099 for (i = SRC_REG_KEYRSS0_0; i <= SRC_REG_KEYRSS1_9; i += 4) {
6100 REG_WR(bp, i, 0xc0cac01a);
6101 /* TODO: replace with something meaningful */
6102 }
6103 bnx2x_init_block(bp, SRCH_BLOCK, COMMON_STAGE);
6104 REG_WR(bp, SRC_REG_SOFT_RST, 0);
6106 if (sizeof(union cdu_context) != 1024)
6107 /* we currently assume that a context is 1024 bytes */
6108 printk(KERN_ALERT PFX "please adjust the size of"
6109 " cdu_context(%ld)\n", (long)sizeof(union cdu_context));
6111 bnx2x_init_block(bp, CDU_BLOCK, COMMON_STAGE);
6112 val = (4 << 24) + (0 << 12) + 1024;
6113 REG_WR(bp, CDU_REG_CDU_GLOBAL_PARAMS, val);
6115 bnx2x_init_block(bp, CFC_BLOCK, COMMON_STAGE);
6116 REG_WR(bp, CFC_REG_INIT_REG, 0x7FF);
6117 /* enable context validation interrupt from CFC */
6118 REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
6120 /* set the thresholds to prevent CFC/CDU race */
6121 REG_WR(bp, CFC_REG_DEBUG0, 0x20020000);
6123 bnx2x_init_block(bp, HC_BLOCK, COMMON_STAGE);
6124 bnx2x_init_block(bp, MISC_AEU_BLOCK, COMMON_STAGE);
6126 bnx2x_init_block(bp, PXPCS_BLOCK, COMMON_STAGE);
6127 /* Reset PCIE errors for debug */
6128 REG_WR(bp, 0x2814, 0xffffffff);
6129 REG_WR(bp, 0x3820, 0xffffffff);
6131 bnx2x_init_block(bp, EMAC0_BLOCK, COMMON_STAGE);
6132 bnx2x_init_block(bp, EMAC1_BLOCK, COMMON_STAGE);
6133 bnx2x_init_block(bp, DBU_BLOCK, COMMON_STAGE);
6134 bnx2x_init_block(bp, DBG_BLOCK, COMMON_STAGE);
6136 bnx2x_init_block(bp, NIG_BLOCK, COMMON_STAGE);
6137 if (CHIP_IS_E1H(bp)) {
6138 REG_WR(bp, NIG_REG_LLH_MF_MODE, IS_E1HMF(bp));
6139 REG_WR(bp, NIG_REG_LLH_E1HOV_MODE, IS_E1HMF(bp));
6140 }
6142 if (CHIP_REV_IS_SLOW(bp))
6143 msleep(200);
6145 /* finish CFC init */
6146 val = reg_poll(bp, CFC_REG_LL_INIT_DONE, 1, 100, 10);
6147 if (val != 1) {
6148 BNX2X_ERR("CFC LL_INIT failed\n");
6149 return -EBUSY;
6150 }
6151 val = reg_poll(bp, CFC_REG_AC_INIT_DONE, 1, 100, 10);
6152 if (val != 1) {
6153 BNX2X_ERR("CFC AC_INIT failed\n");
6154 return -EBUSY;
6155 }
6156 val = reg_poll(bp, CFC_REG_CAM_INIT_DONE, 1, 100, 10);
6157 if (val != 1) {
6158 BNX2X_ERR("CFC CAM_INIT failed\n");
6159 return -EBUSY;
6160 }
6161 REG_WR(bp, CFC_REG_DEBUG0, 0);
6163 /* read NIG statistic
6164 to see if this is our first up since powerup */
6165 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
6166 val = *bnx2x_sp(bp, wb_data[0]);
6168 /* do internal memory self test */
6169 if ((CHIP_IS_E1(bp)) && (val == 0) && bnx2x_int_mem_test(bp)) {
6170 BNX2X_ERR("internal mem self test failed\n");
6171 return -EBUSY;
6172 }
6174 switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
6175 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
6176 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
6177 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
6178 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
6179 bp->port.need_hw_lock = 1;
6180 break;
6182 default:
6183 break;
6184 }
6186 bnx2x_setup_fan_failure_detection(bp);
6188 /* clear PXP2 attentions */
6189 REG_RD(bp, PXP2_REG_PXP2_INT_STS_CLR_0);
6191 enable_blocks_attention(bp);
6193 if (!BP_NOMCP(bp)) {
6194 bnx2x_acquire_phy_lock(bp);
6195 bnx2x_common_init_phy(bp, bp->common.shmem_base);
6196 bnx2x_release_phy_lock(bp);
6197 } else
6198 BNX2X_ERR("Bootcode is missing - can not initialize link\n");
6200 return 0;
6201 }
6203 static int bnx2x_init_port(struct bnx2x *bp)
6204 {
6205 int port = BP_PORT(bp);
6206 int init_stage = port ? PORT1_STAGE : PORT0_STAGE;
6207 u32 low, high;
6208 u32 val;
6210 DP(BNX2X_MSG_MCP, "starting port init port %x\n", port);
6212 REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
6214 bnx2x_init_block(bp, PXP_BLOCK, init_stage);
6215 bnx2x_init_block(bp, PXP2_BLOCK, init_stage);
6217 bnx2x_init_block(bp, TCM_BLOCK, init_stage);
6218 bnx2x_init_block(bp, UCM_BLOCK, init_stage);
6219 bnx2x_init_block(bp, CCM_BLOCK, init_stage);
6220 #ifdef BCM_ISCSI
6221 /* Port0 1
6222 * Port1 385 */
6223 i++;
6224 wb_write[0] = ONCHIP_ADDR1(bp->timers_mapping);
6225 wb_write[1] = ONCHIP_ADDR2(bp->timers_mapping);
6226 REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
6227 REG_WR(bp, PXP2_REG_PSWRQ_TM0_L2P + func*4, PXP_ONE_ILT(i));
6229 /* Port0 2
6230 * Port1 386 */
6231 i++;
6232 wb_write[0] = ONCHIP_ADDR1(bp->qm_mapping);
6233 wb_write[1] = ONCHIP_ADDR2(bp->qm_mapping);
6234 REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
6235 REG_WR(bp, PXP2_REG_PSWRQ_QM0_L2P + func*4, PXP_ONE_ILT(i));
6237 /* Port0 3
6238 * Port1 387 */
6239 i++;
6240 wb_write[0] = ONCHIP_ADDR1(bp->t1_mapping);
6241 wb_write[1] = ONCHIP_ADDR2(bp->t1_mapping);
6242 REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
6243 REG_WR(bp, PXP2_REG_PSWRQ_SRC0_L2P + func*4, PXP_ONE_ILT(i));
6244 #endif
6245 bnx2x_init_block(bp, XCM_BLOCK, init_stage);
6247 #ifdef BCM_ISCSI
6248 REG_WR(bp, TM_REG_LIN0_SCAN_TIME + func*4, 1024/64*20);
6249 REG_WR(bp, TM_REG_LIN0_MAX_ACTIVE_CID + func*4, 31);
6251 bnx2x_init_block(bp, TIMERS_BLOCK, init_stage);
6252 #endif
6253 bnx2x_init_block(bp, DQ_BLOCK, init_stage);
6255 bnx2x_init_block(bp, BRB1_BLOCK, init_stage);
6256 if (CHIP_REV_IS_SLOW(bp) && !CHIP_IS_E1H(bp)) {
6257 /* no pause for emulation and FPGA */
6262 low = ((bp->flags & ONE_PORT_FLAG) ? 160 : 246);
6263 else if (bp->dev->mtu > 4096) {
6264 if (bp->flags & ONE_PORT_FLAG)
6268 /* (24*1024 + val*4)/256 */
6269 low = 96 + (val/64) + ((val % 64) ? 1 : 0);
6272 low = ((bp->flags & ONE_PORT_FLAG) ? 80 : 160);
6273 high = low + 56; /* 14*1024/256 */
6275 REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_0 + port*4, low);
6276 REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_0 + port*4, high);
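	/* e.g. with an mtu of 9000 on a two-port device:
	 * low = 96 + 9000/64 + 1 = 237 blocks of 256 bytes, which is
	 * (24*1024 + 9000*4)/256 rounded up, and high = 237 + 56 = 293
	 * blocks (the extra 56 blocks are 14KB of headroom) */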

	bnx2x_init_block(bp, PRS_BLOCK, init_stage);

	bnx2x_init_block(bp, TSDM_BLOCK, init_stage);
	bnx2x_init_block(bp, CSDM_BLOCK, init_stage);
	bnx2x_init_block(bp, USDM_BLOCK, init_stage);
	bnx2x_init_block(bp, XSDM_BLOCK, init_stage);

	bnx2x_init_block(bp, TSEM_BLOCK, init_stage);
	bnx2x_init_block(bp, USEM_BLOCK, init_stage);
	bnx2x_init_block(bp, CSEM_BLOCK, init_stage);
	bnx2x_init_block(bp, XSEM_BLOCK, init_stage);

	bnx2x_init_block(bp, UPB_BLOCK, init_stage);
	bnx2x_init_block(bp, XPB_BLOCK, init_stage);

	bnx2x_init_block(bp, PBF_BLOCK, init_stage);

	/* configure PBF to work without PAUSE mtu 9000 */
	REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 0);

	/* update threshold */
	REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, (9040/16));
	/* update init credit */
	REG_WR(bp, PBF_REG_P0_INIT_CRD + port*4, (9040/16) + 553 - 22);

	/* probe changes */
	REG_WR(bp, PBF_REG_INIT_P0 + port*4, 1);
	msleep(5);
	REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0);
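	/* 9040 = 9000 (mtu) + 40 bytes of overhead; the PBF counts in
	 * 16-byte units, so the arbiter threshold is 9040/16 = 565 units
	 * and the initial credit is 565 + 553 - 22 = 1096 units */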

#ifdef BCM_ISCSI
	/* tell the searcher where the T2 table is */
	REG_WR(bp, SRC_REG_COUNTFREE0 + func*4, 16*1024/64);

	wb_write[0] = U64_LO(bp->t2_mapping);
	wb_write[1] = U64_HI(bp->t2_mapping);
	REG_WR_DMAE(bp, SRC_REG_FIRSTFREE0 + func*4, wb_write, 2);
	wb_write[0] = U64_LO((u64)bp->t2_mapping + 16*1024 - 64);
	wb_write[1] = U64_HI((u64)bp->t2_mapping + 16*1024 - 64);
	REG_WR_DMAE(bp, SRC_REG_LASTFREE0 + func*4, wb_write, 2);

	REG_WR(bp, SRC_REG_NUMBER_HASH_BITS0 + func*4, 10);
#endif
	bnx2x_init_block(bp, CDU_BLOCK, init_stage);
	bnx2x_init_block(bp, CFC_BLOCK, init_stage);

	if (CHIP_IS_E1(bp)) {
		REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
		REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
	}
	bnx2x_init_block(bp, HC_BLOCK, init_stage);

	bnx2x_init_block(bp, MISC_AEU_BLOCK, init_stage);
	/* init aeu_mask_attn_func_0/1:
	 *  - SF mode: bits 3-7 are masked. only bits 0-2 are in use
	 *  - MF mode: bit 3 is masked. bits 0-2 are in use as in SF
	 *             bits 4-7 are used for "per vn group attention" */
	REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4,
	       (IS_E1HMF(bp) ? 0xF7 : 0x7));
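	/* e.g. 0x7 is 0000_0111b, leaving only attention bits 0-2 open
	 * (SF), while 0xF7 is 1111_0111b, additionally opening bits 4-7
	 * for the per-vn group attentions while keeping bit 3 masked (MF) */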

	bnx2x_init_block(bp, PXPCS_BLOCK, init_stage);
	bnx2x_init_block(bp, EMAC0_BLOCK, init_stage);
	bnx2x_init_block(bp, EMAC1_BLOCK, init_stage);
	bnx2x_init_block(bp, DBU_BLOCK, init_stage);
	bnx2x_init_block(bp, DBG_BLOCK, init_stage);

	bnx2x_init_block(bp, NIG_BLOCK, init_stage);

	REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1);

	if (CHIP_IS_E1H(bp)) {
		/* 0x2 disable e1hov, 0x1 enable */
		REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK_MF + port*4,
		       (IS_E1HMF(bp) ? 0x1 : 0x2));

		/* support pause requests from USDM, TSDM and BRB */
		REG_WR(bp, NIG_REG_LLFC_EGRESS_SRC_ENABLE_0 + port*4, 0x7);

		{
			REG_WR(bp, NIG_REG_LLFC_ENABLE_0 + port*4, 0);
			REG_WR(bp, NIG_REG_LLFC_OUT_EN_0 + port*4, 0);
			REG_WR(bp, NIG_REG_PAUSE_ENABLE_0 + port*4, 1);
		}
	}

	bnx2x_init_block(bp, MCP_BLOCK, init_stage);
	bnx2x_init_block(bp, DMAE_BLOCK, init_stage);

	switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
		{
		u32 swap_val, swap_override, aeu_gpio_mask, offset;

		bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_3,
			       MISC_REGISTERS_GPIO_INPUT_HI_Z, port);

		/* The GPIO should be swapped if the swap register is
		   set and active */
		swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
		swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);

		/* Select function upon port-swap configuration */
		if (port == 0) {
			offset = MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0;
			aeu_gpio_mask = (swap_val && swap_override) ?
				AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1 :
				AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0;
		} else {
			offset = MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0;
			aeu_gpio_mask = (swap_val && swap_override) ?
				AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 :
				AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1;
		}
		val = REG_RD(bp, offset);
		/* add GPIO3 to group */
		val |= aeu_gpio_mask;
		REG_WR(bp, offset, val);
		}
		break;

	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
		/* add SPIO 5 to group 0 */
		{
		u32 reg_addr = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
				       MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
		val = REG_RD(bp, reg_addr);
		val |= AEU_INPUTS_ATTN_BITS_SPIO5;
		REG_WR(bp, reg_addr, val);
		}
		break;

	default:
		break;
	}

	bnx2x__link_reset(bp);

	return 0;
}

#define ILT_PER_FUNC		(768/2)
#define FUNC_ILT_BASE(func)	(func * ILT_PER_FUNC)
/* the phys address is shifted right 12 bits and has an added
   1=valid bit added to the 53rd bit
   then since this is a wide register(TM)
   we split it into two 32 bit writes
 */
#define ONCHIP_ADDR1(x)		((u32)(((u64)x >> 12) & 0xFFFFFFFF))
#define ONCHIP_ADDR2(x)		((u32)((1 << 20) | ((u64)x >> 44)))
#define PXP_ONE_ILT(x)		(((x) << 10) | x)
#define PXP_ILT_RANGE(f, l)	(((l) << 10) | f)
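
/* Illustration of the split, assuming a 64-bit DMA address of
 * 0x0000001234567000: ONCHIP_ADDR1() yields 0x01234567 (the address
 * shifted right by 12, low 32 bits) and ONCHIP_ADDR2() yields 0x00100000
 * (the valid bit at bit 20 of the high word, i.e. bit 52 of the combined
 * value; address >> 44 contributes 0 here).  Likewise PXP_ONE_ILT(5)
 * yields (5 << 10) | 5 = 0x1405, an ILT range whose first and last
 * lines are both 5.
 */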

/* set number of CNIC connections to 0 */
#define CNIC_ILT_LINES		0

static void bnx2x_ilt_wr(struct bnx2x *bp, u32 index, dma_addr_t addr)
{
	int reg;

	if (CHIP_IS_E1H(bp))
		reg = PXP2_REG_RQ_ONCHIP_AT_B0 + index*8;
	else /* E1 */
		reg = PXP2_REG_RQ_ONCHIP_AT + index*8;

	bnx2x_wb_wr(bp, reg, ONCHIP_ADDR1(addr), ONCHIP_ADDR2(addr));
}

static int bnx2x_init_func(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int func = BP_FUNC(bp);
	u32 addr, val;
	int i;

	DP(BNX2X_MSG_MCP, "starting func init  func %x\n", func);

	/* set MSI reconfigure capability */
	addr = (port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0);
	val = REG_RD(bp, addr);
	val |= HC_CONFIG_0_REG_MSI_ATTN_EN_0;
	REG_WR(bp, addr, val);

	i = FUNC_ILT_BASE(func);

	bnx2x_ilt_wr(bp, i, bnx2x_sp_mapping(bp, context));
	if (CHIP_IS_E1H(bp)) {
		REG_WR(bp, PXP2_REG_RQ_CDU_FIRST_ILT, i);
		REG_WR(bp, PXP2_REG_RQ_CDU_LAST_ILT, i + CNIC_ILT_LINES);
	} else /* E1 */
		REG_WR(bp, PXP2_REG_PSWRQ_CDU0_L2P + func*4,
		       PXP_ILT_RANGE(i, i + CNIC_ILT_LINES));

	if (CHIP_IS_E1H(bp)) {
		for (i = 0; i < 9; i++)
			bnx2x_init_block(bp,
					 cm_blocks[i], FUNC0_STAGE + func);

		REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
		REG_WR(bp, NIG_REG_LLH0_FUNC_VLAN_ID + port*8, bp->e1hov);
	}

	/* HC init per function */
	if (CHIP_IS_E1H(bp)) {
		REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);

		REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
		REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
	}
	bnx2x_init_block(bp, HC_BLOCK, FUNC0_STAGE + func);

	/* Reset PCIE errors for debug */
	REG_WR(bp, 0x2114, 0xffffffff);
	REG_WR(bp, 0x2120, 0xffffffff);

	return 0;
}

static int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
{
	int i, rc = 0;

	DP(BNX2X_MSG_MCP, "function %d  load_code %x\n",
	   BP_FUNC(bp), load_code);

	bp->dmae_ready = 0;
	mutex_init(&bp->dmae_mutex);
	bnx2x_gunzip_init(bp);

	switch (load_code) {
	case FW_MSG_CODE_DRV_LOAD_COMMON:
		rc = bnx2x_init_common(bp);
		if (rc)
			goto init_hw_err;
		/* no break */

	case FW_MSG_CODE_DRV_LOAD_PORT:
		bp->dmae_ready = 1;
		rc = bnx2x_init_port(bp);
		if (rc)
			goto init_hw_err;
		/* no break */

	case FW_MSG_CODE_DRV_LOAD_FUNCTION:
		bp->dmae_ready = 1;
		rc = bnx2x_init_func(bp);
		if (rc)
			goto init_hw_err;
		break;

	default:
		BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
		break;
	}

	if (!BP_NOMCP(bp)) {
		int func = BP_FUNC(bp);

		bp->fw_drv_pulse_wr_seq =
				(SHMEM_RD(bp, func_mb[func].drv_pulse_mb) &
				 DRV_PULSE_SEQ_MASK);
		DP(BNX2X_MSG_MCP, "drv_pulse 0x%x\n", bp->fw_drv_pulse_wr_seq);
	}

	/* this needs to be done before gunzip end */
	bnx2x_zero_def_sb(bp);
	for_each_queue(bp, i)
		bnx2x_zero_sb(bp, BP_L_ID(bp) + i);

init_hw_err:
	bnx2x_gunzip_end(bp);

	return rc;
}

static void bnx2x_free_mem(struct bnx2x *bp)
{

#define BNX2X_PCI_FREE(x, y, size) \
	do { \
		if (x) { \
			pci_free_consistent(bp->pdev, size, x, y); \
			x = NULL; \
			y = 0; \
		} \
	} while (0)

#define BNX2X_FREE(x) \
	do { \
		if (x) { \
			vfree(x); \
			x = NULL; \
		} \
	} while (0)

	int i;

	/* fastpath */
	/* Common */
	for_each_queue(bp, i) {

		/* status blocks */
		BNX2X_PCI_FREE(bnx2x_fp(bp, i, status_blk),
			       bnx2x_fp(bp, i, status_blk_mapping),
			       sizeof(struct host_status_block));
	}
	/* Rx */
	for_each_rx_queue(bp, i) {

		/* fastpath rx rings: rx_buf rx_desc rx_comp */
		BNX2X_FREE(bnx2x_fp(bp, i, rx_buf_ring));
		BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_desc_ring),
			       bnx2x_fp(bp, i, rx_desc_mapping),
			       sizeof(struct eth_rx_bd) * NUM_RX_BD);

		BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_comp_ring),
			       bnx2x_fp(bp, i, rx_comp_mapping),
			       sizeof(struct eth_fast_path_rx_cqe) *
			       NUM_RCQ_BD);

		/* SGE ring */
		BNX2X_FREE(bnx2x_fp(bp, i, rx_page_ring));
		BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_sge_ring),
			       bnx2x_fp(bp, i, rx_sge_mapping),
			       BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
	}
	/* Tx */
	for_each_tx_queue(bp, i) {

		/* fastpath tx rings: tx_buf tx_desc */
		BNX2X_FREE(bnx2x_fp(bp, i, tx_buf_ring));
		BNX2X_PCI_FREE(bnx2x_fp(bp, i, tx_desc_ring),
			       bnx2x_fp(bp, i, tx_desc_mapping),
			       sizeof(union eth_tx_bd_types) * NUM_TX_BD);
	}
	/* end of fastpath */

	BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping,
		       sizeof(struct host_def_status_block));

	BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping,
		       sizeof(struct bnx2x_slowpath));

#ifdef BCM_ISCSI
	BNX2X_PCI_FREE(bp->t1, bp->t1_mapping, 64*1024);
	BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, 16*1024);
	BNX2X_PCI_FREE(bp->timers, bp->timers_mapping, 8*1024);
	BNX2X_PCI_FREE(bp->qm, bp->qm_mapping, 128*1024);
#endif
	BNX2X_PCI_FREE(bp->spq, bp->spq_mapping, BCM_PAGE_SIZE);

#undef BNX2X_PCI_FREE
#undef BNX2X_FREE
}

static int bnx2x_alloc_mem(struct bnx2x *bp)
{

#define BNX2X_PCI_ALLOC(x, y, size) \
	do { \
		x = pci_alloc_consistent(bp->pdev, size, y); \
		if (x == NULL) \
			goto alloc_mem_err; \
		memset(x, 0, size); \
	} while (0)

#define BNX2X_ALLOC(x, size) \
	do { \
		x = vmalloc(size); \
		if (x == NULL) \
			goto alloc_mem_err; \
		memset(x, 0, size); \
	} while (0)

	int i;

	/* fastpath */
	/* Common */
	for_each_queue(bp, i) {
		bnx2x_fp(bp, i, bp) = bp;

		/* status blocks */
		BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, status_blk),
				&bnx2x_fp(bp, i, status_blk_mapping),
				sizeof(struct host_status_block));
	}
	/* Rx */
	for_each_rx_queue(bp, i) {

		/* fastpath rx rings: rx_buf rx_desc rx_comp */
		BNX2X_ALLOC(bnx2x_fp(bp, i, rx_buf_ring),
			    sizeof(struct sw_rx_bd) * NUM_RX_BD);
		BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_desc_ring),
				&bnx2x_fp(bp, i, rx_desc_mapping),
				sizeof(struct eth_rx_bd) * NUM_RX_BD);

		BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_comp_ring),
				&bnx2x_fp(bp, i, rx_comp_mapping),
				sizeof(struct eth_fast_path_rx_cqe) *
				NUM_RCQ_BD);

		/* SGE ring */
		BNX2X_ALLOC(bnx2x_fp(bp, i, rx_page_ring),
			    sizeof(struct sw_rx_page) * NUM_RX_SGE);
		BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_sge_ring),
				&bnx2x_fp(bp, i, rx_sge_mapping),
				BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
	}
	/* Tx */
	for_each_tx_queue(bp, i) {

		/* fastpath tx rings: tx_buf tx_desc */
		BNX2X_ALLOC(bnx2x_fp(bp, i, tx_buf_ring),
			    sizeof(struct sw_tx_bd) * NUM_TX_BD);
		BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, tx_desc_ring),
				&bnx2x_fp(bp, i, tx_desc_mapping),
				sizeof(union eth_tx_bd_types) * NUM_TX_BD);
	}
	/* end of fastpath */

	BNX2X_PCI_ALLOC(bp->def_status_blk, &bp->def_status_blk_mapping,
			sizeof(struct host_def_status_block));

	BNX2X_PCI_ALLOC(bp->slowpath, &bp->slowpath_mapping,
			sizeof(struct bnx2x_slowpath));

#ifdef BCM_ISCSI
	BNX2X_PCI_ALLOC(bp->t1, &bp->t1_mapping, 64*1024);

	/* Initialize T1 */
	for (i = 0; i < 64*1024; i += 64) {
		*(u64 *)((char *)bp->t1 + i + 56) = 0x0UL;
		*(u64 *)((char *)bp->t1 + i + 3) = 0x0UL;
	}

	/* allocate searcher T2 table
	   we allocate 1/4 of alloc num for T2
	   (which is not entered into the ILT) */
	BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, 16*1024);

	/* Initialize T2 */
	for (i = 0; i < 16*1024; i += 64)
		*(u64 *)((char *)bp->t2 + i + 56) = bp->t2_mapping + i + 64;

	/* now fixup the last line in the block to point to the next block */
	*(u64 *)((char *)bp->t2 + 1024*16 - 8) = bp->t2_mapping;
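	/* The T2 table now forms a linked free list: 16KB/64B = 256
	 * entries, each entry holding the DMA address of the next one in
	 * its last 8 bytes, with the write above wrapping the final entry
	 * back to the base of the block */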

	/* Timer block array (MAX_CONN*8) phys uncached for now 1024 conns */
	BNX2X_PCI_ALLOC(bp->timers, &bp->timers_mapping, 8*1024);

	/* QM queues (128*MAX_CONN) */
	BNX2X_PCI_ALLOC(bp->qm, &bp->qm_mapping, 128*1024);
#endif

	/* Slow path ring */
	BNX2X_PCI_ALLOC(bp->spq, &bp->spq_mapping, BCM_PAGE_SIZE);

	return 0;

alloc_mem_err:
	bnx2x_free_mem(bp);
	return -ENOMEM;

#undef BNX2X_PCI_ALLOC
#undef BNX2X_ALLOC
}

static void bnx2x_free_tx_skbs(struct bnx2x *bp)
{
	int i;

	for_each_tx_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		u16 bd_cons = fp->tx_bd_cons;
		u16 sw_prod = fp->tx_pkt_prod;
		u16 sw_cons = fp->tx_pkt_cons;

		while (sw_cons != sw_prod) {
			bd_cons = bnx2x_free_tx_pkt(bp, fp, TX_BD(sw_cons));
			sw_cons++;
		}
	}
}

static void bnx2x_free_rx_skbs(struct bnx2x *bp)
{
	int i, j;

	for_each_rx_queue(bp, j) {
		struct bnx2x_fastpath *fp = &bp->fp[j];

		for (i = 0; i < NUM_RX_BD; i++) {
			struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
			struct sk_buff *skb = rx_buf->skb;

			if (skb == NULL)
				continue;

			pci_unmap_single(bp->pdev,
					 pci_unmap_addr(rx_buf, mapping),
					 bp->rx_buf_size, PCI_DMA_FROMDEVICE);

			rx_buf->skb = NULL;
			dev_kfree_skb(skb);
		}
		if (!fp->disable_tpa)
			bnx2x_free_tpa_pool(bp, fp, CHIP_IS_E1(bp) ?
					    ETH_MAX_AGGREGATION_QUEUES_E1 :
					    ETH_MAX_AGGREGATION_QUEUES_E1H);
	}
}

static void bnx2x_free_skbs(struct bnx2x *bp)
{
	bnx2x_free_tx_skbs(bp);
	bnx2x_free_rx_skbs(bp);
}

static void bnx2x_free_msix_irqs(struct bnx2x *bp)
{
	int i, offset = 1;

	free_irq(bp->msix_table[0].vector, bp->dev);
	DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
	   bp->msix_table[0].vector);

	for_each_queue(bp, i) {
		DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq  "
		   "state %x\n", i, bp->msix_table[i + offset].vector,
		   bnx2x_fp(bp, i, state));

		free_irq(bp->msix_table[i + offset].vector, &bp->fp[i]);
	}
}

static void bnx2x_free_irq(struct bnx2x *bp)
{
	if (bp->flags & USING_MSIX_FLAG) {
		bnx2x_free_msix_irqs(bp);
		pci_disable_msix(bp->pdev);
		bp->flags &= ~USING_MSIX_FLAG;

	} else if (bp->flags & USING_MSI_FLAG) {
		free_irq(bp->pdev->irq, bp->dev);
		pci_disable_msi(bp->pdev);
		bp->flags &= ~USING_MSI_FLAG;

	} else
		free_irq(bp->pdev->irq, bp->dev);
}

static int bnx2x_enable_msix(struct bnx2x *bp)
{
	int i, rc, offset = 1;
	int igu_vec = 0;

	bp->msix_table[0].entry = igu_vec;
	DP(NETIF_MSG_IFUP, "msix_table[0].entry = %d (slowpath)\n", igu_vec);

	for_each_queue(bp, i) {
		igu_vec = BP_L_ID(bp) + offset + i;
		bp->msix_table[i + offset].entry = igu_vec;
		DP(NETIF_MSG_IFUP, "msix_table[%d].entry = %d "
		   "(fastpath #%u)\n", i + offset, igu_vec, i);
	}

	rc = pci_enable_msix(bp->pdev, &bp->msix_table[0],
			     BNX2X_NUM_QUEUES(bp) + offset);
	if (rc) {
		DP(NETIF_MSG_IFUP, "MSI-X is not attainable  rc %d\n", rc);
		return rc;
	}

	bp->flags |= USING_MSIX_FLAG;

	return 0;
}

static int bnx2x_req_msix_irqs(struct bnx2x *bp)
{
	int i, rc, offset = 1;

	rc = request_irq(bp->msix_table[0].vector, bnx2x_msix_sp_int, 0,
			 bp->dev->name, bp->dev);
	if (rc) {
		BNX2X_ERR("request sp irq failed\n");
		return -EBUSY;
	}

	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		if (i < bp->num_rx_queues)
			sprintf(fp->name, "%s-rx-%d", bp->dev->name, i);
		else
			sprintf(fp->name, "%s-tx-%d",
				bp->dev->name, i - bp->num_rx_queues);

		rc = request_irq(bp->msix_table[i + offset].vector,
				 bnx2x_msix_fp_int, 0, fp->name, fp);
		if (rc) {
			BNX2X_ERR("request fp #%d irq failed  rc %d\n", i, rc);
			bnx2x_free_msix_irqs(bp);
			return -EBUSY;
		}

		fp->state = BNX2X_FP_STATE_IRQ;
	}

	i = BNX2X_NUM_QUEUES(bp);
	printk(KERN_INFO PFX "%s: using MSI-X  IRQs: sp %d  fp[%d] %d"
	       " ... fp[%d] %d\n",
	       bp->dev->name, bp->msix_table[0].vector,
	       0, bp->msix_table[offset].vector,
	       i - 1, bp->msix_table[offset + i - 1].vector);

	return 0;
}

static int bnx2x_enable_msi(struct bnx2x *bp)
{
	int rc;

	rc = pci_enable_msi(bp->pdev);
	if (rc) {
		DP(NETIF_MSG_IFUP, "MSI is not attainable\n");
		return -1;
	}
	bp->flags |= USING_MSI_FLAG;

	return 0;
}

static int bnx2x_req_irq(struct bnx2x *bp)
{
	unsigned long flags;
	int rc;

	if (bp->flags & USING_MSI_FLAG)
		flags = 0;
	else
		flags = IRQF_SHARED;

	rc = request_irq(bp->pdev->irq, bnx2x_interrupt, flags,
			 bp->dev->name, bp->dev);
	if (!rc)
		bnx2x_fp(bp, 0, state) = BNX2X_FP_STATE_IRQ;

	return rc;
}

static void bnx2x_napi_enable(struct bnx2x *bp)
{
	int i;

	for_each_rx_queue(bp, i)
		napi_enable(&bnx2x_fp(bp, i, napi));
}

static void bnx2x_napi_disable(struct bnx2x *bp)
{
	int i;

	for_each_rx_queue(bp, i)
		napi_disable(&bnx2x_fp(bp, i, napi));
}

static void bnx2x_netif_start(struct bnx2x *bp)
{
	int intr_sem;

	intr_sem = atomic_dec_and_test(&bp->intr_sem);
	smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */

	if (intr_sem) {
		if (netif_running(bp->dev)) {
			bnx2x_napi_enable(bp);
			bnx2x_int_enable(bp);
			if (bp->state == BNX2X_STATE_OPEN)
				netif_tx_wake_all_queues(bp->dev);
		}
	}
}

static void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
{
	bnx2x_int_disable_sync(bp, disable_hw);
	bnx2x_napi_disable(bp);
	netif_tx_disable(bp->dev);
	bp->dev->trans_start = jiffies;	/* prevent tx timeout */
}

/*
 * Init service functions
 */

static void bnx2x_set_mac_addr_e1(struct bnx2x *bp, int set)
{
	struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
	int port = BP_PORT(bp);

	/* CAM allocation
	 * unicasts 0-31:port0  32-63:port1
	 * multicast 64-127:port0  128-191:port1
	 */
	config->hdr.length = 2;
	config->hdr.offset = port ? 32 : 0;
	config->hdr.client_id = bp->fp->cl_id;
	config->hdr.reserved1 = 0;

	/* primary MAC */
	config->config_table[0].cam_entry.msb_mac_addr =
					swab16(*(u16 *)&bp->dev->dev_addr[0]);
	config->config_table[0].cam_entry.middle_mac_addr =
					swab16(*(u16 *)&bp->dev->dev_addr[2]);
	config->config_table[0].cam_entry.lsb_mac_addr =
					swab16(*(u16 *)&bp->dev->dev_addr[4]);
	config->config_table[0].cam_entry.flags = cpu_to_le16(port);
	if (set)
		config->config_table[0].target_table_entry.flags = 0;
	else
		CAM_INVALIDATE(config->config_table[0]);
	config->config_table[0].target_table_entry.clients_bit_vector =
						cpu_to_le32(1 << BP_L_ID(bp));
	config->config_table[0].target_table_entry.vlan_id = 0;

	DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x)\n",
	   (set ? "setting" : "clearing"),
	   config->config_table[0].cam_entry.msb_mac_addr,
	   config->config_table[0].cam_entry.middle_mac_addr,
	   config->config_table[0].cam_entry.lsb_mac_addr);

	/* broadcast */
	config->config_table[1].cam_entry.msb_mac_addr = cpu_to_le16(0xffff);
	config->config_table[1].cam_entry.middle_mac_addr = cpu_to_le16(0xffff);
	config->config_table[1].cam_entry.lsb_mac_addr = cpu_to_le16(0xffff);
	config->config_table[1].cam_entry.flags = cpu_to_le16(port);
	if (set)
		config->config_table[1].target_table_entry.flags =
				TSTORM_CAM_TARGET_TABLE_ENTRY_BROADCAST;
	else
		CAM_INVALIDATE(config->config_table[1]);
	config->config_table[1].target_table_entry.clients_bit_vector =
						cpu_to_le32(1 << BP_L_ID(bp));
	config->config_table[1].target_table_entry.vlan_id = 0;

	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
		      U64_HI(bnx2x_sp_mapping(bp, mac_config)),
		      U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
}
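
/* A note on the swab16() packing above: on a little-endian host, a MAC of
 * e.g. 00:0e:1e:12:34:56 loads as the u16 values 0x0e00, 0x121e, 0x5634;
 * swab16() turns these into 0x000e, 0x1e12, 0x3456, so the CAM receives
 * the address as three big-endian 16-bit words.
 */
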
static void bnx2x_set_mac_addr_e1h(struct bnx2x *bp, int set)
{
	struct mac_configuration_cmd_e1h *config =
		(struct mac_configuration_cmd_e1h *)bnx2x_sp(bp, mac_config);

	/* CAM allocation for E1H
	 * unicasts: by func number
	 * multicast: 20+FUNC*20, 20 each
	 */
	config->hdr.length = 1;
	config->hdr.offset = BP_FUNC(bp);
	config->hdr.client_id = bp->fp->cl_id;
	config->hdr.reserved1 = 0;

	/* primary MAC */
	config->config_table[0].msb_mac_addr =
					swab16(*(u16 *)&bp->dev->dev_addr[0]);
	config->config_table[0].middle_mac_addr =
					swab16(*(u16 *)&bp->dev->dev_addr[2]);
	config->config_table[0].lsb_mac_addr =
					swab16(*(u16 *)&bp->dev->dev_addr[4]);
	config->config_table[0].clients_bit_vector =
					cpu_to_le32(1 << BP_L_ID(bp));
	config->config_table[0].vlan_id = 0;
	config->config_table[0].e1hov_id = cpu_to_le16(bp->e1hov);
	if (set)
		config->config_table[0].flags = BP_PORT(bp);
	else
		config->config_table[0].flags =
				MAC_CONFIGURATION_ENTRY_E1H_ACTION_TYPE;

	DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x)  E1HOV %d  CLID %d\n",
	   (set ? "setting" : "clearing"),
	   config->config_table[0].msb_mac_addr,
	   config->config_table[0].middle_mac_addr,
	   config->config_table[0].lsb_mac_addr, bp->e1hov, BP_L_ID(bp));

	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
		      U64_HI(bnx2x_sp_mapping(bp, mac_config)),
		      U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
}

static int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
			     int *state_p, int poll)
{
	/* can take a while if any port is running */
	int cnt = 5000;

	DP(NETIF_MSG_IFUP, "%s for state to become %x on IDX [%d]\n",
	   poll ? "polling" : "waiting", state, idx);

	might_sleep();
	while (cnt--) {
		if (poll) {
			bnx2x_rx_int(bp->fp, 10);
			/* if index is different from 0
			 * the reply for some commands will
			 * be on the non default queue
			 */
			if (idx)
				bnx2x_rx_int(&bp->fp[idx], 10);
		}

		mb(); /* state is changed by bnx2x_sp_event() */
		if (*state_p == state) {
#ifdef BNX2X_STOP_ON_ERROR
			DP(NETIF_MSG_IFUP, "exit  (cnt %d)\n", 5000 - cnt);
#endif
			return 0;
		}

		msleep(1);
	}

	/* timeout! */
	BNX2X_ERR("timeout %s for state %x on IDX [%d]\n",
		  poll ? "polling" : "waiting", state, idx);
#ifdef BNX2X_STOP_ON_ERROR
	bnx2x_panic();
#endif

	return -EBUSY;
}
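/* With cnt starting at 5000 and a 1 ms sleep per iteration, the loop above
 * gives a ramrod roughly 5 seconds to complete before reporting a timeout.
 */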

static int bnx2x_setup_leading(struct bnx2x *bp)
{
	int rc;

	/* reset IGU state */
	bnx2x_ack_sb(bp, bp->fp[0].sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);

	/* SETUP ramrod */
	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_SETUP, 0, 0, 0, 0);

	/* Wait for completion */
	rc = bnx2x_wait_ramrod(bp, BNX2X_STATE_OPEN, 0, &(bp->state), 0);

	return rc;
}

static int bnx2x_setup_multi(struct bnx2x *bp, int index)
{
	struct bnx2x_fastpath *fp = &bp->fp[index];

	/* reset IGU state */
	bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);

	/* SETUP ramrod */
	fp->state = BNX2X_FP_STATE_OPENING;
	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CLIENT_SETUP, index, 0,
		      fp->cl_id, 0);

	/* Wait for completion */
	return bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_OPEN, index,
				 &(fp->state), 0);
}

static int bnx2x_poll(struct napi_struct *napi, int budget);

static void bnx2x_set_int_mode_msix(struct bnx2x *bp, int *num_rx_queues_out,
				    int *num_tx_queues_out)
{
	int _num_rx_queues = 0, _num_tx_queues = 0;

	switch (bp->multi_mode) {
	case ETH_RSS_MODE_DISABLED:
		_num_rx_queues = 1;
		_num_tx_queues = 1;
		break;

	case ETH_RSS_MODE_REGULAR:
		if (num_rx_queues)
			_num_rx_queues = min_t(u32, num_rx_queues,
					       BNX2X_MAX_QUEUES(bp));
		else
			_num_rx_queues = min_t(u32, num_online_cpus(),
					       BNX2X_MAX_QUEUES(bp));

		if (num_tx_queues)
			_num_tx_queues = min_t(u32, num_tx_queues,
					       BNX2X_MAX_QUEUES(bp));
		else
			_num_tx_queues = min_t(u32, num_online_cpus(),
					       BNX2X_MAX_QUEUES(bp));

		/* There must be not more Tx queues than Rx queues */
		if (_num_tx_queues > _num_rx_queues) {
			BNX2X_ERR("number of tx queues (%d) > "
				  "number of rx queues (%d)"
				  "  defaulting to %d\n",
				  _num_tx_queues, _num_rx_queues,
				  _num_rx_queues);
			_num_tx_queues = _num_rx_queues;
		}
		break;

	default:
		_num_rx_queues = 1;
		_num_tx_queues = 1;
		break;
	}

	*num_rx_queues_out = _num_rx_queues;
	*num_tx_queues_out = _num_tx_queues;
}

static int bnx2x_set_int_mode(struct bnx2x *bp)
{
	int rc = 0;

	switch (int_mode) {
	case INT_MODE_INTx:
	case INT_MODE_MSI:
		bp->num_rx_queues = 1;
		bp->num_tx_queues = 1;
		DP(NETIF_MSG_IFUP, "set number of queues to 1\n");
		break;

	case INT_MODE_MSIX:
	default:
		/* Set interrupt mode according to bp->multi_mode value */
		bnx2x_set_int_mode_msix(bp, &bp->num_rx_queues,
					&bp->num_tx_queues);

		DP(NETIF_MSG_IFUP, "set number of queues to: rx %d tx %d\n",
		   bp->num_rx_queues, bp->num_tx_queues);

		/* if we can't use MSI-X we only need one fp,
		 * so try to enable MSI-X with the requested number of fp's
		 * and fallback to MSI or legacy INTx with one fp
		 */
		rc = bnx2x_enable_msix(bp);
		if (rc) {
			/* failed to enable MSI-X */
			if (bp->multi_mode)
				BNX2X_ERR("Multi requested but failed to "
					  "enable MSI-X (rx %d tx %d), "
					  "set number of queues to 1\n",
					  bp->num_rx_queues,
					  bp->num_tx_queues);
			bp->num_rx_queues = 1;
			bp->num_tx_queues = 1;
		}
		break;
	}
	bp->dev->real_num_tx_queues = bp->num_tx_queues;
	return rc;
}

/* must be called with rtnl_lock */
static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
{
	u32 load_code;
	int i, rc;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return -EPERM;
#endif

	bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;

	rc = bnx2x_set_int_mode(bp);

	if (bnx2x_alloc_mem(bp))
		return -ENOMEM;

	for_each_rx_queue(bp, i)
		bnx2x_fp(bp, i, disable_tpa) =
					((bp->flags & TPA_ENABLE_FLAG) == 0);

	for_each_rx_queue(bp, i)
		netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
			       bnx2x_poll, 128);

	bnx2x_napi_enable(bp);

	if (bp->flags & USING_MSIX_FLAG) {
		rc = bnx2x_req_msix_irqs(bp);
		if (rc) {
			pci_disable_msix(bp->pdev);
			goto load_error1;
		}
	} else {
		/* Fall to INTx if failed to enable MSI-X due to lack of
		   memory (in bnx2x_set_int_mode()) */
		if ((rc != -ENOMEM) && (int_mode != INT_MODE_INTx))
			bnx2x_enable_msi(bp);

		rc = bnx2x_req_irq(bp);
		if (rc) {
			BNX2X_ERR("IRQ request failed  rc %d, aborting\n", rc);
			if (bp->flags & USING_MSI_FLAG)
				pci_disable_msi(bp->pdev);
			goto load_error1;
		}
		if (bp->flags & USING_MSI_FLAG) {
			bp->dev->irq = bp->pdev->irq;
			printk(KERN_INFO PFX "%s: using MSI  IRQ %d\n",
			       bp->dev->name, bp->pdev->irq);
		}
	}

	/* Send LOAD_REQUEST command to MCP
	   Returns the type of LOAD command:
	   if it is the first port to be initialized
	   common blocks should be initialized, otherwise - not
	*/
	if (!BP_NOMCP(bp)) {
		load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ);
		if (!load_code) {
			BNX2X_ERR("MCP response failure, aborting\n");
			rc = -EBUSY;
			goto load_error2;
		}
		if (load_code == FW_MSG_CODE_DRV_LOAD_REFUSED) {
			rc = -EBUSY; /* other port in diagnostic mode */
			goto load_error2;
		}

	} else {
		int port = BP_PORT(bp);

		DP(NETIF_MSG_IFUP, "NO MCP - load counts      %d, %d, %d\n",
		   load_count[0], load_count[1], load_count[2]);
		load_count[0]++;
		load_count[1 + port]++;
		DP(NETIF_MSG_IFUP, "NO MCP - new load counts  %d, %d, %d\n",
		   load_count[0], load_count[1], load_count[2]);
		if (load_count[0] == 1)
			load_code = FW_MSG_CODE_DRV_LOAD_COMMON;
		else if (load_count[1 + port] == 1)
			load_code = FW_MSG_CODE_DRV_LOAD_PORT;
		else
			load_code = FW_MSG_CODE_DRV_LOAD_FUNCTION;
	}

	if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
	    (load_code == FW_MSG_CODE_DRV_LOAD_PORT))
		bp->port.pmf = 1;
	else
		bp->port.pmf = 0;
	DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);

	/* Initialize HW */
	rc = bnx2x_init_hw(bp, load_code);
	if (rc) {
		BNX2X_ERR("HW init failed, aborting\n");
		goto load_error2;
	}

	/* Setup NIC internals and enable interrupts */
	bnx2x_nic_init(bp, load_code);

	if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) &&
	    (bp->common.shmem2_base))
		SHMEM2_WR(bp, dcc_support,
			  (SHMEM_DCC_SUPPORT_DISABLE_ENABLE_PF_TLV |
			   SHMEM_DCC_SUPPORT_BANDWIDTH_ALLOCATION_TLV));

	/* Send LOAD_DONE command to MCP */
	if (!BP_NOMCP(bp)) {
		load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE);
		if (!load_code) {
			BNX2X_ERR("MCP response failure, aborting\n");
			rc = -EBUSY;
			goto load_error3;
		}
	}

	bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;

	rc = bnx2x_setup_leading(bp);
	if (rc) {
		BNX2X_ERR("Setup leading failed!\n");
		goto load_error3;
	}

	if (CHIP_IS_E1H(bp))
		if (bp->mf_config & FUNC_MF_CFG_FUNC_DISABLED) {
			DP(NETIF_MSG_IFUP, "mf_cfg function disabled\n");
			bp->state = BNX2X_STATE_DISABLED;
		}

	if (bp->state == BNX2X_STATE_OPEN) {
		for_each_nondefault_queue(bp, i) {
			rc = bnx2x_setup_multi(bp, i);
			if (rc)
				goto load_error3;
		}

		if (CHIP_IS_E1(bp))
			bnx2x_set_mac_addr_e1(bp, 1);
		else
			bnx2x_set_mac_addr_e1h(bp, 1);
	}

	if (bp->port.pmf)
		bnx2x_initial_phy_init(bp, load_mode);

	/* Start fast path */
	switch (load_mode) {
	case LOAD_NORMAL:
		if (bp->state == BNX2X_STATE_OPEN) {
			/* Tx queue should be only reenabled */
			netif_tx_wake_all_queues(bp->dev);
		}
		/* Initialize the receive filter. */
		bnx2x_set_rx_mode(bp->dev);
		break;

	case LOAD_OPEN:
		netif_tx_start_all_queues(bp->dev);
		if (bp->state != BNX2X_STATE_OPEN)
			netif_tx_disable(bp->dev);
		/* Initialize the receive filter. */
		bnx2x_set_rx_mode(bp->dev);
		break;

	case LOAD_DIAG:
		/* Initialize the receive filter. */
		bnx2x_set_rx_mode(bp->dev);
		bp->state = BNX2X_STATE_DIAG;
		break;

	default:
		break;
	}

	if (!bp->port.pmf)
		bnx2x__link_status_update(bp);

	/* start the timer */
	mod_timer(&bp->timer, jiffies + bp->current_interval);

	return 0;

load_error3:
	bnx2x_int_disable_sync(bp, 1);
	if (!BP_NOMCP(bp)) {
		bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP);
		bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
	}
	bp->port.pmf = 0;
	/* Free SKBs, SGEs, TPA pool and driver internals */
	bnx2x_free_skbs(bp);
	for_each_rx_queue(bp, i)
		bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
load_error2:
	/* Release IRQs */
	bnx2x_free_irq(bp);
load_error1:
	bnx2x_napi_disable(bp);
	for_each_rx_queue(bp, i)
		netif_napi_del(&bnx2x_fp(bp, i, napi));
	bnx2x_free_mem(bp);

	return rc;
}

static int bnx2x_stop_multi(struct bnx2x *bp, int index)
{
	struct bnx2x_fastpath *fp = &bp->fp[index];
	int rc;

	/* halt the connection */
	fp->state = BNX2X_FP_STATE_HALTING;
	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, index, 0, fp->cl_id, 0);

	/* Wait for completion */
	rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, index,
			       &(fp->state), 1);
	if (rc) /* timeout */
		return rc;

	/* delete cfc entry */
	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CFC_DEL, index, 0, 0, 1);

	/* Wait for completion */
	rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_CLOSED, index,
			       &(fp->state), 1);
	return rc;
}

static int bnx2x_stop_leading(struct bnx2x *bp)
{
	__le16 dsb_sp_prod_idx;
	/* if the other port is handling traffic,
	   this can take a lot of time */
	int cnt = 500;
	int rc;

	might_sleep();

	/* Send HALT ramrod */
	bp->fp[0].state = BNX2X_FP_STATE_HALTING;
	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, 0, 0, bp->fp->cl_id, 0);

	/* Wait for completion */
	rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, 0,
			       &(bp->fp[0].state), 1);
	if (rc) /* timeout */
		return rc;

	dsb_sp_prod_idx = *bp->dsb_sp_prod;

	/* Send PORT_DELETE ramrod */
	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_DEL, 0, 0, 0, 1);

	/* Wait for completion to arrive on default status block
	   we are going to reset the chip anyway
	   so there is not much to do if this times out
	 */
	while (dsb_sp_prod_idx == *bp->dsb_sp_prod) {
		if (!cnt) {
			DP(NETIF_MSG_IFDOWN, "timeout waiting for port del "
			   "dsb_sp_prod 0x%x != dsb_sp_prod_idx 0x%x\n",
			   *bp->dsb_sp_prod, dsb_sp_prod_idx);
#ifdef BNX2X_STOP_ON_ERROR
			bnx2x_panic();
#endif
			rc = -EBUSY;
			break;
		}
		cnt--;
		msleep(1);
		rmb(); /* Refresh the dsb_sp_prod */
	}
	bp->state = BNX2X_STATE_CLOSING_WAIT4_UNLOAD;
	bp->fp[0].state = BNX2X_FP_STATE_CLOSED;

	return rc;
}

static void bnx2x_reset_func(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int func = BP_FUNC(bp);
	int base, i;

	/* Configure IGU */
	REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
	REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);

	/* Clear ILT */
	base = FUNC_ILT_BASE(func);
	for (i = base; i < base + ILT_PER_FUNC; i++)
		bnx2x_ilt_wr(bp, i, 0);
}

static void bnx2x_reset_port(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 val;

	REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);

	/* Do not rcv packets to BRB */
	REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK + port*4, 0x0);
	/* Do not direct rcv packets that are not for MCP to the BRB */
	REG_WR(bp, (port ? NIG_REG_LLH1_BRB1_NOT_MCP :
			   NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);

	/* Configure AEU */
	REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, 0);

	msleep(100);
	/* Check for BRB port occupancy */
	val = REG_RD(bp, BRB1_REG_PORT_NUM_OCC_BLOCKS_0 + port*4);
	if (val)
		DP(NETIF_MSG_IFDOWN,
		   "BRB1 is not empty  %d blocks are occupied\n", val);

	/* TODO: Close Doorbell port? */
}

static void bnx2x_reset_chip(struct bnx2x *bp, u32 reset_code)
{
	DP(BNX2X_MSG_MCP, "function %d  reset_code %x\n",
	   BP_FUNC(bp), reset_code);

	switch (reset_code) {
	case FW_MSG_CODE_DRV_UNLOAD_COMMON:
		bnx2x_reset_port(bp);
		bnx2x_reset_func(bp);
		bnx2x_reset_common(bp);
		break;

	case FW_MSG_CODE_DRV_UNLOAD_PORT:
		bnx2x_reset_port(bp);
		bnx2x_reset_func(bp);
		break;

	case FW_MSG_CODE_DRV_UNLOAD_FUNCTION:
		bnx2x_reset_func(bp);
		break;

	default:
		BNX2X_ERR("Unknown reset_code (0x%x) from MCP\n", reset_code);
		break;
	}
}

/* must be called with rtnl_lock */
static int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
{
	int port = BP_PORT(bp);
	u32 reset_code = 0;
	int i, cnt, rc;

	bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;

	bp->rx_mode = BNX2X_RX_MODE_NONE;
	bnx2x_set_storm_rx_mode(bp);

	bnx2x_netif_stop(bp, 1);

	del_timer_sync(&bp->timer);
	SHMEM_WR(bp, func_mb[BP_FUNC(bp)].drv_pulse_mb,
		 (DRV_PULSE_ALWAYS_ALIVE | bp->fw_drv_pulse_wr_seq));
	bnx2x_stats_handle(bp, STATS_EVENT_STOP);

	/* Release IRQs */
	bnx2x_free_irq(bp);

	/* Wait until tx fastpath tasks complete */
	for_each_tx_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		cnt = 1000;
		while (bnx2x_has_tx_work_unload(fp)) {

			bnx2x_tx_int(fp);
			if (!cnt) {
				BNX2X_ERR("timeout waiting for queue[%d]\n",
					  i);
#ifdef BNX2X_STOP_ON_ERROR
				bnx2x_panic();
				return -EBUSY;
#else
				break;
#endif
			}
			cnt--;
			msleep(1);
		}
	}
	/* Give HW time to discard old tx messages */
	msleep(1);

	if (CHIP_IS_E1(bp)) {
		struct mac_configuration_cmd *config =
						bnx2x_sp(bp, mcast_config);

		bnx2x_set_mac_addr_e1(bp, 0);

		for (i = 0; i < config->hdr.length; i++)
			CAM_INVALIDATE(config->config_table[i]);

		config->hdr.length = i;
		if (CHIP_REV_IS_SLOW(bp))
			config->hdr.offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
		else
			config->hdr.offset = BNX2X_MAX_MULTICAST*(1 + port);
		config->hdr.client_id = bp->fp->cl_id;
		config->hdr.reserved1 = 0;

		bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
			      U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
			      U64_LO(bnx2x_sp_mapping(bp, mcast_config)), 0);

	} else { /* E1H */
		REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);

		bnx2x_set_mac_addr_e1h(bp, 0);

		for (i = 0; i < MC_HASH_SIZE; i++)
			REG_WR(bp, MC_HASH_OFFSET(bp, i), 0);

		REG_WR(bp, MISC_REG_E1HMF_MODE, 0);
	}

	if (unload_mode == UNLOAD_NORMAL)
		reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;

	else if (bp->flags & NO_WOL_FLAG)
		reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP;

	else if (bp->wol) {
		u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
		u8 *mac_addr = bp->dev->dev_addr;
		u32 val;
		/* The mac address is written to entries 1-4 to
		   preserve entry 0 which is used by the PMF */
		u8 entry = (BP_E1HVN(bp) + 1)*8;

		val = (mac_addr[0] << 8) | mac_addr[1];
		EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry, val);

		val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
		      (mac_addr[4] << 8) | mac_addr[5];
		EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry + 4, val);
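		/* e.g. for a MAC of 00:0e:1e:12:34:56 the two writes above
		 * carry 0x0000000e (bytes 0-1) and 0x1e123456 (bytes 2-5) */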

		reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN;

	} else
		reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;

	/* Close multi and leading connections
	   Completions for ramrods are collected in a synchronous way */
	for_each_nondefault_queue(bp, i)
		if (bnx2x_stop_multi(bp, i))
			goto unload_error;

	rc = bnx2x_stop_leading(bp);
	if (rc) {
		BNX2X_ERR("Stop leading failed!\n");
#ifdef BNX2X_STOP_ON_ERROR
		return -EBUSY;
#endif
	}

unload_error:
	if (!BP_NOMCP(bp))
		reset_code = bnx2x_fw_command(bp, reset_code);
	else {
		DP(NETIF_MSG_IFDOWN, "NO MCP - load counts      %d, %d, %d\n",
		   load_count[0], load_count[1], load_count[2]);
		load_count[0]--;
		load_count[1 + port]--;
		DP(NETIF_MSG_IFDOWN, "NO MCP - new load counts  %d, %d, %d\n",
		   load_count[0], load_count[1], load_count[2]);
		if (load_count[0] == 0)
			reset_code = FW_MSG_CODE_DRV_UNLOAD_COMMON;
		else if (load_count[1 + port] == 0)
			reset_code = FW_MSG_CODE_DRV_UNLOAD_PORT;
		else
			reset_code = FW_MSG_CODE_DRV_UNLOAD_FUNCTION;
	}

	if ((reset_code == FW_MSG_CODE_DRV_UNLOAD_COMMON) ||
	    (reset_code == FW_MSG_CODE_DRV_UNLOAD_PORT))
		bnx2x__link_reset(bp);

	/* Reset the chip */
	bnx2x_reset_chip(bp, reset_code);

	/* Report UNLOAD_DONE to MCP */
	if (!BP_NOMCP(bp))
		bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);

	bp->port.pmf = 0;

	/* Free SKBs, SGEs, TPA pool and driver internals */
	bnx2x_free_skbs(bp);
	for_each_rx_queue(bp, i)
		bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
	for_each_rx_queue(bp, i)
		netif_napi_del(&bnx2x_fp(bp, i, napi));
	bnx2x_free_mem(bp);

	bp->state = BNX2X_STATE_CLOSED;

	netif_carrier_off(bp->dev);

	return 0;
}

static void bnx2x_reset_task(struct work_struct *work)
{
	struct bnx2x *bp = container_of(work, struct bnx2x, reset_task);

#ifdef BNX2X_STOP_ON_ERROR
	BNX2X_ERR("reset task called but STOP_ON_ERROR defined"
		  " so reset not done to allow debug dump,\n"
		  " you will need to reboot when done\n");
	return;
#endif

	rtnl_lock();

	if (!netif_running(bp->dev))
		goto reset_task_exit;

	bnx2x_nic_unload(bp, UNLOAD_NORMAL);
	bnx2x_nic_load(bp, LOAD_NORMAL);

reset_task_exit:
	rtnl_unlock();
}

/* end of nic load/unload */

/*
 * Init service functions
 */

static inline u32 bnx2x_get_pretend_reg(struct bnx2x *bp, int func)
{
	switch (func) {
	case 0: return PXP2_REG_PGL_PRETEND_FUNC_F0;
	case 1: return PXP2_REG_PGL_PRETEND_FUNC_F1;
	case 2: return PXP2_REG_PGL_PRETEND_FUNC_F2;
	case 3: return PXP2_REG_PGL_PRETEND_FUNC_F3;
	case 4: return PXP2_REG_PGL_PRETEND_FUNC_F4;
	case 5: return PXP2_REG_PGL_PRETEND_FUNC_F5;
	case 6: return PXP2_REG_PGL_PRETEND_FUNC_F6;
	case 7: return PXP2_REG_PGL_PRETEND_FUNC_F7;
	default:
		BNX2X_ERR("Unsupported function index: %d\n", func);
		return (u32)(-1);
	}
}

static void bnx2x_undi_int_disable_e1h(struct bnx2x *bp, int orig_func)
{
	u32 reg = bnx2x_get_pretend_reg(bp, orig_func), new_val;

	/* Flush all outstanding writes */
	mmiowb();

	/* Pretend to be function 0 */
	REG_WR(bp, reg, 0);
	/* Flush the GRC transaction (in the chip) */
	new_val = REG_RD(bp, reg);
	if (new_val != 0) {
		BNX2X_ERR("Hmmm... Pretend register wasn't updated: (0,%d)!\n",
			  new_val);
		BUG();
	}

	/* From now we are in the "like-E1" mode */
	bnx2x_int_disable(bp);

	/* Flush all outstanding writes */
	mmiowb();

	/* Restore the original function settings */
	REG_WR(bp, reg, orig_func);
	new_val = REG_RD(bp, reg);
	if (new_val != orig_func) {
		BNX2X_ERR("Hmmm... Pretend register wasn't updated: (%d,%d)!\n",
			  orig_func, new_val);
		BUG();
	}
}

static inline void bnx2x_undi_int_disable(struct bnx2x *bp, int func)
{
	if (CHIP_IS_E1H(bp))
		bnx2x_undi_int_disable_e1h(bp, func);
	else
		bnx2x_int_disable(bp);
}

static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
{
	u32 val;

	/* Check if there is any driver already loaded */
	val = REG_RD(bp, MISC_REG_UNPREPARED);
	if (val == 0x1) {
		/* Check if it is the UNDI driver
		 * UNDI driver initializes CID offset for normal bell to 0x7
		 */
		bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
		val = REG_RD(bp, DORQ_REG_NORM_CID_OFST);
		if (val == 0x7) {
			u32 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
			/* save our func */
			int func = BP_FUNC(bp);
			u32 swap_en;
			u32 swap_val;

			/* clear the UNDI indication */
			REG_WR(bp, DORQ_REG_NORM_CID_OFST, 0);

			BNX2X_DEV_INFO("UNDI is active! reset device\n");

			/* try unload UNDI on port 0 */
			bp->func = 0;
			bp->fw_seq =
			       (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
				DRV_MSG_SEQ_NUMBER_MASK);
			reset_code = bnx2x_fw_command(bp, reset_code);

			/* if UNDI is loaded on the other port */
			if (reset_code != FW_MSG_CODE_DRV_UNLOAD_COMMON) {

				/* send "DONE" for previous unload */
				bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);

				/* unload UNDI on port 1 */
				bp->func = 1;
				bp->fw_seq =
			       (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
					DRV_MSG_SEQ_NUMBER_MASK);
				reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;

				bnx2x_fw_command(bp, reset_code);
			}

			/* now it's safe to release the lock */
			bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);

			bnx2x_undi_int_disable(bp, func);

			/* close input traffic and wait for it */
			/* Do not rcv packets to BRB */
			REG_WR(bp,
			      (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_DRV_MASK :
					     NIG_REG_LLH0_BRB1_DRV_MASK), 0x0);
			/* Do not direct rcv packets that are not for MCP to
			 * the BRB */
			REG_WR(bp,
			      (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_NOT_MCP :
					     NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
			/* clear AEU */
			REG_WR(bp,
			      (BP_PORT(bp) ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
					     MISC_REG_AEU_MASK_ATTN_FUNC_0), 0);
			msleep(10);

			/* save NIG port swap info */
			swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
			swap_en = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
			/* reset device */
			REG_WR(bp,
			       GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
			       0xd3ffffff);
			REG_WR(bp,
			       GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
			       0x1403);
			/* take the NIG out of reset and restore swap values */
			REG_WR(bp,
			       GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
			       MISC_REGISTERS_RESET_REG_1_RST_NIG);
			REG_WR(bp, NIG_REG_PORT_SWAP, swap_val);
			REG_WR(bp, NIG_REG_STRAP_OVERRIDE, swap_en);

			/* send unload done to the MCP */
			bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);

			/* restore our func and fw_seq */
			bp->func = func;
			bp->fw_seq =
			       (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
				DRV_MSG_SEQ_NUMBER_MASK);

		} else
			bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
	}
}

static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
{
	u32 val, val2, val3, val4, id;
	u16 pmc;

	/* Get the chip revision id and number. */
	/* chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3 */
	val = REG_RD(bp, MISC_REG_CHIP_NUM);
	id = ((val & 0xffff) << 16);
	val = REG_RD(bp, MISC_REG_CHIP_REV);
	id |= ((val & 0xf) << 12);
	val = REG_RD(bp, MISC_REG_CHIP_METAL);
	id |= ((val & 0xff) << 4);
	val = REG_RD(bp, MISC_REG_BOND_ID);
	id |= (val & 0xf);
	bp->common.chip_id = id;
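	/* e.g. a part reporting chip num 0x164f, rev 0, metal 0 and
	 * bond_id 0 assembles to chip_id 0x164f0000 */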
	bp->link_params.chip_id = bp->common.chip_id;
	BNX2X_DEV_INFO("chip ID is 0x%x\n", id);

	val = (REG_RD(bp, 0x2874) & 0x55);
	if ((bp->common.chip_id & 0x1) ||
	    (CHIP_IS_E1(bp) && val) || (CHIP_IS_E1H(bp) && (val == 0x55))) {
		bp->flags |= ONE_PORT_FLAG;
		BNX2X_DEV_INFO("single port device\n");
	}

	val = REG_RD(bp, MCP_REG_MCPR_NVM_CFG4);
	bp->common.flash_size = (NVRAM_1MB_SIZE <<
				 (val & MCPR_NVM_CFG4_FLASH_SIZE));
	BNX2X_DEV_INFO("flash_size 0x%x (%d)\n",
		       bp->common.flash_size, bp->common.flash_size);

	bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
	bp->common.shmem2_base = REG_RD(bp, MISC_REG_GENERIC_CR_0);
	bp->link_params.shmem_base = bp->common.shmem_base;
	BNX2X_DEV_INFO("shmem offset 0x%x  shmem2 offset 0x%x\n",
		       bp->common.shmem_base, bp->common.shmem2_base);

	if (!bp->common.shmem_base ||
	    (bp->common.shmem_base < 0xA0000) ||
	    (bp->common.shmem_base >= 0xC0000)) {
		BNX2X_DEV_INFO("MCP not active\n");
		bp->flags |= NO_MCP_FLAG;
		return;
	}

	val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
	if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
		!= (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
		BNX2X_ERR("BAD MCP validity signature\n");

	bp->common.hw_config = SHMEM_RD(bp, dev_info.shared_hw_config.config);
	BNX2X_DEV_INFO("hw_config 0x%08x\n", bp->common.hw_config);

	bp->link_params.hw_led_mode = ((bp->common.hw_config &
					SHARED_HW_CFG_LED_MODE_MASK) >>
				       SHARED_HW_CFG_LED_MODE_SHIFT);

	bp->link_params.feature_config_flags = 0;
	val = SHMEM_RD(bp, dev_info.shared_feature_config.config);
	if (val & SHARED_FEAT_CFG_OVERRIDE_PREEMPHASIS_CFG_ENABLED)
		bp->link_params.feature_config_flags |=
				FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
	else
		bp->link_params.feature_config_flags &=
				~FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;

	val = SHMEM_RD(bp, dev_info.bc_rev) >> 8;
	bp->common.bc_ver = val;
	BNX2X_DEV_INFO("bc_ver %X\n", val);
	if (val < BNX2X_BC_VER) {
		/* for now only warn
		 * later we might need to enforce this */
		BNX2X_ERR("This driver needs bc_ver %X but found %X,"
			  " please upgrade BC\n", BNX2X_BC_VER, val);
	}
	bp->link_params.feature_config_flags |=
		(val >= REQ_BC_VER_4_VRFY_OPT_MDL) ?
		FEATURE_CONFIG_BC_SUPPORTS_OPT_MDL_VRFY : 0;

	if (BP_E1HVN(bp) == 0) {
		pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_PMC, &pmc);
		bp->flags |= (pmc & PCI_PM_CAP_PME_D3cold) ? 0 : NO_WOL_FLAG;
	} else {
		/* no WOL capability for E1HVN != 0 */
		bp->flags |= NO_WOL_FLAG;
	}
	BNX2X_DEV_INFO("%sWoL capable\n",
		       (bp->flags & NO_WOL_FLAG) ? "not " : "");

	val = SHMEM_RD(bp, dev_info.shared_hw_config.part_num);
	val2 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[4]);
	val3 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[8]);
	val4 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[12]);

	printk(KERN_INFO PFX "part number %X-%X-%X-%X\n",
	       val, val2, val3, val4);
}

static void __devinit bnx2x_link_settings_supported(struct bnx2x *bp,
						    u32 switch_cfg)
{
	int port = BP_PORT(bp);
	u32 ext_phy_type;

	switch (switch_cfg) {
	case SWITCH_CFG_1G:
		BNX2X_DEV_INFO("switch_cfg 0x%x (1G)\n", switch_cfg);

		ext_phy_type =
			SERDES_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
		switch (ext_phy_type) {
		case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10baseT_Half |
					       SUPPORTED_10baseT_Full |
					       SUPPORTED_100baseT_Half |
					       SUPPORTED_100baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_2500baseX_Full |
					       SUPPORTED_TP |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Autoneg |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_BCM5482:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (5482)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10baseT_Half |
					       SUPPORTED_10baseT_Full |
					       SUPPORTED_100baseT_Half |
					       SUPPORTED_100baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_TP |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Autoneg |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		default:
			BNX2X_ERR("NVRAM config error. "
				  "BAD SerDes ext_phy_config 0x%x\n",
				  bp->link_params.ext_phy_config);
			return;
		}

		bp->port.phy_addr = REG_RD(bp, NIG_REG_SERDES0_CTRL_PHY_ADDR +
					   port*0x10);
		BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
		break;

	case SWITCH_CFG_10G:
		BNX2X_DEV_INFO("switch_cfg 0x%x (10G)\n", switch_cfg);

		ext_phy_type =
			XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
		switch (ext_phy_type) {
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10baseT_Half |
					       SUPPORTED_10baseT_Full |
					       SUPPORTED_100baseT_Half |
					       SUPPORTED_100baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_2500baseX_Full |
					       SUPPORTED_10000baseT_Full |
					       SUPPORTED_TP |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Autoneg |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (8072)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10000baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Autoneg |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (8073)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10000baseT_Full |
					       SUPPORTED_2500baseX_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Autoneg |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (8705)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10000baseT_Full |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (8706)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10000baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (8726)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10000baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_Autoneg |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (8727)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10000baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_Autoneg |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (SFX7101)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10000baseT_Full |
					       SUPPORTED_TP |
					       SUPPORTED_Autoneg |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (BCM8481)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10baseT_Half |
					       SUPPORTED_10baseT_Full |
					       SUPPORTED_100baseT_Half |
					       SUPPORTED_100baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_10000baseT_Full |
					       SUPPORTED_TP |
					       SUPPORTED_Autoneg |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
			BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
				  bp->link_params.ext_phy_config);
			break;

		default:
			BNX2X_ERR("NVRAM config error. "
				  "BAD XGXS ext_phy_config 0x%x\n",
				  bp->link_params.ext_phy_config);
			return;
		}

		bp->port.phy_addr = REG_RD(bp, NIG_REG_XGXS0_CTRL_PHY_ADDR +
					   port*0x18);
		BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
		break;

	default:
		BNX2X_ERR("BAD switch_cfg link_config 0x%x\n",
			  bp->port.link_config);
		return;
	}
	bp->link_params.phy_addr = bp->port.phy_addr;

	/* mask what we support according to speed_cap_mask */
	if (!(bp->link_params.speed_cap_mask &
				PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF))
		bp->port.supported &= ~SUPPORTED_10baseT_Half;

	if (!(bp->link_params.speed_cap_mask &
				PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL))
		bp->port.supported &= ~SUPPORTED_10baseT_Full;

	if (!(bp->link_params.speed_cap_mask &
				PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF))
		bp->port.supported &= ~SUPPORTED_100baseT_Half;

	if (!(bp->link_params.speed_cap_mask &
				PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL))
		bp->port.supported &= ~SUPPORTED_100baseT_Full;

	if (!(bp->link_params.speed_cap_mask &
					PORT_HW_CFG_SPEED_CAPABILITY_D0_1G))
		bp->port.supported &= ~(SUPPORTED_1000baseT_Half |
					SUPPORTED_1000baseT_Full);

	if (!(bp->link_params.speed_cap_mask &
					PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G))
		bp->port.supported &= ~SUPPORTED_2500baseX_Full;

	if (!(bp->link_params.speed_cap_mask &
					PORT_HW_CFG_SPEED_CAPABILITY_D0_10G))
		bp->port.supported &= ~SUPPORTED_10000baseT_Full;

	BNX2X_DEV_INFO("supported 0x%x\n", bp->port.supported);
}

static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
{
	bp->link_params.req_duplex = DUPLEX_FULL;

	switch (bp->port.link_config & PORT_FEATURE_LINK_SPEED_MASK) {
	case PORT_FEATURE_LINK_SPEED_AUTO:
		if (bp->port.supported & SUPPORTED_Autoneg) {
			bp->link_params.req_line_speed = SPEED_AUTO_NEG;
			bp->port.advertising = bp->port.supported;
		} else {
			u32 ext_phy_type =
			    XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);

			if ((ext_phy_type ==
			     PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705) ||
			    (ext_phy_type ==
			     PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706)) {
				/* force 10G, no AN */
				bp->link_params.req_line_speed = SPEED_10000;
				bp->port.advertising =
						(ADVERTISED_10000baseT_Full |
						 ADVERTISED_FIBRE);
				break;
			}
			BNX2X_ERR("NVRAM config error. "
				  "Invalid link_config 0x%x"
				  "  Autoneg not supported\n",
				  bp->port.link_config);
			return;
		}
		break;

	case PORT_FEATURE_LINK_SPEED_10M_FULL:
		if (bp->port.supported & SUPPORTED_10baseT_Full) {
			bp->link_params.req_line_speed = SPEED_10;
			bp->port.advertising = (ADVERTISED_10baseT_Full |
						ADVERTISED_TP);
		} else {
			BNX2X_ERR("NVRAM config error. "
				  "Invalid link_config 0x%x"
				  "  speed_cap_mask 0x%x\n",
				  bp->port.link_config,
				  bp->link_params.speed_cap_mask);
			return;
		}
		break;

	case PORT_FEATURE_LINK_SPEED_10M_HALF:
		if (bp->port.supported & SUPPORTED_10baseT_Half) {
			bp->link_params.req_line_speed = SPEED_10;
			bp->link_params.req_duplex = DUPLEX_HALF;
			bp->port.advertising = (ADVERTISED_10baseT_Half |
						ADVERTISED_TP);
		} else {
			BNX2X_ERR("NVRAM config error. "
				  "Invalid link_config 0x%x"
				  "  speed_cap_mask 0x%x\n",
				  bp->port.link_config,
				  bp->link_params.speed_cap_mask);
			return;
		}
		break;

	case PORT_FEATURE_LINK_SPEED_100M_FULL:
		if (bp->port.supported & SUPPORTED_100baseT_Full) {
			bp->link_params.req_line_speed = SPEED_100;
			bp->port.advertising = (ADVERTISED_100baseT_Full |
						ADVERTISED_TP);
		} else {
			BNX2X_ERR("NVRAM config error. "
				  "Invalid link_config 0x%x"
				  "  speed_cap_mask 0x%x\n",
				  bp->port.link_config,
				  bp->link_params.speed_cap_mask);
			return;
		}
		break;

	case PORT_FEATURE_LINK_SPEED_100M_HALF:
		if (bp->port.supported & SUPPORTED_100baseT_Half) {
			bp->link_params.req_line_speed = SPEED_100;
			bp->link_params.req_duplex = DUPLEX_HALF;
			bp->port.advertising = (ADVERTISED_100baseT_Half |
						ADVERTISED_TP);
		} else {
			BNX2X_ERR("NVRAM config error. "
				  "Invalid link_config 0x%x"
				  "  speed_cap_mask 0x%x\n",
				  bp->port.link_config,
				  bp->link_params.speed_cap_mask);
			return;
		}
		break;

	case PORT_FEATURE_LINK_SPEED_1G:
		if (bp->port.supported & SUPPORTED_1000baseT_Full) {
			bp->link_params.req_line_speed = SPEED_1000;
			bp->port.advertising = (ADVERTISED_1000baseT_Full |
						ADVERTISED_TP);
		} else {
			BNX2X_ERR("NVRAM config error. "
				  "Invalid link_config 0x%x"
				  "  speed_cap_mask 0x%x\n",
				  bp->port.link_config,
				  bp->link_params.speed_cap_mask);
			return;
		}
		break;

	case PORT_FEATURE_LINK_SPEED_2_5G:
		if (bp->port.supported & SUPPORTED_2500baseX_Full) {
			bp->link_params.req_line_speed = SPEED_2500;
			bp->port.advertising = (ADVERTISED_2500baseX_Full |
						ADVERTISED_TP);
		} else {
			BNX2X_ERR("NVRAM config error. "
				  "Invalid link_config 0x%x"
				  "  speed_cap_mask 0x%x\n",
				  bp->port.link_config,
				  bp->link_params.speed_cap_mask);
			return;
		}
		break;

	case PORT_FEATURE_LINK_SPEED_10G_CX4:
	case PORT_FEATURE_LINK_SPEED_10G_KX4:
	case PORT_FEATURE_LINK_SPEED_10G_KR:
		if (bp->port.supported & SUPPORTED_10000baseT_Full) {
			bp->link_params.req_line_speed = SPEED_10000;
			bp->port.advertising = (ADVERTISED_10000baseT_Full |
						ADVERTISED_FIBRE);
		} else {
			BNX2X_ERR("NVRAM config error. "
				  "Invalid link_config 0x%x"
				  "  speed_cap_mask 0x%x\n",
				  bp->port.link_config,
				  bp->link_params.speed_cap_mask);
			return;
		}
		break;

	default:
		BNX2X_ERR("NVRAM config error. "
			  "BAD link speed link_config 0x%x\n",
			  bp->port.link_config);
		bp->link_params.req_line_speed = SPEED_AUTO_NEG;
		bp->port.advertising = bp->port.supported;
		break;
	}

	bp->link_params.req_flow_ctrl = (bp->port.link_config &
					 PORT_FEATURE_FLOW_CONTROL_MASK);
	if ((bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO) &&
	    !(bp->port.supported & SUPPORTED_Autoneg))
		bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_NONE;

	BNX2X_DEV_INFO("req_line_speed %d  req_duplex %d  req_flow_ctrl 0x%x"
		       "  advertising 0x%x\n",
		       bp->link_params.req_line_speed,
		       bp->link_params.req_duplex,
		       bp->link_params.req_flow_ctrl, bp->port.advertising);
}

static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 val, val2;
	u32 config;
	u16 i;
	u32 ext_phy_type;

	bp->link_params.bp = bp;
	bp->link_params.port = port;

	bp->link_params.lane_config =
		SHMEM_RD(bp, dev_info.port_hw_config[port].lane_config);
	bp->link_params.ext_phy_config =
		SHMEM_RD(bp,
			 dev_info.port_hw_config[port].external_phy_config);
	/* BCM8727_NOC => BCM8727 no over current */
	if (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config) ==
	    PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727_NOC) {
		bp->link_params.ext_phy_config &=
			~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
		bp->link_params.ext_phy_config |=
			PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727;
		bp->link_params.feature_config_flags |=
			FEATURE_CONFIG_BCM8727_NOC;
	}

	bp->link_params.speed_cap_mask =
		SHMEM_RD(bp,
			 dev_info.port_hw_config[port].speed_capability_mask);

	bp->port.link_config =
		SHMEM_RD(bp, dev_info.port_feature_config[port].link_config);

	/* Get the 4 lanes xgxs config rx and tx */
	for (i = 0; i < 2; i++) {
		val = SHMEM_RD(bp,
			   dev_info.port_hw_config[port].xgxs_config_rx[i<<1]);
		bp->link_params.xgxs_config_rx[i << 1] = ((val>>16) & 0xffff);
		bp->link_params.xgxs_config_rx[(i << 1) + 1] = (val & 0xffff);

		val = SHMEM_RD(bp,
			   dev_info.port_hw_config[port].xgxs_config_tx[i<<1]);
		bp->link_params.xgxs_config_tx[i << 1] = ((val>>16) & 0xffff);
		bp->link_params.xgxs_config_tx[(i << 1) + 1] = (val & 0xffff);
	}

	/* If the device is capable of WoL, set the default state according
	 * to the HW
	 */
	config = SHMEM_RD(bp, dev_info.port_feature_config[port].config);
	bp->wol = (!(bp->flags & NO_WOL_FLAG) &&
		   (config & PORT_FEATURE_WOL_ENABLED));

	BNX2X_DEV_INFO("lane_config 0x%08x  ext_phy_config 0x%08x"
		       "  speed_cap_mask 0x%08x  link_config 0x%08x\n",
		       bp->link_params.lane_config,
		       bp->link_params.ext_phy_config,
		       bp->link_params.speed_cap_mask, bp->port.link_config);

	bp->link_params.switch_cfg |= (bp->port.link_config &
				       PORT_FEATURE_CONNECTED_SWITCH_MASK);
	bnx2x_link_settings_supported(bp, bp->link_params.switch_cfg);

	bnx2x_link_settings_requested(bp);

	/*
	 * If connected directly, work with the internal PHY, otherwise, work
	 * with the external PHY
	 */
	ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
	if (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT)
		bp->mdio.prtad = bp->link_params.phy_addr;

	else if ((ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE) &&
		 (ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN))
		bp->mdio.prtad =
			(bp->link_params.ext_phy_config &
			 PORT_HW_CFG_XGXS_EXT_PHY_ADDR_MASK) >>
			 PORT_HW_CFG_XGXS_EXT_PHY_ADDR_SHIFT;

	val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper);
	val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower);
	bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
	bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
	bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
	bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
	bp->dev->dev_addr[4] = (u8)(val >> 8 & 0xff);
	bp->dev->dev_addr[5] = (u8)(val & 0xff);
	memcpy(bp->link_params.mac_addr, bp->dev->dev_addr, ETH_ALEN);
	memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
}

static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);
	u32 val, val2;
	int rc = 0;

	bnx2x_get_common_hwinfo(bp);

	bp->e1hov = 0;
	bp->e1hmf = 0;
	if (CHIP_IS_E1H(bp)) {
		bp->mf_config =
			SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);

		val = (SHMEM_RD(bp, mf_cfg.func_mf_config[FUNC_0].e1hov_tag) &
		       FUNC_MF_CFG_E1HOV_TAG_MASK);
		if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT)
			bp->e1hmf = 1;
		BNX2X_DEV_INFO("%s function mode\n",
			       IS_E1HMF(bp) ? "multi" : "single");

		if (IS_E1HMF(bp)) {
			val = (SHMEM_RD(bp, mf_cfg.func_mf_config[func].
								e1hov_tag) &
			       FUNC_MF_CFG_E1HOV_TAG_MASK);
			if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
				bp->e1hov = val;
				BNX2X_DEV_INFO("E1HOV for func %d is %d "
					       "(0x%04x)\n",
					       func, bp->e1hov, bp->e1hov);
			} else {
				BNX2X_ERR("!!!  No valid E1HOV for func %d,"
					  "  aborting\n", func);
				rc = -EPERM;
			}
		} else {
			if (BP_E1HVN(bp)) {
				BNX2X_ERR("!!!  VN %d in single function mode,"
					  "  aborting\n", BP_E1HVN(bp));
				rc = -EPERM;
			}
		}
	}

	if (!BP_NOMCP(bp)) {
		bnx2x_get_port_hwinfo(bp);

		bp->fw_seq = (SHMEM_RD(bp, func_mb[func].drv_mb_header) &
			      DRV_MSG_SEQ_NUMBER_MASK);
		BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
	}

	if (IS_E1HMF(bp)) {
		val2 = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_upper);
		val = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_lower);
		if ((val2 != FUNC_MF_CFG_UPPERMAC_DEFAULT) &&
		    (val != FUNC_MF_CFG_LOWERMAC_DEFAULT)) {
			bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
			bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
			bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
			bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
			bp->dev->dev_addr[4] = (u8)(val >> 8 & 0xff);
			bp->dev->dev_addr[5] = (u8)(val & 0xff);
			memcpy(bp->link_params.mac_addr, bp->dev->dev_addr,
			       ETH_ALEN);
			memcpy(bp->dev->perm_addr, bp->dev->dev_addr,
			       ETH_ALEN);
		}

		return rc;
	}

	if (BP_NOMCP(bp)) {
		/* only supposed to happen on emulation/FPGA */
		BNX2X_ERR("warning random MAC workaround active\n");
		random_ether_addr(bp->dev->dev_addr);
		memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
	}

	return rc;
}

static int __devinit bnx2x_init_bp(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);
	int timer_interval;
	int rc;

	/* Disable interrupt handling until HW is initialized */
	atomic_set(&bp->intr_sem, 1);
	smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */

	mutex_init(&bp->port.phy_mutex);

	INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task);
	INIT_WORK(&bp->reset_task, bnx2x_reset_task);

	rc = bnx2x_get_hwinfo(bp);

	/* need to reset chip if undi was active */
	if (!BP_NOMCP(bp))
		bnx2x_undi_unload(bp);

	if (CHIP_REV_IS_FPGA(bp))
		printk(KERN_ERR PFX "FPGA detected\n");

	if (BP_NOMCP(bp) && (func == 0))
		printk(KERN_ERR PFX
		       "MCP disabled, must load devices in order!\n");

	/* Set multi queue mode */
	if ((multi_mode != ETH_RSS_MODE_DISABLED) &&
	    ((int_mode == INT_MODE_INTx) || (int_mode == INT_MODE_MSI))) {
		printk(KERN_ERR PFX
		      "Multi disabled since int_mode requested is not MSI-X\n");
		multi_mode = ETH_RSS_MODE_DISABLED;
	}
	bp->multi_mode = multi_mode;

	/* Set TPA flags */
	if (disable_tpa) {
		bp->flags &= ~TPA_ENABLE_FLAG;
		bp->dev->features &= ~NETIF_F_LRO;
	} else {
		bp->flags |= TPA_ENABLE_FLAG;
		bp->dev->features |= NETIF_F_LRO;
	}

	bp->mrrs = mrrs;

	bp->tx_ring_size = MAX_TX_AVAIL;
	bp->rx_ring_size = MAX_RX_AVAIL;
	bp->rx_csum = 1;

	bp->tx_ticks = 50;
	bp->rx_ticks = 25;

	timer_interval = (CHIP_REV_IS_SLOW(bp) ? 5*HZ : HZ);
	bp->current_interval = (poll ? poll : timer_interval);

	init_timer(&bp->timer);
	bp->timer.expires = jiffies + bp->current_interval;
	bp->timer.data = (unsigned long) bp;
	bp->timer.function = bnx2x_timer;

	return rc;
}

/****************************************************************************
* ethtool service functions
****************************************************************************/

/* All ethtool functions called with rtnl_lock */

static int bnx2x_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct bnx2x *bp = netdev_priv(dev);

	cmd->supported = bp->port.supported;
	cmd->advertising = bp->port.advertising;

	if (netif_carrier_ok(dev)) {
		cmd->speed = bp->link_vars.line_speed;
		cmd->duplex = bp->link_vars.duplex;
	} else {
		cmd->speed = bp->link_params.req_line_speed;
		cmd->duplex = bp->link_params.req_duplex;
	}
	if (IS_E1HMF(bp)) {
		u16 vn_max_rate;

		vn_max_rate = ((bp->mf_config & FUNC_MF_CFG_MAX_BW_MASK) >>
			       FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
		if (vn_max_rate < cmd->speed)
			cmd->speed = vn_max_rate;
	}

	if (bp->link_params.switch_cfg == SWITCH_CFG_10G) {
		u32 ext_phy_type =
			XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);

		switch (ext_phy_type) {
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
			cmd->port = PORT_FIBRE;
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481:
			cmd->port = PORT_TP;
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
			BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
				  bp->link_params.ext_phy_config);
			break;

		default:
			DP(NETIF_MSG_LINK, "BAD XGXS ext_phy_config 0x%x\n",
			   bp->link_params.ext_phy_config);
			break;
		}
	} else
		cmd->port = PORT_TP;

	cmd->phy_address = bp->mdio.prtad;
	cmd->transceiver = XCVR_INTERNAL;

	if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
		cmd->autoneg = AUTONEG_ENABLE;
	else
		cmd->autoneg = AUTONEG_DISABLE;

	cmd->maxtxpkt = 0;
	cmd->maxrxpkt = 0;

	DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
	   DP_LEVEL "  supported 0x%x  advertising 0x%x  speed %d\n"
	   DP_LEVEL "  duplex %d  port %d  phy_address %d  transceiver %d\n"
	   DP_LEVEL "  autoneg %d  maxtxpkt %d  maxrxpkt %d\n",
	   cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
	   cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
	   cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);

	return 0;
}

static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct bnx2x *bp = netdev_priv(dev);
	u32 advertising;

	if (IS_E1HMF(bp))
		return 0;

	DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
	   DP_LEVEL "  supported 0x%x  advertising 0x%x  speed %d\n"
	   DP_LEVEL "  duplex %d  port %d  phy_address %d  transceiver %d\n"
	   DP_LEVEL "  autoneg %d  maxtxpkt %d  maxrxpkt %d\n",
	   cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
	   cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
	   cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);

	if (cmd->autoneg == AUTONEG_ENABLE) {
		if (!(bp->port.supported & SUPPORTED_Autoneg)) {
			DP(NETIF_MSG_LINK, "Autoneg not supported\n");
			return -EINVAL;
		}

		/* advertise the requested speed and duplex if supported */
		cmd->advertising &= bp->port.supported;

		bp->link_params.req_line_speed = SPEED_AUTO_NEG;
		bp->link_params.req_duplex = DUPLEX_FULL;
		bp->port.advertising |= (ADVERTISED_Autoneg |
					 cmd->advertising);

	} else { /* forced speed */
		/* advertise the requested speed and duplex if supported */
		switch (cmd->speed) {
		case SPEED_10:
			if (cmd->duplex == DUPLEX_FULL) {
				if (!(bp->port.supported &
				      SUPPORTED_10baseT_Full)) {
					DP(NETIF_MSG_LINK,
					   "10M full not supported\n");
					return -EINVAL;
				}
				advertising = (ADVERTISED_10baseT_Full |
					       ADVERTISED_TP);
			} else {
				if (!(bp->port.supported &
				      SUPPORTED_10baseT_Half)) {
					DP(NETIF_MSG_LINK,
					   "10M half not supported\n");
					return -EINVAL;
				}
				advertising = (ADVERTISED_10baseT_Half |
					       ADVERTISED_TP);
			}
			break;
		case SPEED_100:
			if (cmd->duplex == DUPLEX_FULL) {
				if (!(bp->port.supported &
				      SUPPORTED_100baseT_Full)) {
					DP(NETIF_MSG_LINK,
					   "100M full not supported\n");
					return -EINVAL;
				}
				advertising = (ADVERTISED_100baseT_Full |
					       ADVERTISED_TP);
			} else {
				if (!(bp->port.supported &
				      SUPPORTED_100baseT_Half)) {
					DP(NETIF_MSG_LINK,
					   "100M half not supported\n");
					return -EINVAL;
				}
				advertising = (ADVERTISED_100baseT_Half |
					       ADVERTISED_TP);
			}
			break;
		case SPEED_1000:
			if (cmd->duplex != DUPLEX_FULL) {
				DP(NETIF_MSG_LINK, "1G half not supported\n");
				return -EINVAL;
			}
			if (!(bp->port.supported & SUPPORTED_1000baseT_Full)) {
				DP(NETIF_MSG_LINK, "1G full not supported\n");
				return -EINVAL;
			}
			advertising = (ADVERTISED_1000baseT_Full |
				       ADVERTISED_TP);
			break;
		case SPEED_2500:
			if (cmd->duplex != DUPLEX_FULL) {
				DP(NETIF_MSG_LINK,
				   "2.5G half not supported\n");
				return -EINVAL;
			}
			if (!(bp->port.supported & SUPPORTED_2500baseX_Full)) {
				DP(NETIF_MSG_LINK,
				   "2.5G full not supported\n");
				return -EINVAL;
			}
			advertising = (ADVERTISED_2500baseX_Full |
				       ADVERTISED_TP);
			break;
		case SPEED_10000:
			if (cmd->duplex != DUPLEX_FULL) {
				DP(NETIF_MSG_LINK, "10G half not supported\n");
				return -EINVAL;
			}
			if (!(bp->port.supported & SUPPORTED_10000baseT_Full)) {
				DP(NETIF_MSG_LINK, "10G full not supported\n");
				return -EINVAL;
			}
			advertising = (ADVERTISED_10000baseT_Full |
				       ADVERTISED_FIBRE);
			break;
		default:
			DP(NETIF_MSG_LINK, "Unsupported speed\n");
			return -EINVAL;
		}

		bp->link_params.req_line_speed = cmd->speed;
		bp->link_params.req_duplex = cmd->duplex;
		bp->port.advertising = advertising;
	}

	DP(NETIF_MSG_LINK, "req_line_speed %d\n"
	   DP_LEVEL "  req_duplex %d  advertising 0x%x\n",
	   bp->link_params.req_line_speed, bp->link_params.req_duplex,
	   bp->port.advertising);

	if (netif_running(dev)) {
		bnx2x_stats_handle(bp, STATS_EVENT_STOP);
		bnx2x_link_set(bp);
	}

	return 0;
}

#define PHY_FW_VER_LEN			10

static void bnx2x_get_drvinfo(struct net_device *dev,
			      struct ethtool_drvinfo *info)
{
	struct bnx2x *bp = netdev_priv(dev);
	u8 phy_fw_ver[PHY_FW_VER_LEN];

	strcpy(info->driver, DRV_MODULE_NAME);
	strcpy(info->version, DRV_MODULE_VERSION);

	phy_fw_ver[0] = '\0';
	if (bp->port.pmf) {
		bnx2x_acquire_phy_lock(bp);
		bnx2x_get_ext_phy_fw_version(&bp->link_params,
					     (bp->state != BNX2X_STATE_CLOSED),
					     phy_fw_ver, PHY_FW_VER_LEN);
		bnx2x_release_phy_lock(bp);
	}

	snprintf(info->fw_version, 32, "BC:%d.%d.%d%s%s",
		 (bp->common.bc_ver & 0xff0000) >> 16,
		 (bp->common.bc_ver & 0xff00) >> 8,
		 (bp->common.bc_ver & 0xff),
		 ((phy_fw_ver[0] != '\0') ? " PHY:" : ""), phy_fw_ver);
	strcpy(info->bus_info, pci_name(bp->pdev));
	info->n_stats = BNX2X_NUM_STATS;
	info->testinfo_len = BNX2X_NUM_TESTS;
	info->eedump_len = bp->common.flash_size;
	info->regdump_len = 0;
}
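/* Example of the fw_version formatting above: a bootcode version word of
 * 0x00040200 is reported as "BC:4.2.0" - one byte per version field.
 */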

#define IS_E1_ONLINE(info)	(((info) & RI_E1_ONLINE) == RI_E1_ONLINE)
#define IS_E1H_ONLINE(info)	(((info) & RI_E1H_ONLINE) == RI_E1H_ONLINE)

static int bnx2x_get_regs_len(struct net_device *dev)
{
	static u32 regdump_len;
	struct bnx2x *bp = netdev_priv(dev);
	int i;

	if (regdump_len)
		return regdump_len;

	if (CHIP_IS_E1(bp)) {
		for (i = 0; i < REGS_COUNT; i++)
			if (IS_E1_ONLINE(reg_addrs[i].info))
				regdump_len += reg_addrs[i].size;

		for (i = 0; i < WREGS_COUNT_E1; i++)
			if (IS_E1_ONLINE(wreg_addrs_e1[i].info))
				regdump_len += wreg_addrs_e1[i].size *
					(1 + wreg_addrs_e1[i].read_regs_count);

	} else { /* E1H */
		for (i = 0; i < REGS_COUNT; i++)
			if (IS_E1H_ONLINE(reg_addrs[i].info))
				regdump_len += reg_addrs[i].size;

		for (i = 0; i < WREGS_COUNT_E1H; i++)
			if (IS_E1H_ONLINE(wreg_addrs_e1h[i].info))
				regdump_len += wreg_addrs_e1h[i].size *
					(1 + wreg_addrs_e1h[i].read_regs_count);
	}
	regdump_len += sizeof(struct dump_hdr);
	regdump_len *= 4;

	return regdump_len;
}

static void bnx2x_get_regs(struct net_device *dev,
			   struct ethtool_regs *regs, void *_p)
{
	u32 *p = _p, i, j;
	struct bnx2x *bp = netdev_priv(dev);
	struct dump_hdr dump_hdr = {0};

	regs->version = 0;
	memset(p, 0, regs->len);

	if (!netif_running(bp->dev))
		return;

	dump_hdr.hdr_size = (sizeof(struct dump_hdr) / 4) - 1;
	dump_hdr.dump_sign = dump_sign_all;
	dump_hdr.xstorm_waitp = REG_RD(bp, XSTORM_WAITP_ADDR);
	dump_hdr.tstorm_waitp = REG_RD(bp, TSTORM_WAITP_ADDR);
	dump_hdr.ustorm_waitp = REG_RD(bp, USTORM_WAITP_ADDR);
	dump_hdr.cstorm_waitp = REG_RD(bp, CSTORM_WAITP_ADDR);
	dump_hdr.info = CHIP_IS_E1(bp) ? RI_E1_ONLINE : RI_E1H_ONLINE;

	memcpy(p, &dump_hdr, sizeof(struct dump_hdr));
	p += dump_hdr.hdr_size + 1;

	if (CHIP_IS_E1(bp)) {
		for (i = 0; i < REGS_COUNT; i++)
			if (IS_E1_ONLINE(reg_addrs[i].info))
				for (j = 0; j < reg_addrs[i].size; j++)
					*p++ = REG_RD(bp,
						      reg_addrs[i].addr + j*4);

	} else { /* E1H */
		for (i = 0; i < REGS_COUNT; i++)
			if (IS_E1H_ONLINE(reg_addrs[i].info))
				for (j = 0; j < reg_addrs[i].size; j++)
					*p++ = REG_RD(bp,
						      reg_addrs[i].addr + j*4);
	}
}

static void bnx2x_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct bnx2x *bp = netdev_priv(dev);

	if (bp->flags & NO_WOL_FLAG) {
		wol->supported = 0;
		wol->wolopts = 0;
	} else {
		wol->supported = WAKE_MAGIC;
		if (bp->wol)
			wol->wolopts = WAKE_MAGIC;
		else
			wol->wolopts = 0;
	}
	memset(&wol->sopass, 0, sizeof(wol->sopass));
}

static int bnx2x_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct bnx2x *bp = netdev_priv(dev);

	if (wol->wolopts & ~WAKE_MAGIC)
		return -EINVAL;

	if (wol->wolopts & WAKE_MAGIC) {
		if (bp->flags & NO_WOL_FLAG)
			return -EINVAL;

		bp->wol = 1;
	} else
		bp->wol = 0;

	return 0;
}

static u32 bnx2x_get_msglevel(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	return bp->msglevel;
}

static void bnx2x_set_msglevel(struct net_device *dev, u32 level)
{
	struct bnx2x *bp = netdev_priv(dev);

	if (capable(CAP_NET_ADMIN))
		bp->msglevel = level;
}

static int bnx2x_nway_reset(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	if (!bp->port.pmf)
		return 0;

	if (netif_running(dev)) {
		bnx2x_stats_handle(bp, STATS_EVENT_STOP);
		bnx2x_link_set(bp);
	}

	return 0;
}

static u32 bnx2x_get_link(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	return bp->link_vars.link_up;
}

static int bnx2x_get_eeprom_len(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	return bp->common.flash_size;
}
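
/*
 * NVRAM access helpers. Every read/write below follows the same sequence:
 * bnx2x_acquire_nvram_lock() wins the per-port SW arbitration inside the
 * MCP, bnx2x_enable_nvram_access() opens the interface, then one or more
 * dword commands are issued, and finally access is disabled and the lock
 * released in reverse order.
 */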

static int bnx2x_acquire_nvram_lock(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int count, i;
	u32 val = 0;

	/* adjust timeout for emulation/FPGA */
	count = NVRAM_TIMEOUT_COUNT;
	if (CHIP_REV_IS_SLOW(bp))
		count *= 100;

	/* request access to nvram interface */
	REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
	       (MCPR_NVM_SW_ARB_ARB_REQ_SET1 << port));

	for (i = 0; i < count*10; i++) {
		val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
		if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))
			break;

		udelay(5);
	}

	if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))) {
		DP(BNX2X_MSG_NVM, "cannot get access to nvram interface\n");
		return -EBUSY;
	}

	return 0;
}

static int bnx2x_release_nvram_lock(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int count, i;
	u32 val = 0;

	/* adjust timeout for emulation/FPGA */
	count = NVRAM_TIMEOUT_COUNT;
	if (CHIP_REV_IS_SLOW(bp))
		count *= 100;

	/* relinquish nvram interface */
	REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
	       (MCPR_NVM_SW_ARB_ARB_REQ_CLR1 << port));

	for (i = 0; i < count*10; i++) {
		val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
		if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)))
			break;

		udelay(5);
	}

	if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)) {
		DP(BNX2X_MSG_NVM, "cannot free access to nvram interface\n");
		return -EBUSY;
	}

	return 0;
}

static void bnx2x_enable_nvram_access(struct bnx2x *bp)
{
	u32 val;

	val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);

	/* enable both bits, even on read */
	REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
	       (val | MCPR_NVM_ACCESS_ENABLE_EN |
		      MCPR_NVM_ACCESS_ENABLE_WR_EN));
}

static void bnx2x_disable_nvram_access(struct bnx2x *bp)
{
	u32 val;

	val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);

	/* disable both bits, even after read */
	REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
	       (val & ~(MCPR_NVM_ACCESS_ENABLE_EN |
			MCPR_NVM_ACCESS_ENABLE_WR_EN)));
}

static int bnx2x_nvram_read_dword(struct bnx2x *bp, u32 offset, __be32 *ret_val,
				  u32 cmd_flags)
{
	int count, i, rc;
	u32 val;

	/* build the command word */
	cmd_flags |= MCPR_NVM_COMMAND_DOIT;

	/* need to clear DONE bit separately */
	REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);

	/* address of the NVRAM to read from */
	REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
	       (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));

	/* issue a read command */
	REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);

	/* adjust timeout for emulation/FPGA */
	count = NVRAM_TIMEOUT_COUNT;
	if (CHIP_REV_IS_SLOW(bp))
		count *= 100;

	/* wait for completion */
	*ret_val = 0;
	rc = -EBUSY;
	for (i = 0; i < count; i++) {
		udelay(5);
		val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);

		if (val & MCPR_NVM_COMMAND_DONE) {
			val = REG_RD(bp, MCP_REG_MCPR_NVM_READ);
			/* we read nvram data in cpu order
			 * but ethtool sees it as an array of bytes
			 * converting to big-endian will do the work */
			*ret_val = cpu_to_be32(val);
			rc = 0;
			break;
		}
	}

	return rc;
}
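
/* Dword transfers are framed with MCPR_NVM_COMMAND_FIRST/_LAST so the
 * NVRAM state machine knows where a burst begins and ends: callers set
 * FIRST on the first dword of a sequence and LAST on the final one.
 */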

static int bnx2x_nvram_read(struct bnx2x *bp, u32 offset, u8 *ret_buf,
			    int buf_size)
{
	int rc;
	u32 cmd_flags;
	__be32 val;

	if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
		DP(BNX2X_MSG_NVM,
		   "Invalid parameter: offset 0x%x  buf_size 0x%x\n",
		   offset, buf_size);
		return -EINVAL;
	}

	if (offset + buf_size > bp->common.flash_size) {
		DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
				  "  buf_size (0x%x) > flash_size (0x%x)\n",
		   offset, buf_size, bp->common.flash_size);
		return -EINVAL;
	}

	/* request access to nvram interface */
	rc = bnx2x_acquire_nvram_lock(bp);
	if (rc)
		return rc;

	/* enable access to nvram interface */
	bnx2x_enable_nvram_access(bp);

	/* read the first word(s) */
	cmd_flags = MCPR_NVM_COMMAND_FIRST;
	while ((buf_size > sizeof(u32)) && (rc == 0)) {
		rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
		memcpy(ret_buf, &val, 4);

		/* advance to the next dword */
		offset += sizeof(u32);
		ret_buf += sizeof(u32);
		buf_size -= sizeof(u32);
		cmd_flags = 0;
	}

	if (rc == 0) {
		cmd_flags |= MCPR_NVM_COMMAND_LAST;
		rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
		memcpy(ret_buf, &val, 4);
	}

	/* disable access to nvram interface */
	bnx2x_disable_nvram_access(bp);
	bnx2x_release_nvram_lock(bp);

	return rc;
}
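
/* A minimal usage sketch (offset and length must be dword aligned):
 *
 *	__be32 magic;
 *	int rc = bnx2x_nvram_read(bp, 0, (u8 *)&magic, sizeof(magic));
 *
 * On success magic holds the big-endian NVRAM signature (0x669955aa);
 * bnx2x_test_nvram() below uses exactly this pattern.
 */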

static int bnx2x_get_eeprom(struct net_device *dev,
			    struct ethtool_eeprom *eeprom, u8 *eebuf)
{
	struct bnx2x *bp = netdev_priv(dev);
	int rc;

	if (!netif_running(dev))
		return -EAGAIN;

	DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
	   DP_LEVEL "  magic 0x%x  offset 0x%x (%d)  len 0x%x (%d)\n",
	   eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
	   eeprom->len, eeprom->len);

	/* parameters already validated in ethtool_get_eeprom */

	rc = bnx2x_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);

	return rc;
}

static int bnx2x_nvram_write_dword(struct bnx2x *bp, u32 offset, u32 val,
				   u32 cmd_flags)
{
	int count, i, rc;

	/* build the command word */
	cmd_flags |= MCPR_NVM_COMMAND_DOIT | MCPR_NVM_COMMAND_WR;

	/* need to clear DONE bit separately */
	REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);

	/* write the data */
	REG_WR(bp, MCP_REG_MCPR_NVM_WRITE, val);

	/* address of the NVRAM to write to */
	REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
	       (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));

	/* issue the write command */
	REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);

	/* adjust timeout for emulation/FPGA */
	count = NVRAM_TIMEOUT_COUNT;
	if (CHIP_REV_IS_SLOW(bp))
		count *= 100;

	/* wait for completion */
	rc = -EBUSY;
	for (i = 0; i < count; i++) {
		udelay(5);
		val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
		if (val & MCPR_NVM_COMMAND_DONE) {
			rc = 0;
			break;
		}
	}

	return rc;
}

#define BYTE_OFFSET(offset)		(8 * (offset & 0x03))

static int bnx2x_nvram_write1(struct bnx2x *bp, u32 offset, u8 *data_buf,
			      int buf_size)
{
	int rc;
	u32 cmd_flags;
	u32 align_offset;
	__be32 val;

	if (offset + buf_size > bp->common.flash_size) {
		DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
				  "  buf_size (0x%x) > flash_size (0x%x)\n",
		   offset, buf_size, bp->common.flash_size);
		return -EINVAL;
	}

	/* request access to nvram interface */
	rc = bnx2x_acquire_nvram_lock(bp);
	if (rc)
		return rc;

	/* enable access to nvram interface */
	bnx2x_enable_nvram_access(bp);

	cmd_flags = (MCPR_NVM_COMMAND_FIRST | MCPR_NVM_COMMAND_LAST);
	align_offset = (offset & ~0x03);
	rc = bnx2x_nvram_read_dword(bp, align_offset, &val, cmd_flags);

	if (rc == 0) {
		val &= ~(0xff << BYTE_OFFSET(offset));
		val |= (*data_buf << BYTE_OFFSET(offset));

		/* nvram data is returned as an array of bytes
		 * convert it back to cpu order */
		val = be32_to_cpu(val);

		rc = bnx2x_nvram_write_dword(bp, align_offset, val,
					     cmd_flags);
	}

	/* disable access to nvram interface */
	bnx2x_disable_nvram_access(bp);
	bnx2x_release_nvram_lock(bp);

	return rc;
}
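
/* Single-byte writes above are a read-modify-write of the surrounding
 * dword: the aligned dword is read back, the new byte is spliced in at
 * BYTE_OFFSET(offset) - e.g. BYTE_OFFSET(5) = 8, i.e. bits 8-15 of the
 * dword at offset 4 - and the dword is rewritten in a single FIRST|LAST
 * command.
 */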

static int bnx2x_nvram_write(struct bnx2x *bp, u32 offset, u8 *data_buf,
			     int buf_size)
{
	int rc;
	u32 cmd_flags;
	u32 val;
	u32 written_so_far;

	if (buf_size == 1)	/* ethtool */
		return bnx2x_nvram_write1(bp, offset, data_buf, buf_size);

	if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
		DP(BNX2X_MSG_NVM,
		   "Invalid parameter: offset 0x%x  buf_size 0x%x\n",
		   offset, buf_size);
		return -EINVAL;
	}

	if (offset + buf_size > bp->common.flash_size) {
		DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
				  "  buf_size (0x%x) > flash_size (0x%x)\n",
		   offset, buf_size, bp->common.flash_size);
		return -EINVAL;
	}

	/* request access to nvram interface */
	rc = bnx2x_acquire_nvram_lock(bp);
	if (rc)
		return rc;

	/* enable access to nvram interface */
	bnx2x_enable_nvram_access(bp);

	written_so_far = 0;
	cmd_flags = MCPR_NVM_COMMAND_FIRST;
	while ((written_so_far < buf_size) && (rc == 0)) {
		if (written_so_far == (buf_size - sizeof(u32)))
			cmd_flags |= MCPR_NVM_COMMAND_LAST;
		else if (((offset + 4) % NVRAM_PAGE_SIZE) == 0)
			cmd_flags |= MCPR_NVM_COMMAND_LAST;
		else if ((offset % NVRAM_PAGE_SIZE) == 0)
			cmd_flags |= MCPR_NVM_COMMAND_FIRST;

		memcpy(&val, data_buf, 4);

		rc = bnx2x_nvram_write_dword(bp, offset, val, cmd_flags);

		/* advance to the next dword */
		offset += sizeof(u32);
		data_buf += sizeof(u32);
		written_so_far += sizeof(u32);
		cmd_flags = 0;
	}

	/* disable access to nvram interface */
	bnx2x_disable_nvram_access(bp);
	bnx2x_release_nvram_lock(bp);

	return rc;
}
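
/* Note the page handling in the loop above: LAST is raised on the final
 * dword of the buffer and on the last dword of each NVRAM_PAGE_SIZE page,
 * and FIRST is raised again at every page start, so a large write is
 * issued as a series of page-bounded bursts.
 */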

static int bnx2x_set_eeprom(struct net_device *dev,
			    struct ethtool_eeprom *eeprom, u8 *eebuf)
{
	struct bnx2x *bp = netdev_priv(dev);
	int port = BP_PORT(bp);
	int rc = 0;

	if (!netif_running(dev))
		return -EAGAIN;

	DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
	   DP_LEVEL "  magic 0x%x  offset 0x%x (%d)  len 0x%x (%d)\n",
	   eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
	   eeprom->len, eeprom->len);

	/* parameters already validated in ethtool_set_eeprom */

	/* PHY eeprom can be accessed only by the PMF */
	if ((eeprom->magic >= 0x50485900) && (eeprom->magic <= 0x504859FF) &&
	    !bp->port.pmf)
		return -EINVAL;

	if (eeprom->magic == 0x50485950) {
		/* 'PHYP' (0x50485950): prepare phy for FW upgrade */
		bnx2x_stats_handle(bp, STATS_EVENT_STOP);

		bnx2x_acquire_phy_lock(bp);
		rc |= bnx2x_link_reset(&bp->link_params,
				       &bp->link_vars, 0);
		if (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config) ==
					PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101)
			bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_0,
				       MISC_REGISTERS_GPIO_HIGH, port);
		bnx2x_release_phy_lock(bp);
		bnx2x_link_report(bp);

	} else if (eeprom->magic == 0x50485952) {
		/* 'PHYR' (0x50485952): re-init link after FW upgrade */
		if ((bp->state == BNX2X_STATE_OPEN) ||
		    (bp->state == BNX2X_STATE_DISABLED)) {
			bnx2x_acquire_phy_lock(bp);
			rc |= bnx2x_link_reset(&bp->link_params,
					       &bp->link_vars, 1);

			rc |= bnx2x_phy_init(&bp->link_params,
					     &bp->link_vars);
			bnx2x_release_phy_lock(bp);
			bnx2x_calc_fc_adv(bp);
		}
	} else if (eeprom->magic == 0x53985943) {
		/* 'PHYC' (0x53985943): PHY FW upgrade completed */
		if (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config) ==
				       PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101) {
			u8 ext_phy_addr =
				(bp->link_params.ext_phy_config &
				 PORT_HW_CFG_XGXS_EXT_PHY_ADDR_MASK) >>
					PORT_HW_CFG_XGXS_EXT_PHY_ADDR_SHIFT;

			/* DSP Remove Download Mode */
			bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_0,
				       MISC_REGISTERS_GPIO_LOW, port);

			bnx2x_acquire_phy_lock(bp);

			bnx2x_sfx7101_sp_sw_reset(bp, port, ext_phy_addr);

			/* wait 0.5 sec to allow it to run */
			msleep(500);
			bnx2x_ext_phy_hw_reset(bp, port);
			msleep(500);
			bnx2x_release_phy_lock(bp);
		}
	} else
		rc = bnx2x_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);

	return rc;
}
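
/* The magic numbers above are ASCII tags used by the PHY FW upgrade tool:
 * 0x50485950 spells "PHYP" and 0x50485952 spells "PHYR"; the remaining
 * branch (tagged 'PHYC' in the driver) completes an SFX7101 FW upgrade,
 * and any other magic falls through to a plain NVRAM write.
 */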

static int bnx2x_get_coalesce(struct net_device *dev,
			      struct ethtool_coalesce *coal)
{
	struct bnx2x *bp = netdev_priv(dev);

	memset(coal, 0, sizeof(struct ethtool_coalesce));

	coal->rx_coalesce_usecs = bp->rx_ticks;
	coal->tx_coalesce_usecs = bp->tx_ticks;

	return 0;
}

#define BNX2X_MAX_COALES_TOUT  (0xf0*12) /* Maximal coalescing timeout in us */
static int bnx2x_set_coalesce(struct net_device *dev,
			      struct ethtool_coalesce *coal)
{
	struct bnx2x *bp = netdev_priv(dev);

	bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
	if (bp->rx_ticks > BNX2X_MAX_COALES_TOUT)
		bp->rx_ticks = BNX2X_MAX_COALES_TOUT;

	bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
	if (bp->tx_ticks > BNX2X_MAX_COALES_TOUT)
		bp->tx_ticks = BNX2X_MAX_COALES_TOUT;

	if (netif_running(dev))
		bnx2x_update_coalesce(bp);

	return 0;
}
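
/* The clamp above follows from BNX2X_MAX_COALES_TOUT (0xf0*12): the HW
 * coalescing timeout appears to be programmed in 12us ticks with at most
 * 0xf0 ticks, so anything larger is silently reduced.
 */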

static void bnx2x_get_ringparam(struct net_device *dev,
				struct ethtool_ringparam *ering)
{
	struct bnx2x *bp = netdev_priv(dev);

	ering->rx_max_pending = MAX_RX_AVAIL;
	ering->rx_mini_max_pending = 0;
	ering->rx_jumbo_max_pending = 0;

	ering->rx_pending = bp->rx_ring_size;
	ering->rx_mini_pending = 0;
	ering->rx_jumbo_pending = 0;

	ering->tx_max_pending = MAX_TX_AVAIL;
	ering->tx_pending = bp->tx_ring_size;
}

static int bnx2x_set_ringparam(struct net_device *dev,
			       struct ethtool_ringparam *ering)
{
	struct bnx2x *bp = netdev_priv(dev);
	int rc = 0;

	if ((ering->rx_pending > MAX_RX_AVAIL) ||
	    (ering->tx_pending > MAX_TX_AVAIL) ||
	    (ering->tx_pending <= MAX_SKB_FRAGS + 4))
		return -EINVAL;

	bp->rx_ring_size = ering->rx_pending;
	bp->tx_ring_size = ering->tx_pending;

	if (netif_running(dev)) {
		bnx2x_nic_unload(bp, UNLOAD_NORMAL);
		rc = bnx2x_nic_load(bp, LOAD_NORMAL);
	}

	return rc;
}
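
/* Changing ring sizes cannot be done on the fly: when the interface is
 * running the NIC is unloaded and reloaded above so that the rings are
 * reallocated with the new sizes.
 */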

static void bnx2x_get_pauseparam(struct net_device *dev,
				 struct ethtool_pauseparam *epause)
{
	struct bnx2x *bp = netdev_priv(dev);

	epause->autoneg = (bp->link_params.req_flow_ctrl ==
			   BNX2X_FLOW_CTRL_AUTO) &&
			  (bp->link_params.req_line_speed == SPEED_AUTO_NEG);

	epause->rx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) ==
			    BNX2X_FLOW_CTRL_RX);
	epause->tx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX) ==
			    BNX2X_FLOW_CTRL_TX);

	DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
	   DP_LEVEL "  autoneg %d  rx_pause %d  tx_pause %d\n",
	   epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
}

static int bnx2x_set_pauseparam(struct net_device *dev,
				struct ethtool_pauseparam *epause)
{
	struct bnx2x *bp = netdev_priv(dev);

	if (IS_E1HMF(bp))
		return 0;

	DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
	   DP_LEVEL "  autoneg %d  rx_pause %d  tx_pause %d\n",
	   epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);

	bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO;

	if (epause->rx_pause)
		bp->link_params.req_flow_ctrl |= BNX2X_FLOW_CTRL_RX;

	if (epause->tx_pause)
		bp->link_params.req_flow_ctrl |= BNX2X_FLOW_CTRL_TX;

	if (bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO)
		bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_NONE;

	if (epause->autoneg) {
		if (!(bp->port.supported & SUPPORTED_Autoneg)) {
			DP(NETIF_MSG_LINK, "autoneg not supported\n");
			return -EINVAL;
		}

		if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
			bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO;
	}

	DP(NETIF_MSG_LINK,
	   "req_flow_ctrl 0x%x\n", bp->link_params.req_flow_ctrl);

	if (netif_running(dev)) {
		bnx2x_stats_handle(bp, STATS_EVENT_STOP);
		bnx2x_link_set(bp);
	}

	return 0;
}

static int bnx2x_set_flags(struct net_device *dev, u32 data)
{
	struct bnx2x *bp = netdev_priv(dev);
	int changed = 0;
	int rc = 0;

	/* TPA requires Rx CSUM offloading */
	if ((data & ETH_FLAG_LRO) && bp->rx_csum) {
		if (!(dev->features & NETIF_F_LRO)) {
			dev->features |= NETIF_F_LRO;
			bp->flags |= TPA_ENABLE_FLAG;
			changed = 1;
		}

	} else if (dev->features & NETIF_F_LRO) {
		dev->features &= ~NETIF_F_LRO;
		bp->flags &= ~TPA_ENABLE_FLAG;
		changed = 1;
	}

	if (changed && netif_running(dev)) {
		bnx2x_nic_unload(bp, UNLOAD_NORMAL);
		rc = bnx2x_nic_load(bp, LOAD_NORMAL);
	}

	return rc;
}

static u32 bnx2x_get_rx_csum(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	return bp->rx_csum;
}

static int bnx2x_set_rx_csum(struct net_device *dev, u32 data)
{
	struct bnx2x *bp = netdev_priv(dev);
	int rc = 0;

	bp->rx_csum = data;

	/* Disable TPA, when Rx CSUM is disabled. Otherwise all
	   TPA'ed packets will be discarded due to wrong TCP CSUM */
	if (!data) {
		u32 flags = ethtool_op_get_flags(dev);

		rc = bnx2x_set_flags(dev, (flags & ~ETH_FLAG_LRO));
	}

	return rc;
}

static int bnx2x_set_tso(struct net_device *dev, u32 data)
{
	if (data) {
		dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
		dev->features |= NETIF_F_TSO6;
	} else {
		dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO_ECN);
		dev->features &= ~NETIF_F_TSO6;
	}

	return 0;
}

static const struct {
	char string[ETH_GSTRING_LEN];
} bnx2x_tests_str_arr[BNX2X_NUM_TESTS] = {
	{ "register_test (offline)" },
	{ "memory_test (offline)" },
	{ "loopback_test (offline)" },
	{ "nvram_test (online)" },
	{ "interrupt_test (online)" },
	{ "link_test (online)" },
	{ "idle check (online)" }
};

static int bnx2x_self_test_count(struct net_device *dev)
{
	return BNX2X_NUM_TESTS;
}

static int bnx2x_test_registers(struct bnx2x *bp)
{
	int idx, i, rc = -ENODEV;
	u32 wr_val = 0;
	int port = BP_PORT(bp);
	static const struct {
		u32 offset0;
		u32 offset1;
		u32 mask;
	} reg_tbl[] = {
/* 0 */		{ BRB1_REG_PAUSE_LOW_THRESHOLD_0,	4, 0x000003ff },
		{ DORQ_REG_DB_ADDR0,			4, 0xffffffff },
		{ HC_REG_AGG_INT_0,			4, 0x000003ff },
		{ PBF_REG_MAC_IF0_ENABLE,		4, 0x00000001 },
		{ PBF_REG_P0_INIT_CRD,			4, 0x000007ff },
		{ PRS_REG_CID_PORT_0,			4, 0x00ffffff },
		{ PXP2_REG_PSWRQ_CDU0_L2P,		4, 0x000fffff },
		{ PXP2_REG_RQ_CDU0_EFIRST_MEM_ADDR,	8, 0x0003ffff },
		{ PXP2_REG_PSWRQ_TM0_L2P,		4, 0x000fffff },
		{ PXP2_REG_RQ_USDM0_EFIRST_MEM_ADDR,	8, 0x0003ffff },
/* 10 */	{ PXP2_REG_PSWRQ_TSDM0_L2P,		4, 0x000fffff },
		{ QM_REG_CONNNUM_0,			4, 0x000fffff },
		{ TM_REG_LIN0_MAX_ACTIVE_CID,		4, 0x0003ffff },
		{ SRC_REG_KEYRSS0_0,			40, 0xffffffff },
		{ SRC_REG_KEYRSS0_7,			40, 0xffffffff },
		{ XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD00,	4, 0x00000001 },
		{ XCM_REG_WU_DA_CNT_CMD00,		4, 0x00000003 },
		{ XCM_REG_GLB_DEL_ACK_MAX_CNT_0,	4, 0x000000ff },
		{ NIG_REG_LLH0_T_BIT,			4, 0x00000001 },
		{ NIG_REG_EMAC0_IN_EN,			4, 0x00000001 },
/* 20 */	{ NIG_REG_BMAC0_IN_EN,			4, 0x00000001 },
		{ NIG_REG_XCM0_OUT_EN,			4, 0x00000001 },
		{ NIG_REG_BRB0_OUT_EN,			4, 0x00000001 },
		{ NIG_REG_LLH0_XCM_MASK,		4, 0x00000007 },
		{ NIG_REG_LLH0_ACPI_PAT_6_LEN,		68, 0x000000ff },
		{ NIG_REG_LLH0_ACPI_PAT_0_CRC,		68, 0xffffffff },
		{ NIG_REG_LLH0_DEST_MAC_0_0,		160, 0xffffffff },
		{ NIG_REG_LLH0_DEST_IP_0_1,		160, 0xffffffff },
		{ NIG_REG_LLH0_IPV4_IPV6_0,		160, 0x00000001 },
		{ NIG_REG_LLH0_DEST_UDP_0,		160, 0x0000ffff },
/* 30 */	{ NIG_REG_LLH0_DEST_TCP_0,		160, 0x0000ffff },
		{ NIG_REG_LLH0_VLAN_ID_0,		160, 0x00000fff },
		{ NIG_REG_XGXS_SERDES0_MODE_SEL,	4, 0x00000001 },
		{ NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0, 4, 0x00000001 },
		{ NIG_REG_STATUS_INTERRUPT_PORT0,	4, 0x07ffffff },
		{ NIG_REG_XGXS0_CTRL_EXTREMOTEMDIOST,	24, 0x00000001 },
		{ NIG_REG_SERDES0_CTRL_PHY_ADDR,	16, 0x0000001f },
		{ 0xffffffff, 0, 0x00000000 }
	};

	if (!netif_running(bp->dev))
		return rc;

	/* Repeat the test twice:
	   First by writing 0x00000000, second by writing 0xffffffff */
	for (idx = 0; idx < 2; idx++) {

		switch (idx) {
		case 0:
			wr_val = 0x00000000;
			break;
		case 1:
			wr_val = 0xffffffff;
			break;
		}

		for (i = 0; reg_tbl[i].offset0 != 0xffffffff; i++) {
			u32 offset, mask, save_val, val;

			offset = reg_tbl[i].offset0 + port*reg_tbl[i].offset1;
			mask = reg_tbl[i].mask;

			save_val = REG_RD(bp, offset);

			REG_WR(bp, offset, wr_val);
			val = REG_RD(bp, offset);

			/* Restore the original register's value */
			REG_WR(bp, offset, save_val);

			/* verify that value is as expected value */
			if ((val & mask) != (wr_val & mask))
				goto test_reg_exit;
		}
	}

	rc = 0;

test_reg_exit:
	return rc;
}

static int bnx2x_test_memory(struct bnx2x *bp)
{
	int i, j, rc = -ENODEV;
	u32 val;
	static const struct {
		u32 offset;
		int size;
	} mem_tbl[] = {
		{ CCM_REG_XX_DESCR_TABLE,   CCM_REG_XX_DESCR_TABLE_SIZE },
		{ CFC_REG_ACTIVITY_COUNTER, CFC_REG_ACTIVITY_COUNTER_SIZE },
		{ CFC_REG_LINK_LIST,        CFC_REG_LINK_LIST_SIZE },
		{ DMAE_REG_CMD_MEM,         DMAE_REG_CMD_MEM_SIZE },
		{ TCM_REG_XX_DESCR_TABLE,   TCM_REG_XX_DESCR_TABLE_SIZE },
		{ UCM_REG_XX_DESCR_TABLE,   UCM_REG_XX_DESCR_TABLE_SIZE },
		{ XCM_REG_XX_DESCR_TABLE,   XCM_REG_XX_DESCR_TABLE_SIZE },

		{ 0xffffffff, 0 }
	};
	static const struct {
		char *name;
		u32 offset;
		u32 e1_mask;
		u32 e1h_mask;
	} prty_tbl[] = {
		{ "CCM_PRTY_STS",  CCM_REG_CCM_PRTY_STS,   0x3ffc0, 0 },
		{ "CFC_PRTY_STS",  CFC_REG_CFC_PRTY_STS,   0x2,     0x2 },
		{ "DMAE_PRTY_STS", DMAE_REG_DMAE_PRTY_STS, 0,       0 },
		{ "TCM_PRTY_STS",  TCM_REG_TCM_PRTY_STS,   0x3ffc0, 0 },
		{ "UCM_PRTY_STS",  UCM_REG_UCM_PRTY_STS,   0x3ffc0, 0 },
		{ "XCM_PRTY_STS",  XCM_REG_XCM_PRTY_STS,   0x3ffc1, 0 },

		{ NULL, 0xffffffff, 0, 0 }
	};

	if (!netif_running(bp->dev))
		return rc;

	/* Go through all the memories */
	for (i = 0; mem_tbl[i].offset != 0xffffffff; i++)
		for (j = 0; j < mem_tbl[i].size; j++)
			REG_RD(bp, mem_tbl[i].offset + j*4);

	/* Check the parity status */
	for (i = 0; prty_tbl[i].offset != 0xffffffff; i++) {
		val = REG_RD(bp, prty_tbl[i].offset);
		if ((CHIP_IS_E1(bp) && (val & ~(prty_tbl[i].e1_mask))) ||
		    (CHIP_IS_E1H(bp) && (val & ~(prty_tbl[i].e1h_mask)))) {
			DP(NETIF_MSG_PROBE,
			   "%s is 0x%x\n", prty_tbl[i].name, val);
			goto test_mem_exit;
		}
	}

	rc = 0;

test_mem_exit:
	return rc;
}

static void bnx2x_wait_for_link(struct bnx2x *bp, u8 link_up)
{
	int cnt = 1000;

	if (link_up)
		while (bnx2x_link_test(bp) && cnt--)
			msleep(10);
}

static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode, u8 link_up)
{
	unsigned int pkt_size, num_pkts, i;
	struct sk_buff *skb;
	unsigned char *packet;
	struct bnx2x_fastpath *fp_rx = &bp->fp[0];
	struct bnx2x_fastpath *fp_tx = &bp->fp[bp->num_rx_queues];
	u16 tx_start_idx, tx_idx;
	u16 rx_start_idx, rx_idx;
	u16 pkt_prod, bd_prod;
	struct sw_tx_bd *tx_buf;
	struct eth_tx_start_bd *tx_start_bd;
	struct eth_tx_parse_bd *pbd = NULL;
	dma_addr_t mapping;
	union eth_rx_cqe *cqe;
	u8 cqe_fp_flags;
	struct sw_rx_bd *rx_buf;
	u16 len;
	int rc = -ENODEV;

	/* check the loopback mode */
	switch (loopback_mode) {
	case BNX2X_PHY_LOOPBACK:
		if (bp->link_params.loopback_mode != LOOPBACK_XGXS_10)
			return -EINVAL;
		break;
	case BNX2X_MAC_LOOPBACK:
		bp->link_params.loopback_mode = LOOPBACK_BMAC;
		bnx2x_phy_init(&bp->link_params, &bp->link_vars);
		break;
	default:
		return -EINVAL;
	}

	/* prepare the loopback packet */
	pkt_size = (((bp->dev->mtu < ETH_MAX_PACKET_SIZE) ?
		     bp->dev->mtu : ETH_MAX_PACKET_SIZE) + ETH_HLEN);
	skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
	if (!skb) {
		rc = -ENOMEM;
		goto test_loopback_exit;
	}
	packet = skb_put(skb, pkt_size);
	memcpy(packet, bp->dev->dev_addr, ETH_ALEN);
	memset(packet + ETH_ALEN, 0, ETH_ALEN);
	memset(packet + 2*ETH_ALEN, 0x77, (ETH_HLEN - 2*ETH_ALEN));
	for (i = ETH_HLEN; i < pkt_size; i++)
		packet[i] = (unsigned char) (i & 0xff);

	/* send the loopback packet */
	num_pkts = 0;
	tx_start_idx = le16_to_cpu(*fp_tx->tx_cons_sb);
	rx_start_idx = le16_to_cpu(*fp_rx->rx_cons_sb);

	pkt_prod = fp_tx->tx_pkt_prod++;
	tx_buf = &fp_tx->tx_buf_ring[TX_BD(pkt_prod)];
	tx_buf->first_bd = fp_tx->tx_bd_prod;
	tx_buf->skb = skb;
	tx_buf->flags = 0;

	bd_prod = TX_BD(fp_tx->tx_bd_prod);
	tx_start_bd = &fp_tx->tx_desc_ring[bd_prod].start_bd;
	mapping = pci_map_single(bp->pdev, skb->data,
				 skb_headlen(skb), PCI_DMA_TODEVICE);
	tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
	tx_start_bd->nbd = cpu_to_le16(2); /* start + pbd */
	tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb));
	tx_start_bd->vlan = cpu_to_le16(pkt_prod);
	tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
	tx_start_bd->general_data = ((UNICAST_ADDRESS <<
				ETH_TX_START_BD_ETH_ADDR_TYPE_SHIFT) | 1);

	/* turn on parsing and get a BD */
	bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
	pbd = &fp_tx->tx_desc_ring[bd_prod].parse_bd;

	memset(pbd, 0, sizeof(struct eth_tx_parse_bd));

	wmb();

	fp_tx->tx_db.data.prod += 2;
	barrier();
	DOORBELL(bp, fp_tx->index - bp->num_rx_queues, fp_tx->tx_db.raw);

	mmiowb();

	num_pkts++;
	fp_tx->tx_bd_prod += 2; /* start + pbd */
	bp->dev->trans_start = jiffies;

	udelay(100);

	tx_idx = le16_to_cpu(*fp_tx->tx_cons_sb);
	if (tx_idx != tx_start_idx + num_pkts)
		goto test_loopback_exit;

	rx_idx = le16_to_cpu(*fp_rx->rx_cons_sb);
	if (rx_idx != rx_start_idx + num_pkts)
		goto test_loopback_exit;

	cqe = &fp_rx->rx_comp_ring[RCQ_BD(fp_rx->rx_comp_cons)];
	cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
	if (CQE_TYPE(cqe_fp_flags) || (cqe_fp_flags & ETH_RX_ERROR_FALGS))
		goto test_loopback_rx_exit;

	len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
	if (len != pkt_size)
		goto test_loopback_rx_exit;

	rx_buf = &fp_rx->rx_buf_ring[RX_BD(fp_rx->rx_bd_cons)];
	skb = rx_buf->skb;
	skb_reserve(skb, cqe->fast_path_cqe.placement_offset);
	for (i = ETH_HLEN; i < pkt_size; i++)
		if (*(skb->data + i) != (unsigned char) (i & 0xff))
			goto test_loopback_rx_exit;

	rc = 0;

test_loopback_rx_exit:

	fp_rx->rx_bd_cons = NEXT_RX_IDX(fp_rx->rx_bd_cons);
	fp_rx->rx_bd_prod = NEXT_RX_IDX(fp_rx->rx_bd_prod);
	fp_rx->rx_comp_cons = NEXT_RCQ_IDX(fp_rx->rx_comp_cons);
	fp_rx->rx_comp_prod = NEXT_RCQ_IDX(fp_rx->rx_comp_prod);

	/* Update producers */
	bnx2x_update_rx_prod(bp, fp_rx, fp_rx->rx_bd_prod, fp_rx->rx_comp_prod,
			     fp_rx->rx_sge_prod);

test_loopback_exit:
	bp->link_params.loopback_mode = LOOPBACK_NONE;

	return rc;
}
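
/* The loopback test above is self-contained: it builds one pattern-filled
 * frame addressed to our own MAC, posts it on the Tx ring (start BD plus
 * an empty parsing BD, hence the "+ 2"), rings the doorbell, and then
 * checks that exactly one packet moved on both the Tx and Rx consumer
 * indices and that the received payload matches byte for byte.
 */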

static int bnx2x_test_loopback(struct bnx2x *bp, u8 link_up)
{
	int rc = 0, res;

	if (!netif_running(bp->dev))
		return BNX2X_LOOPBACK_FAILED;

	bnx2x_netif_stop(bp, 1);
	bnx2x_acquire_phy_lock(bp);

	res = bnx2x_run_loopback(bp, BNX2X_PHY_LOOPBACK, link_up);
	if (res) {
		DP(NETIF_MSG_PROBE, "  PHY loopback failed  (res %d)\n", res);
		rc |= BNX2X_PHY_LOOPBACK_FAILED;
	}

	res = bnx2x_run_loopback(bp, BNX2X_MAC_LOOPBACK, link_up);
	if (res) {
		DP(NETIF_MSG_PROBE, "  MAC loopback failed  (res %d)\n", res);
		rc |= BNX2X_MAC_LOOPBACK_FAILED;
	}

	bnx2x_release_phy_lock(bp);
	bnx2x_netif_start(bp);

	return rc;
}

#define CRC32_RESIDUAL			0xdebb20e3

static int bnx2x_test_nvram(struct bnx2x *bp)
{
	static const struct {
		int offset;
		int size;
	} nvram_tbl[] = {
		{     0,  0x14 }, /* bootstrap */
		{  0x14,  0xec }, /* dir */
		{ 0x100, 0x350 }, /* manuf_info */
		{ 0x450,  0xf0 }, /* feature_info */
		{ 0x640,  0x64 }, /* upgrade_key_info */
		{ 0x6a4,  0x64 },
		{ 0x708,  0x70 }, /* manuf_key_info */
		{ 0x778,  0x70 },
		{     0,     0 }
	};
	__be32 buf[0x350 / 4];
	u8 *data = (u8 *)buf;
	int i, rc;
	u32 magic, csum;

	rc = bnx2x_nvram_read(bp, 0, data, 4);
	if (rc) {
		DP(NETIF_MSG_PROBE, "magic value read (rc %d)\n", rc);
		goto test_nvram_exit;
	}

	magic = be32_to_cpu(buf[0]);
	if (magic != 0x669955aa) {
		DP(NETIF_MSG_PROBE, "magic value (0x%08x)\n", magic);
		rc = -ENODEV;
		goto test_nvram_exit;
	}

	for (i = 0; nvram_tbl[i].size; i++) {

		rc = bnx2x_nvram_read(bp, nvram_tbl[i].offset, data,
				      nvram_tbl[i].size);
		if (rc) {
			DP(NETIF_MSG_PROBE,
			   "nvram_tbl[%d] read data (rc %d)\n", i, rc);
			goto test_nvram_exit;
		}

		csum = ether_crc_le(nvram_tbl[i].size, data);
		if (csum != CRC32_RESIDUAL) {
			DP(NETIF_MSG_PROBE,
			   "nvram_tbl[%d] csum value (0x%08x)\n", i, csum);
			rc = -ENODEV;
			goto test_nvram_exit;
		}
	}

test_nvram_exit:
	return rc;
}
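
/* CRC32_RESIDUAL (0xdebb20e3) is the standard CRC-32 residue: running
 * ether_crc_le() over a block that already contains its own trailing CRC
 * yields this constant, so each nvram_tbl region is verified without
 * knowing where its checksum field sits.
 */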

static int bnx2x_test_intr(struct bnx2x *bp)
{
	struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
	int i, rc;

	if (!netif_running(bp->dev))
		return -ENODEV;

	config->hdr.length = 0;
	if (CHIP_IS_E1(bp))
		config->hdr.offset = (BP_PORT(bp) ? 32 : 0);
	else
		config->hdr.offset = BP_FUNC(bp);
	config->hdr.client_id = bp->fp->cl_id;
	config->hdr.reserved1 = 0;

	rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
			   U64_HI(bnx2x_sp_mapping(bp, mac_config)),
			   U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
	if (rc == 0) {
		bp->set_mac_pending++;
		for (i = 0; i < 10; i++) {
			if (!bp->set_mac_pending)
				break;
			msleep_interruptible(10);
		}
		if (i == 10)
			rc = -ENODEV;
	}

	return rc;
}

static void bnx2x_self_test(struct net_device *dev,
			    struct ethtool_test *etest, u64 *buf)
{
	struct bnx2x *bp = netdev_priv(dev);

	memset(buf, 0, sizeof(u64) * BNX2X_NUM_TESTS);

	if (!netif_running(dev))
		return;

	/* offline tests are not supported in MF mode */
	if (IS_E1HMF(bp))
		etest->flags &= ~ETH_TEST_FL_OFFLINE;

	if (etest->flags & ETH_TEST_FL_OFFLINE) {
		int port = BP_PORT(bp);
		u32 val;
		u8 link_up;

		/* save current value of input enable for TX port IF */
		val = REG_RD(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4);
		/* disable input for TX port IF */
		REG_WR(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4, 0);

		link_up = bp->link_vars.link_up;
		bnx2x_nic_unload(bp, UNLOAD_NORMAL);
		bnx2x_nic_load(bp, LOAD_DIAG);
		/* wait until link state is restored */
		bnx2x_wait_for_link(bp, link_up);

		if (bnx2x_test_registers(bp) != 0) {
			buf[0] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}
		if (bnx2x_test_memory(bp) != 0) {
			buf[1] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}
		buf[2] = bnx2x_test_loopback(bp, link_up);
		if (buf[2] != 0)
			etest->flags |= ETH_TEST_FL_FAILED;

		bnx2x_nic_unload(bp, UNLOAD_NORMAL);

		/* restore input for TX port IF */
		REG_WR(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4, val);

		bnx2x_nic_load(bp, LOAD_NORMAL);
		/* wait until link state is restored */
		bnx2x_wait_for_link(bp, link_up);
	}
	if (bnx2x_test_nvram(bp) != 0) {
		buf[3] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}
	if (bnx2x_test_intr(bp) != 0) {
		buf[4] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}
	if (bp->port.pmf)
		if (bnx2x_link_test(bp) != 0) {
			buf[5] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}

#ifdef BNX2X_EXTRA_DEBUG
	bnx2x_panic_dump(bp);
#endif
}

static const struct {
	long offset;
	int size;
	u8 string[ETH_GSTRING_LEN];
} bnx2x_q_stats_arr[BNX2X_NUM_Q_STATS] = {
/* 1 */	{ Q_STATS_OFFSET32(total_bytes_received_hi), 8, "[%d]: rx_bytes" },
	{ Q_STATS_OFFSET32(error_bytes_received_hi),
						8, "[%d]: rx_error_bytes" },
	{ Q_STATS_OFFSET32(total_unicast_packets_received_hi),
						8, "[%d]: rx_ucast_packets" },
	{ Q_STATS_OFFSET32(total_multicast_packets_received_hi),
						8, "[%d]: rx_mcast_packets" },
	{ Q_STATS_OFFSET32(total_broadcast_packets_received_hi),
						8, "[%d]: rx_bcast_packets" },
	{ Q_STATS_OFFSET32(no_buff_discard_hi),	8, "[%d]: rx_discards" },
	{ Q_STATS_OFFSET32(rx_err_discard_pkt),
					4, "[%d]: rx_phy_ip_err_discards"},
	{ Q_STATS_OFFSET32(rx_skb_alloc_failed),
					4, "[%d]: rx_skb_alloc_discard" },
	{ Q_STATS_OFFSET32(hw_csum_err), 4, "[%d]: rx_csum_offload_errors" },

/* 10 */{ Q_STATS_OFFSET32(total_bytes_transmitted_hi),	8, "[%d]: tx_bytes" },
	{ Q_STATS_OFFSET32(total_unicast_packets_transmitted_hi),
						8, "[%d]: tx_packets" }
};

static const struct {
	long offset;
	int size;
	u32 flags;
#define STATS_FLAGS_PORT		1
#define STATS_FLAGS_FUNC		2
#define STATS_FLAGS_BOTH		(STATS_FLAGS_FUNC | STATS_FLAGS_PORT)
	u8 string[ETH_GSTRING_LEN];
} bnx2x_stats_arr[BNX2X_NUM_STATS] = {
/* 1 */	{ STATS_OFFSET32(total_bytes_received_hi),
				8, STATS_FLAGS_BOTH, "rx_bytes" },
	{ STATS_OFFSET32(error_bytes_received_hi),
				8, STATS_FLAGS_BOTH, "rx_error_bytes" },
	{ STATS_OFFSET32(total_unicast_packets_received_hi),
				8, STATS_FLAGS_BOTH, "rx_ucast_packets" },
	{ STATS_OFFSET32(total_multicast_packets_received_hi),
				8, STATS_FLAGS_BOTH, "rx_mcast_packets" },
	{ STATS_OFFSET32(total_broadcast_packets_received_hi),
				8, STATS_FLAGS_BOTH, "rx_bcast_packets" },
	{ STATS_OFFSET32(rx_stat_dot3statsfcserrors_hi),
				8, STATS_FLAGS_PORT, "rx_crc_errors" },
	{ STATS_OFFSET32(rx_stat_dot3statsalignmenterrors_hi),
				8, STATS_FLAGS_PORT, "rx_align_errors" },
	{ STATS_OFFSET32(rx_stat_etherstatsundersizepkts_hi),
				8, STATS_FLAGS_PORT, "rx_undersize_packets" },
	{ STATS_OFFSET32(etherstatsoverrsizepkts_hi),
				8, STATS_FLAGS_PORT, "rx_oversize_packets" },
/* 10 */{ STATS_OFFSET32(rx_stat_etherstatsfragments_hi),
				8, STATS_FLAGS_PORT, "rx_fragments" },
	{ STATS_OFFSET32(rx_stat_etherstatsjabbers_hi),
				8, STATS_FLAGS_PORT, "rx_jabbers" },
	{ STATS_OFFSET32(no_buff_discard_hi),
				8, STATS_FLAGS_BOTH, "rx_discards" },
	{ STATS_OFFSET32(mac_filter_discard),
				4, STATS_FLAGS_PORT, "rx_filtered_packets" },
	{ STATS_OFFSET32(xxoverflow_discard),
				4, STATS_FLAGS_PORT, "rx_fw_discards" },
	{ STATS_OFFSET32(brb_drop_hi),
				8, STATS_FLAGS_PORT, "rx_brb_discard" },
	{ STATS_OFFSET32(brb_truncate_hi),
				8, STATS_FLAGS_PORT, "rx_brb_truncate" },
	{ STATS_OFFSET32(pause_frames_received_hi),
				8, STATS_FLAGS_PORT, "rx_pause_frames" },
	{ STATS_OFFSET32(rx_stat_maccontrolframesreceived_hi),
				8, STATS_FLAGS_PORT, "rx_mac_ctrl_frames" },
	{ STATS_OFFSET32(nig_timer_max),
			4, STATS_FLAGS_PORT, "rx_constant_pause_events" },
/* 20 */{ STATS_OFFSET32(rx_err_discard_pkt),
				4, STATS_FLAGS_BOTH, "rx_phy_ip_err_discards"},
	{ STATS_OFFSET32(rx_skb_alloc_failed),
				4, STATS_FLAGS_BOTH, "rx_skb_alloc_discard" },
	{ STATS_OFFSET32(hw_csum_err),
				4, STATS_FLAGS_BOTH, "rx_csum_offload_errors" },

	{ STATS_OFFSET32(total_bytes_transmitted_hi),
				8, STATS_FLAGS_BOTH, "tx_bytes" },
	{ STATS_OFFSET32(tx_stat_ifhcoutbadoctets_hi),
				8, STATS_FLAGS_PORT, "tx_error_bytes" },
	{ STATS_OFFSET32(total_unicast_packets_transmitted_hi),
				8, STATS_FLAGS_BOTH, "tx_packets" },
	{ STATS_OFFSET32(tx_stat_dot3statsinternalmactransmiterrors_hi),
				8, STATS_FLAGS_PORT, "tx_mac_errors" },
	{ STATS_OFFSET32(rx_stat_dot3statscarriersenseerrors_hi),
				8, STATS_FLAGS_PORT, "tx_carrier_errors" },
	{ STATS_OFFSET32(tx_stat_dot3statssinglecollisionframes_hi),
				8, STATS_FLAGS_PORT, "tx_single_collisions" },
	{ STATS_OFFSET32(tx_stat_dot3statsmultiplecollisionframes_hi),
				8, STATS_FLAGS_PORT, "tx_multi_collisions" },
/* 30 */{ STATS_OFFSET32(tx_stat_dot3statsdeferredtransmissions_hi),
				8, STATS_FLAGS_PORT, "tx_deferred" },
	{ STATS_OFFSET32(tx_stat_dot3statsexcessivecollisions_hi),
				8, STATS_FLAGS_PORT, "tx_excess_collisions" },
	{ STATS_OFFSET32(tx_stat_dot3statslatecollisions_hi),
				8, STATS_FLAGS_PORT, "tx_late_collisions" },
	{ STATS_OFFSET32(tx_stat_etherstatscollisions_hi),
				8, STATS_FLAGS_PORT, "tx_total_collisions" },
	{ STATS_OFFSET32(tx_stat_etherstatspkts64octets_hi),
				8, STATS_FLAGS_PORT, "tx_64_byte_packets" },
	{ STATS_OFFSET32(tx_stat_etherstatspkts65octetsto127octets_hi),
			8, STATS_FLAGS_PORT, "tx_65_to_127_byte_packets" },
	{ STATS_OFFSET32(tx_stat_etherstatspkts128octetsto255octets_hi),
			8, STATS_FLAGS_PORT, "tx_128_to_255_byte_packets" },
	{ STATS_OFFSET32(tx_stat_etherstatspkts256octetsto511octets_hi),
			8, STATS_FLAGS_PORT, "tx_256_to_511_byte_packets" },
	{ STATS_OFFSET32(tx_stat_etherstatspkts512octetsto1023octets_hi),
			8, STATS_FLAGS_PORT, "tx_512_to_1023_byte_packets" },
	{ STATS_OFFSET32(etherstatspkts1024octetsto1522octets_hi),
			8, STATS_FLAGS_PORT, "tx_1024_to_1522_byte_packets" },
/* 40 */{ STATS_OFFSET32(etherstatspktsover1522octets_hi),
			8, STATS_FLAGS_PORT, "tx_1523_to_9022_byte_packets" },
	{ STATS_OFFSET32(pause_frames_sent_hi),
				8, STATS_FLAGS_PORT, "tx_pause_frames" }
};

#define IS_PORT_STAT(i) \
	((bnx2x_stats_arr[i].flags & STATS_FLAGS_BOTH) == STATS_FLAGS_PORT)
#define IS_FUNC_STAT(i)		(bnx2x_stats_arr[i].flags & STATS_FLAGS_FUNC)
#define IS_E1HMF_MODE_STAT(bp) \
			(IS_E1HMF(bp) && !(bp->msglevel & BNX2X_MSG_STATS))

static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
{
	struct bnx2x *bp = netdev_priv(dev);
	int i, j, k;

	switch (stringset) {
	case ETH_SS_STATS:
		if (is_multi(bp)) {
			k = 0;
			for_each_rx_queue(bp, i) {
				for (j = 0; j < BNX2X_NUM_Q_STATS; j++)
					sprintf(buf + (k + j)*ETH_GSTRING_LEN,
						bnx2x_q_stats_arr[j].string, i);
				k += BNX2X_NUM_Q_STATS;
			}
			if (IS_E1HMF_MODE_STAT(bp))
				break;
			for (j = 0; j < BNX2X_NUM_STATS; j++)
				strcpy(buf + (k + j)*ETH_GSTRING_LEN,
				       bnx2x_stats_arr[j].string);
		} else {
			for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
				if (IS_E1HMF_MODE_STAT(bp) && IS_PORT_STAT(i))
					continue;
				strcpy(buf + j*ETH_GSTRING_LEN,
				       bnx2x_stats_arr[i].string);
				j++;
			}
		}
		break;

	case ETH_SS_TEST:
		memcpy(buf, bnx2x_tests_str_arr, sizeof(bnx2x_tests_str_arr));
		break;
	}
}

static int bnx2x_get_stats_count(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);
	int i, num_stats;

	if (is_multi(bp)) {
		num_stats = BNX2X_NUM_Q_STATS * bp->num_rx_queues;
		if (!IS_E1HMF_MODE_STAT(bp))
			num_stats += BNX2X_NUM_STATS;
	} else {
		if (IS_E1HMF_MODE_STAT(bp)) {
			num_stats = 0;
			for (i = 0; i < BNX2X_NUM_STATS; i++)
				if (IS_FUNC_STAT(i))
					num_stats++;
		} else
			num_stats = BNX2X_NUM_STATS;
	}

	return num_stats;
}

static void bnx2x_get_ethtool_stats(struct net_device *dev,
				    struct ethtool_stats *stats, u64 *buf)
{
	struct bnx2x *bp = netdev_priv(dev);
	u32 *hw_stats, *offset;
	int i, j, k;

	if (is_multi(bp)) {
		k = 0;
		for_each_rx_queue(bp, i) {
			hw_stats = (u32 *)&bp->fp[i].eth_q_stats;
			for (j = 0; j < BNX2X_NUM_Q_STATS; j++) {
				if (bnx2x_q_stats_arr[j].size == 0) {
					/* skip this counter */
					buf[k + j] = 0;
					continue;
				}
				offset = (hw_stats +
					  bnx2x_q_stats_arr[j].offset);
				if (bnx2x_q_stats_arr[j].size == 4) {
					/* 4-byte counter */
					buf[k + j] = (u64) *offset;
					continue;
				}
				/* 8-byte counter */
				buf[k + j] = HILO_U64(*offset, *(offset + 1));
			}
			k += BNX2X_NUM_Q_STATS;
		}
		if (IS_E1HMF_MODE_STAT(bp))
			return;
		hw_stats = (u32 *)&bp->eth_stats;
		for (j = 0; j < BNX2X_NUM_STATS; j++) {
			if (bnx2x_stats_arr[j].size == 0) {
				/* skip this counter */
				buf[k + j] = 0;
				continue;
			}
			offset = (hw_stats + bnx2x_stats_arr[j].offset);
			if (bnx2x_stats_arr[j].size == 4) {
				/* 4-byte counter */
				buf[k + j] = (u64) *offset;
				continue;
			}
			/* 8-byte counter */
			buf[k + j] = HILO_U64(*offset, *(offset + 1));
		}
	} else {
		hw_stats = (u32 *)&bp->eth_stats;
		for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
			if (IS_E1HMF_MODE_STAT(bp) && IS_PORT_STAT(i))
				continue;
			if (bnx2x_stats_arr[i].size == 0) {
				/* skip this counter */
				buf[j] = 0;
				j++;
				continue;
			}
			offset = (hw_stats + bnx2x_stats_arr[i].offset);
			if (bnx2x_stats_arr[i].size == 4) {
				/* 4-byte counter */
				buf[j] = (u64) *offset;
				j++;
				continue;
			}
			/* 8-byte counter */
			buf[j] = HILO_U64(*offset, *(offset + 1));
			j++;
		}
	}
}
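
/* Counters wider than 32 bits are kept as hi/lo dword pairs in the storm
 * statistics blocks; HILO_U64(*offset, *(offset + 1)) above folds such a
 * pair into the single u64 that ethtool expects.
 */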

static int bnx2x_phys_id(struct net_device *dev, u32 data)
{
	struct bnx2x *bp = netdev_priv(dev);
	int port = BP_PORT(bp);
	int i;

	if (!netif_running(dev))
		return 0;

	if (!bp->port.pmf)
		return 0;

	if (data == 0)
		data = 2;

	for (i = 0; i < (data * 2); i++) {
		if ((i % 2) == 0)
			bnx2x_set_led(bp, port, LED_MODE_OPER, SPEED_1000,
				      bp->link_params.hw_led_mode,
				      bp->link_params.chip_id);
		else
			bnx2x_set_led(bp, port, LED_MODE_OFF, 0,
				      bp->link_params.hw_led_mode,
				      bp->link_params.chip_id);

		msleep_interruptible(500);
		if (signal_pending(current))
			break;
	}

	if (bp->link_vars.link_up)
		bnx2x_set_led(bp, port, LED_MODE_OPER,
			      bp->link_vars.line_speed,
			      bp->link_params.hw_led_mode,
			      bp->link_params.chip_id);

	return 0;
}

static struct ethtool_ops bnx2x_ethtool_ops = {
	.get_settings		= bnx2x_get_settings,
	.set_settings		= bnx2x_set_settings,
	.get_drvinfo		= bnx2x_get_drvinfo,
	.get_regs_len		= bnx2x_get_regs_len,
	.get_regs		= bnx2x_get_regs,
	.get_wol		= bnx2x_get_wol,
	.set_wol		= bnx2x_set_wol,
	.get_msglevel		= bnx2x_get_msglevel,
	.set_msglevel		= bnx2x_set_msglevel,
	.nway_reset		= bnx2x_nway_reset,
	.get_link		= bnx2x_get_link,
	.get_eeprom_len		= bnx2x_get_eeprom_len,
	.get_eeprom		= bnx2x_get_eeprom,
	.set_eeprom		= bnx2x_set_eeprom,
	.get_coalesce		= bnx2x_get_coalesce,
	.set_coalesce		= bnx2x_set_coalesce,
	.get_ringparam		= bnx2x_get_ringparam,
	.set_ringparam		= bnx2x_set_ringparam,
	.get_pauseparam		= bnx2x_get_pauseparam,
	.set_pauseparam		= bnx2x_set_pauseparam,
	.get_rx_csum		= bnx2x_get_rx_csum,
	.set_rx_csum		= bnx2x_set_rx_csum,
	.get_tx_csum		= ethtool_op_get_tx_csum,
	.set_tx_csum		= ethtool_op_set_tx_hw_csum,
	.set_flags		= bnx2x_set_flags,
	.get_flags		= ethtool_op_get_flags,
	.get_sg			= ethtool_op_get_sg,
	.set_sg			= ethtool_op_set_sg,
	.get_tso		= ethtool_op_get_tso,
	.set_tso		= bnx2x_set_tso,
	.self_test_count	= bnx2x_self_test_count,
	.self_test		= bnx2x_self_test,
	.get_strings		= bnx2x_get_strings,
	.phys_id		= bnx2x_phys_id,
	.get_stats_count	= bnx2x_get_stats_count,
	.get_ethtool_stats	= bnx2x_get_ethtool_stats,
};

/* end of ethtool_ops */

/****************************************************************************
* General service functions
****************************************************************************/

static int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
{
	u16 pmcsr;

	pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);

	switch (state) {
	case PCI_D0:
		pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
				      ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
				       PCI_PM_CTRL_PME_STATUS));

		if (pmcsr & PCI_PM_CTRL_STATE_MASK)
			/* delay required during transition out of D3hot */
			msleep(20);
		break;

	case PCI_D3hot:
		pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
		pmcsr |= 3;

		if (bp->wol)
			pmcsr |= PCI_PM_CTRL_PME_ENABLE;

		pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
				      pmcsr);

		/* No more memory access after this point until
		 * device is brought back to D0.
		 */
		break;

	default:
		return -EINVAL;
	}
	return 0;
}

static inline int bnx2x_has_rx_work(struct bnx2x_fastpath *fp)
{
	u16 rx_cons_sb;

	/* Tell compiler that status block fields can change */
	barrier();
	rx_cons_sb = le16_to_cpu(*fp->rx_cons_sb);
	if ((rx_cons_sb & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
		rx_cons_sb++;
	return (fp->rx_comp_cons != rx_cons_sb);
}
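
/* The MAX_RCQ_DESC_CNT check above skips the "next page" pointer entry:
 * when the consumer index from the status block lands on the last
 * descriptor of an RCQ page it is advanced by one so that it compares
 * against real completion entries only.
 */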

/*
 * net_device service functions
 */

static int bnx2x_poll(struct napi_struct *napi, int budget)
{
	struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
						 napi);
	struct bnx2x *bp = fp->bp;
	int work_done = 0;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		goto poll_panic;
#endif

	prefetch(fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)].skb);
	prefetch((char *)(fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)].skb) + 256);

	bnx2x_update_fpsb_idx(fp);

	if (bnx2x_has_rx_work(fp)) {
		work_done = bnx2x_rx_int(fp, budget);

		/* must not complete if we consumed full budget */
		if (work_done >= budget)
			goto poll_again;
	}

	/* bnx2x_has_rx_work() reads the status block, thus we need to
	 * ensure that status block indices have been actually read
	 * (bnx2x_update_fpsb_idx) prior to this check (bnx2x_has_rx_work)
	 * so that we won't write the "newer" value of the status block to IGU
	 * (if there was a DMA right after bnx2x_has_rx_work and
	 * if there is no rmb, the memory reading (bnx2x_update_fpsb_idx)
	 * may be postponed to right before bnx2x_ack_sb). In this case
	 * there will never be another interrupt until there is another update
	 * of the status block, while there is still unhandled work.
	 */
	rmb();

	if (!bnx2x_has_rx_work(fp)) {
#ifdef BNX2X_STOP_ON_ERROR
poll_panic:
#endif
		napi_complete(napi);

		bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID,
			     le16_to_cpu(fp->fp_u_idx), IGU_INT_NOP, 1);
		bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID,
			     le16_to_cpu(fp->fp_c_idx), IGU_INT_ENABLE, 1);
	}

poll_again:
	return work_done;
}

/* we split the first BD into headers and data BDs
 * to ease the pain of our fellow microcode engineers
 * we use one mapping for both BDs
 * So far this has only been observed to happen
 * in Other Operating Systems(TM)
 */
static noinline u16 bnx2x_tx_split(struct bnx2x *bp,
				   struct bnx2x_fastpath *fp,
				   struct sw_tx_bd *tx_buf,
				   struct eth_tx_start_bd **tx_bd, u16 hlen,
				   u16 bd_prod, int nbd)
{
	struct eth_tx_start_bd *h_tx_bd = *tx_bd;
	struct eth_tx_bd *d_tx_bd;
	dma_addr_t mapping;
	int old_len = le16_to_cpu(h_tx_bd->nbytes);

	/* first fix first BD */
	h_tx_bd->nbd = cpu_to_le16(nbd);
	h_tx_bd->nbytes = cpu_to_le16(hlen);

	DP(NETIF_MSG_TX_QUEUED,	"TSO split header size is %d "
	   "(%x:%x) nbd %d\n", h_tx_bd->nbytes, h_tx_bd->addr_hi,
	   h_tx_bd->addr_lo, h_tx_bd->nbd);

	/* now get a new data BD
	 * (after the pbd) and fill it */
	bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
	d_tx_bd = &fp->tx_desc_ring[bd_prod].reg_bd;

	mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi),
			   le32_to_cpu(h_tx_bd->addr_lo)) + hlen;

	d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
	d_tx_bd->nbytes = cpu_to_le16(old_len - hlen);

	/* this marks the BD as one that has no individual mapping */
	tx_buf->flags |= BNX2X_TSO_SPLIT_BD;

	DP(NETIF_MSG_TX_QUEUED,
	   "TSO split data size is %d (%x:%x)\n",
	   d_tx_bd->nbytes, d_tx_bd->addr_hi, d_tx_bd->addr_lo);

	/* update tx_bd */
	*tx_bd = (struct eth_tx_start_bd *)d_tx_bd;

	return bd_prod;
}

static inline u16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
{
	if (fix > 0)
		csum = (u16) ~csum_fold(csum_sub(csum,
				csum_partial(t_header - fix, fix, 0)));

	else if (fix < 0)
		csum = (u16) ~csum_fold(csum_add(csum,
				csum_partial(t_header, -fix, 0)));

	return swab16(csum);
}
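
/* bnx2x_csum_fix() compensates for a pseudo checksum that was computed
 * from the wrong starting offset: a positive fix means that many extra
 * bytes before the transport header were summed and must be subtracted,
 * a negative fix means bytes were missed and must be added back; the
 * result is byte-swapped into the order the parsing BD expects.
 */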

static inline u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
{
	u32 rc;

	if (skb->ip_summed != CHECKSUM_PARTIAL)
		rc = XMIT_PLAIN;

	else {
		if (skb->protocol == htons(ETH_P_IPV6)) {
			rc = XMIT_CSUM_V6;
			if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
				rc |= XMIT_CSUM_TCP;

		} else {
			rc = XMIT_CSUM_V4;
			if (ip_hdr(skb)->protocol == IPPROTO_TCP)
				rc |= XMIT_CSUM_TCP;
		}
	}

	if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
		rc |= (XMIT_GSO_V4 | XMIT_CSUM_V4 | XMIT_CSUM_TCP);

	else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
		rc |= (XMIT_GSO_V6 | XMIT_CSUM_TCP | XMIT_CSUM_V6);

	return rc;
}
#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
/* check if packet requires linearization (packet is too fragmented)
   no need to check fragmentation if page size > 8K (there will be no
   violation to FW restrictions) */
static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
			     u32 xmit_type)
{
	int to_copy = 0;
	int hlen = 0;
	int first_bd_sz = 0;

	/* 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */
	if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - 3)) {

		if (xmit_type & XMIT_GSO) {
			unsigned short lso_mss = skb_shinfo(skb)->gso_size;
			/* Check if LSO packet needs to be copied:
			   3 = 1 (for headers BD) + 2 (for PBD and last BD) */
			int wnd_size = MAX_FETCH_BD - 3;
			/* Number of windows to check */
			int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size;
			int wnd_idx = 0;
			int frag_idx = 0;
			u32 wnd_sum = 0;

			/* Headers length */
			hlen = (int)(skb_transport_header(skb) - skb->data) +
				tcp_hdrlen(skb);

			/* Amount of data (w/o headers) on linear part of SKB */
			first_bd_sz = skb_headlen(skb) - hlen;

			wnd_sum = first_bd_sz;

			/* Calculate the first sum - it's special */
			for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++)
				wnd_sum +=
					skb_shinfo(skb)->frags[frag_idx].size;

			/* If there was data on linear skb data - check it */
			if (first_bd_sz > 0) {
				if (unlikely(wnd_sum < lso_mss)) {
					to_copy = 1;
					goto exit_lbl;
				}

				wnd_sum -= first_bd_sz;
			}

			/* Others are easier: run through the frag list and
			   check all windows */
			for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) {
				wnd_sum +=
			  skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1].size;

				if (unlikely(wnd_sum < lso_mss)) {
					to_copy = 1;
					break;
				}
				wnd_sum -=
					skb_shinfo(skb)->frags[wnd_idx].size;
			}
		} else {
			/* in non-LSO too fragmented packet should always
			   be linearized */
			to_copy = 1;
		}
	}

exit_lbl:
	if (unlikely(to_copy))
		DP(NETIF_MSG_TX_QUEUED,
		   "Linearization IS REQUIRED for %s packet. "
		   "num_frags %d  hlen %d  first_bd_sz %d\n",
		   (xmit_type & XMIT_GSO) ? "LSO" : "non-LSO",
		   skb_shinfo(skb)->nr_frags, hlen, first_bd_sz);

	return to_copy;
}
#endif
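/*
 * Intuition for the sliding-window check above (illustrative): the FW can
 * fetch at most MAX_FETCH_BD BDs per LSO segment, three of which are taken
 * by the headers BD, the PBD and the last BD.  Every window of
 * wnd_size = MAX_FETCH_BD - 3 consecutive data BDs must therefore supply at
 * least lso_mss bytes; if any window falls short, one segment would need
 * more BDs than the FW can fetch and the skb must be linearized first.
 */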
/* called with netif_tx_lock
 * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
 * netif_wake_queue()
 */
static int bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);
	struct bnx2x_fastpath *fp, *fp_stat;
	struct netdev_queue *txq;
	struct sw_tx_bd *tx_buf;
	struct eth_tx_start_bd *tx_start_bd;
	struct eth_tx_bd *tx_data_bd, *total_pkt_bd = NULL;
	struct eth_tx_parse_bd *pbd = NULL;
	u16 pkt_prod, bd_prod;
	int nbd, fp_index;
	dma_addr_t mapping;
	u32 xmit_type = bnx2x_xmit_type(bp, skb);
	int i;
	u8 hlen = 0;
	__le16 pkt_size = 0;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return NETDEV_TX_BUSY;
#endif

	fp_index = skb_get_queue_mapping(skb);
	txq = netdev_get_tx_queue(dev, fp_index);

	fp = &bp->fp[fp_index + bp->num_rx_queues];
	fp_stat = &bp->fp[fp_index];

	if (unlikely(bnx2x_tx_avail(fp) < (skb_shinfo(skb)->nr_frags + 3))) {
		fp_stat->eth_q_stats.driver_xoff++;
		netif_tx_stop_queue(txq);
		BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
		return NETDEV_TX_BUSY;
	}

	DP(NETIF_MSG_TX_QUEUED, "SKB: summed %x  protocol %x  protocol(%x,%x)"
	   "  gso type %x  xmit_type %x\n",
	   skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
	   ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type);

#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
	/* First, check if we need to linearize the skb (due to FW
	   restrictions). No need to check fragmentation if page size > 8K
	   (there will be no violation to FW restrictions) */
	if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) {
		/* Statistics of linearization */
		bp->lin_cnt++;
		if (skb_linearize(skb) != 0) {
			DP(NETIF_MSG_TX_QUEUED, "SKB linearization failed - "
			   "silently dropping this SKB\n");
			dev_kfree_skb_any(skb);
			return NETDEV_TX_OK;
		}
	}
#endif

	/*
	Please read carefully. First we use one BD which we mark as start,
	then we have a parsing info BD (used for TSO or xsum),
	and only then we have the rest of the TSO BDs.
	(don't forget to mark the last one as last,
	and to unmap only AFTER you write to the BD ...)
	And above all, all PBD sizes are in words - NOT DWORDS!
	*/
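	/*
	 * BD chain for one packet, as described above (illustrative):
	 *
	 *   start BD -> parse BD (PBD) -> data BD ... data BD (last)
	 *
	 * nbd counts the start BD, the PBD and one BD per fragment; a TSO
	 * header/data split adds one more (see the ++nbd passed to
	 * bnx2x_tx_split() below).
	 */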
	pkt_prod = fp->tx_pkt_prod++;
	bd_prod = TX_BD(fp->tx_bd_prod);

	/* get a tx_buf and first BD */
	tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)];
	tx_start_bd = &fp->tx_desc_ring[bd_prod].start_bd;

	tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
	tx_start_bd->general_data = (UNICAST_ADDRESS <<
				     ETH_TX_START_BD_ETH_ADDR_TYPE_SHIFT);
	/* header nbd */
	tx_start_bd->general_data |= (1 << ETH_TX_START_BD_HDR_NBDS_SHIFT);

	/* remember the first BD of the packet */
	tx_buf->first_bd = fp->tx_bd_prod;
	tx_buf->skb = skb;
	tx_buf->flags = 0;

	DP(NETIF_MSG_TX_QUEUED,
	   "sending pkt %u @%p  next_idx %u  bd %u @%p\n",
	   pkt_prod, tx_buf, fp->tx_pkt_prod, bd_prod, tx_start_bd);

#ifdef BCM_VLAN
	if ((bp->vlgrp != NULL) && vlan_tx_tag_present(skb) &&
	    (bp->flags & HW_VLAN_TX_FLAG)) {
		tx_start_bd->vlan = cpu_to_le16(vlan_tx_tag_get(skb));
		tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_VLAN_TAG;
	} else
#endif
		tx_start_bd->vlan = cpu_to_le16(pkt_prod);
	/* turn on parsing and get a BD */
	bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
	pbd = &fp->tx_desc_ring[bd_prod].parse_bd;

	memset(pbd, 0, sizeof(struct eth_tx_parse_bd));

	if (xmit_type & XMIT_CSUM) {
		hlen = (skb_network_header(skb) - skb->data) / 2;

		/* for now NS flag is not used in Linux */
		pbd->global_data =
			(hlen | ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
				 ETH_TX_PARSE_BD_LLC_SNAP_EN_SHIFT));

		pbd->ip_hlen = (skb_transport_header(skb) -
				skb_network_header(skb)) / 2;

		hlen += pbd->ip_hlen + tcp_hdrlen(skb) / 2;

		pbd->total_hlen = cpu_to_le16(hlen);
		hlen = hlen*2;

		tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_L4_CSUM;

		if (xmit_type & XMIT_CSUM_V4)
			tx_start_bd->bd_flags.as_bitfield |=
						ETH_TX_BD_FLAGS_IP_CSUM;
		else
			tx_start_bd->bd_flags.as_bitfield |=
						ETH_TX_BD_FLAGS_IPV6;

		if (xmit_type & XMIT_CSUM_TCP) {
			pbd->tcp_pseudo_csum = swab16(tcp_hdr(skb)->check);

		} else {
			s8 fix = SKB_CS_OFF(skb); /* signed! */

			pbd->global_data |= ETH_TX_PARSE_BD_UDP_CS_FLG;

			DP(NETIF_MSG_TX_QUEUED,
			   "hlen %d  fix %d  csum before fix %x\n",
			   le16_to_cpu(pbd->total_hlen), fix, SKB_CS(skb));

			/* HW bug: fixup the CSUM */
			pbd->tcp_pseudo_csum =
				bnx2x_csum_fix(skb_transport_header(skb),
					       SKB_CS(skb), fix);

			DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n",
			   pbd->tcp_pseudo_csum);
		}
	}
	mapping = pci_map_single(bp->pdev, skb->data,
				 skb_headlen(skb), PCI_DMA_TODEVICE);

	tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
	nbd = skb_shinfo(skb)->nr_frags + 2; /* start_bd + pbd + frags */
	tx_start_bd->nbd = cpu_to_le16(nbd);
	tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb));
	pkt_size = tx_start_bd->nbytes;

	DP(NETIF_MSG_TX_QUEUED, "first bd @%p  addr (%x:%x)  nbd %d"
	   "  nbytes %d  flags %x  vlan %x\n",
	   tx_start_bd, tx_start_bd->addr_hi, tx_start_bd->addr_lo,
	   le16_to_cpu(tx_start_bd->nbd), le16_to_cpu(tx_start_bd->nbytes),
	   tx_start_bd->bd_flags.as_bitfield, le16_to_cpu(tx_start_bd->vlan));
	if (xmit_type & XMIT_GSO) {

		DP(NETIF_MSG_TX_QUEUED,
		   "TSO packet len %d  hlen %d  total len %d  tso size %d\n",
		   skb->len, hlen, skb_headlen(skb),
		   skb_shinfo(skb)->gso_size);

		tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;

		if (unlikely(skb_headlen(skb) > hlen))
			bd_prod = bnx2x_tx_split(bp, fp, tx_buf, &tx_start_bd,
						 hlen, bd_prod, ++nbd);

		pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
		pbd->tcp_send_seq = swab32(tcp_hdr(skb)->seq);
		pbd->tcp_flags = pbd_tcp_flags(skb);

		if (xmit_type & XMIT_GSO_V4) {
			pbd->ip_id = swab16(ip_hdr(skb)->id);
			pbd->tcp_pseudo_csum =
				swab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
							  ip_hdr(skb)->daddr,
							  0, IPPROTO_TCP, 0));

		} else
			pbd->tcp_pseudo_csum =
				swab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
							&ipv6_hdr(skb)->daddr,
							0, IPPROTO_TCP, 0));

		pbd->global_data |= ETH_TX_PARSE_BD_PSEUDO_CS_WITHOUT_LEN;
	}
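	/*
	 * Note (driver-level rationale, hedged): the pseudo-header checksum
	 * is seeded with a length of 0 (csum_tcpudp_magic/csum_ipv6_magic
	 * above), and PSEUDO_CS_WITHOUT_LEN tells the FW so; presumably the
	 * FW then folds in each segment's own length when it replays the
	 * headers per LSO segment.
	 */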
	tx_data_bd = (struct eth_tx_bd *)tx_start_bd;

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
		tx_data_bd = &fp->tx_desc_ring[bd_prod].reg_bd;
		if (total_pkt_bd == NULL)
			total_pkt_bd = &fp->tx_desc_ring[bd_prod].reg_bd;

		mapping = pci_map_page(bp->pdev, frag->page, frag->page_offset,
				       frag->size, PCI_DMA_TODEVICE);

		tx_data_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
		tx_data_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
		tx_data_bd->nbytes = cpu_to_le16(frag->size);
		le16_add_cpu(&pkt_size, frag->size);

		DP(NETIF_MSG_TX_QUEUED,
		   "frag %d  bd @%p  addr (%x:%x)  nbytes %d\n",
		   i, tx_data_bd, tx_data_bd->addr_hi, tx_data_bd->addr_lo,
		   le16_to_cpu(tx_data_bd->nbytes));
	}
	DP(NETIF_MSG_TX_QUEUED, "last bd @%p\n", tx_data_bd);

	bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));

	/* now send a tx doorbell, counting the next BD
	 * if the packet contains or ends with it
	 */
	if (TX_BD_POFF(bd_prod) < nbd)
		nbd++;

	if (total_pkt_bd != NULL)
		total_pkt_bd->total_pkt_bytes = pkt_size;

	if (pbd)
		DP(NETIF_MSG_TX_QUEUED,
		   "PBD @%p  ip_data %x  ip_hlen %u  ip_id %u  lso_mss %u"
		   "  tcp_flags %x  xsum %x  seq %u  hlen %u\n",
		   pbd, pbd->global_data, pbd->ip_hlen, pbd->ip_id,
		   pbd->lso_mss, pbd->tcp_flags, pbd->tcp_pseudo_csum,
		   pbd->tcp_send_seq, le16_to_cpu(pbd->total_hlen));

	DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d  bd %u\n", nbd, bd_prod);

	/*
	 * Make sure that the BD data is updated before updating the producer
	 * since FW might read the BD right after the producer is updated.
	 * This is only applicable for weak-ordered memory model archs such
	 * as IA-64. The following barrier is also mandatory since the FW
	 * assumes packets must have BDs.
	 */
	wmb();

	fp->tx_db.data.prod += nbd;
	barrier();
	DOORBELL(bp, fp->index - bp->num_rx_queues, fp->tx_db.raw);

	mmiowb();

	fp->tx_bd_prod += nbd;

	if (unlikely(bnx2x_tx_avail(fp) < MAX_SKB_FRAGS + 3)) {
		netif_tx_stop_queue(txq);
		/* We want bnx2x_tx_int to "see" the updated tx_bd_prod
		   if we put Tx into XOFF state. */
		smp_mb();
		fp_stat->eth_q_stats.driver_xoff++;
		if (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3)
			netif_tx_wake_queue(txq);
	}
	fp_stat->tx_pkt++;

	return NETDEV_TX_OK;
}
/* called with rtnl_lock */
static int bnx2x_open(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	netif_carrier_off(dev);

	bnx2x_set_power_state(bp, PCI_D0);

	return bnx2x_nic_load(bp, LOAD_OPEN);
}
/* called with rtnl_lock */
static int bnx2x_close(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	/* Unload the driver, release IRQs */
	bnx2x_nic_unload(bp, UNLOAD_CLOSE);
	if (atomic_read(&bp->pdev->enable_cnt) == 1)
		if (!CHIP_REV_IS_SLOW(bp))
			bnx2x_set_power_state(bp, PCI_D3hot);

	return 0;
}
/* called with netif_tx_lock from dev_mcast.c */
static void bnx2x_set_rx_mode(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);
	u32 rx_mode = BNX2X_RX_MODE_NORMAL;
	int port = BP_PORT(bp);

	if (bp->state != BNX2X_STATE_OPEN) {
		DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
		return;
	}

	DP(NETIF_MSG_IFUP, "dev->flags = %x\n", dev->flags);

	if (dev->flags & IFF_PROMISC)
		rx_mode = BNX2X_RX_MODE_PROMISC;

	else if ((dev->flags & IFF_ALLMULTI) ||
		 ((dev->mc_count > BNX2X_MAX_MULTICAST) && CHIP_IS_E1(bp)))
		rx_mode = BNX2X_RX_MODE_ALLMULTI;

	else { /* some multicasts */
		if (CHIP_IS_E1(bp)) {
			int i, old, offset;
			struct dev_mc_list *mclist;
			struct mac_configuration_cmd *config =
						bnx2x_sp(bp, mcast_config);

			for (i = 0, mclist = dev->mc_list;
			     mclist && (i < dev->mc_count);
			     i++, mclist = mclist->next) {

				config->config_table[i].
					cam_entry.msb_mac_addr =
					swab16(*(u16 *)&mclist->dmi_addr[0]);
				config->config_table[i].
					cam_entry.middle_mac_addr =
					swab16(*(u16 *)&mclist->dmi_addr[2]);
				config->config_table[i].
					cam_entry.lsb_mac_addr =
					swab16(*(u16 *)&mclist->dmi_addr[4]);
				config->config_table[i].cam_entry.flags =
							cpu_to_le16(port);
				config->config_table[i].
					target_table_entry.flags = 0;
				config->config_table[i].target_table_entry.
					clients_bit_vector =
						cpu_to_le32(1 << BP_L_ID(bp));
				config->config_table[i].
					target_table_entry.vlan_id = 0;

				DP(NETIF_MSG_IFUP,
				   "setting MCAST[%d] (%04x:%04x:%04x)\n", i,
				   config->config_table[i].
						cam_entry.msb_mac_addr,
				   config->config_table[i].
						cam_entry.middle_mac_addr,
				   config->config_table[i].
						cam_entry.lsb_mac_addr);
			}
			old = config->hdr.length;
			if (old > i) {
				for (; i < old; i++) {
					if (CAM_IS_INVALID(config->
							   config_table[i])) {
						/* already invalidated */
						break;
					}
					/* invalidate */
					CAM_INVALIDATE(config->
						       config_table[i]);
				}
			}

			if (CHIP_REV_IS_SLOW(bp))
				offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
			else
				offset = BNX2X_MAX_MULTICAST*(1 + port);

			config->hdr.length = i;
			config->hdr.offset = offset;
			config->hdr.client_id = bp->fp->cl_id;
			config->hdr.reserved1 = 0;

			bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
				   U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
				   U64_LO(bnx2x_sp_mapping(bp, mcast_config)),
				      0);
		} else { /* E1H */
			/* Accept one or more multicasts */
			struct dev_mc_list *mclist;
			u32 mc_filter[MC_HASH_SIZE];
			u32 crc, bit, regidx;
			int i;

			memset(mc_filter, 0, 4 * MC_HASH_SIZE);

			for (i = 0, mclist = dev->mc_list;
			     mclist && (i < dev->mc_count);
			     i++, mclist = mclist->next) {

				DP(NETIF_MSG_IFUP, "Adding mcast MAC: %pM\n",
				   mclist->dmi_addr);

				crc = crc32c_le(0, mclist->dmi_addr, ETH_ALEN);
				bit = (crc >> 24) & 0xff;
				regidx = bit >> 5;
				bit &= 0x1f;
				mc_filter[regidx] |= (1 << bit);
			}

			for (i = 0; i < MC_HASH_SIZE; i++)
				REG_WR(bp, MC_HASH_OFFSET(bp, i),
				       mc_filter[i]);
		}
	}

	bp->rx_mode = rx_mode;
	bnx2x_set_storm_rx_mode(bp);
}
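/*
 * Sketch of the E1H multicast hash above: each multicast MAC is hashed with
 * crc32c_le(); bits 31:24 of the CRC select one of 256 filter bits spread
 * across MC_HASH_SIZE 32-bit registers (regidx = bit >> 5 picks the
 * register, bit & 0x1f the bit within it).  A frame whose hash bit is set
 * passes the filter, so false positives are possible but false negatives
 * are not - the usual imperfect-hash multicast filter trade-off.
 */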
/* called with rtnl_lock */
static int bnx2x_change_mac_addr(struct net_device *dev, void *p)
{
	struct sockaddr *addr = p;
	struct bnx2x *bp = netdev_priv(dev);

	if (!is_valid_ether_addr((u8 *)(addr->sa_data)))
		return -EINVAL;

	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
	if (netif_running(dev)) {
		if (CHIP_IS_E1(bp))
			bnx2x_set_mac_addr_e1(bp, 1);
		else
			bnx2x_set_mac_addr_e1h(bp, 1);
	}

	return 0;
}
/* called with rtnl_lock */
static int bnx2x_mdio_read(struct net_device *netdev, int prtad,
			   int devad, u16 addr)
{
	struct bnx2x *bp = netdev_priv(netdev);
	u32 phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
	int rc;
	u16 value;

	DP(NETIF_MSG_LINK, "mdio_read: prtad 0x%x, devad 0x%x, addr 0x%x\n",
	   prtad, devad, addr);

	if (prtad != bp->mdio.prtad) {
		DP(NETIF_MSG_LINK, "prtad mismatch (cmd:0x%x != bp:0x%x)\n",
		   prtad, bp->mdio.prtad);
		return -EINVAL;
	}

	/* The HW expects different devad if CL22 is used */
	devad = (devad == MDIO_DEVAD_NONE) ? DEFAULT_PHY_DEV_ADDR : devad;

	bnx2x_acquire_phy_lock(bp);
	rc = bnx2x_cl45_read(bp, BP_PORT(bp), phy_type, prtad,
			     devad, addr, &value);
	bnx2x_release_phy_lock(bp);
	DP(NETIF_MSG_LINK, "mdio_read_val 0x%x rc = 0x%x\n", value, rc);

	if (!rc)
		rc = value;
	return rc;
}
/* called with rtnl_lock */
static int bnx2x_mdio_write(struct net_device *netdev, int prtad, int devad,
			    u16 addr, u16 value)
{
	struct bnx2x *bp = netdev_priv(netdev);
	u32 ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
	int rc;

	DP(NETIF_MSG_LINK, "mdio_write: prtad 0x%x, devad 0x%x, addr 0x%x,"
	   " value 0x%x\n", prtad, devad, addr, value);

	if (prtad != bp->mdio.prtad) {
		DP(NETIF_MSG_LINK, "prtad mismatch (cmd:0x%x != bp:0x%x)\n",
		   prtad, bp->mdio.prtad);
		return -EINVAL;
	}

	/* The HW expects different devad if CL22 is used */
	devad = (devad == MDIO_DEVAD_NONE) ? DEFAULT_PHY_DEV_ADDR : devad;

	bnx2x_acquire_phy_lock(bp);
	rc = bnx2x_cl45_write(bp, BP_PORT(bp), ext_phy_type, prtad,
			      devad, addr, value);
	bnx2x_release_phy_lock(bp);

	return rc;
}
/* called with rtnl_lock */
static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct bnx2x *bp = netdev_priv(dev);
	struct mii_ioctl_data *mdio = if_mii(ifr);

	DP(NETIF_MSG_LINK, "ioctl: phy id 0x%x, reg 0x%x, val_in 0x%x\n",
	   mdio->phy_id, mdio->reg_num, mdio->val_in);

	if (!netif_running(dev))
		return -EAGAIN;

	return mdio_mii_ioctl(&bp->mdio, mdio, cmd);
}
/* called with rtnl_lock */
static int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
{
	struct bnx2x *bp = netdev_priv(dev);
	int rc = 0;

	if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
	    ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE))
		return -EINVAL;

	/* This does not race with packet allocation
	 * because the actual alloc size is
	 * only updated as part of load
	 */
	dev->mtu = new_mtu;

	if (netif_running(dev)) {
		bnx2x_nic_unload(bp, UNLOAD_NORMAL);
		rc = bnx2x_nic_load(bp, LOAD_NORMAL);
	}

	return rc;
}
static void bnx2x_tx_timeout(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

#ifdef BNX2X_STOP_ON_ERROR
	if (!bp->panic)
		bnx2x_panic();
#endif

	/* This allows the netif to be shutdown gracefully before resetting */
	schedule_work(&bp->reset_task);
}
#ifdef BCM_VLAN
/* called with rtnl_lock */
static void bnx2x_vlan_rx_register(struct net_device *dev,
				   struct vlan_group *vlgrp)
{
	struct bnx2x *bp = netdev_priv(dev);

	bp->vlgrp = vlgrp;

	/* Set flags according to the required capabilities */
	bp->flags &= ~(HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG);

	if (dev->features & NETIF_F_HW_VLAN_TX)
		bp->flags |= HW_VLAN_TX_FLAG;

	if (dev->features & NETIF_F_HW_VLAN_RX)
		bp->flags |= HW_VLAN_RX_FLAG;

	if (netif_running(dev))
		bnx2x_set_client_config(bp);
}
#endif
#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
static void poll_bnx2x(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	disable_irq(bp->pdev->irq);
	bnx2x_interrupt(bp->pdev->irq, dev);
	enable_irq(bp->pdev->irq);
}
#endif
static const struct net_device_ops bnx2x_netdev_ops = {
	.ndo_open		= bnx2x_open,
	.ndo_stop		= bnx2x_close,
	.ndo_start_xmit		= bnx2x_start_xmit,
	.ndo_set_multicast_list	= bnx2x_set_rx_mode,
	.ndo_set_mac_address	= bnx2x_change_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_do_ioctl		= bnx2x_ioctl,
	.ndo_change_mtu		= bnx2x_change_mtu,
	.ndo_tx_timeout		= bnx2x_tx_timeout,
#ifdef BCM_VLAN
	.ndo_vlan_rx_register	= bnx2x_vlan_rx_register,
#endif
#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
	.ndo_poll_controller	= poll_bnx2x,
#endif
};
static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
				    struct net_device *dev)
{
	struct bnx2x *bp;
	int rc;

	SET_NETDEV_DEV(dev, &pdev->dev);
	bp = netdev_priv(dev);

	bp->dev = dev;
	bp->pdev = pdev;
	bp->flags = 0;
	bp->func = PCI_FUNC(pdev->devfn);

	rc = pci_enable_device(pdev);
	if (rc) {
		printk(KERN_ERR PFX "Cannot enable PCI device, aborting\n");
		goto err_out;
	}

	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
		printk(KERN_ERR PFX "Cannot find PCI device base address,"
		       " aborting\n");
		rc = -ENODEV;
		goto err_out_disable;
	}

	if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
		printk(KERN_ERR PFX "Cannot find second PCI device"
		       " base address, aborting\n");
		rc = -ENODEV;
		goto err_out_disable;
	}

	if (atomic_read(&pdev->enable_cnt) == 1) {
		rc = pci_request_regions(pdev, DRV_MODULE_NAME);
		if (rc) {
			printk(KERN_ERR PFX "Cannot obtain PCI resources,"
			       " aborting\n");
			goto err_out_disable;
		}

		pci_set_master(pdev);
		pci_save_state(pdev);
	}

	bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
	if (bp->pm_cap == 0) {
		printk(KERN_ERR PFX "Cannot find power management"
		       " capability, aborting\n");
		rc = -EIO;
		goto err_out_release;
	}

	bp->pcie_cap = pci_find_capability(pdev, PCI_CAP_ID_EXP);
	if (bp->pcie_cap == 0) {
		printk(KERN_ERR PFX "Cannot find PCI Express capability,"
		       " aborting\n");
		rc = -EIO;
		goto err_out_release;
	}

	if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) == 0) {
		bp->flags |= USING_DAC_FLAG;
		if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)) != 0) {
			printk(KERN_ERR PFX "pci_set_consistent_dma_mask"
			       " failed, aborting\n");
			rc = -EIO;
			goto err_out_release;
		}

	} else if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) {
		printk(KERN_ERR PFX "System does not support DMA,"
		       " aborting\n");
		rc = -EIO;
		goto err_out_release;
	}

	dev->mem_start = pci_resource_start(pdev, 0);
	dev->base_addr = dev->mem_start;
	dev->mem_end = pci_resource_end(pdev, 0);

	dev->irq = pdev->irq;

	bp->regview = pci_ioremap_bar(pdev, 0);
	if (!bp->regview) {
		printk(KERN_ERR PFX "Cannot map register space, aborting\n");
		rc = -ENOMEM;
		goto err_out_release;
	}

	bp->doorbells = ioremap_nocache(pci_resource_start(pdev, 2),
					min_t(u64, BNX2X_DB_SIZE,
					      pci_resource_len(pdev, 2)));
	if (!bp->doorbells) {
		printk(KERN_ERR PFX "Cannot map doorbell space, aborting\n");
		rc = -ENOMEM;
		goto err_out_unmap;
	}

	bnx2x_set_power_state(bp, PCI_D0);

	/* clean indirect addresses */
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);
	REG_WR(bp, PXP2_REG_PGL_ADDR_88_F0 + BP_PORT(bp)*16, 0);
	REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F0 + BP_PORT(bp)*16, 0);
	REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0 + BP_PORT(bp)*16, 0);
	REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0 + BP_PORT(bp)*16, 0);

	dev->watchdog_timeo = TX_TIMEOUT;

	dev->netdev_ops = &bnx2x_netdev_ops;
	dev->ethtool_ops = &bnx2x_ethtool_ops;
	dev->features |= NETIF_F_SG;
	dev->features |= NETIF_F_HW_CSUM;
	if (bp->flags & USING_DAC_FLAG)
		dev->features |= NETIF_F_HIGHDMA;
	dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
	dev->features |= NETIF_F_TSO6;
#ifdef BCM_VLAN
	dev->features |= (NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX);
	bp->flags |= (HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG);

	dev->vlan_features |= NETIF_F_SG;
	dev->vlan_features |= NETIF_F_HW_CSUM;
	if (bp->flags & USING_DAC_FLAG)
		dev->vlan_features |= NETIF_F_HIGHDMA;
	dev->vlan_features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
	dev->vlan_features |= NETIF_F_TSO6;
#endif

	/* get_port_hwinfo() will set prtad and mmds properly */
	bp->mdio.prtad = MDIO_PRTAD_NONE;
	bp->mdio.mmds = 0;
	bp->mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22;
	bp->mdio.dev = dev;
	bp->mdio.mdio_read = bnx2x_mdio_read;
	bp->mdio.mdio_write = bnx2x_mdio_write;

	return 0;

err_out_unmap:
	if (bp->regview) {
		iounmap(bp->regview);
		bp->regview = NULL;
	}
	if (bp->doorbells) {
		iounmap(bp->doorbells);
		bp->doorbells = NULL;
	}

err_out_release:
	if (atomic_read(&pdev->enable_cnt) == 1)
		pci_release_regions(pdev);

err_out_disable:
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);

err_out:
	return rc;
}
static int __devinit bnx2x_get_pcie_width(struct bnx2x *bp)
{
	u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);

	val = (val & PCICFG_LINK_WIDTH) >> PCICFG_LINK_WIDTH_SHIFT;
	return val;
}

/* return value of 1=2.5GHz 2=5GHz */
static int __devinit bnx2x_get_pcie_speed(struct bnx2x *bp)
{
	u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);

	val = (val & PCICFG_LINK_SPEED) >> PCICFG_LINK_SPEED_SHIFT;
	return val;
}
static int __devinit bnx2x_check_firmware(struct bnx2x *bp)
{
	const struct firmware *firmware = bp->firmware;
	struct bnx2x_fw_file_hdr *fw_hdr;
	struct bnx2x_fw_file_section *sections;
	u32 offset, len, num_ops;
	u16 *ops_offsets;
	int i;
	const u8 *fw_ver;

	if (firmware->size < sizeof(struct bnx2x_fw_file_hdr))
		return -EINVAL;

	fw_hdr = (struct bnx2x_fw_file_hdr *)firmware->data;
	sections = (struct bnx2x_fw_file_section *)fw_hdr;

	/* Make sure none of the offsets and sizes make us read beyond
	 * the end of the firmware data */
	for (i = 0; i < sizeof(*fw_hdr) / sizeof(*sections); i++) {
		offset = be32_to_cpu(sections[i].offset);
		len = be32_to_cpu(sections[i].len);
		if (offset + len > firmware->size) {
			printk(KERN_ERR PFX "Section %d length is out of"
			       " bounds\n", i);
			return -EINVAL;
		}
	}

	/* Likewise for the init_ops offsets */
	offset = be32_to_cpu(fw_hdr->init_ops_offsets.offset);
	ops_offsets = (u16 *)(firmware->data + offset);
	num_ops = be32_to_cpu(fw_hdr->init_ops.len) / sizeof(struct raw_op);

	for (i = 0; i < be32_to_cpu(fw_hdr->init_ops_offsets.len) / 2; i++) {
		if (be16_to_cpu(ops_offsets[i]) > num_ops) {
			printk(KERN_ERR PFX "Section offset %d is out of"
			       " bounds\n", i);
			return -EINVAL;
		}
	}

	/* Check FW version */
	offset = be32_to_cpu(fw_hdr->fw_version.offset);
	fw_ver = firmware->data + offset;
	if ((fw_ver[0] != BCM_5710_FW_MAJOR_VERSION) ||
	    (fw_ver[1] != BCM_5710_FW_MINOR_VERSION) ||
	    (fw_ver[2] != BCM_5710_FW_REVISION_VERSION) ||
	    (fw_ver[3] != BCM_5710_FW_ENGINEERING_VERSION)) {
		printk(KERN_ERR PFX "Bad FW version:%d.%d.%d.%d."
		       " Should be %d.%d.%d.%d\n",
		       fw_ver[0], fw_ver[1], fw_ver[2], fw_ver[3],
		       BCM_5710_FW_MAJOR_VERSION,
		       BCM_5710_FW_MINOR_VERSION,
		       BCM_5710_FW_REVISION_VERSION,
		       BCM_5710_FW_ENGINEERING_VERSION);
		return -EINVAL;
	}

	return 0;
}
static inline void be32_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
{
	u32 i;
	const __be32 *source = (const __be32 *)_source;
	u32 *target = (u32 *)_target;

	for (i = 0; i < n/4; i++)
		target[i] = be32_to_cpu(source[i]);
}
/*
   Ops array is stored in the following format:
   {op(8bit), offset(24bit, big endian), data(32bit, big endian)}
 */
static inline void bnx2x_prep_ops(const u8 *_source, u8 *_target, u32 n)
{
	u32 i, j, tmp;
	const __be32 *source = (const __be32 *)_source;
	struct raw_op *target = (struct raw_op *)_target;

	for (i = 0, j = 0; i < n/8; i++, j += 2) {
		tmp = be32_to_cpu(source[j]);
		target[i].op = (tmp >> 24) & 0xff;
		target[i].offset = tmp & 0xffffff;
		target[i].raw_data = be32_to_cpu(source[j+1]);
	}
}
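/*
 * Worked example (illustrative): an 8-byte record whose first big-endian
 * word is 0x45000010 and whose second is 0x00000001 unpacks above to
 * op = 0x45, offset = 0x000010 and raw_data = 1 - i.e. one opcode byte,
 * a 24-bit target offset and a 32-bit operand per init-ops entry.
 */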
static inline void be16_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
{
	u32 i;
	u16 *target = (u16 *)_target;
	const __be16 *source = (const __be16 *)_source;

	for (i = 0; i < n/2; i++)
		target[i] = be16_to_cpu(source[i]);
}
#define BNX2X_ALLOC_AND_SET(arr, lbl, func)				\
	do {								\
		u32 len = be32_to_cpu(fw_hdr->arr.len);			\
		bp->arr = kmalloc(len, GFP_KERNEL);			\
		if (!bp->arr) {						\
			printk(KERN_ERR PFX "Failed to allocate %d bytes" \
			       " for "#arr"\n", len);			\
			goto lbl;					\
		}							\
		func(bp->firmware->data +				\
		     be32_to_cpu(fw_hdr->arr.offset),			\
		     (u8 *)bp->arr, len);				\
	} while (0)
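/*
 * Usage note (derived from the expansions below): each invocation sizes and
 * allocates one bp-> array from the matching fw_hdr section, converts it
 * from the file's big-endian layout via "func", and jumps to "lbl" on
 * allocation failure so that already-allocated arrays are freed in reverse
 * order by the error labels at the end of bnx2x_init_firmware().
 */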
static int __devinit bnx2x_init_firmware(struct bnx2x *bp, struct device *dev)
{
	char fw_file_name[40] = {0};
	int rc, offset;
	struct bnx2x_fw_file_hdr *fw_hdr;

	/* Create a FW file name */
	if (CHIP_IS_E1(bp))
		offset = sprintf(fw_file_name, FW_FILE_PREFIX_E1);
	else
		offset = sprintf(fw_file_name, FW_FILE_PREFIX_E1H);

	sprintf(fw_file_name + offset, "%d.%d.%d.%d.fw",
		BCM_5710_FW_MAJOR_VERSION,
		BCM_5710_FW_MINOR_VERSION,
		BCM_5710_FW_REVISION_VERSION,
		BCM_5710_FW_ENGINEERING_VERSION);
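	/* The resulting name has the shape
	 * "bnx2x-e1-<maj>.<min>.<rev>.<eng>.fw" (or the bnx2x-e1h- prefix
	 * for E1H chips), which is what request_firmware() below looks up.
	 */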
	printk(KERN_INFO PFX "Loading %s\n", fw_file_name);

	rc = request_firmware(&bp->firmware, fw_file_name, dev);
	if (rc) {
		printk(KERN_ERR PFX "Can't load firmware file %s\n",
		       fw_file_name);
		goto request_firmware_exit;
	}

	rc = bnx2x_check_firmware(bp);
	if (rc) {
		printk(KERN_ERR PFX "Corrupt firmware file %s\n",
		       fw_file_name);
		goto request_firmware_exit;
	}

	fw_hdr = (struct bnx2x_fw_file_hdr *)bp->firmware->data;

	/* Initialize the pointers to the init arrays */
	/* Blob */
	BNX2X_ALLOC_AND_SET(init_data, request_firmware_exit, be32_to_cpu_n);

	/* Opcodes */
	BNX2X_ALLOC_AND_SET(init_ops, init_ops_alloc_err, bnx2x_prep_ops);

	/* Offsets */
	BNX2X_ALLOC_AND_SET(init_ops_offsets, init_offsets_alloc_err,
			    be16_to_cpu_n);

	/* STORMs firmware */
	bp->tsem_int_table_data = bp->firmware->data +
		be32_to_cpu(fw_hdr->tsem_int_table_data.offset);
	bp->tsem_pram_data = bp->firmware->data +
		be32_to_cpu(fw_hdr->tsem_pram_data.offset);
	bp->usem_int_table_data = bp->firmware->data +
		be32_to_cpu(fw_hdr->usem_int_table_data.offset);
	bp->usem_pram_data = bp->firmware->data +
		be32_to_cpu(fw_hdr->usem_pram_data.offset);
	bp->xsem_int_table_data = bp->firmware->data +
		be32_to_cpu(fw_hdr->xsem_int_table_data.offset);
	bp->xsem_pram_data = bp->firmware->data +
		be32_to_cpu(fw_hdr->xsem_pram_data.offset);
	bp->csem_int_table_data = bp->firmware->data +
		be32_to_cpu(fw_hdr->csem_int_table_data.offset);
	bp->csem_pram_data = bp->firmware->data +
		be32_to_cpu(fw_hdr->csem_pram_data.offset);

	return 0;

init_offsets_alloc_err:
	kfree(bp->init_ops);
init_ops_alloc_err:
	kfree(bp->init_data);
request_firmware_exit:
	release_firmware(bp->firmware);

	return rc;
}
static int __devinit bnx2x_init_one(struct pci_dev *pdev,
				    const struct pci_device_id *ent)
{
	static int version_printed;
	struct net_device *dev = NULL;
	struct bnx2x *bp;
	int rc;

	if (version_printed++ == 0)
		printk(KERN_INFO "%s", version);

	/* dev zeroed in init_etherdev */
	dev = alloc_etherdev_mq(sizeof(*bp), MAX_CONTEXT);
	if (!dev) {
		printk(KERN_ERR PFX "Cannot allocate net device\n");
		return -ENOMEM;
	}

	bp = netdev_priv(dev);
	bp->msglevel = debug;

	rc = bnx2x_init_dev(pdev, dev);
	if (rc < 0) {
		free_netdev(dev);
		return rc;
	}

	pci_set_drvdata(pdev, dev);

	rc = bnx2x_init_bp(bp);
	if (rc)
		goto init_one_exit;

	/* Set init arrays */
	rc = bnx2x_init_firmware(bp, &pdev->dev);
	if (rc) {
		printk(KERN_ERR PFX "Error loading firmware\n");
		goto init_one_exit;
	}

	rc = register_netdev(dev);
	if (rc) {
		dev_err(&pdev->dev, "Cannot register net device\n");
		goto init_one_exit;
	}

	printk(KERN_INFO "%s: %s (%c%d) PCI-E x%d %s found at mem %lx,"
	       " IRQ %d, ", dev->name, board_info[ent->driver_data].name,
	       (CHIP_REV(bp) >> 12) + 'A', (CHIP_METAL(bp) >> 4),
	       bnx2x_get_pcie_width(bp),
	       (bnx2x_get_pcie_speed(bp) == 2) ? "5GHz (Gen2)" : "2.5GHz",
	       dev->base_addr, bp->pdev->irq);
	printk(KERN_CONT "node addr %pM\n", dev->dev_addr);

	return 0;

init_one_exit:
	if (bp->regview)
		iounmap(bp->regview);

	if (bp->doorbells)
		iounmap(bp->doorbells);

	free_netdev(dev);

	if (atomic_read(&pdev->enable_cnt) == 1)
		pci_release_regions(pdev);

	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);

	return rc;
}
static void __devexit bnx2x_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp;

	if (!dev) {
		printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
		return;
	}
	bp = netdev_priv(dev);

	unregister_netdev(dev);

	kfree(bp->init_ops_offsets);
	kfree(bp->init_ops);
	kfree(bp->init_data);
	release_firmware(bp->firmware);

	if (bp->regview)
		iounmap(bp->regview);

	if (bp->doorbells)
		iounmap(bp->doorbells);

	free_netdev(dev);

	if (atomic_read(&pdev->enable_cnt) == 1)
		pci_release_regions(pdev);

	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
}
static int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp;

	if (!dev) {
		printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
		return -ENODEV;
	}
	bp = netdev_priv(dev);

	rtnl_lock();

	pci_save_state(pdev);

	if (!netif_running(dev)) {
		rtnl_unlock();
		return 0;
	}

	netif_device_detach(dev);

	bnx2x_nic_unload(bp, UNLOAD_CLOSE);

	bnx2x_set_power_state(bp, pci_choose_state(pdev, state));

	rtnl_unlock();

	return 0;
}
static int bnx2x_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp;
	int rc;

	if (!dev) {
		printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
		return -ENODEV;
	}
	bp = netdev_priv(dev);

	rtnl_lock();

	pci_restore_state(pdev);

	if (!netif_running(dev)) {
		rtnl_unlock();
		return 0;
	}

	bnx2x_set_power_state(bp, PCI_D0);
	netif_device_attach(dev);

	rc = bnx2x_nic_load(bp, LOAD_OPEN);

	rtnl_unlock();

	return rc;
}
static int bnx2x_eeh_nic_unload(struct bnx2x *bp)
{
	int i;

	bp->state = BNX2X_STATE_ERROR;

	bp->rx_mode = BNX2X_RX_MODE_NONE;

	bnx2x_netif_stop(bp, 0);

	del_timer_sync(&bp->timer);
	bp->stats_state = STATS_STATE_DISABLED;
	DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");

	/* Release IRQs */
	bnx2x_free_irq(bp);

	if (CHIP_IS_E1(bp)) {
		struct mac_configuration_cmd *config =
						bnx2x_sp(bp, mcast_config);

		for (i = 0; i < config->hdr.length; i++)
			CAM_INVALIDATE(config->config_table[i]);
	}

	/* Free SKBs, SGEs, TPA pool and driver internals */
	bnx2x_free_skbs(bp);
	for_each_rx_queue(bp, i)
		bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
	for_each_rx_queue(bp, i)
		netif_napi_del(&bnx2x_fp(bp, i, napi));
	bnx2x_free_mem(bp);

	bp->state = BNX2X_STATE_CLOSED;

	netif_carrier_off(bp->dev);

	return 0;
}
static void bnx2x_eeh_recover(struct bnx2x *bp)
{
	u32 val;

	mutex_init(&bp->port.phy_mutex);

	bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
	bp->link_params.shmem_base = bp->common.shmem_base;
	BNX2X_DEV_INFO("shmem offset is 0x%x\n", bp->common.shmem_base);

	if (!bp->common.shmem_base ||
	    (bp->common.shmem_base < 0xA0000) ||
	    (bp->common.shmem_base >= 0xC0000)) {
		BNX2X_DEV_INFO("MCP not active\n");
		bp->flags |= NO_MCP_FLAG;
		return;
	}

	val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
	if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
		!= (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
		BNX2X_ERR("BAD MCP validity signature\n");

	if (!BP_NOMCP(bp)) {
		bp->fw_seq = (SHMEM_RD(bp, func_mb[BP_FUNC(bp)].drv_mb_header)
			      & DRV_MSG_SEQ_NUMBER_MASK);
		BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
	}
}
/**
 * bnx2x_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t bnx2x_io_error_detected(struct pci_dev *pdev,
						pci_channel_state_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp = netdev_priv(dev);

	rtnl_lock();

	netif_device_detach(dev);

	if (state == pci_channel_io_perm_failure) {
		rtnl_unlock();
		return PCI_ERS_RESULT_DISCONNECT;
	}

	if (netif_running(dev))
		bnx2x_eeh_nic_unload(bp);

	pci_disable_device(pdev);

	rtnl_unlock();

	/* Request a slot reset */
	return PCI_ERS_RESULT_NEED_RESET;
}
/**
 * bnx2x_io_slot_reset - called after the PCI bus has been reset
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot.
 */
static pci_ers_result_t bnx2x_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp = netdev_priv(dev);

	rtnl_lock();

	if (pci_enable_device(pdev)) {
		dev_err(&pdev->dev,
			"Cannot re-enable PCI device after reset\n");
		rtnl_unlock();
		return PCI_ERS_RESULT_DISCONNECT;
	}

	pci_set_master(pdev);
	pci_restore_state(pdev);

	if (netif_running(dev))
		bnx2x_set_power_state(bp, PCI_D0);

	rtnl_unlock();

	return PCI_ERS_RESULT_RECOVERED;
}
/**
 * bnx2x_io_resume - called when traffic can start flowing again
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells us that
 * it's OK to resume normal operation.
 */
static void bnx2x_io_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp = netdev_priv(dev);

	rtnl_lock();

	bnx2x_eeh_recover(bp);

	if (netif_running(dev))
		bnx2x_nic_load(bp, LOAD_NORMAL);

	netif_device_attach(dev);

	rtnl_unlock();
}
static struct pci_error_handlers bnx2x_err_handler = {
	.error_detected = bnx2x_io_error_detected,
	.slot_reset     = bnx2x_io_slot_reset,
	.resume         = bnx2x_io_resume,
};

static struct pci_driver bnx2x_pci_driver = {
	.name        = DRV_MODULE_NAME,
	.id_table    = bnx2x_pci_tbl,
	.probe       = bnx2x_init_one,
	.remove      = __devexit_p(bnx2x_remove_one),
	.suspend     = bnx2x_suspend,
	.resume      = bnx2x_resume,
	.err_handler = &bnx2x_err_handler,
};
static int __init bnx2x_init(void)
{
	int ret;

	bnx2x_wq = create_singlethread_workqueue("bnx2x");
	if (bnx2x_wq == NULL) {
		printk(KERN_ERR PFX "Cannot create workqueue\n");
		return -ENOMEM;
	}

	ret = pci_register_driver(&bnx2x_pci_driver);
	if (ret) {
		printk(KERN_ERR PFX "Cannot register driver\n");
		destroy_workqueue(bnx2x_wq);
	}

	return ret;
}

static void __exit bnx2x_cleanup(void)
{
	pci_unregister_driver(&bnx2x_pci_driver);

	destroy_workqueue(bnx2x_wq);
}

module_init(bnx2x_init);
module_exit(bnx2x_cleanup);