/* bnx2x_main.c: Broadcom Everest network driver.
 *
 * Copyright (c) 2007-2009 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
 * Written by: Eliezer Tamir
 * Based on code from Michael Chan's bnx2 driver
 * UDP CSUM errata workaround by Arik Gendelman
 * Slowpath rework by Vladislav Zolotarov
 * Statistics and Link management by Yitchak Gertner
 *
 */
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/device.h>  /* for dev_info() */
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/dma-mapping.h>
#include <linux/bitops.h>
#include <linux/irq.h>
#include <linux/delay.h>
#include <asm/byteorder.h>
#include <linux/time.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/if_vlan.h>
#include <net/ip.h>
#include <net/tcp.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <linux/workqueue.h>
#include <linux/crc32.h>
#include <linux/crc32c.h>
#include <linux/prefetch.h>
#include <linux/zlib.h>
#include <linux/io.h>

#include "bnx2x.h"
#include "bnx2x_init.h"
#include "bnx2x_init_ops.h"
#include "bnx2x_dump.h"
#define DRV_MODULE_VERSION	"1.48.105-1"
#define DRV_MODULE_RELDATE	"2009/04/22"
#define BNX2X_BC_VER		0x040200

#include <linux/firmware.h>
#include "bnx2x_fw_file_hdr.h"
#define FW_FILE_PREFIX_E1	"bnx2x-e1-"
#define FW_FILE_PREFIX_E1H	"bnx2x-e1h-"

/* Time in jiffies before concluding the transmitter is hung */
#define TX_TIMEOUT		(5*HZ)
static char version[] __devinitdata =
	"Broadcom NetXtreme II 5771x 10Gigabit Ethernet Driver "
	DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("Eliezer Tamir");
MODULE_DESCRIPTION("Broadcom NetXtreme II BCM57710/57711/57711E Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
static int multi_mode = 1;
module_param(multi_mode, int, 0);
MODULE_PARM_DESC(multi_mode, " Use per-CPU queues");

static int disable_tpa;
module_param(disable_tpa, int, 0);
MODULE_PARM_DESC(disable_tpa, " Disable the TPA (LRO) feature");

static int int_mode;
module_param(int_mode, int, 0);
MODULE_PARM_DESC(int_mode, " Force interrupt mode (1 INT#x; 2 MSI)");

static int poll;
module_param(poll, int, 0);
MODULE_PARM_DESC(poll, " Use polling (for debug)");

static int mrrs = -1;
module_param(mrrs, int, 0);
MODULE_PARM_DESC(mrrs, " Force Max Read Req Size (0..3) (for debug)");

static int debug;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, " Default debug msglevel");
static int load_count[3]; /* 0-common, 1-port0, 2-port1 */

static struct workqueue_struct *bnx2x_wq;
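
/* All slow-path work (sp_task) is queued on this dedicated workqueue so
   that it can be cancelled and flushed in one place on unload - see
   bnx2x_int_disable_sync() below. */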
enum bnx2x_board_type {
	BCM57710 = 0,
	BCM57711,
	BCM57711E,
};

/* indexed by board_type, above */
static struct {
	char *name;
} board_info[] __devinitdata = {
	{ "Broadcom NetXtreme II BCM57710 XGb" },
	{ "Broadcom NetXtreme II BCM57711 XGb" },
	{ "Broadcom NetXtreme II BCM57711E XGb" }
};
static const struct pci_device_id bnx2x_pci_tbl[] = {
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57710,
		PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57710 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57711,
		PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57711 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57711E,
		PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57711E },
	{ 0 }
};

MODULE_DEVICE_TABLE(pci, bnx2x_pci_tbl);
/****************************************************************************
* General service functions
****************************************************************************/
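
/* Indirect register access: the GRC address space is reached through a
   window in PCI config space - the target address is written to
   PCICFG_GRC_ADDRESS, then the data moves through PCICFG_GRC_DATA.
   The window is parked back on PCICFG_VENDOR_ID_OFFSET afterwards so a
   stray config cycle cannot hit an arbitrary GRC register. */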
/* used only at init
 * locking is done by mcp
 */
static void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val)
{
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_DATA, val);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);
}

static u32 bnx2x_reg_rd_ind(struct bnx2x *bp, u32 addr)
{
	u32 val;

	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
	pci_read_config_dword(bp->pdev, PCICFG_GRC_DATA, &val);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);

	return val;
}
static const u32 dmae_reg_go_c[] = {
	DMAE_REG_GO_C0, DMAE_REG_GO_C1, DMAE_REG_GO_C2, DMAE_REG_GO_C3,
	DMAE_REG_GO_C4, DMAE_REG_GO_C5, DMAE_REG_GO_C6, DMAE_REG_GO_C7,
	DMAE_REG_GO_C8, DMAE_REG_GO_C9, DMAE_REG_GO_C10, DMAE_REG_GO_C11,
	DMAE_REG_GO_C12, DMAE_REG_GO_C13, DMAE_REG_GO_C14, DMAE_REG_GO_C15
};
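
/* Each DMAE channel is kicked by writing 1 to its "GO" register; the
   command itself must first be copied into the DMAE command memory, see
   bnx2x_post_dmae() below. */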
/* copy command into DMAE command memory and set DMAE command go */
static void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae,
			    int idx)
{
	u32 cmd_offset;
	int i;

	cmd_offset = (DMAE_REG_CMD_MEM + sizeof(struct dmae_command) * idx);
	for (i = 0; i < (sizeof(struct dmae_command)/4); i++) {
		REG_WR(bp, cmd_offset + i*4, *(((u32 *)dmae) + i));

		DP(BNX2X_MSG_OFF, "DMAE cmd[%d].%d (0x%08x) : 0x%08x\n",
		   idx, i, cmd_offset + i*4, *(((u32 *)dmae) + i));
	}
	REG_WR(bp, dmae_reg_go_c[idx], 1);
}
void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
		      u32 len32)
{
	struct dmae_command *dmae = &bp->init_dmae;
	u32 *wb_comp = bnx2x_sp(bp, wb_comp);
	int cnt = 200;

	if (!bp->dmae_ready) {
		u32 *data = bnx2x_sp(bp, wb_data[0]);

		DP(BNX2X_MSG_OFF, "DMAE is not ready (dst_addr %08x len32 %d)"
		   " using indirect\n", dst_addr, len32);
		bnx2x_init_ind_wr(bp, dst_addr, data, len32);
		return;
	}

	mutex_lock(&bp->dmae_mutex);

	memset(dmae, 0, sizeof(struct dmae_command));

	dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
			DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
			DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
			DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
			DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
			(BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
			(BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
	dmae->src_addr_lo = U64_LO(dma_addr);
	dmae->src_addr_hi = U64_HI(dma_addr);
	dmae->dst_addr_lo = dst_addr >> 2;
	dmae->dst_addr_hi = 0;
	dmae->len = len32;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	DP(BNX2X_MSG_OFF, "DMAE: opcode 0x%08x\n"
	   DP_LEVEL "src_addr [%x:%08x] len [%d *4] "
	   "dst_addr [%x:%08x (%08x)]\n"
	   DP_LEVEL "comp_addr [%x:%08x] comp_val 0x%08x\n",
	   dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
	   dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo, dst_addr,
	   dmae->comp_addr_hi, dmae->comp_addr_lo, dmae->comp_val);
	DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
	   bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
	   bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);

	*wb_comp = 0;

	bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));

	udelay(5);

	while (*wb_comp != DMAE_COMP_VAL) {
		DP(BNX2X_MSG_OFF, "wb_comp 0x%08x\n", *wb_comp);

		if (!cnt) {
			BNX2X_ERR("DMAE timeout!\n");
			break;
		}
		cnt--;
		/* adjust delay for emulation/FPGA */
		if (CHIP_REV_IS_SLOW(bp))
			msleep(100);
		else
			udelay(5);
	}

	mutex_unlock(&bp->dmae_mutex);
}
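
/* Example (illustrative only): DMA two dwords from the slowpath scratch
 * area into GRC space at a dword-aligned GRC address "dst" (hypothetical):
 *
 *	bnx2x_write_dmae(bp, bnx2x_sp_mapping(bp, wb_data), dst, 2);
 *
 * Completion is detected by polling wb_comp for DMAE_COMP_VAL, as above.
 */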
void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
{
	struct dmae_command *dmae = &bp->init_dmae;
	u32 *wb_comp = bnx2x_sp(bp, wb_comp);
	int cnt = 200;

	if (!bp->dmae_ready) {
		u32 *data = bnx2x_sp(bp, wb_data[0]);
		int i;

		DP(BNX2X_MSG_OFF, "DMAE is not ready (src_addr %08x len32 %d)"
		   " using indirect\n", src_addr, len32);
		for (i = 0; i < len32; i++)
			data[i] = bnx2x_reg_rd_ind(bp, src_addr + i*4);
		return;
	}

	mutex_lock(&bp->dmae_mutex);

	memset(bnx2x_sp(bp, wb_data[0]), 0, sizeof(u32) * 4);
	memset(dmae, 0, sizeof(struct dmae_command));

	dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
			DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
			DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
			DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
			DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
			(BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
			(BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
	dmae->src_addr_lo = src_addr >> 2;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_data));
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_data));
	dmae->len = len32;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	DP(BNX2X_MSG_OFF, "DMAE: opcode 0x%08x\n"
	   DP_LEVEL "src_addr [%x:%08x] len [%d *4] "
	   "dst_addr [%x:%08x (%08x)]\n"
	   DP_LEVEL "comp_addr [%x:%08x] comp_val 0x%08x\n",
	   dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
	   dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo, src_addr,
	   dmae->comp_addr_hi, dmae->comp_addr_lo, dmae->comp_val);

	*wb_comp = 0;

	bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));

	udelay(5);

	while (*wb_comp != DMAE_COMP_VAL) {

		if (!cnt) {
			BNX2X_ERR("DMAE timeout!\n");
			break;
		}
		cnt--;
		/* adjust delay for emulation/FPGA */
		if (CHIP_REV_IS_SLOW(bp))
			msleep(100);
		else
			udelay(5);
	}
	DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
	   bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
	   bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);

	mutex_unlock(&bp->dmae_mutex);
}
/* used only for slowpath so not inlined */
static void bnx2x_wb_wr(struct bnx2x *bp, int reg, u32 val_hi, u32 val_lo)
{
	u32 wb_write[2];

	wb_write[0] = val_hi;
	wb_write[1] = val_lo;
	REG_WR_DMAE(bp, reg, wb_write, 2);
}

static u64 bnx2x_wb_rd(struct bnx2x *bp, int reg)
{
	u32 wb_data[2];

	REG_RD_DMAE(bp, reg, wb_data, 2);

	return HILO_U64(wb_data[0], wb_data[1]);
}
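
/* "Wide-bus" (64 bit) registers cannot be accessed atomically through the
   32 bit GRC window, so they are moved as hi/lo dword pairs via the DMAE
   (REG_WR_DMAE/REG_RD_DMAE above). */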
static int bnx2x_mc_assert(struct bnx2x *bp)
{
	char last_idx;
	int i, rc = 0;
	u32 row0, row1, row2, row3;

	/* XSTORM */
	last_idx = REG_RD8(bp, BAR_XSTRORM_INTMEM +
			   XSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("XSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("XSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* TSTORM */
	last_idx = REG_RD8(bp, BAR_TSTRORM_INTMEM +
			   TSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("TSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("TSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* CSTORM */
	last_idx = REG_RD8(bp, BAR_CSTRORM_INTMEM +
			   CSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("CSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("CSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* USTORM */
	last_idx = REG_RD8(bp, BAR_USTRORM_INTMEM +
			   USTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("USTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("USTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	return rc;
}
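
/* Dump the MCP scratchpad as a ring buffer: "mark" (read from scratchpad
   offset 0xf104) points at the newest data, so the dump walks from mark to
   the end of the buffer first, then wraps around from the start up to
   mark. */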
static void bnx2x_fw_dump(struct bnx2x *bp)
{
	u32 mark, offset;
	__be32 data[9];
	int word;

	mark = REG_RD(bp, MCP_REG_MCPR_SCRATCH + 0xf104);
	mark = ((mark + 0x3) & ~0x3);
	printk(KERN_ERR PFX "begin fw dump (mark 0x%x)\n", mark);

	for (offset = mark - 0x08000000; offset <= 0xF900; offset += 0x8*4) {
		for (word = 0; word < 8; word++)
			data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
						  offset + 4*word));
		data[8] = 0x0;
		printk(KERN_CONT "%s", (char *)data);
	}
	for (offset = 0xF108; offset <= mark - 0x08000000; offset += 0x8*4) {
		for (word = 0; word < 8; word++)
			data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
						  offset + 4*word));
		data[8] = 0x0;
		printk(KERN_CONT "%s", (char *)data);
	}
	printk(KERN_ERR PFX "end of fw dump\n");
}
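
/* Full state dump for debugging: freezes statistics, prints the driver/HW
   indices and the Rx/Tx rings of every queue, then appends the firmware
   scratchpad dump and the storm assert lists. */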
static void bnx2x_panic_dump(struct bnx2x *bp)
{
	int i;
	u16 j, start, end;

	bp->stats_state = STATS_STATE_DISABLED;
	DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");

	BNX2X_ERR("begin crash dump -----------------\n");

	/* Indices */
	/* Common */
	BNX2X_ERR("def_c_idx(%u) def_u_idx(%u) def_x_idx(%u)"
		  " def_t_idx(%u) def_att_idx(%u) attn_state(%u)"
		  " spq_prod_idx(%u)\n",
		  bp->def_c_idx, bp->def_u_idx, bp->def_x_idx, bp->def_t_idx,
		  bp->def_att_idx, bp->attn_state, bp->spq_prod_idx);

	/* Rx */
	for_each_rx_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		BNX2X_ERR("fp%d: rx_bd_prod(%x) rx_bd_cons(%x)"
			  " *rx_bd_cons_sb(%x) rx_comp_prod(%x)"
			  " rx_comp_cons(%x) *rx_cons_sb(%x)\n",
			  i, fp->rx_bd_prod, fp->rx_bd_cons,
			  le16_to_cpu(*fp->rx_bd_cons_sb), fp->rx_comp_prod,
			  fp->rx_comp_cons, le16_to_cpu(*fp->rx_cons_sb));
		BNX2X_ERR("      rx_sge_prod(%x) last_max_sge(%x)"
			  " fp_u_idx(%x) *sb_u_idx(%x)\n",
			  fp->rx_sge_prod, fp->last_max_sge,
			  le16_to_cpu(fp->fp_u_idx),
			  fp->status_blk->u_status_block.status_block_index);
	}

	/* Tx */
	for_each_tx_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];
		struct eth_tx_db_data *hw_prods = fp->hw_tx_prods;

		BNX2X_ERR("fp%d: tx_pkt_prod(%x) tx_pkt_cons(%x)"
			  " tx_bd_prod(%x) tx_bd_cons(%x) *tx_cons_sb(%x)\n",
			  i, fp->tx_pkt_prod, fp->tx_pkt_cons, fp->tx_bd_prod,
			  fp->tx_bd_cons, le16_to_cpu(*fp->tx_cons_sb));
		BNX2X_ERR("      fp_c_idx(%x) *sb_c_idx(%x)"
			  " bd data(%x,%x)\n", le16_to_cpu(fp->fp_c_idx),
			  fp->status_blk->c_status_block.status_block_index,
			  hw_prods->packets_prod, hw_prods->bds_prod);
	}

	/* Rings */
	/* Rx */
	for_each_rx_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10);
		end = RX_BD(le16_to_cpu(*fp->rx_cons_sb) + 503);
		for (j = start; j != end; j = RX_BD(j + 1)) {
			u32 *rx_bd = (u32 *)&fp->rx_desc_ring[j];
			struct sw_rx_bd *sw_bd = &fp->rx_buf_ring[j];

			BNX2X_ERR("fp%d: rx_bd[%x]=[%x:%x] sw_bd=[%p]\n",
				  i, j, rx_bd[1], rx_bd[0], sw_bd->skb);
		}

		start = RX_SGE(fp->rx_sge_prod);
		end = RX_SGE(fp->last_max_sge);
		for (j = start; j != end; j = RX_SGE(j + 1)) {
			u32 *rx_sge = (u32 *)&fp->rx_sge_ring[j];
			struct sw_rx_page *sw_page = &fp->rx_page_ring[j];

			BNX2X_ERR("fp%d: rx_sge[%x]=[%x:%x] sw_page=[%p]\n",
				  i, j, rx_sge[1], rx_sge[0], sw_page->page);
		}

		start = RCQ_BD(fp->rx_comp_cons - 10);
		end = RCQ_BD(fp->rx_comp_cons + 503);
		for (j = start; j != end; j = RCQ_BD(j + 1)) {
			u32 *cqe = (u32 *)&fp->rx_comp_ring[j];

			BNX2X_ERR("fp%d: cqe[%x]=[%x:%x:%x:%x]\n",
				  i, j, cqe[0], cqe[1], cqe[2], cqe[3]);
		}
	}

	/* Tx */
	for_each_tx_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		start = TX_BD(le16_to_cpu(*fp->tx_cons_sb) - 10);
		end = TX_BD(le16_to_cpu(*fp->tx_cons_sb) + 245);
		for (j = start; j != end; j = TX_BD(j + 1)) {
			struct sw_tx_bd *sw_bd = &fp->tx_buf_ring[j];

			BNX2X_ERR("fp%d: packet[%x]=[%p,%x]\n",
				  i, j, sw_bd->skb, sw_bd->first_bd);
		}

		start = TX_BD(fp->tx_bd_cons - 10);
		end = TX_BD(fp->tx_bd_cons + 254);
		for (j = start; j != end; j = TX_BD(j + 1)) {
			u32 *tx_bd = (u32 *)&fp->tx_desc_ring[j];

			BNX2X_ERR("fp%d: tx_bd[%x]=[%x:%x:%x:%x]\n",
				  i, j, tx_bd[0], tx_bd[1], tx_bd[2], tx_bd[3]);
		}
	}

	bnx2x_fw_dump(bp);
	bnx2x_mc_assert(bp);
	BNX2X_ERR("end crash dump -----------------\n");
}
static void bnx2x_int_enable(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
	u32 val = REG_RD(bp, addr);
	int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
	int msi = (bp->flags & USING_MSI_FLAG) ? 1 : 0;

	if (msix) {
		val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			 HC_CONFIG_0_REG_INT_LINE_EN_0);
		val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);
	} else if (msi) {
		val &= ~HC_CONFIG_0_REG_INT_LINE_EN_0;
		val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);
	} else {
		val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_INT_LINE_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);

		DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
		   val, port, addr);

		REG_WR(bp, addr, val);

		val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0;
	}

	DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x) mode %s\n",
	   val, port, addr, (msix ? "MSI-X" : (msi ? "MSI" : "INTx")));

	REG_WR(bp, addr, val);

	if (CHIP_IS_E1H(bp)) {
		/* init leading/trailing edge */
		if (IS_E1HMF(bp)) {
			val = (0xee0f | (1 << (BP_E1HVN(bp) + 4)));
			if (bp->port.pmf)
				/* enable nig and gpio3 attention */
				val |= 0x1100;
		} else
			val = 0xffff;

		REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
		REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
	}
}
static void bnx2x_int_disable(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
	u32 val = REG_RD(bp, addr);

	val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
		 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
		 HC_CONFIG_0_REG_INT_LINE_EN_0 |
		 HC_CONFIG_0_REG_ATTN_BIT_EN_0);

	DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
	   val, port, addr);

	/* flush all outstanding writes */
	mmiowb();

	REG_WR(bp, addr, val);
	if (REG_RD(bp, addr) != val)
		BNX2X_ERR("BUG! proper val not read from IGU!\n");
}
static void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw)
{
	int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
	int i, offset;

	/* disable interrupt handling */
	atomic_inc(&bp->intr_sem);
	if (disable_hw)
		/* prevent the HW from sending interrupts */
		bnx2x_int_disable(bp);

	/* make sure all ISRs are done */
	if (msix) {
		synchronize_irq(bp->msix_table[0].vector);
		offset = 1;
		for_each_queue(bp, i)
			synchronize_irq(bp->msix_table[i + offset].vector);
	} else
		synchronize_irq(bp->pdev->irq);

	/* make sure sp_task is not running */
	cancel_delayed_work(&bp->sp_task);
	flush_workqueue(bnx2x_wq);
}
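
/* Once intr_sem is raised the ISRs become no-ops (they test it and bail
   out), so after synchronize_irq() and the workqueue flush no new fast-path
   or slow-path work can be scheduled behind our back. */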
/* fast path */

/*
 * General service functions
 */
static inline void bnx2x_ack_sb(struct bnx2x *bp, u8 sb_id,
				u8 storm, u16 index, u8 op, u8 update)
{
	u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
		       COMMAND_REG_INT_ACK);
	struct igu_ack_register igu_ack;

	igu_ack.status_block_index = index;
	igu_ack.sb_id_and_flags =
			((sb_id << IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT) |
			 (storm << IGU_ACK_REGISTER_STORM_ID_SHIFT) |
			 (update << IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT) |
			 (op << IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT));

	DP(BNX2X_MSG_OFF, "write 0x%08x to HC addr 0x%x\n",
	   (*(u32 *)&igu_ack), hc_addr);
	REG_WR(bp, hc_addr, (*(u32 *)&igu_ack));
}
static inline u16 bnx2x_update_fpsb_idx(struct bnx2x_fastpath *fp)
{
	struct host_status_block *fpsb = fp->status_blk;
	u16 rc = 0;

	barrier(); /* status block is written to by the chip */
	if (fp->fp_c_idx != fpsb->c_status_block.status_block_index) {
		fp->fp_c_idx = fpsb->c_status_block.status_block_index;
		rc |= 1;
	}
	if (fp->fp_u_idx != fpsb->u_status_block.status_block_index) {
		fp->fp_u_idx = fpsb->u_status_block.status_block_index;
		rc |= 2;
	}
	return rc;
}
static u16 bnx2x_ack_int(struct bnx2x *bp)
{
	u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
		       COMMAND_REG_SIMD_MASK);
	u32 result = REG_RD(bp, hc_addr);

	DP(BNX2X_MSG_OFF, "read 0x%08x from HC addr 0x%x\n",
	   result, hc_addr);

	return result;
}

/*
 * fast path service functions
 */
static inline int bnx2x_has_tx_work(struct bnx2x_fastpath *fp)
{
	u16 tx_cons_sb;

	/* Tell compiler that status block fields can change */
	barrier();
	tx_cons_sb = le16_to_cpu(*fp->tx_cons_sb);
	return (fp->tx_pkt_cons != tx_cons_sb);
}
static inline int bnx2x_has_tx_work_unload(struct bnx2x_fastpath *fp)
{
	/* Tell compiler that consumer and producer can change */
	barrier();
	return (fp->tx_pkt_prod != fp->tx_pkt_cons);
}
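
/* The unload variant compares the driver's own producer and consumer
   rather than the status block consumer, since during unload the chip may
   no longer be updating the status block. */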
/* free skb in the packet ring at pos idx
 * return idx of last bd freed
 */
static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			     u16 idx)
{
	struct sw_tx_bd *tx_buf = &fp->tx_buf_ring[idx];
	struct eth_tx_bd *tx_bd;
	struct sk_buff *skb = tx_buf->skb;
	u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
	int nbd;

	DP(BNX2X_MSG_OFF, "pkt_idx %d buff @(%p)->skb %p\n",
	   idx, tx_buf, skb);

	/* unmap first bd */
	DP(BNX2X_MSG_OFF, "free bd_idx %d\n", bd_idx);
	tx_bd = &fp->tx_desc_ring[bd_idx];
	pci_unmap_single(bp->pdev, BD_UNMAP_ADDR(tx_bd),
			 BD_UNMAP_LEN(tx_bd), PCI_DMA_TODEVICE);

	nbd = le16_to_cpu(tx_bd->nbd) - 1;
	new_cons = nbd + tx_buf->first_bd;
#ifdef BNX2X_STOP_ON_ERROR
	if (nbd > (MAX_SKB_FRAGS + 2)) {
		BNX2X_ERR("BAD nbd!\n");
		bnx2x_panic();
	}
#endif

	/* Skip a parse bd and the TSO split header bd
	   since they have no mapping */
	if (nbd)
		bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));

	if (tx_bd->bd_flags.as_bitfield & (ETH_TX_BD_FLAGS_IP_CSUM |
					   ETH_TX_BD_FLAGS_TCP_CSUM |
					   ETH_TX_BD_FLAGS_SW_LSO)) {
		if (--nbd)
			bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
		tx_bd = &fp->tx_desc_ring[bd_idx];
		/* is this a TSO split header bd? */
		if (tx_bd->bd_flags.as_bitfield & ETH_TX_BD_FLAGS_SW_LSO) {
			if (--nbd)
				bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
		}
	}

	/* now free frags */
	while (nbd > 0) {

		DP(BNX2X_MSG_OFF, "free frag bd_idx %d\n", bd_idx);
		tx_bd = &fp->tx_desc_ring[bd_idx];
		pci_unmap_page(bp->pdev, BD_UNMAP_ADDR(tx_bd),
			       BD_UNMAP_LEN(tx_bd), PCI_DMA_TODEVICE);
		if (--nbd)
			bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
	}

	/* release skb */
	WARN_ON(!skb);
	dev_kfree_skb(skb);
	tx_buf->skb = NULL;
	tx_buf->first_bd = 0;

	return new_cons;
}
static inline u16 bnx2x_tx_avail(struct bnx2x_fastpath *fp)
{
	s16 used;
	u16 prod;
	u16 cons;

	barrier(); /* Tell compiler that prod and cons can change */
	prod = fp->tx_bd_prod;
	cons = fp->tx_bd_cons;

	/* NUM_TX_RINGS = number of "next-page" entries
	   It will be used as a threshold */
	used = SUB_S16(prod, cons) + (s16)NUM_TX_RINGS;

#ifdef BNX2X_STOP_ON_ERROR
	WARN_ON(used < 0);
	WARN_ON(used > fp->bp->tx_ring_size);
	WARN_ON((fp->bp->tx_ring_size - used) > MAX_TX_AVAIL);
#endif

	return (s16)(fp->bp->tx_ring_size) - used;
}
static void bnx2x_tx_int(struct bnx2x_fastpath *fp)
{
	struct bnx2x *bp = fp->bp;
	struct netdev_queue *txq;
	u16 hw_cons, sw_cons, bd_cons = fp->tx_bd_cons;
	int done = 0;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return;
#endif

	txq = netdev_get_tx_queue(bp->dev, fp->index);
	hw_cons = le16_to_cpu(*fp->tx_cons_sb);
	sw_cons = fp->tx_pkt_cons;

	while (sw_cons != hw_cons) {
		u16 pkt_cons;

		pkt_cons = TX_BD(sw_cons);

		/* prefetch(bp->tx_buf_ring[pkt_cons].skb); */

		DP(NETIF_MSG_TX_DONE, "hw_cons %u sw_cons %u pkt_cons %u\n",
		   hw_cons, sw_cons, pkt_cons);

/*		if (NEXT_TX_IDX(sw_cons) != hw_cons) {
			rmb();
			prefetch(fp->tx_buf_ring[NEXT_TX_IDX(sw_cons)].skb);
		}
*/
		bd_cons = bnx2x_free_tx_pkt(bp, fp, pkt_cons);
		sw_cons++;
		done++;
	}

	fp->tx_pkt_cons = sw_cons;
	fp->tx_bd_cons = bd_cons;

	/* TBD need a thresh? */
	if (unlikely(netif_tx_queue_stopped(txq))) {

		__netif_tx_lock(txq, smp_processor_id());

		/* Need to make the tx_bd_cons update visible to start_xmit()
		 * before checking for netif_tx_queue_stopped().  Without the
		 * memory barrier, there is a small possibility that
		 * start_xmit() will miss it and cause the queue to be stopped
		 * forever.
		 */
		smp_mb();

		if ((netif_tx_queue_stopped(txq)) &&
		    (bp->state == BNX2X_STATE_OPEN) &&
		    (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3))
			netif_tx_wake_queue(txq);

		__netif_tx_unlock(txq);
	}
}
static void bnx2x_sp_event(struct bnx2x_fastpath *fp,
			   union eth_rx_cqe *rr_cqe)
{
	struct bnx2x *bp = fp->bp;
	int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data);
	int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data);

	DP(BNX2X_MSG_SP,
	   "fp %d cid %d got ramrod #%d state is %x type is %d\n",
	   fp->index, cid, command, bp->state,
	   rr_cqe->ramrod_cqe.ramrod_type);

	bp->spq_left++;

	if (fp->index) {
		switch (command | fp->state) {
		case (RAMROD_CMD_ID_ETH_CLIENT_SETUP |
						BNX2X_FP_STATE_OPENING):
			DP(NETIF_MSG_IFUP, "got MULTI[%d] setup ramrod\n",
			   cid);
			fp->state = BNX2X_FP_STATE_OPEN;
			break;

		case (RAMROD_CMD_ID_ETH_HALT | BNX2X_FP_STATE_HALTING):
			DP(NETIF_MSG_IFDOWN, "got MULTI[%d] halt ramrod\n",
			   cid);
			fp->state = BNX2X_FP_STATE_HALTED;
			break;

		default:
			BNX2X_ERR("unexpected MC reply (%d) "
				  "fp->state is %x\n", command, fp->state);
			break;
		}
		mb(); /* force bnx2x_wait_ramrod() to see the change */
		return;
	}

	switch (command | bp->state) {
	case (RAMROD_CMD_ID_ETH_PORT_SETUP | BNX2X_STATE_OPENING_WAIT4_PORT):
		DP(NETIF_MSG_IFUP, "got setup ramrod\n");
		bp->state = BNX2X_STATE_OPEN;
		break;

	case (RAMROD_CMD_ID_ETH_HALT | BNX2X_STATE_CLOSING_WAIT4_HALT):
		DP(NETIF_MSG_IFDOWN, "got halt ramrod\n");
		bp->state = BNX2X_STATE_CLOSING_WAIT4_DELETE;
		fp->state = BNX2X_FP_STATE_HALTED;
		break;

	case (RAMROD_CMD_ID_ETH_CFC_DEL | BNX2X_STATE_CLOSING_WAIT4_HALT):
		DP(NETIF_MSG_IFDOWN, "got delete ramrod for MULTI[%d]\n", cid);
		bnx2x_fp(bp, cid, state) = BNX2X_FP_STATE_CLOSED;
		break;

	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_OPEN):
	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_DIAG):
		DP(NETIF_MSG_IFUP, "got set mac ramrod\n");
		bp->set_mac_pending = 0;
		break;

	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_CLOSING_WAIT4_HALT):
		DP(NETIF_MSG_IFDOWN, "got (un)set mac ramrod\n");
		break;

	default:
		BNX2X_ERR("unexpected MC reply (%d) bp->state is %x\n",
			  command, bp->state);
		break;
	}
	mb(); /* force bnx2x_wait_ramrod() to see the change */
}
static inline void bnx2x_free_rx_sge(struct bnx2x *bp,
				     struct bnx2x_fastpath *fp, u16 index)
{
	struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
	struct page *page = sw_buf->page;
	struct eth_rx_sge *sge = &fp->rx_sge_ring[index];

	/* Skip "next page" elements */
	if (!page)
		return;

	pci_unmap_page(bp->pdev, pci_unmap_addr(sw_buf, mapping),
		       SGE_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);
	__free_pages(page, PAGES_PER_SGE_SHIFT);

	sw_buf->page = NULL;
	sge->addr_hi = 0;
	sge->addr_lo = 0;
}
static inline void bnx2x_free_rx_sge_range(struct bnx2x *bp,
					   struct bnx2x_fastpath *fp, int last)
{
	int i;

	for (i = 0; i < last; i++)
		bnx2x_free_rx_sge(bp, fp, i);
}
static inline int bnx2x_alloc_rx_sge(struct bnx2x *bp,
				     struct bnx2x_fastpath *fp, u16 index)
{
	struct page *page = alloc_pages(GFP_ATOMIC, PAGES_PER_SGE_SHIFT);
	struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
	struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
	dma_addr_t mapping;

	if (unlikely(page == NULL))
		return -ENOMEM;

	mapping = pci_map_page(bp->pdev, page, 0, SGE_PAGE_SIZE*PAGES_PER_SGE,
			       PCI_DMA_FROMDEVICE);
	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
		__free_pages(page, PAGES_PER_SGE_SHIFT);
		return -ENOMEM;
	}

	sw_buf->page = page;
	pci_unmap_addr_set(sw_buf, mapping, mapping);

	sge->addr_hi = cpu_to_le32(U64_HI(mapping));
	sge->addr_lo = cpu_to_le32(U64_LO(mapping));

	return 0;
}
static inline int bnx2x_alloc_rx_skb(struct bnx2x *bp,
				     struct bnx2x_fastpath *fp, u16 index)
{
	struct sk_buff *skb;
	struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
	struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
	dma_addr_t mapping;

	skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
	if (unlikely(skb == NULL))
		return -ENOMEM;

	mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_size,
				 PCI_DMA_FROMDEVICE);
	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
		dev_kfree_skb(skb);
		return -ENOMEM;
	}

	rx_buf->skb = skb;
	pci_unmap_addr_set(rx_buf, mapping, mapping);

	rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

	return 0;
}
/* note that we are not allocating a new skb,
 * we are just moving one from cons to prod
 * we are not creating a new mapping,
 * so there is no need to check for dma_mapping_error().
 */
static void bnx2x_reuse_rx_skb(struct bnx2x_fastpath *fp,
			       struct sk_buff *skb, u16 cons, u16 prod)
{
	struct bnx2x *bp = fp->bp;
	struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
	struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
	struct eth_rx_bd *cons_bd = &fp->rx_desc_ring[cons];
	struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];

	pci_dma_sync_single_for_device(bp->pdev,
				       pci_unmap_addr(cons_rx_buf, mapping),
				       RX_COPY_THRESH, PCI_DMA_FROMDEVICE);

	prod_rx_buf->skb = cons_rx_buf->skb;
	pci_unmap_addr_set(prod_rx_buf, mapping,
			   pci_unmap_addr(cons_rx_buf, mapping));
	*prod_bd = *cons_bd;
}
static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
					     u16 idx)
{
	u16 last_max = fp->last_max_sge;

	if (SUB_S16(idx, last_max) > 0)
		fp->last_max_sge = idx;
}
static void bnx2x_clear_sge_mask_next_elems(struct bnx2x_fastpath *fp)
{
	int i, j;

	for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
		int idx = RX_SGE_CNT * i - 1;

		for (j = 0; j < 2; j++) {
			SGE_MASK_CLEAR_BIT(fp, idx);
			idx--;
		}
	}
}
static void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
				  struct eth_fast_path_rx_cqe *fp_cqe)
{
	struct bnx2x *bp = fp->bp;
	u16 sge_len = SGE_PAGE_ALIGN(le16_to_cpu(fp_cqe->pkt_len) -
				     le16_to_cpu(fp_cqe->len_on_bd)) >>
		      SGE_PAGE_SHIFT;
	u16 last_max, last_elem, first_elem;
	u16 delta = 0;
	u16 i;

	if (!sge_len)
		return;

	/* First mark all used pages */
	for (i = 0; i < sge_len; i++)
		SGE_MASK_CLEAR_BIT(fp, RX_SGE(le16_to_cpu(fp_cqe->sgl[i])));

	DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
	   sge_len - 1, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));

	/* Here we assume that the last SGE index is the biggest */
	prefetch((void *)(fp->sge_mask));
	bnx2x_update_last_max_sge(fp, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));

	last_max = RX_SGE(fp->last_max_sge);
	last_elem = last_max >> RX_SGE_MASK_ELEM_SHIFT;
	first_elem = RX_SGE(fp->rx_sge_prod) >> RX_SGE_MASK_ELEM_SHIFT;

	/* If ring is not full */
	if (last_elem + 1 != first_elem)
		last_elem++;

	/* Now update the prod */
	for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
		if (likely(fp->sge_mask[i]))
			break;

		fp->sge_mask[i] = RX_SGE_MASK_ELEM_ONE_MASK;
		delta += RX_SGE_MASK_ELEM_SZ;
	}

	if (delta > 0) {
		fp->rx_sge_prod += delta;
		/* clear page-end entries */
		bnx2x_clear_sge_mask_next_elems(fp);
	}

	DP(NETIF_MSG_RX_STATUS,
	   "fp->last_max_sge = %d fp->rx_sge_prod = %d\n",
	   fp->last_max_sge, fp->rx_sge_prod);
}
static inline void bnx2x_init_sge_ring_bit_mask(struct bnx2x_fastpath *fp)
{
	/* Set the mask to all 1-s: it's faster to compare to 0 than to 0xf-s */
	memset(fp->sge_mask, 0xff,
	       (NUM_RX_SGE >> RX_SGE_MASK_ELEM_SHIFT)*sizeof(u64));

	/* Clear the two last indices in the page to 1:
	   these are the indices that correspond to the "next" element,
	   hence will never be indicated and should be removed from
	   the calculations. */
	bnx2x_clear_sge_mask_next_elems(fp);
}
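
/* The SGE mask is a bitmap with one bit per SGE ring entry, grouped into
   u64 elements (RX_SGE_MASK_ELEM_SZ bits each). A set bit means the entry
   has not yet been consumed by the FW; bnx2x_update_sge_prod() clears bits
   for pages the FW has used and advances rx_sge_prod only across elements
   that are fully cleared, re-marking them as available. */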
static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
			    struct sk_buff *skb, u16 cons, u16 prod)
{
	struct bnx2x *bp = fp->bp;
	struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
	struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
	struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
	dma_addr_t mapping;

	/* move empty skb from pool to prod and map it */
	prod_rx_buf->skb = fp->tpa_pool[queue].skb;
	mapping = pci_map_single(bp->pdev, fp->tpa_pool[queue].skb->data,
				 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
	pci_unmap_addr_set(prod_rx_buf, mapping, mapping);

	/* move partial skb from cons to pool (don't unmap yet) */
	fp->tpa_pool[queue] = *cons_rx_buf;

	/* mark bin state as start - print error if current state != stop */
	if (fp->tpa_state[queue] != BNX2X_TPA_STOP)
		BNX2X_ERR("start of bin not in stop [%d]\n", queue);

	fp->tpa_state[queue] = BNX2X_TPA_START;

	/* point prod_bd to new skb */
	prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

#ifdef BNX2X_STOP_ON_ERROR
	fp->tpa_queue_used |= (1 << queue);
#ifdef __powerpc64__
	DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n",
#else
	DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
#endif
	   fp->tpa_queue_used);
#endif
}
static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			       struct sk_buff *skb,
			       struct eth_fast_path_rx_cqe *fp_cqe,
			       u16 cqe_idx)
{
	struct sw_rx_page *rx_pg, old_rx_pg;
	u16 len_on_bd = le16_to_cpu(fp_cqe->len_on_bd);
	u32 i, frag_len, frag_size, pages;
	int err;
	int j;

	frag_size = le16_to_cpu(fp_cqe->pkt_len) - len_on_bd;
	pages = SGE_PAGE_ALIGN(frag_size) >> SGE_PAGE_SHIFT;

	/* This is needed in order to enable forwarding support */
	if (frag_size)
		skb_shinfo(skb)->gso_size = min((u32)SGE_PAGE_SIZE,
					       max(frag_size, (u32)len_on_bd));

#ifdef BNX2X_STOP_ON_ERROR
	if (pages >
	    min((u32)8, (u32)MAX_SKB_FRAGS) * SGE_PAGE_SIZE * PAGES_PER_SGE) {
		BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
			  pages, cqe_idx);
		BNX2X_ERR("fp_cqe->pkt_len = %d fp_cqe->len_on_bd = %d\n",
			  fp_cqe->pkt_len, len_on_bd);
		bnx2x_panic();
		return -EINVAL;
	}
#endif

	/* Run through the SGL and compose the fragmented skb */
	for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
		u16 sge_idx = RX_SGE(le16_to_cpu(fp_cqe->sgl[j]));

		/* FW gives the indices of the SGE as if the ring is an array
		   (meaning that "next" element will consume 2 indices) */
		frag_len = min(frag_size, (u32)(SGE_PAGE_SIZE*PAGES_PER_SGE));
		rx_pg = &fp->rx_page_ring[sge_idx];
		old_rx_pg = *rx_pg;

		/* If we fail to allocate a substitute page, we simply stop
		   where we are and drop the whole packet */
		err = bnx2x_alloc_rx_sge(bp, fp, sge_idx);
		if (unlikely(err)) {
			fp->eth_q_stats.rx_skb_alloc_failed++;
			return err;
		}

		/* Unmap the page as we are going to pass it to the stack */
		pci_unmap_page(bp->pdev, pci_unmap_addr(&old_rx_pg, mapping),
			      SGE_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);

		/* Add one frag and update the appropriate fields in the skb */
		skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);

		skb->data_len += frag_len;
		skb->truesize += frag_len;
		skb->len += frag_len;

		frag_size -= frag_len;
	}

	return 0;
}
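
/* TPA (LRO) lifecycle: a TPA_START CQE moves the current skb into a
   per-queue bin (bnx2x_tpa_start() above), the FW then aggregates segments
   into SGE pages, and the TPA_END CQE lands in bnx2x_tpa_stop() below,
   which rebuilds the header skb, attaches the aggregated pages via
   bnx2x_fill_frag_skb() and hands the result to the stack. */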
static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			   u16 queue, int pad, int len, union eth_rx_cqe *cqe,
			   u16 cqe_idx)
{
	struct sw_rx_bd *rx_buf = &fp->tpa_pool[queue];
	struct sk_buff *skb = rx_buf->skb;
	/* alloc new skb */
	struct sk_buff *new_skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);

	/* Unmap skb in the pool anyway, as we are going to change
	   pool entry status to BNX2X_TPA_STOP even if new skb allocation
	   fails. */
	pci_unmap_single(bp->pdev, pci_unmap_addr(rx_buf, mapping),
			 bp->rx_buf_size, PCI_DMA_FROMDEVICE);

	if (likely(new_skb)) {
		/* fix ip xsum and give it to the stack */
		/* (no need to map the new skb) */
#ifdef BCM_VLAN
		int is_vlan_cqe =
			(le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
			 PARSING_FLAGS_VLAN);
		int is_not_hwaccel_vlan_cqe =
			(is_vlan_cqe && (!(bp->flags & HW_VLAN_RX_FLAG)));
#endif

		prefetch(skb);
		prefetch(((char *)(skb)) + 128);

#ifdef BNX2X_STOP_ON_ERROR
		if (pad + len > bp->rx_buf_size) {
			BNX2X_ERR("skb_put is about to fail... "
				  "pad %d len %d rx_buf_size %d\n",
				  pad, len, bp->rx_buf_size);
			bnx2x_panic();
			return;
		}
#endif

		skb_reserve(skb, pad);
		skb_put(skb, len);

		skb->protocol = eth_type_trans(skb, bp->dev);
		skb->ip_summed = CHECKSUM_UNNECESSARY;

		{
			struct iphdr *iph;

			iph = (struct iphdr *)skb->data;
#ifdef BCM_VLAN
			/* If there is no Rx VLAN offloading -
			   take VLAN tag into an account */
			if (unlikely(is_not_hwaccel_vlan_cqe))
				iph = (struct iphdr *)((u8 *)iph + VLAN_HLEN);
#endif
			iph->check = 0;
			iph->check = ip_fast_csum((u8 *)iph, iph->ihl);
		}

		if (!bnx2x_fill_frag_skb(bp, fp, skb,
					 &cqe->fast_path_cqe, cqe_idx)) {
#ifdef BCM_VLAN
			if ((bp->vlgrp != NULL) && is_vlan_cqe &&
			    (!is_not_hwaccel_vlan_cqe))
				vlan_hwaccel_receive_skb(skb, bp->vlgrp,
						le16_to_cpu(cqe->fast_path_cqe.
							    vlan_tag));
			else
#endif
				netif_receive_skb(skb);
		} else {
			DP(NETIF_MSG_RX_STATUS, "Failed to allocate new pages"
			   " - dropping packet!\n");
			dev_kfree_skb(skb);
		}

		/* put new skb in bin */
		fp->tpa_pool[queue].skb = new_skb;

	} else {
		/* else drop the packet and keep the buffer in the bin */
		DP(NETIF_MSG_RX_STATUS,
		   "Failed to allocate new skb - dropping packet!\n");
		fp->eth_q_stats.rx_skb_alloc_failed++;
	}

	fp->tpa_state[queue] = BNX2X_TPA_STOP;
}
static inline void bnx2x_update_rx_prod(struct bnx2x *bp,
					struct bnx2x_fastpath *fp,
					u16 bd_prod, u16 rx_comp_prod,
					u16 rx_sge_prod)
{
	struct ustorm_eth_rx_producers rx_prods = {0};
	int i;

	/* Update producers */
	rx_prods.bd_prod = bd_prod;
	rx_prods.cqe_prod = rx_comp_prod;
	rx_prods.sge_prod = rx_sge_prod;

	/*
	 * Make sure that the BD and SGE data is updated before updating the
	 * producers since FW might read the BD/SGE right after the producer
	 * is updated.
	 * This is only applicable for weak-ordered memory model archs such
	 * as IA-64. The following barrier is also mandatory since the FW
	 * assumes BDs must have buffers.
	 */
	wmb();

	for (i = 0; i < sizeof(struct ustorm_eth_rx_producers)/4; i++)
		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_RX_PRODS_OFFSET(BP_PORT(bp), fp->cl_id) + i*4,
		       ((u32 *)&rx_prods)[i]);

	mmiowb(); /* keep prod updates ordered */

	DP(NETIF_MSG_RX_STATUS,
	   "queue[%d]: wrote bd_prod %u cqe_prod %u sge_prod %u\n",
	   fp->index, bd_prod, rx_comp_prod, rx_sge_prod);
}
static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
{
	struct bnx2x *bp = fp->bp;
	u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
	u16 hw_comp_cons, sw_comp_cons, sw_comp_prod;
	int rx_pkt = 0;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return 0;
#endif

	/* CQ "next element" is of the size of the regular element,
	   that's why it's ok here */
	hw_comp_cons = le16_to_cpu(*fp->rx_cons_sb);
	if ((hw_comp_cons & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
		hw_comp_cons++;

	bd_cons = fp->rx_bd_cons;
	bd_prod = fp->rx_bd_prod;
	bd_prod_fw = bd_prod;
	sw_comp_cons = fp->rx_comp_cons;
	sw_comp_prod = fp->rx_comp_prod;

	/* Memory barrier necessary as speculative reads of the rx
	 * buffer can be ahead of the index in the status block
	 */
	rmb();

	DP(NETIF_MSG_RX_STATUS,
	   "queue[%d]: hw_comp_cons %u sw_comp_cons %u\n",
	   fp->index, hw_comp_cons, sw_comp_cons);

	while (sw_comp_cons != hw_comp_cons) {
		struct sw_rx_bd *rx_buf = NULL;
		struct sk_buff *skb;
		union eth_rx_cqe *cqe;
		u8 cqe_fp_flags;
		u16 len, pad;

		comp_ring_cons = RCQ_BD(sw_comp_cons);
		bd_prod = RX_BD(bd_prod);
		bd_cons = RX_BD(bd_cons);

		cqe = &fp->rx_comp_ring[comp_ring_cons];
		cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;

		DP(NETIF_MSG_RX_STATUS, "CQE type %x err %x status %x"
		   " queue %x vlan %x len %u\n", CQE_TYPE(cqe_fp_flags),
		   cqe_fp_flags, cqe->fast_path_cqe.status_flags,
		   le32_to_cpu(cqe->fast_path_cqe.rss_hash_result),
		   le16_to_cpu(cqe->fast_path_cqe.vlan_tag),
		   le16_to_cpu(cqe->fast_path_cqe.pkt_len));

		/* is this a slowpath msg? */
		if (unlikely(CQE_TYPE(cqe_fp_flags))) {
			bnx2x_sp_event(fp, cqe);
			goto next_cqe;

		/* this is an rx packet */
		} else {
			rx_buf = &fp->rx_buf_ring[bd_cons];
			skb = rx_buf->skb;
			len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
			pad = cqe->fast_path_cqe.placement_offset;

			/* If CQE is marked both TPA_START and TPA_END
			   it is a non-TPA CQE */
			if ((!fp->disable_tpa) &&
			    (TPA_TYPE(cqe_fp_flags) !=
			     (TPA_TYPE_START | TPA_TYPE_END))) {
				u16 queue = cqe->fast_path_cqe.queue_index;

				if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_START) {
					DP(NETIF_MSG_RX_STATUS,
					   "calling tpa_start on queue %d\n",
					   queue);

					bnx2x_tpa_start(fp, queue, skb,
							bd_cons, bd_prod);
					goto next_rx;
				}

				if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_END) {
					DP(NETIF_MSG_RX_STATUS,
					   "calling tpa_stop on queue %d\n",
					   queue);

					if (!BNX2X_RX_SUM_FIX(cqe))
						BNX2X_ERR("STOP on non-TCP "
							  "data\n");

					/* This is a size of the linear data
					   on this skb */
					len = le16_to_cpu(cqe->fast_path_cqe.
								len_on_bd);
					bnx2x_tpa_stop(bp, fp, queue, pad,
						    len, cqe, comp_ring_cons);
#ifdef BNX2X_STOP_ON_ERROR
					if (bp->panic)
						return 0;
#endif

					bnx2x_update_sge_prod(fp,
							&cqe->fast_path_cqe);
					goto next_cqe;
				}
			}

			pci_dma_sync_single_for_device(bp->pdev,
					pci_unmap_addr(rx_buf, mapping),
						       pad + RX_COPY_THRESH,
						       PCI_DMA_FROMDEVICE);
			prefetch(skb);
			prefetch(((char *)(skb)) + 128);

			/* is this an error packet? */
			if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
				DP(NETIF_MSG_RX_ERR,
				   "ERROR flags %x rx packet %u\n",
				   cqe_fp_flags, sw_comp_cons);
				fp->eth_q_stats.rx_err_discard_pkt++;
				goto reuse_rx;
			}

			/* Since we don't have a jumbo ring
			 * copy small packets if mtu > 1500
			 */
			if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
			    (len <= RX_COPY_THRESH)) {
				struct sk_buff *new_skb;

				new_skb = netdev_alloc_skb(bp->dev,
							   len + pad);
				if (new_skb == NULL) {
					DP(NETIF_MSG_RX_ERR,
					   "ERROR packet dropped "
					   "because of alloc failure\n");
					fp->eth_q_stats.rx_skb_alloc_failed++;
					goto reuse_rx;
				}

				/* aligned copy */
				skb_copy_from_linear_data_offset(skb, pad,
						    new_skb->data + pad, len);
				skb_reserve(new_skb, pad);
				skb_put(new_skb, len);

				bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);

				skb = new_skb;

			} else if (bnx2x_alloc_rx_skb(bp, fp, bd_prod) == 0) {
				pci_unmap_single(bp->pdev,
					pci_unmap_addr(rx_buf, mapping),
						 bp->rx_buf_size,
						 PCI_DMA_FROMDEVICE);
				skb_reserve(skb, pad);
				skb_put(skb, len);

			} else {
				DP(NETIF_MSG_RX_ERR,
				   "ERROR packet dropped because "
				   "of alloc failure\n");
				fp->eth_q_stats.rx_skb_alloc_failed++;
reuse_rx:
				bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
				goto next_rx;
			}

			skb->protocol = eth_type_trans(skb, bp->dev);

			skb->ip_summed = CHECKSUM_NONE;
			if (bp->rx_csum) {
				if (likely(BNX2X_RX_CSUM_OK(cqe)))
					skb->ip_summed = CHECKSUM_UNNECESSARY;
				else
					fp->eth_q_stats.hw_csum_err++;
			}
		}

		skb_record_rx_queue(skb, fp->index);
#ifdef BCM_VLAN
		if ((bp->vlgrp != NULL) && (bp->flags & HW_VLAN_RX_FLAG) &&
		    (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
		     PARSING_FLAGS_VLAN))
			vlan_hwaccel_receive_skb(skb, bp->vlgrp,
				le16_to_cpu(cqe->fast_path_cqe.vlan_tag));
		else
#endif
			netif_receive_skb(skb);


next_rx:
		rx_buf->skb = NULL;

		bd_cons = NEXT_RX_IDX(bd_cons);
		bd_prod = NEXT_RX_IDX(bd_prod);
		bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
		rx_pkt++;
next_cqe:
		sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
		sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);

		if (rx_pkt == budget)
			break;
	} /* while */

	fp->rx_bd_cons = bd_cons;
	fp->rx_bd_prod = bd_prod_fw;
	fp->rx_comp_cons = sw_comp_cons;
	fp->rx_comp_prod = sw_comp_prod;

	/* Update producers */
	bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
			     fp->rx_sge_prod);

	fp->rx_pkt += rx_pkt;
	fp->rx_calls++;

	return rx_pkt;
}
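
/* The MSI-X fast-path ISR below only disables further status-block
   interrupts and schedules NAPI; the actual Rx/Tx work is done in poll
   context by bnx2x_rx_int()/bnx2x_tx_int() within the NAPI budget. */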
static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
{
	struct bnx2x_fastpath *fp = fp_cookie;
	struct bnx2x *bp = fp->bp;
	int index = fp->index;

	/* Return here if interrupt is disabled */
	if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
		DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
		return IRQ_HANDLED;
	}

	DP(BNX2X_MSG_FP, "got an MSI-X interrupt on IDX:SB [%d:%d]\n",
	   index, fp->sb_id);
	bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return IRQ_HANDLED;
#endif

	prefetch(fp->rx_cons_sb);
	prefetch(fp->tx_cons_sb);
	prefetch(&fp->status_blk->c_status_block.status_block_index);
	prefetch(&fp->status_blk->u_status_block.status_block_index);

	napi_schedule(&bnx2x_fp(bp, index, napi));

	return IRQ_HANDLED;
}
static irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
{
	struct bnx2x *bp = netdev_priv(dev_instance);
	u16 status = bnx2x_ack_int(bp);
	u16 mask;

	/* Return here if interrupt is shared and it's not for us */
	if (unlikely(status == 0)) {
		DP(NETIF_MSG_INTR, "not our interrupt!\n");
		return IRQ_NONE;
	}
	DP(NETIF_MSG_INTR, "got an interrupt status 0x%x\n", status);

	/* Return here if interrupt is disabled */
	if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
		DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
		return IRQ_HANDLED;
	}

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return IRQ_HANDLED;
#endif

	mask = 0x2 << bp->fp[0].sb_id;
	if (status & mask) {
		struct bnx2x_fastpath *fp = &bp->fp[0];

		prefetch(fp->rx_cons_sb);
		prefetch(fp->tx_cons_sb);
		prefetch(&fp->status_blk->c_status_block.status_block_index);
		prefetch(&fp->status_blk->u_status_block.status_block_index);

		napi_schedule(&bnx2x_fp(bp, 0, napi));

		status &= ~mask;
	}

	if (unlikely(status & 0x1)) {
		queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);

		status &= ~0x1;
		if (!status)
			return IRQ_HANDLED;
	}

	if (status)
		DP(NETIF_MSG_INTR, "got an unknown interrupt! (status %u)\n",
		   status);

	return IRQ_HANDLED;
}
/* end of fast path */

static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event);

/* Link */

/*
 * General service functions
 */
static int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource)
{
	u32 lock_status;
	u32 resource_bit = (1 << resource);
	int func = BP_FUNC(bp);
	u32 hw_lock_control_reg;
	int cnt;

	/* Validating that the resource is within range */
	if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
		DP(NETIF_MSG_HW,
		   "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
		   resource, HW_LOCK_MAX_RESOURCE_VALUE);
		return -EINVAL;
	}

	if (func <= 5) {
		hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
	} else {
		hw_lock_control_reg =
				(MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
	}

	/* Validating that the resource is not already taken */
	lock_status = REG_RD(bp, hw_lock_control_reg);
	if (lock_status & resource_bit) {
		DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n",
		   lock_status, resource_bit);
		return -EEXIST;
	}

	/* Try for 5 second every 5ms */
	for (cnt = 0; cnt < 1000; cnt++) {
		/* Try to acquire the lock */
		REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
		lock_status = REG_RD(bp, hw_lock_control_reg);
		if (lock_status & resource_bit)
			return 0;

		msleep(5);
	}
	DP(NETIF_MSG_HW, "Timeout\n");
	return -EAGAIN;
}
static int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource)
{
	u32 lock_status;
	u32 resource_bit = (1 << resource);
	int func = BP_FUNC(bp);
	u32 hw_lock_control_reg;

	/* Validating that the resource is within range */
	if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
		DP(NETIF_MSG_HW,
		   "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
		   resource, HW_LOCK_MAX_RESOURCE_VALUE);
		return -EINVAL;
	}

	if (func <= 5) {
		hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
	} else {
		hw_lock_control_reg =
				(MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
	}

	/* Validating that the resource is currently taken */
	lock_status = REG_RD(bp, hw_lock_control_reg);
	if (!(lock_status & resource_bit)) {
		DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n",
		   lock_status, resource_bit);
		return -EFAULT;
	}

	REG_WR(bp, hw_lock_control_reg, resource_bit);
	return 0;
}
/* HW Lock for shared dual port PHYs */
static void bnx2x_acquire_phy_lock(struct bnx2x *bp)
{
	mutex_lock(&bp->port.phy_mutex);

	if (bp->port.need_hw_lock)
		bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
}

static void bnx2x_release_phy_lock(struct bnx2x *bp)
{
	if (bp->port.need_hw_lock)
		bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);

	mutex_unlock(&bp->port.phy_mutex);
}
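
/* Lock order: always take the software phy_mutex first, then the MDIO HW
   lock when the board requires it; release happens in the reverse order,
   as in the two helpers above. */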
int bnx2x_get_gpio(struct bnx2x *bp, int gpio_num, u8 port)
{
	/* The GPIO should be swapped if swap register is set and active */
	int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
			 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
	int gpio_shift = gpio_num +
			(gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
	u32 gpio_mask = (1 << gpio_shift);
	u32 gpio_reg;
	int value;

	if (gpio_num > MISC_REGISTERS_GPIO_3) {
		BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
		return -EINVAL;
	}

	/* read GPIO value */
	gpio_reg = REG_RD(bp, MISC_REG_GPIO);

	/* get the requested pin value */
	if ((gpio_reg & gpio_mask) == gpio_mask)
		value = 1;
	else
		value = 0;

	DP(NETIF_MSG_LINK, "pin %d value 0x%x\n", gpio_num, value);

	return value;
}
int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
{
	/* The GPIO should be swapped if swap register is set and active */
	int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
			 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
	int gpio_shift = gpio_num +
			(gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
	u32 gpio_mask = (1 << gpio_shift);
	u32 gpio_reg;

	if (gpio_num > MISC_REGISTERS_GPIO_3) {
		BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
		return -EINVAL;
	}

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
	/* read GPIO and mask except the float bits */
	gpio_reg = (REG_RD(bp, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT);

	switch (mode) {
	case MISC_REGISTERS_GPIO_OUTPUT_LOW:
		DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output low\n",
		   gpio_num, gpio_shift);
		/* clear FLOAT and set CLR */
		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
		gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS);
		break;

	case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
		DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output high\n",
		   gpio_num, gpio_shift);
		/* clear FLOAT and set SET */
		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
		gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_SET_POS);
		break;

	case MISC_REGISTERS_GPIO_INPUT_HI_Z:
		DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> input\n",
		   gpio_num, gpio_shift);
		/* set FLOAT */
		gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
		break;

	default:
		break;
	}

	REG_WR(bp, MISC_REG_GPIO, gpio_reg);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);

	return 0;
}
int bnx2x_set_gpio_int(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
{
	/* The GPIO should be swapped if swap register is set and active */
	int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
			 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
	int gpio_shift = gpio_num +
			(gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
	u32 gpio_mask = (1 << gpio_shift);
	u32 gpio_reg;

	if (gpio_num > MISC_REGISTERS_GPIO_3) {
		BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
		return -EINVAL;
	}

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
	/* read GPIO int */
	gpio_reg = REG_RD(bp, MISC_REG_GPIO_INT);

	switch (mode) {
	case MISC_REGISTERS_GPIO_INT_OUTPUT_CLR:
		DP(NETIF_MSG_LINK, "Clear GPIO INT %d (shift %d) -> "
		   "output low\n", gpio_num, gpio_shift);
		/* clear SET and set CLR */
		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
		gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
		break;

	case MISC_REGISTERS_GPIO_INT_OUTPUT_SET:
		DP(NETIF_MSG_LINK, "Set GPIO INT %d (shift %d) -> "
		   "output high\n", gpio_num, gpio_shift);
		/* clear CLR and set SET */
		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
		gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
		break;

	default:
		break;
	}

	REG_WR(bp, MISC_REG_GPIO_INT, gpio_reg);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);

	return 0;
}
static int bnx2x_set_spio(struct bnx2x *bp, int spio_num, u32 mode)
{
	u32 spio_mask = (1 << spio_num);
	u32 spio_reg;

	if ((spio_num < MISC_REGISTERS_SPIO_4) ||
	    (spio_num > MISC_REGISTERS_SPIO_7)) {
		BNX2X_ERR("Invalid SPIO %d\n", spio_num);
		return -EINVAL;
	}

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
	/* read SPIO and mask except the float bits */
	spio_reg = (REG_RD(bp, MISC_REG_SPIO) & MISC_REGISTERS_SPIO_FLOAT);

	switch (mode) {
	case MISC_REGISTERS_SPIO_OUTPUT_LOW:
		DP(NETIF_MSG_LINK, "Set SPIO %d -> output low\n", spio_num);
		/* clear FLOAT and set CLR */
		spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
		spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_CLR_POS);
		break;

	case MISC_REGISTERS_SPIO_OUTPUT_HIGH:
		DP(NETIF_MSG_LINK, "Set SPIO %d -> output high\n", spio_num);
		/* clear FLOAT and set SET */
		spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
		spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_SET_POS);
		break;

	case MISC_REGISTERS_SPIO_INPUT_HI_Z:
		DP(NETIF_MSG_LINK, "Set SPIO %d -> input\n", spio_num);
		/* set FLOAT */
		spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
		break;

	default:
		break;
	}

	REG_WR(bp, MISC_REG_SPIO, spio_reg);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);

	return 0;
}
static void bnx2x_calc_fc_adv(struct bnx2x *bp)
{
	switch (bp->link_vars.ieee_fc &
		MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK) {
	case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE:
		bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
					  ADVERTISED_Pause);
		break;

	case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH:
		bp->port.advertising |= (ADVERTISED_Asym_Pause |
					 ADVERTISED_Pause);
		break;

	case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC:
		bp->port.advertising |= ADVERTISED_Asym_Pause;
		break;

	default:
		bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
					  ADVERTISED_Pause);
		break;
	}
}
static void bnx2x_link_report(struct bnx2x *bp)
{
	if (bp->link_vars.link_up) {
		if (bp->state == BNX2X_STATE_OPEN)
			netif_carrier_on(bp->dev);
		printk(KERN_INFO PFX "%s NIC Link is Up, ", bp->dev->name);

		printk("%d Mbps ", bp->link_vars.line_speed);

		if (bp->link_vars.duplex == DUPLEX_FULL)
			printk("full duplex");
		else
			printk("half duplex");

		if (bp->link_vars.flow_ctrl != BNX2X_FLOW_CTRL_NONE) {
			if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) {
				printk(", receive ");
				if (bp->link_vars.flow_ctrl &
				    BNX2X_FLOW_CTRL_TX)
					printk("& transmit ");
			} else {
				printk(", transmit ");
			}
			printk("flow control ON");
		}
		printk("\n");

	} else { /* link_down */
		netif_carrier_off(bp->dev);
		printk(KERN_ERR PFX "%s NIC Link is Down\n", bp->dev->name);
	}
}
static u8 bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode)
{
	if (!BP_NOMCP(bp)) {
		u8 rc;

		/* Initialize link parameters structure variables */
		/* It is recommended to turn off RX FC for jumbo frames
		   for better performance */
		if (IS_E1HMF(bp))
			bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;
		else if (bp->dev->mtu > 5000)
			bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_TX;
		else
			bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;

		bnx2x_acquire_phy_lock(bp);

		if (load_mode == LOAD_DIAG)
			bp->link_params.loopback_mode = LOOPBACK_XGXS_10;

		rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars);

		bnx2x_release_phy_lock(bp);

		bnx2x_calc_fc_adv(bp);

		if (CHIP_REV_IS_SLOW(bp) && bp->link_vars.link_up) {
			bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
			bnx2x_link_report(bp);
		}

		return rc;
	}
	BNX2X_ERR("Bootcode is missing - can not initialize link\n");
	return -EINVAL;
}
static void bnx2x_link_set(struct bnx2x *bp)
{
	if (!BP_NOMCP(bp)) {
		bnx2x_acquire_phy_lock(bp);
		bnx2x_phy_init(&bp->link_params, &bp->link_vars);
		bnx2x_release_phy_lock(bp);

		bnx2x_calc_fc_adv(bp);
	} else
		BNX2X_ERR("Bootcode is missing - can not set link\n");
}

static void bnx2x__link_reset(struct bnx2x *bp)
{
	if (!BP_NOMCP(bp)) {
		bnx2x_acquire_phy_lock(bp);
		bnx2x_link_reset(&bp->link_params, &bp->link_vars, 1);
		bnx2x_release_phy_lock(bp);
	} else
		BNX2X_ERR("Bootcode is missing - can not reset link\n");
}

static u8 bnx2x_link_test(struct bnx2x *bp)
{
	u8 rc;

	bnx2x_acquire_phy_lock(bp);
	rc = bnx2x_test_link(&bp->link_params, &bp->link_vars);
	bnx2x_release_phy_lock(bp);

	return rc;
}
static void bnx2x_init_port_minmax(struct bnx2x *bp)
{
	u32 r_param = bp->link_vars.line_speed / 8;
	u32 fair_periodic_timeout_usec;
	u32 t_fair;

	memset(&(bp->cmng.rs_vars), 0,
	       sizeof(struct rate_shaping_vars_per_port));
	memset(&(bp->cmng.fair_vars), 0, sizeof(struct fairness_vars_per_port));

	/* 100 usec in SDM ticks = 25 since each tick is 4 usec */
	bp->cmng.rs_vars.rs_periodic_timeout = RS_PERIODIC_TIMEOUT_USEC / 4;

	/* this is the threshold below which no timer arming will occur
	   1.25 coefficient is for the threshold to be a little bigger
	   than the real time, to compensate for timer inaccuracy */
	bp->cmng.rs_vars.rs_threshold =
				(RS_PERIODIC_TIMEOUT_USEC * r_param * 5) / 4;

	/* resolution of fairness timer */
	fair_periodic_timeout_usec = QM_ARB_BYTES / r_param;
	/* for 10G it is 1000usec. for 1G it is 10000usec. */
	t_fair = T_FAIR_COEF / bp->link_vars.line_speed;

	/* this is the threshold below which we won't arm the timer anymore */
	bp->cmng.fair_vars.fair_threshold = QM_ARB_BYTES;

	/* we multiply by 1e3/8 to get bytes/msec.
	   We don't want the credits to pass a credit
	   of the t_fair*FAIR_MEM (algorithm resolution) */
	bp->cmng.fair_vars.upper_bound = r_param * t_fair * FAIR_MEM;
	/* since each tick is 4 usec */
	bp->cmng.fair_vars.fairness_timeout = fair_periodic_timeout_usec / 4;
}
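
/* Worked example, assuming a 10G link (line_speed = 10000 Mbps):
   r_param = 10000/8 = 1250 bytes/usec, so t_fair = T_FAIR_COEF/10000
   yields the 1000 usec fairness period quoted above, and the rate-shaping
   threshold becomes RS_PERIODIC_TIMEOUT_USEC * 1250 * 1.25 bytes. */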
2201 static void bnx2x_init_vn_minmax(struct bnx2x *bp, int func)
2203 struct rate_shaping_vars_per_vn m_rs_vn;
2204 struct fairness_vars_per_vn m_fair_vn;
2205 u32 vn_cfg = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
2206 u16 vn_min_rate, vn_max_rate;
2209 /* If function is hidden - set min and max to zeroes */
2210 if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) {
2215 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
2216 FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
2217 /* If fairness is enabled (not all min rates are zeroes) and
2218 if current min rate is zero - set it to 1.
2219 This is a requirement of the algorithm. */
2220 if (bp->vn_weight_sum && (vn_min_rate == 0))
2221 vn_min_rate = DEF_MIN_RATE;
2222 vn_max_rate = ((vn_cfg & FUNC_MF_CFG_MAX_BW_MASK) >>
2223 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
2227 "func %d: vn_min_rate=%d vn_max_rate=%d vn_weight_sum=%d\n",
2228 func, vn_min_rate, vn_max_rate, bp->vn_weight_sum);
2230 memset(&m_rs_vn, 0, sizeof(struct rate_shaping_vars_per_vn));
2231 memset(&m_fair_vn, 0, sizeof(struct fairness_vars_per_vn));
2233 /* global vn counter - maximal Mbps for this vn */
2234 m_rs_vn.vn_counter.rate = vn_max_rate;
2236 /* quota - number of bytes transmitted in this period */
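/* vn_max_rate is in Mbps and the period is in usec, so
Mbit/s * usec / 8 yields bytes per period */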
2237 m_rs_vn.vn_counter.quota =
2238 (vn_max_rate * RS_PERIODIC_TIMEOUT_USEC) / 8;
2240 if (bp->vn_weight_sum) {
2241 /* credit for each period of the fairness algorithm:
2242 number of bytes in T_FAIR (the vns share the port rate).
2243 vn_weight_sum should not be larger than 10000, thus
2244 T_FAIR_COEF / (8 * vn_weight_sum) will always be greater
than zero */
2246 m_fair_vn.vn_credit_delta =
2247 max((u32)(vn_min_rate * (T_FAIR_COEF /
2248 (8 * bp->vn_weight_sum))),
2249 (u32)(bp->cmng.fair_vars.fair_threshold * 2));
2250 DP(NETIF_MSG_IFUP, "m_fair_vn.vn_credit_delta=%d\n",
2251 m_fair_vn.vn_credit_delta);
2254 /* Store it to internal memory */
2255 for (i = 0; i < sizeof(struct rate_shaping_vars_per_vn)/4; i++)
2256 REG_WR(bp, BAR_XSTRORM_INTMEM +
2257 XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func) + i * 4,
2258 ((u32 *)(&m_rs_vn))[i]);
2260 for (i = 0; i < sizeof(struct fairness_vars_per_vn)/4; i++)
2261 REG_WR(bp, BAR_XSTRORM_INTMEM +
2262 XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func) + i * 4,
2263 ((u32 *)(&m_fair_vn))[i]);
2267 /* This function is called upon link interrupt */
2268 static void bnx2x_link_attn(struct bnx2x *bp)
2270 /* Make sure that we are synced with the current statistics */
2271 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2273 bnx2x_link_update(&bp->link_params, &bp->link_vars);
2275 if (bp->link_vars.link_up) {
2277 /* dropless flow control */
2278 if (CHIP_IS_E1H(bp)) {
2279 int port = BP_PORT(bp);
2280 u32 pause_enabled = 0;
2282 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
2285 REG_WR(bp, BAR_USTRORM_INTMEM +
2286 USTORM_PAUSE_ENABLED_OFFSET(port),
2290 if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
2291 struct host_port_stats *pstats;
2293 pstats = bnx2x_sp(bp, port_stats);
2294 /* reset old bmac stats */
2295 memset(&(pstats->mac_stx[0]), 0,
2296 sizeof(struct mac_stx));
2298 if ((bp->state == BNX2X_STATE_OPEN) ||
2299 (bp->state == BNX2X_STATE_DISABLED))
2300 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2303 /* indicate link status */
2304 bnx2x_link_report(bp);
2307 int port = BP_PORT(bp);
2311 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
2312 if (vn == BP_E1HVN(bp))
2315 func = ((vn << 1) | port);
2317 /* Set the attention towards other drivers on the same port */
2319 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
2320 (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
2323 if (bp->link_vars.link_up) {
2326 /* Init rate shaping and fairness contexts */
2327 bnx2x_init_port_minmax(bp);
2329 for (vn = VN_0; vn < E1HVN_MAX; vn++)
2330 bnx2x_init_vn_minmax(bp, 2*vn + port);
2332 /* Store it to internal memory */
2334 i < sizeof(struct cmng_struct_per_port) / 4; i++)
2335 REG_WR(bp, BAR_XSTRORM_INTMEM +
2336 XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i*4,
2337 ((u32 *)(&bp->cmng))[i]);
2342 static void bnx2x__link_status_update(struct bnx2x *bp)
2344 if (bp->state != BNX2X_STATE_OPEN)
2347 bnx2x_link_status_update(&bp->link_params, &bp->link_vars);
2349 if (bp->link_vars.link_up)
2350 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2352 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2354 /* indicate link status */
2355 bnx2x_link_report(bp);
2358 static void bnx2x_pmf_update(struct bnx2x *bp)
2360 int port = BP_PORT(bp);
2364 DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
2366 /* enable nig attention */
2367 val = (0xff0f | (1 << (BP_E1HVN(bp) + 4)));
2368 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
2369 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
2371 bnx2x_stats_handle(bp, STATS_EVENT_PMF);
2379 * General service functions
2382 /* the slow path queue is odd since completions arrive on the fastpath ring */
2383 static int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
2384 u32 data_hi, u32 data_lo, int common)
2386 int func = BP_FUNC(bp);
2388 DP(BNX2X_MSG_SP/*NETIF_MSG_TIMER*/,
2389 "SPQE (%x:%x) command %d hw_cid %x data (%x:%x) left %x\n",
2390 (u32)U64_HI(bp->spq_mapping), (u32)(U64_LO(bp->spq_mapping) +
2391 (void *)bp->spq_prod_bd - (void *)bp->spq), command,
2392 HW_CID(bp, cid), data_hi, data_lo, bp->spq_left);
2394 #ifdef BNX2X_STOP_ON_ERROR
2395 if (unlikely(bp->panic))
2399 spin_lock_bh(&bp->spq_lock);
2401 if (!bp->spq_left) {
2402 BNX2X_ERR("BUG! SPQ ring full!\n");
2403 spin_unlock_bh(&bp->spq_lock);
2408 /* CID needs the port number to be encoded in it */
2409 bp->spq_prod_bd->hdr.conn_and_cmd_data =
2410 cpu_to_le32(((command << SPE_HDR_CMD_ID_SHIFT) |
2412 bp->spq_prod_bd->hdr.type = cpu_to_le16(ETH_CONNECTION_TYPE);
2414 bp->spq_prod_bd->hdr.type |=
2415 cpu_to_le16((1 << SPE_HDR_COMMON_RAMROD_SHIFT));
2417 bp->spq_prod_bd->data.mac_config_addr.hi = cpu_to_le32(data_hi);
2418 bp->spq_prod_bd->data.mac_config_addr.lo = cpu_to_le32(data_lo);
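/* advance the producer; wrap back to the start of the ring
once the last BD has been used */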
2422 if (bp->spq_prod_bd == bp->spq_last_bd) {
2423 bp->spq_prod_bd = bp->spq;
2424 bp->spq_prod_idx = 0;
2425 DP(NETIF_MSG_TIMER, "end of spq\n");
2432 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func),
2435 spin_unlock_bh(&bp->spq_lock);
2439 /* acquire split MCP access lock register */
2440 static int bnx2x_acquire_alr(struct bnx2x *bp)
2447 for (j = 0; j < i*10; j++) {
2449 REG_WR(bp, GRCBASE_MCP + 0x9c, val);
2450 val = REG_RD(bp, GRCBASE_MCP + 0x9c);
2451 if (val & (1L << 31))
2456 if (!(val & (1L << 31))) {
2457 BNX2X_ERR("Cannot acquire MCP access lock register\n");
2464 /* release split MCP access lock register */
2465 static void bnx2x_release_alr(struct bnx2x *bp)
2469 REG_WR(bp, GRCBASE_MCP + 0x9c, val);
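/* refresh the cached default status block indices from the chip's
last write-back; the returned mask tells the caller which storm
indices changed and therefore need acking */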
2472 static inline u16 bnx2x_update_dsb_idx(struct bnx2x *bp)
2474 struct host_def_status_block *def_sb = bp->def_status_blk;
2477 barrier(); /* status block is written to by the chip */
2478 if (bp->def_att_idx != def_sb->atten_status_block.attn_bits_index) {
2479 bp->def_att_idx = def_sb->atten_status_block.attn_bits_index;
2482 if (bp->def_c_idx != def_sb->c_def_status_block.status_block_index) {
2483 bp->def_c_idx = def_sb->c_def_status_block.status_block_index;
2486 if (bp->def_u_idx != def_sb->u_def_status_block.status_block_index) {
2487 bp->def_u_idx = def_sb->u_def_status_block.status_block_index;
2490 if (bp->def_x_idx != def_sb->x_def_status_block.status_block_index) {
2491 bp->def_x_idx = def_sb->x_def_status_block.status_block_index;
2494 if (bp->def_t_idx != def_sb->t_def_status_block.status_block_index) {
2495 bp->def_t_idx = def_sb->t_def_status_block.status_block_index;
2502 * slow path service functions
2505 static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
2507 int port = BP_PORT(bp);
2508 u32 hc_addr = (HC_REG_COMMAND_REG + port*32 +
2509 COMMAND_REG_ATTN_BITS_SET);
2510 u32 aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
2511 MISC_REG_AEU_MASK_ATTN_FUNC_0;
2512 u32 nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 :
2513 NIG_REG_MASK_INTERRUPT_PORT0;
2517 if (bp->attn_state & asserted)
2518 BNX2X_ERR("IGU ERROR\n");
2520 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2521 aeu_mask = REG_RD(bp, aeu_addr);
2523 DP(NETIF_MSG_HW, "aeu_mask %x newly asserted %x\n",
2524 aeu_mask, asserted);
2525 aeu_mask &= ~(asserted & 0xff);
2526 DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
2528 REG_WR(bp, aeu_addr, aeu_mask);
2529 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2531 DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
2532 bp->attn_state |= asserted;
2533 DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
2535 if (asserted & ATTN_HARD_WIRED_MASK) {
2536 if (asserted & ATTN_NIG_FOR_FUNC) {
2538 bnx2x_acquire_phy_lock(bp);
2540 /* save nig interrupt mask */
2541 nig_mask = REG_RD(bp, nig_int_mask_addr);
2542 REG_WR(bp, nig_int_mask_addr, 0);
2544 bnx2x_link_attn(bp);
2546 /* handle unicore attn? */
2548 if (asserted & ATTN_SW_TIMER_4_FUNC)
2549 DP(NETIF_MSG_HW, "ATTN_SW_TIMER_4_FUNC!\n");
2551 if (asserted & GPIO_2_FUNC)
2552 DP(NETIF_MSG_HW, "GPIO_2_FUNC!\n");
2554 if (asserted & GPIO_3_FUNC)
2555 DP(NETIF_MSG_HW, "GPIO_3_FUNC!\n");
2557 if (asserted & GPIO_4_FUNC)
2558 DP(NETIF_MSG_HW, "GPIO_4_FUNC!\n");
2561 if (asserted & ATTN_GENERAL_ATTN_1) {
2562 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_1!\n");
2563 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_1, 0x0);
2565 if (asserted & ATTN_GENERAL_ATTN_2) {
2566 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_2!\n");
2567 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_2, 0x0);
2569 if (asserted & ATTN_GENERAL_ATTN_3) {
2570 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_3!\n");
2571 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_3, 0x0);
2574 if (asserted & ATTN_GENERAL_ATTN_4) {
2575 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_4!\n");
2576 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_4, 0x0);
2578 if (asserted & ATTN_GENERAL_ATTN_5) {
2579 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_5!\n");
2580 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_5, 0x0);
2582 if (asserted & ATTN_GENERAL_ATTN_6) {
2583 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_6!\n");
2584 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_6, 0x0);
2588 } /* if hardwired */
2590 DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
2592 REG_WR(bp, hc_addr, asserted);
2594 /* now set back the mask */
2595 if (asserted & ATTN_NIG_FOR_FUNC) {
2596 REG_WR(bp, nig_int_mask_addr, nig_mask);
2597 bnx2x_release_phy_lock(bp);
2601 static inline void bnx2x_fan_failure(struct bnx2x *bp)
2603 int port = BP_PORT(bp);
2605 /* mark the failure */
2606 bp->link_params.ext_phy_config &= ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
2607 bp->link_params.ext_phy_config |= PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE;
2608 SHMEM_WR(bp, dev_info.port_hw_config[port].external_phy_config,
2609 bp->link_params.ext_phy_config);
2611 /* log the failure */
2612 printk(KERN_ERR PFX "Fan Failure on Network Controller %s has caused"
2613 " the driver to shutdown the card to prevent permanent"
2614 " damage. Please contact Dell Support for assistance\n",
2617 static inline void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
2619 int port = BP_PORT(bp);
2623 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
2624 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
2626 if (attn & AEU_INPUTS_ATTN_BITS_SPIO5) {
2628 val = REG_RD(bp, reg_offset);
2629 val &= ~AEU_INPUTS_ATTN_BITS_SPIO5;
2630 REG_WR(bp, reg_offset, val);
2632 BNX2X_ERR("SPIO5 hw attention\n");
2634 /* Fan failure attention */
2635 switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
2636 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
2637 /* Low power mode is controlled by GPIO 2 */
2638 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
2639 MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
2640 /* The PHY reset is controlled by GPIO 1 */
2641 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
2642 MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
2648 bnx2x_fan_failure(bp);
2651 if (attn & (AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 |
2652 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1)) {
2653 bnx2x_acquire_phy_lock(bp);
2654 bnx2x_handle_module_detect_int(&bp->link_params);
2655 bnx2x_release_phy_lock(bp);
2658 if (attn & HW_INTERRUT_ASSERT_SET_0) {
2660 val = REG_RD(bp, reg_offset);
2661 val &= ~(attn & HW_INTERRUT_ASSERT_SET_0);
2662 REG_WR(bp, reg_offset, val);
2664 BNX2X_ERR("FATAL HW block attention set0 0x%x\n",
2665 (attn & HW_INTERRUT_ASSERT_SET_0));
2670 static inline void bnx2x_attn_int_deasserted1(struct bnx2x *bp, u32 attn)
2674 if (attn & AEU_INPUTS_ATTN_BITS_DOORBELLQ_HW_INTERRUPT) {
2676 val = REG_RD(bp, DORQ_REG_DORQ_INT_STS_CLR);
2677 BNX2X_ERR("DB hw attention 0x%x\n", val);
2678 /* DORQ discard attention */
2680 BNX2X_ERR("FATAL error from DORQ\n");
2683 if (attn & HW_INTERRUT_ASSERT_SET_1) {
2685 int port = BP_PORT(bp);
2688 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 :
2689 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1);
2691 val = REG_RD(bp, reg_offset);
2692 val &= ~(attn & HW_INTERRUT_ASSERT_SET_1);
2693 REG_WR(bp, reg_offset, val);
2695 BNX2X_ERR("FATAL HW block attention set1 0x%x\n",
2696 (attn & HW_INTERRUT_ASSERT_SET_1));
2701 static inline void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn)
2705 if (attn & AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) {
2707 val = REG_RD(bp, CFC_REG_CFC_INT_STS_CLR);
2708 BNX2X_ERR("CFC hw attention 0x%x\n", val);
2709 /* CFC error attention */
2711 BNX2X_ERR("FATAL error from CFC\n");
2714 if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) {
2716 val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_0);
2717 BNX2X_ERR("PXP hw attention 0x%x\n", val);
2718 /* RQ_USDMDP_FIFO_OVERFLOW */
2720 BNX2X_ERR("FATAL error from PXP\n");
2723 if (attn & HW_INTERRUT_ASSERT_SET_2) {
2725 int port = BP_PORT(bp);
2728 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 :
2729 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2);
2731 val = REG_RD(bp, reg_offset);
2732 val &= ~(attn & HW_INTERRUT_ASSERT_SET_2);
2733 REG_WR(bp, reg_offset, val);
2735 BNX2X_ERR("FATAL HW block attention set2 0x%x\n",
2736 (attn & HW_INTERRUT_ASSERT_SET_2));
2741 static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
2745 if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) {
2747 if (attn & BNX2X_PMF_LINK_ASSERT) {
2748 int func = BP_FUNC(bp);
2750 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
2751 bnx2x__link_status_update(bp);
2752 if (SHMEM_RD(bp, func_mb[func].drv_status) &
2754 bnx2x_pmf_update(bp);
2756 } else if (attn & BNX2X_MC_ASSERT_BITS) {
2758 BNX2X_ERR("MC assert!\n");
2759 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_10, 0);
2760 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_9, 0);
2761 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_8, 0);
2762 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_7, 0);
2765 } else if (attn & BNX2X_MCP_ASSERT) {
2767 BNX2X_ERR("MCP assert!\n");
2768 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_11, 0);
2772 BNX2X_ERR("Unknown HW assert! (attn 0x%x)\n", attn);
2775 if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) {
2776 BNX2X_ERR("LATCHED attention 0x%08x (masked)\n", attn);
2777 if (attn & BNX2X_GRC_TIMEOUT) {
2778 val = CHIP_IS_E1H(bp) ?
2779 REG_RD(bp, MISC_REG_GRC_TIMEOUT_ATTN) : 0;
2780 BNX2X_ERR("GRC time-out 0x%08x\n", val);
2782 if (attn & BNX2X_GRC_RSV) {
2783 val = CHIP_IS_E1H(bp) ?
2784 REG_RD(bp, MISC_REG_GRC_RSV_ATTN) : 0;
2785 BNX2X_ERR("GRC reserved 0x%08x\n", val);
2787 REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff);
2791 static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
2793 struct attn_route attn;
2794 struct attn_route group_mask;
2795 int port = BP_PORT(bp);
2801 /* need to take the HW lock because the MCP or the other port
2802 might also try to handle this event */
2803 bnx2x_acquire_alr(bp);
2805 attn.sig[0] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
2806 attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
2807 attn.sig[2] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
2808 attn.sig[3] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
2809 DP(NETIF_MSG_HW, "attn: %08x %08x %08x %08x\n",
2810 attn.sig[0], attn.sig[1], attn.sig[2], attn.sig[3]);
2812 for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
2813 if (deasserted & (1 << index)) {
2814 group_mask = bp->attn_group[index];
2816 DP(NETIF_MSG_HW, "group[%d]: %08x %08x %08x %08x\n",
2817 index, group_mask.sig[0], group_mask.sig[1],
2818 group_mask.sig[2], group_mask.sig[3]);
2820 bnx2x_attn_int_deasserted3(bp,
2821 attn.sig[3] & group_mask.sig[3]);
2822 bnx2x_attn_int_deasserted1(bp,
2823 attn.sig[1] & group_mask.sig[1]);
2824 bnx2x_attn_int_deasserted2(bp,
2825 attn.sig[2] & group_mask.sig[2]);
2826 bnx2x_attn_int_deasserted0(bp,
2827 attn.sig[0] & group_mask.sig[0]);
2829 if ((attn.sig[0] & group_mask.sig[0] &
2830 HW_PRTY_ASSERT_SET_0) ||
2831 (attn.sig[1] & group_mask.sig[1] &
2832 HW_PRTY_ASSERT_SET_1) ||
2833 (attn.sig[2] & group_mask.sig[2] &
2834 HW_PRTY_ASSERT_SET_2))
2835 BNX2X_ERR("FATAL HW block parity attention\n");
2839 bnx2x_release_alr(bp);
2841 reg_addr = (HC_REG_COMMAND_REG + port*32 + COMMAND_REG_ATTN_BITS_CLR);
2844 DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
2846 REG_WR(bp, reg_addr, val);
2848 if (~bp->attn_state & deasserted)
2849 BNX2X_ERR("IGU ERROR\n");
2851 reg_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
2852 MISC_REG_AEU_MASK_ATTN_FUNC_0;
2854 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2855 aeu_mask = REG_RD(bp, reg_addr);
2857 DP(NETIF_MSG_HW, "aeu_mask %x newly deasserted %x\n",
2858 aeu_mask, deasserted);
2859 aeu_mask |= (deasserted & 0xff);
2860 DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
2862 REG_WR(bp, reg_addr, aeu_mask);
2863 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2865 DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
2866 bp->attn_state &= ~deasserted;
2867 DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
2870 static void bnx2x_attn_int(struct bnx2x *bp)
2872 /* read local copy of bits */
2873 u32 attn_bits = le32_to_cpu(bp->def_status_blk->atten_status_block.
2875 u32 attn_ack = le32_to_cpu(bp->def_status_blk->atten_status_block.
2877 u32 attn_state = bp->attn_state;
2879 /* look for changed bits */
2880 u32 asserted = attn_bits & ~attn_ack & ~attn_state;
2881 u32 deasserted = ~attn_bits & attn_ack & attn_state;
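/* a bit is newly asserted when set in attn_bits but not yet in the
ack/state; it is deasserted when cleared in attn_bits while still
reflected in both */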
2884 "attn_bits %x attn_ack %x asserted %x deasserted %x\n",
2885 attn_bits, attn_ack, asserted, deasserted);
2887 if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state))
2888 BNX2X_ERR("BAD attention state\n");
2890 /* handle bits that were raised */
2892 bnx2x_attn_int_asserted(bp, asserted);
2895 bnx2x_attn_int_deasserted(bp, deasserted);
2898 static void bnx2x_sp_task(struct work_struct *work)
2900 struct bnx2x *bp = container_of(work, struct bnx2x, sp_task.work);
2904 /* Return here if interrupt is disabled */
2905 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
2906 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
2910 status = bnx2x_update_dsb_idx(bp);
2911 /* if (status == 0) */
2912 /* BNX2X_ERR("spurious slowpath interrupt!\n"); */
2914 DP(NETIF_MSG_INTR, "got a slowpath interrupt (updated %x)\n", status);
2920 bnx2x_ack_sb(bp, DEF_SB_ID, ATTENTION_ID, le16_to_cpu(bp->def_att_idx),
2922 bnx2x_ack_sb(bp, DEF_SB_ID, USTORM_ID, le16_to_cpu(bp->def_u_idx),
2924 bnx2x_ack_sb(bp, DEF_SB_ID, CSTORM_ID, le16_to_cpu(bp->def_c_idx),
2926 bnx2x_ack_sb(bp, DEF_SB_ID, XSTORM_ID, le16_to_cpu(bp->def_x_idx),
2928 bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, le16_to_cpu(bp->def_t_idx),
2933 static irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
2935 struct net_device *dev = dev_instance;
2936 struct bnx2x *bp = netdev_priv(dev);
2938 /* Return here if interrupt is disabled */
2939 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
2940 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
2944 bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, 0, IGU_INT_DISABLE, 0);
2946 #ifdef BNX2X_STOP_ON_ERROR
2947 if (unlikely(bp->panic))
2951 queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
2956 /* end of slow path */
2960 /****************************************************************************
2962 ****************************************************************************/
2964 /* sum[hi:lo] += add[hi:lo] */
#define ADD_64(s_hi, a_hi, s_lo, a_lo) \
do { \
s_lo += a_lo; \
s_hi += a_hi + ((s_lo < a_lo) ? 1 : 0); /* carry out of the low dword */ \
} while (0)
2971 /* difference = minuend - subtrahend */
#define DIFF_64(d_hi, m_hi, s_hi, d_lo, m_lo, s_lo) \
do { \
if (m_lo < s_lo) { \
/* underflow */ \
d_hi = m_hi - s_hi; \
if (d_hi > 0) { \
/* we can 'loan' 1 */ \
d_hi--; \
d_lo = m_lo + (UINT_MAX - s_lo) + 1; \
} else { \
/* m_hi <= s_hi */ \
d_hi = 0; \
d_lo = 0; \
} \
} else { \
/* m_lo >= s_lo */ \
if (m_hi < s_hi) { \
d_hi = 0; \
d_lo = 0; \
} else { \
/* m_hi >= s_hi */ \
d_hi = m_hi - s_hi; \
d_lo = m_lo - s_lo; \
} \
} \
} while (0)
2999 #define UPDATE_STAT64(s, t) \
3001 DIFF_64(diff.hi, new->s##_hi, pstats->mac_stx[0].t##_hi, \
3002 diff.lo, new->s##_lo, pstats->mac_stx[0].t##_lo); \
3003 pstats->mac_stx[0].t##_hi = new->s##_hi; \
3004 pstats->mac_stx[0].t##_lo = new->s##_lo; \
3005 ADD_64(pstats->mac_stx[1].t##_hi, diff.hi, \
3006 pstats->mac_stx[1].t##_lo, diff.lo); \
3009 #define UPDATE_STAT64_NIG(s, t) \
3011 DIFF_64(diff.hi, new->s##_hi, old->s##_hi, \
3012 diff.lo, new->s##_lo, old->s##_lo); \
3013 ADD_64(estats->t##_hi, diff.hi, \
3014 estats->t##_lo, diff.lo); \
3017 /* sum[hi:lo] += add */
#define ADD_EXTEND_64(s_hi, s_lo, a) \
do { \
s_lo += a; \
s_hi += (s_lo < a) ? 1 : 0; /* carry if the low dword wrapped */ \
} while (0)
3024 #define UPDATE_EXTEND_STAT(s) \
3026 ADD_EXTEND_64(pstats->mac_stx[1].s##_hi, \
3027 pstats->mac_stx[1].s##_lo, \
3031 #define UPDATE_EXTEND_TSTAT(s, t) \
3033 diff = le32_to_cpu(tclient->s) - le32_to_cpu(old_tclient->s); \
3034 old_tclient->s = tclient->s; \
3035 ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3038 #define UPDATE_EXTEND_USTAT(s, t) \
3040 diff = le32_to_cpu(uclient->s) - le32_to_cpu(old_uclient->s); \
3041 old_uclient->s = uclient->s; \
3042 ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3045 #define UPDATE_EXTEND_XSTAT(s, t) \
3047 diff = le32_to_cpu(xclient->s) - le32_to_cpu(old_xclient->s); \
3048 old_xclient->s = xclient->s; \
3049 ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3052 /* minuend -= subtrahend */
#define SUB_64(m_hi, s_hi, m_lo, s_lo) \
do { \
DIFF_64(m_hi, m_hi, s_hi, m_lo, m_lo, s_lo); \
} while (0)
3058 /* minuend[hi:lo] -= subtrahend */
#define SUB_EXTEND_64(m_hi, m_lo, s) \
do { \
SUB_64(m_hi, 0, m_lo, s); \
} while (0)
3064 #define SUB_EXTEND_USTAT(s, t) \
3066 diff = le32_to_cpu(uclient->s) - le32_to_cpu(old_uclient->s); \
3067 SUB_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3071 * General service functions
static inline long bnx2x_hilo(u32 *hiref)
{
u32 lo = *(hiref + 1);
#if (BITS_PER_LONG == 64)
u32 hi = *hiref;

return HILO_U64(hi, lo);
#else
return lo;
#endif
}
3087 * Init service functions
3090 static void bnx2x_storm_stats_post(struct bnx2x *bp)
3092 if (!bp->stats_pending) {
3093 struct eth_query_ramrod_data ramrod_data = {0};
3096 ramrod_data.drv_counter = bp->stats_counter++;
3097 ramrod_data.collect_port = bp->port.pmf ? 1 : 0;
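/* each set bit in ctr_id_vector selects a client (queue) whose
statistics the storms should collect */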
3098 for_each_queue(bp, i)
3099 ramrod_data.ctr_id_vector |= (1 << bp->fp[i].cl_id);
3101 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_STAT_QUERY, 0,
3102 ((u32 *)&ramrod_data)[1],
3103 ((u32 *)&ramrod_data)[0], 0);
3105 /* the stats ramrod has its own slot on the spq */
3107 bp->stats_pending = 1;
3112 static void bnx2x_stats_init(struct bnx2x *bp)
3114 int port = BP_PORT(bp);
3117 bp->stats_pending = 0;
3118 bp->executer_idx = 0;
3119 bp->stats_counter = 0;
3123 bp->port.port_stx = SHMEM_RD(bp, port_mb[port].port_stx);
3125 bp->port.port_stx = 0;
3126 DP(BNX2X_MSG_STATS, "port_stx 0x%x\n", bp->port.port_stx);
3128 memset(&(bp->port.old_nig_stats), 0, sizeof(struct nig_stats));
3129 bp->port.old_nig_stats.brb_discard =
3130 REG_RD(bp, NIG_REG_STAT0_BRB_DISCARD + port*0x38);
3131 bp->port.old_nig_stats.brb_truncate =
3132 REG_RD(bp, NIG_REG_STAT0_BRB_TRUNCATE + port*0x38);
3133 REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT0 + port*0x50,
3134 &(bp->port.old_nig_stats.egress_mac_pkt0_lo), 2);
3135 REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT1 + port*0x50,
3136 &(bp->port.old_nig_stats.egress_mac_pkt1_lo), 2);
3138 /* function stats */
3139 for_each_queue(bp, i) {
3140 struct bnx2x_fastpath *fp = &bp->fp[i];
3142 memset(&fp->old_tclient, 0,
3143 sizeof(struct tstorm_per_client_stats));
3144 memset(&fp->old_uclient, 0,
3145 sizeof(struct ustorm_per_client_stats));
3146 memset(&fp->old_xclient, 0,
3147 sizeof(struct xstorm_per_client_stats));
3148 memset(&fp->eth_q_stats, 0, sizeof(struct bnx2x_eth_q_stats));
3151 memset(&bp->dev->stats, 0, sizeof(struct net_device_stats));
3152 memset(&bp->eth_stats, 0, sizeof(struct bnx2x_eth_stats));
3154 bp->stats_state = STATS_STATE_DISABLED;
3155 if (IS_E1HMF(bp) && bp->port.pmf && bp->port.port_stx)
3156 bnx2x_stats_handle(bp, STATS_EVENT_PMF);
3159 static void bnx2x_hw_stats_post(struct bnx2x *bp)
3161 struct dmae_command *dmae = &bp->stats_dmae;
3162 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3164 *stats_comp = DMAE_COMP_VAL;
3165 if (CHIP_REV_IS_SLOW(bp))
3169 if (bp->executer_idx) {
3170 int loader_idx = PMF_DMAE_C(bp);
3172 memset(dmae, 0, sizeof(struct dmae_command));
3174 dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3175 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3176 DMAE_CMD_DST_RESET |
3178 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3180 DMAE_CMD_ENDIANITY_DW_SWAP |
3182 (BP_PORT(bp) ? DMAE_CMD_PORT_1 :
3184 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3185 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, dmae[0]));
3186 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, dmae[0]));
3187 dmae->dst_addr_lo = (DMAE_REG_CMD_MEM +
3188 sizeof(struct dmae_command) *
3189 (loader_idx + 1)) >> 2;
3190 dmae->dst_addr_hi = 0;
3191 dmae->len = sizeof(struct dmae_command) >> 2;
3194 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx + 1] >> 2;
3195 dmae->comp_addr_hi = 0;
3199 bnx2x_post_dmae(bp, dmae, loader_idx);
3201 } else if (bp->func_stx) {
3203 bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
3207 static int bnx2x_stats_comp(struct bnx2x *bp)
3209 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
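/* the final DMAE command of the statistics chain writes
DMAE_COMP_VAL to this location; poll for it with a bounded wait */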
3213 while (*stats_comp != DMAE_COMP_VAL) {
3215 BNX2X_ERR("timeout waiting for stats finished\n");
3225 * Statistics service functions
3228 static void bnx2x_stats_pmf_update(struct bnx2x *bp)
3230 struct dmae_command *dmae;
3232 int loader_idx = PMF_DMAE_C(bp);
3233 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3236 if (!IS_E1HMF(bp) || !bp->port.pmf || !bp->port.port_stx) {
3237 BNX2X_ERR("BUG!\n");
3241 bp->executer_idx = 0;
3243 opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3245 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3247 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3249 DMAE_CMD_ENDIANITY_DW_SWAP |
3251 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3252 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
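/* the port stats area is larger than a single DMAE read allows,
so fetch it in two chunks: DMAE_LEN32_RD_MAX dwords first, then
the remainder */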
3254 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3255 dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
3256 dmae->src_addr_lo = bp->port.port_stx >> 2;
3257 dmae->src_addr_hi = 0;
3258 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3259 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3260 dmae->len = DMAE_LEN32_RD_MAX;
3261 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3262 dmae->comp_addr_hi = 0;
3265 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3266 dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
3267 dmae->src_addr_lo = (bp->port.port_stx >> 2) + DMAE_LEN32_RD_MAX;
3268 dmae->src_addr_hi = 0;
3269 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats) +
3270 DMAE_LEN32_RD_MAX * 4);
3271 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats) +
3272 DMAE_LEN32_RD_MAX * 4);
3273 dmae->len = (sizeof(struct host_port_stats) >> 2) - DMAE_LEN32_RD_MAX;
3274 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3275 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3276 dmae->comp_val = DMAE_COMP_VAL;
3279 bnx2x_hw_stats_post(bp);
3280 bnx2x_stats_comp(bp);
3283 static void bnx2x_port_stats_init(struct bnx2x *bp)
3285 struct dmae_command *dmae;
3286 int port = BP_PORT(bp);
3287 int vn = BP_E1HVN(bp);
3289 int loader_idx = PMF_DMAE_C(bp);
3291 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3294 if (!bp->link_vars.link_up || !bp->port.pmf) {
3295 BNX2X_ERR("BUG!\n");
3299 bp->executer_idx = 0;
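/* queue three groups of DMAE commands: host -> GRC copies of the
port and function stats, GRC -> host copies of the active MAC
(BMAC or EMAC) counters, and GRC -> host copies of the NIG
counters; only the very last command signals completion */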
3302 opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3303 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3304 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3306 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3308 DMAE_CMD_ENDIANITY_DW_SWAP |
3310 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3311 (vn << DMAE_CMD_E1HVN_SHIFT));
3313 if (bp->port.port_stx) {
3315 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3316 dmae->opcode = opcode;
3317 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3318 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3319 dmae->dst_addr_lo = bp->port.port_stx >> 2;
3320 dmae->dst_addr_hi = 0;
3321 dmae->len = sizeof(struct host_port_stats) >> 2;
3322 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3323 dmae->comp_addr_hi = 0;
3329 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3330 dmae->opcode = opcode;
3331 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
3332 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
3333 dmae->dst_addr_lo = bp->func_stx >> 2;
3334 dmae->dst_addr_hi = 0;
3335 dmae->len = sizeof(struct host_func_stats) >> 2;
3336 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3337 dmae->comp_addr_hi = 0;
3342 opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3343 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3344 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3346 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3348 DMAE_CMD_ENDIANITY_DW_SWAP |
3350 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3351 (vn << DMAE_CMD_E1HVN_SHIFT));
3353 if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
3355 mac_addr = (port ? NIG_REG_INGRESS_BMAC1_MEM :
3356 NIG_REG_INGRESS_BMAC0_MEM);
3358 /* BIGMAC_REGISTER_TX_STAT_GTPKT ..
3359 BIGMAC_REGISTER_TX_STAT_GTBYT */
3360 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3361 dmae->opcode = opcode;
3362 dmae->src_addr_lo = (mac_addr +
3363 BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
3364 dmae->src_addr_hi = 0;
3365 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
3366 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
3367 dmae->len = (8 + BIGMAC_REGISTER_TX_STAT_GTBYT -
3368 BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
3369 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3370 dmae->comp_addr_hi = 0;
3373 /* BIGMAC_REGISTER_RX_STAT_GR64 ..
3374 BIGMAC_REGISTER_RX_STAT_GRIPJ */
3375 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3376 dmae->opcode = opcode;
3377 dmae->src_addr_lo = (mac_addr +
3378 BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
3379 dmae->src_addr_hi = 0;
3380 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
3381 offsetof(struct bmac_stats, rx_stat_gr64_lo));
3382 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
3383 offsetof(struct bmac_stats, rx_stat_gr64_lo));
3384 dmae->len = (8 + BIGMAC_REGISTER_RX_STAT_GRIPJ -
3385 BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
3386 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3387 dmae->comp_addr_hi = 0;
3390 } else if (bp->link_vars.mac_type == MAC_TYPE_EMAC) {
3392 mac_addr = (port ? GRCBASE_EMAC1 : GRCBASE_EMAC0);
3394 /* EMAC_REG_EMAC_RX_STAT_AC (EMAC_REG_EMAC_RX_STAT_AC_COUNT)*/
3395 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3396 dmae->opcode = opcode;
3397 dmae->src_addr_lo = (mac_addr +
3398 EMAC_REG_EMAC_RX_STAT_AC) >> 2;
3399 dmae->src_addr_hi = 0;
3400 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
3401 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
3402 dmae->len = EMAC_REG_EMAC_RX_STAT_AC_COUNT;
3403 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3404 dmae->comp_addr_hi = 0;
3407 /* EMAC_REG_EMAC_RX_STAT_AC_28 */
3408 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3409 dmae->opcode = opcode;
3410 dmae->src_addr_lo = (mac_addr +
3411 EMAC_REG_EMAC_RX_STAT_AC_28) >> 2;
3412 dmae->src_addr_hi = 0;
3413 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
3414 offsetof(struct emac_stats, rx_stat_falsecarriererrors));
3415 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
3416 offsetof(struct emac_stats, rx_stat_falsecarriererrors));
3418 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3419 dmae->comp_addr_hi = 0;
3422 /* EMAC_REG_EMAC_TX_STAT_AC (EMAC_REG_EMAC_TX_STAT_AC_COUNT)*/
3423 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3424 dmae->opcode = opcode;
3425 dmae->src_addr_lo = (mac_addr +
3426 EMAC_REG_EMAC_TX_STAT_AC) >> 2;
3427 dmae->src_addr_hi = 0;
3428 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
3429 offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
3430 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
3431 offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
3432 dmae->len = EMAC_REG_EMAC_TX_STAT_AC_COUNT;
3433 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3434 dmae->comp_addr_hi = 0;
3439 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3440 dmae->opcode = opcode;
3441 dmae->src_addr_lo = (port ? NIG_REG_STAT1_BRB_DISCARD :
3442 NIG_REG_STAT0_BRB_DISCARD) >> 2;
3443 dmae->src_addr_hi = 0;
3444 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats));
3445 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats));
3446 dmae->len = (sizeof(struct nig_stats) - 4*sizeof(u32)) >> 2;
3447 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3448 dmae->comp_addr_hi = 0;
3451 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3452 dmae->opcode = opcode;
3453 dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT0 :
3454 NIG_REG_STAT0_EGRESS_MAC_PKT0) >> 2;
3455 dmae->src_addr_hi = 0;
3456 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
3457 offsetof(struct nig_stats, egress_mac_pkt0_lo));
3458 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
3459 offsetof(struct nig_stats, egress_mac_pkt0_lo));
3460 dmae->len = (2*sizeof(u32)) >> 2;
3461 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3462 dmae->comp_addr_hi = 0;
3465 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3466 dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3467 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
3468 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3470 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3472 DMAE_CMD_ENDIANITY_DW_SWAP |
3474 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3475 (vn << DMAE_CMD_E1HVN_SHIFT));
3476 dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT1 :
3477 NIG_REG_STAT0_EGRESS_MAC_PKT1) >> 2;
3478 dmae->src_addr_hi = 0;
3479 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
3480 offsetof(struct nig_stats, egress_mac_pkt1_lo));
3481 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
3482 offsetof(struct nig_stats, egress_mac_pkt1_lo));
3483 dmae->len = (2*sizeof(u32)) >> 2;
3484 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3485 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3486 dmae->comp_val = DMAE_COMP_VAL;
3491 static void bnx2x_func_stats_init(struct bnx2x *bp)
3493 struct dmae_command *dmae = &bp->stats_dmae;
3494 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3497 if (!bp->func_stx) {
3498 BNX2X_ERR("BUG!\n");
3502 bp->executer_idx = 0;
3503 memset(dmae, 0, sizeof(struct dmae_command));
3505 dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3506 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
3507 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3509 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3511 DMAE_CMD_ENDIANITY_DW_SWAP |
3513 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3514 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3515 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
3516 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
3517 dmae->dst_addr_lo = bp->func_stx >> 2;
3518 dmae->dst_addr_hi = 0;
3519 dmae->len = sizeof(struct host_func_stats) >> 2;
3520 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3521 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3522 dmae->comp_val = DMAE_COMP_VAL;
3527 static void bnx2x_stats_start(struct bnx2x *bp)
3530 bnx2x_port_stats_init(bp);
3532 else if (bp->func_stx)
3533 bnx2x_func_stats_init(bp);
3535 bnx2x_hw_stats_post(bp);
3536 bnx2x_storm_stats_post(bp);
3539 static void bnx2x_stats_pmf_start(struct bnx2x *bp)
3541 bnx2x_stats_comp(bp);
3542 bnx2x_stats_pmf_update(bp);
3543 bnx2x_stats_start(bp);
3546 static void bnx2x_stats_restart(struct bnx2x *bp)
3548 bnx2x_stats_comp(bp);
3549 bnx2x_stats_start(bp);
3552 static void bnx2x_bmac_stats_update(struct bnx2x *bp)
3554 struct bmac_stats *new = bnx2x_sp(bp, mac_stats.bmac_stats);
3555 struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3556 struct bnx2x_eth_stats *estats = &bp->eth_stats;
3562 UPDATE_STAT64(rx_stat_grerb, rx_stat_ifhcinbadoctets);
3563 UPDATE_STAT64(rx_stat_grfcs, rx_stat_dot3statsfcserrors);
3564 UPDATE_STAT64(rx_stat_grund, rx_stat_etherstatsundersizepkts);
3565 UPDATE_STAT64(rx_stat_grovr, rx_stat_dot3statsframestoolong);
3566 UPDATE_STAT64(rx_stat_grfrg, rx_stat_etherstatsfragments);
3567 UPDATE_STAT64(rx_stat_grjbr, rx_stat_etherstatsjabbers);
3568 UPDATE_STAT64(rx_stat_grxcf, rx_stat_maccontrolframesreceived);
3569 UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffstateentered);
3570 UPDATE_STAT64(rx_stat_grxpf, rx_stat_bmac_xpf);
3571 UPDATE_STAT64(tx_stat_gtxpf, tx_stat_outxoffsent);
3572 UPDATE_STAT64(tx_stat_gtxpf, tx_stat_flowcontroldone);
3573 UPDATE_STAT64(tx_stat_gt64, tx_stat_etherstatspkts64octets);
3574 UPDATE_STAT64(tx_stat_gt127,
3575 tx_stat_etherstatspkts65octetsto127octets);
3576 UPDATE_STAT64(tx_stat_gt255,
3577 tx_stat_etherstatspkts128octetsto255octets);
3578 UPDATE_STAT64(tx_stat_gt511,
3579 tx_stat_etherstatspkts256octetsto511octets);
3580 UPDATE_STAT64(tx_stat_gt1023,
3581 tx_stat_etherstatspkts512octetsto1023octets);
3582 UPDATE_STAT64(tx_stat_gt1518,
3583 tx_stat_etherstatspkts1024octetsto1522octets);
3584 UPDATE_STAT64(tx_stat_gt2047, tx_stat_bmac_2047);
3585 UPDATE_STAT64(tx_stat_gt4095, tx_stat_bmac_4095);
3586 UPDATE_STAT64(tx_stat_gt9216, tx_stat_bmac_9216);
3587 UPDATE_STAT64(tx_stat_gt16383, tx_stat_bmac_16383);
3588 UPDATE_STAT64(tx_stat_gterr,
3589 tx_stat_dot3statsinternalmactransmiterrors);
3590 UPDATE_STAT64(tx_stat_gtufl, tx_stat_bmac_ufl);
3592 estats->pause_frames_received_hi =
3593 pstats->mac_stx[1].rx_stat_bmac_xpf_hi;
3594 estats->pause_frames_received_lo =
3595 pstats->mac_stx[1].rx_stat_bmac_xpf_lo;
3597 estats->pause_frames_sent_hi =
3598 pstats->mac_stx[1].tx_stat_outxoffsent_hi;
3599 estats->pause_frames_sent_lo =
3600 pstats->mac_stx[1].tx_stat_outxoffsent_lo;
3603 static void bnx2x_emac_stats_update(struct bnx2x *bp)
3605 struct emac_stats *new = bnx2x_sp(bp, mac_stats.emac_stats);
3606 struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3607 struct bnx2x_eth_stats *estats = &bp->eth_stats;
3609 UPDATE_EXTEND_STAT(rx_stat_ifhcinbadoctets);
3610 UPDATE_EXTEND_STAT(tx_stat_ifhcoutbadoctets);
3611 UPDATE_EXTEND_STAT(rx_stat_dot3statsfcserrors);
3612 UPDATE_EXTEND_STAT(rx_stat_dot3statsalignmenterrors);
3613 UPDATE_EXTEND_STAT(rx_stat_dot3statscarriersenseerrors);
3614 UPDATE_EXTEND_STAT(rx_stat_falsecarriererrors);
3615 UPDATE_EXTEND_STAT(rx_stat_etherstatsundersizepkts);
3616 UPDATE_EXTEND_STAT(rx_stat_dot3statsframestoolong);
3617 UPDATE_EXTEND_STAT(rx_stat_etherstatsfragments);
3618 UPDATE_EXTEND_STAT(rx_stat_etherstatsjabbers);
3619 UPDATE_EXTEND_STAT(rx_stat_maccontrolframesreceived);
3620 UPDATE_EXTEND_STAT(rx_stat_xoffstateentered);
3621 UPDATE_EXTEND_STAT(rx_stat_xonpauseframesreceived);
3622 UPDATE_EXTEND_STAT(rx_stat_xoffpauseframesreceived);
3623 UPDATE_EXTEND_STAT(tx_stat_outxonsent);
3624 UPDATE_EXTEND_STAT(tx_stat_outxoffsent);
3625 UPDATE_EXTEND_STAT(tx_stat_flowcontroldone);
3626 UPDATE_EXTEND_STAT(tx_stat_etherstatscollisions);
3627 UPDATE_EXTEND_STAT(tx_stat_dot3statssinglecollisionframes);
3628 UPDATE_EXTEND_STAT(tx_stat_dot3statsmultiplecollisionframes);
3629 UPDATE_EXTEND_STAT(tx_stat_dot3statsdeferredtransmissions);
3630 UPDATE_EXTEND_STAT(tx_stat_dot3statsexcessivecollisions);
3631 UPDATE_EXTEND_STAT(tx_stat_dot3statslatecollisions);
3632 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts64octets);
3633 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts65octetsto127octets);
3634 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts128octetsto255octets);
3635 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts256octetsto511octets);
3636 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts512octetsto1023octets);
3637 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts1024octetsto1522octets);
3638 UPDATE_EXTEND_STAT(tx_stat_etherstatspktsover1522octets);
3639 UPDATE_EXTEND_STAT(tx_stat_dot3statsinternalmactransmiterrors);
3641 estats->pause_frames_received_hi =
3642 pstats->mac_stx[1].rx_stat_xonpauseframesreceived_hi;
3643 estats->pause_frames_received_lo =
3644 pstats->mac_stx[1].rx_stat_xonpauseframesreceived_lo;
3645 ADD_64(estats->pause_frames_received_hi,
3646 pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_hi,
3647 estats->pause_frames_received_lo,
3648 pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_lo);
3650 estats->pause_frames_sent_hi =
3651 pstats->mac_stx[1].tx_stat_outxonsent_hi;
3652 estats->pause_frames_sent_lo =
3653 pstats->mac_stx[1].tx_stat_outxonsent_lo;
3654 ADD_64(estats->pause_frames_sent_hi,
3655 pstats->mac_stx[1].tx_stat_outxoffsent_hi,
3656 estats->pause_frames_sent_lo,
3657 pstats->mac_stx[1].tx_stat_outxoffsent_lo);
3660 static int bnx2x_hw_stats_update(struct bnx2x *bp)
3662 struct nig_stats *new = bnx2x_sp(bp, nig_stats);
3663 struct nig_stats *old = &(bp->port.old_nig_stats);
3664 struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3665 struct bnx2x_eth_stats *estats = &bp->eth_stats;
3672 if (bp->link_vars.mac_type == MAC_TYPE_BMAC)
3673 bnx2x_bmac_stats_update(bp);
3675 else if (bp->link_vars.mac_type == MAC_TYPE_EMAC)
3676 bnx2x_emac_stats_update(bp);
3678 else { /* unreached */
3679 BNX2X_ERR("stats updated by DMAE but no MAC active\n");
3683 ADD_EXTEND_64(pstats->brb_drop_hi, pstats->brb_drop_lo,
3684 new->brb_discard - old->brb_discard);
3685 ADD_EXTEND_64(estats->brb_truncate_hi, estats->brb_truncate_lo,
3686 new->brb_truncate - old->brb_truncate);
3688 UPDATE_STAT64_NIG(egress_mac_pkt0,
3689 etherstatspkts1024octetsto1522octets);
3690 UPDATE_STAT64_NIG(egress_mac_pkt1, etherstatspktsover1522octets);
3692 memcpy(old, new, sizeof(struct nig_stats));
3694 memcpy(&(estats->rx_stat_ifhcinbadoctets_hi), &(pstats->mac_stx[1]),
3695 sizeof(struct mac_stx));
3696 estats->brb_drop_hi = pstats->brb_drop_hi;
3697 estats->brb_drop_lo = pstats->brb_drop_lo;
3699 pstats->host_port_stats_start = ++pstats->host_port_stats_end;
3701 nig_timer_max = SHMEM_RD(bp, port_mb[BP_PORT(bp)].stat_nig_timer);
3702 if (nig_timer_max != estats->nig_timer_max) {
3703 estats->nig_timer_max = nig_timer_max;
3704 BNX2X_ERR("NIG timer max (%u)\n", estats->nig_timer_max);
3710 static int bnx2x_storm_stats_update(struct bnx2x *bp)
3712 struct eth_stats_query *stats = bnx2x_sp(bp, fw_stats);
3713 struct tstorm_per_port_stats *tport =
3714 &stats->tstorm_common.port_statistics;
3715 struct host_func_stats *fstats = bnx2x_sp(bp, func_stats);
3716 struct bnx2x_eth_stats *estats = &bp->eth_stats;
3719 memset(&(fstats->total_bytes_received_hi), 0,
3720 sizeof(struct host_func_stats) - 2*sizeof(u32));
3721 estats->error_bytes_received_hi = 0;
3722 estats->error_bytes_received_lo = 0;
3723 estats->etherstatsoverrsizepkts_hi = 0;
3724 estats->etherstatsoverrsizepkts_lo = 0;
3725 estats->no_buff_discard_hi = 0;
3726 estats->no_buff_discard_lo = 0;
3728 for_each_queue(bp, i) {
3729 struct bnx2x_fastpath *fp = &bp->fp[i];
3730 int cl_id = fp->cl_id;
3731 struct tstorm_per_client_stats *tclient =
3732 &stats->tstorm_common.client_statistics[cl_id];
3733 struct tstorm_per_client_stats *old_tclient = &fp->old_tclient;
3734 struct ustorm_per_client_stats *uclient =
3735 &stats->ustorm_common.client_statistics[cl_id];
3736 struct ustorm_per_client_stats *old_uclient = &fp->old_uclient;
3737 struct xstorm_per_client_stats *xclient =
3738 &stats->xstorm_common.client_statistics[cl_id];
3739 struct xstorm_per_client_stats *old_xclient = &fp->old_xclient;
3740 struct bnx2x_eth_q_stats *qstats = &fp->eth_q_stats;
3743 /* are storm stats valid? */
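/* the storms echo drv_counter from the last stats query ramrod;
stats_counter was post-incremented when that query was posted, so
a fresh snapshot must carry stats_counter - 1 */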
3744 if ((u16)(le16_to_cpu(xclient->stats_counter) + 1) !=
3745 bp->stats_counter) {
3746 DP(BNX2X_MSG_STATS, "[%d] stats not updated by xstorm"
3747 " xstorm counter (%d) != stats_counter (%d)\n",
3748 i, xclient->stats_counter, bp->stats_counter);
3751 if ((u16)(le16_to_cpu(tclient->stats_counter) + 1) !=
3752 bp->stats_counter) {
3753 DP(BNX2X_MSG_STATS, "[%d] stats not updated by tstorm"
3754 " tstorm counter (%d) != stats_counter (%d)\n",
3755 i, tclient->stats_counter, bp->stats_counter);
3758 if ((u16)(le16_to_cpu(uclient->stats_counter) + 1) !=
3759 bp->stats_counter) {
3760 DP(BNX2X_MSG_STATS, "[%d] stats not updated by ustorm"
3761 " ustorm counter (%d) != stats_counter (%d)\n",
3762 i, uclient->stats_counter, bp->stats_counter);
3766 qstats->total_bytes_received_hi =
3767 qstats->valid_bytes_received_hi =
3768 le32_to_cpu(tclient->total_rcv_bytes.hi);
3769 qstats->total_bytes_received_lo =
3770 qstats->valid_bytes_received_lo =
3771 le32_to_cpu(tclient->total_rcv_bytes.lo);
3773 qstats->error_bytes_received_hi =
3774 le32_to_cpu(tclient->rcv_error_bytes.hi);
3775 qstats->error_bytes_received_lo =
3776 le32_to_cpu(tclient->rcv_error_bytes.lo);
3778 ADD_64(qstats->total_bytes_received_hi,
3779 qstats->error_bytes_received_hi,
3780 qstats->total_bytes_received_lo,
3781 qstats->error_bytes_received_lo);
3783 UPDATE_EXTEND_TSTAT(rcv_unicast_pkts,
3784 total_unicast_packets_received);
3785 UPDATE_EXTEND_TSTAT(rcv_multicast_pkts,
3786 total_multicast_packets_received);
3787 UPDATE_EXTEND_TSTAT(rcv_broadcast_pkts,
3788 total_broadcast_packets_received);
3789 UPDATE_EXTEND_TSTAT(packets_too_big_discard,
3790 etherstatsoverrsizepkts);
3791 UPDATE_EXTEND_TSTAT(no_buff_discard, no_buff_discard);
3793 SUB_EXTEND_USTAT(ucast_no_buff_pkts,
3794 total_unicast_packets_received);
3795 SUB_EXTEND_USTAT(mcast_no_buff_pkts,
3796 total_multicast_packets_received);
3797 SUB_EXTEND_USTAT(bcast_no_buff_pkts,
3798 total_broadcast_packets_received);
3799 UPDATE_EXTEND_USTAT(ucast_no_buff_pkts, no_buff_discard);
3800 UPDATE_EXTEND_USTAT(mcast_no_buff_pkts, no_buff_discard);
3801 UPDATE_EXTEND_USTAT(bcast_no_buff_pkts, no_buff_discard);
3803 qstats->total_bytes_transmitted_hi =
3804 le32_to_cpu(xclient->total_sent_bytes.hi);
3805 qstats->total_bytes_transmitted_lo =
3806 le32_to_cpu(xclient->total_sent_bytes.lo);
3808 UPDATE_EXTEND_XSTAT(unicast_pkts_sent,
3809 total_unicast_packets_transmitted);
3810 UPDATE_EXTEND_XSTAT(multicast_pkts_sent,
3811 total_multicast_packets_transmitted);
3812 UPDATE_EXTEND_XSTAT(broadcast_pkts_sent,
3813 total_broadcast_packets_transmitted);
3815 old_tclient->checksum_discard = tclient->checksum_discard;
3816 old_tclient->ttl0_discard = tclient->ttl0_discard;
3818 ADD_64(fstats->total_bytes_received_hi,
3819 qstats->total_bytes_received_hi,
3820 fstats->total_bytes_received_lo,
3821 qstats->total_bytes_received_lo);
3822 ADD_64(fstats->total_bytes_transmitted_hi,
3823 qstats->total_bytes_transmitted_hi,
3824 fstats->total_bytes_transmitted_lo,
3825 qstats->total_bytes_transmitted_lo);
3826 ADD_64(fstats->total_unicast_packets_received_hi,
3827 qstats->total_unicast_packets_received_hi,
3828 fstats->total_unicast_packets_received_lo,
3829 qstats->total_unicast_packets_received_lo);
3830 ADD_64(fstats->total_multicast_packets_received_hi,
3831 qstats->total_multicast_packets_received_hi,
3832 fstats->total_multicast_packets_received_lo,
3833 qstats->total_multicast_packets_received_lo);
3834 ADD_64(fstats->total_broadcast_packets_received_hi,
3835 qstats->total_broadcast_packets_received_hi,
3836 fstats->total_broadcast_packets_received_lo,
3837 qstats->total_broadcast_packets_received_lo);
3838 ADD_64(fstats->total_unicast_packets_transmitted_hi,
3839 qstats->total_unicast_packets_transmitted_hi,
3840 fstats->total_unicast_packets_transmitted_lo,
3841 qstats->total_unicast_packets_transmitted_lo);
3842 ADD_64(fstats->total_multicast_packets_transmitted_hi,
3843 qstats->total_multicast_packets_transmitted_hi,
3844 fstats->total_multicast_packets_transmitted_lo,
3845 qstats->total_multicast_packets_transmitted_lo);
3846 ADD_64(fstats->total_broadcast_packets_transmitted_hi,
3847 qstats->total_broadcast_packets_transmitted_hi,
3848 fstats->total_broadcast_packets_transmitted_lo,
3849 qstats->total_broadcast_packets_transmitted_lo);
3850 ADD_64(fstats->valid_bytes_received_hi,
3851 qstats->valid_bytes_received_hi,
3852 fstats->valid_bytes_received_lo,
3853 qstats->valid_bytes_received_lo);
3855 ADD_64(estats->error_bytes_received_hi,
3856 qstats->error_bytes_received_hi,
3857 estats->error_bytes_received_lo,
3858 qstats->error_bytes_received_lo);
3859 ADD_64(estats->etherstatsoverrsizepkts_hi,
3860 qstats->etherstatsoverrsizepkts_hi,
3861 estats->etherstatsoverrsizepkts_lo,
3862 qstats->etherstatsoverrsizepkts_lo);
3863 ADD_64(estats->no_buff_discard_hi, qstats->no_buff_discard_hi,
3864 estats->no_buff_discard_lo, qstats->no_buff_discard_lo);
3867 ADD_64(fstats->total_bytes_received_hi,
3868 estats->rx_stat_ifhcinbadoctets_hi,
3869 fstats->total_bytes_received_lo,
3870 estats->rx_stat_ifhcinbadoctets_lo);
3872 memcpy(estats, &(fstats->total_bytes_received_hi),
3873 sizeof(struct host_func_stats) - 2*sizeof(u32));
3875 ADD_64(estats->etherstatsoverrsizepkts_hi,
3876 estats->rx_stat_dot3statsframestoolong_hi,
3877 estats->etherstatsoverrsizepkts_lo,
3878 estats->rx_stat_dot3statsframestoolong_lo);
3879 ADD_64(estats->error_bytes_received_hi,
3880 estats->rx_stat_ifhcinbadoctets_hi,
3881 estats->error_bytes_received_lo,
3882 estats->rx_stat_ifhcinbadoctets_lo);
3885 estats->mac_filter_discard =
3886 le32_to_cpu(tport->mac_filter_discard);
3887 estats->xxoverflow_discard =
3888 le32_to_cpu(tport->xxoverflow_discard);
3889 estats->brb_truncate_discard =
3890 le32_to_cpu(tport->brb_truncate_discard);
3891 estats->mac_discard = le32_to_cpu(tport->mac_discard);
3894 fstats->host_func_stats_start = ++fstats->host_func_stats_end;
3896 bp->stats_pending = 0;
3901 static void bnx2x_net_stats_update(struct bnx2x *bp)
3903 struct bnx2x_eth_stats *estats = &bp->eth_stats;
3904 struct net_device_stats *nstats = &bp->dev->stats;
3907 nstats->rx_packets =
3908 bnx2x_hilo(&estats->total_unicast_packets_received_hi) +
3909 bnx2x_hilo(&estats->total_multicast_packets_received_hi) +
3910 bnx2x_hilo(&estats->total_broadcast_packets_received_hi);
3912 nstats->tx_packets =
3913 bnx2x_hilo(&estats->total_unicast_packets_transmitted_hi) +
3914 bnx2x_hilo(&estats->total_multicast_packets_transmitted_hi) +
3915 bnx2x_hilo(&estats->total_broadcast_packets_transmitted_hi);
3917 nstats->rx_bytes = bnx2x_hilo(&estats->total_bytes_received_hi);
3919 nstats->tx_bytes = bnx2x_hilo(&estats->total_bytes_transmitted_hi);
3921 nstats->rx_dropped = estats->mac_discard;
3922 for_each_queue(bp, i)
3923 nstats->rx_dropped +=
3924 le32_to_cpu(bp->fp[i].old_tclient.checksum_discard);
3926 nstats->tx_dropped = 0;
3929 bnx2x_hilo(&estats->total_multicast_packets_received_hi);
3931 nstats->collisions =
3932 bnx2x_hilo(&estats->tx_stat_etherstatscollisions_hi);
3934 nstats->rx_length_errors =
3935 bnx2x_hilo(&estats->rx_stat_etherstatsundersizepkts_hi) +
3936 bnx2x_hilo(&estats->etherstatsoverrsizepkts_hi);
3937 nstats->rx_over_errors = bnx2x_hilo(&estats->brb_drop_hi) +
3938 bnx2x_hilo(&estats->brb_truncate_hi);
3939 nstats->rx_crc_errors =
3940 bnx2x_hilo(&estats->rx_stat_dot3statsfcserrors_hi);
3941 nstats->rx_frame_errors =
3942 bnx2x_hilo(&estats->rx_stat_dot3statsalignmenterrors_hi);
3943 nstats->rx_fifo_errors = bnx2x_hilo(&estats->no_buff_discard_hi);
3944 nstats->rx_missed_errors = estats->xxoverflow_discard;
3946 nstats->rx_errors = nstats->rx_length_errors +
3947 nstats->rx_over_errors +
3948 nstats->rx_crc_errors +
3949 nstats->rx_frame_errors +
3950 nstats->rx_fifo_errors +
3951 nstats->rx_missed_errors;
3953 nstats->tx_aborted_errors =
3954 bnx2x_hilo(&estats->tx_stat_dot3statslatecollisions_hi) +
3955 bnx2x_hilo(&estats->tx_stat_dot3statsexcessivecollisions_hi);
3956 nstats->tx_carrier_errors =
3957 bnx2x_hilo(&estats->rx_stat_dot3statscarriersenseerrors_hi);
3958 nstats->tx_fifo_errors = 0;
3959 nstats->tx_heartbeat_errors = 0;
3960 nstats->tx_window_errors = 0;
3962 nstats->tx_errors = nstats->tx_aborted_errors +
3963 nstats->tx_carrier_errors +
3964 bnx2x_hilo(&estats->tx_stat_dot3statsinternalmactransmiterrors_hi);
3967 static void bnx2x_drv_stats_update(struct bnx2x *bp)
3969 struct bnx2x_eth_stats *estats = &bp->eth_stats;
3972 estats->driver_xoff = 0;
3973 estats->rx_err_discard_pkt = 0;
3974 estats->rx_skb_alloc_failed = 0;
3975 estats->hw_csum_err = 0;
3976 for_each_queue(bp, i) {
3977 struct bnx2x_eth_q_stats *qstats = &bp->fp[i].eth_q_stats;
3979 estats->driver_xoff += qstats->driver_xoff;
3980 estats->rx_err_discard_pkt += qstats->rx_err_discard_pkt;
3981 estats->rx_skb_alloc_failed += qstats->rx_skb_alloc_failed;
3982 estats->hw_csum_err += qstats->hw_csum_err;
3986 static void bnx2x_stats_update(struct bnx2x *bp)
3988 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3990 if (*stats_comp != DMAE_COMP_VAL)
3994 bnx2x_hw_stats_update(bp);
3996 if (bnx2x_storm_stats_update(bp) && (bp->stats_pending++ == 3)) {
3997 BNX2X_ERR("storm stats not updated for 3 consecutive times\n");
4002 bnx2x_net_stats_update(bp);
4003 bnx2x_drv_stats_update(bp);
4005 if (bp->msglevel & NETIF_MSG_TIMER) {
4006 struct tstorm_per_client_stats *old_tclient =
4007 &bp->fp->old_tclient;
4008 struct bnx2x_eth_q_stats *qstats = &bp->fp->eth_q_stats;
4009 struct bnx2x_eth_stats *estats = &bp->eth_stats;
4010 struct net_device_stats *nstats = &bp->dev->stats;
4013 printk(KERN_DEBUG "%s:\n", bp->dev->name);
4014 printk(KERN_DEBUG " tx avail (%4x) tx hc idx (%x)"
4016 bnx2x_tx_avail(bp->fp),
4017 le16_to_cpu(*bp->fp->tx_cons_sb), nstats->tx_packets);
4018 printk(KERN_DEBUG " rx usage (%4x) rx hc idx (%x)"
4020 (u16)(le16_to_cpu(*bp->fp->rx_cons_sb) -
4021 bp->fp->rx_comp_cons),
4022 le16_to_cpu(*bp->fp->rx_cons_sb), nstats->rx_packets);
4023 printk(KERN_DEBUG " %s (Xoff events %u) brb drops %u "
4024 "brb truncate %u\n",
4025 (netif_queue_stopped(bp->dev) ? "Xoff" : "Xon"),
4026 qstats->driver_xoff,
4027 estats->brb_drop_lo, estats->brb_truncate_lo);
4028 printk(KERN_DEBUG "tstats: checksum_discard %u "
4029 "packets_too_big_discard %lu no_buff_discard %lu "
4030 "mac_discard %u mac_filter_discard %u "
4031 "xxovrflow_discard %u brb_truncate_discard %u "
4032 "ttl0_discard %u\n",
4033 le32_to_cpu(old_tclient->checksum_discard),
4034 bnx2x_hilo(&qstats->etherstatsoverrsizepkts_hi),
4035 bnx2x_hilo(&qstats->no_buff_discard_hi),
4036 estats->mac_discard, estats->mac_filter_discard,
4037 estats->xxoverflow_discard, estats->brb_truncate_discard,
4038 le32_to_cpu(old_tclient->ttl0_discard));
4040 for_each_queue(bp, i) {
4041 printk(KERN_DEBUG "[%d]: %lu\t%lu\t%lu\n", i,
4042 bnx2x_fp(bp, i, tx_pkt),
4043 bnx2x_fp(bp, i, rx_pkt),
4044 bnx2x_fp(bp, i, rx_calls));
4048 bnx2x_hw_stats_post(bp);
4049 bnx2x_storm_stats_post(bp);
4052 static void bnx2x_port_stats_stop(struct bnx2x *bp)
4054 struct dmae_command *dmae;
4056 int loader_idx = PMF_DMAE_C(bp);
4057 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
4059 bp->executer_idx = 0;
4061 opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
4063 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
4065 DMAE_CMD_ENDIANITY_B_DW_SWAP |
4067 DMAE_CMD_ENDIANITY_DW_SWAP |
4069 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
4070 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
4072 if (bp->port.port_stx) {
4074 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4076 dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
4078 dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
4079 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
4080 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
4081 dmae->dst_addr_lo = bp->port.port_stx >> 2;
4082 dmae->dst_addr_hi = 0;
4083 dmae->len = sizeof(struct host_port_stats) >> 2;
4085 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
4086 dmae->comp_addr_hi = 0;
4089 dmae->comp_addr_lo =
4090 U64_LO(bnx2x_sp_mapping(bp, stats_comp));
4091 dmae->comp_addr_hi =
4092 U64_HI(bnx2x_sp_mapping(bp, stats_comp));
4093 dmae->comp_val = DMAE_COMP_VAL;
4101 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4102 dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
4103 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
4104 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
4105 dmae->dst_addr_lo = bp->func_stx >> 2;
4106 dmae->dst_addr_hi = 0;
4107 dmae->len = sizeof(struct host_func_stats) >> 2;
4108 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
4109 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
4110 dmae->comp_val = DMAE_COMP_VAL;
4116 static void bnx2x_stats_stop(struct bnx2x *bp)
4120 bnx2x_stats_comp(bp);
4123 update = (bnx2x_hw_stats_update(bp) == 0);
4125 update |= (bnx2x_storm_stats_update(bp) == 0);
4128 bnx2x_net_stats_update(bp);
4131 bnx2x_port_stats_stop(bp);
4133 bnx2x_hw_stats_post(bp);
4134 bnx2x_stats_comp(bp);
4138 static void bnx2x_stats_do_nothing(struct bnx2x *bp)
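/* statistics state machine: indexed by [current state][event], each
entry gives the action to run and the next state; driven by
bnx2x_stats_handle() below */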
4142 static const struct {
4143 void (*action)(struct bnx2x *bp);
4144 enum bnx2x_stats_state next_state;
4145 } bnx2x_stats_stm[STATS_STATE_MAX][STATS_EVENT_MAX] = {
4148 /* DISABLED PMF */ {bnx2x_stats_pmf_update, STATS_STATE_DISABLED},
4149 /* LINK_UP */ {bnx2x_stats_start, STATS_STATE_ENABLED},
4150 /* UPDATE */ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED},
4151 /* STOP */ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED}
4154 /* ENABLED PMF */ {bnx2x_stats_pmf_start, STATS_STATE_ENABLED},
4155 /* LINK_UP */ {bnx2x_stats_restart, STATS_STATE_ENABLED},
4156 /* UPDATE */ {bnx2x_stats_update, STATS_STATE_ENABLED},
4157 /* STOP */ {bnx2x_stats_stop, STATS_STATE_DISABLED}
static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event)
{
	enum bnx2x_stats_state state = bp->stats_state;

	bnx2x_stats_stm[state][event].action(bp);
	bp->stats_state = bnx2x_stats_stm[state][event].next_state;

	if ((event != STATS_EVENT_UPDATE) || (bp->msglevel & NETIF_MSG_TIMER))
		DP(BNX2X_MSG_STATS, "state %d -> event %d -> state %d\n",
		   state, event, bp->stats_state);
}
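
/* Stand-alone model of the table-driven state machine dispatch above
 * (names simplified, illustrative only): the 2-D table is indexed by
 * [current state][event] and yields an action plus the next state.
 */
enum fsm_state_sketch { FSM_ST_DISABLED, FSM_ST_ENABLED, FSM_ST_MAX };
enum fsm_event_sketch { FSM_EV_PMF, FSM_EV_LINK_UP, FSM_EV_UPDATE,
			FSM_EV_STOP, FSM_EV_MAX };

struct fsm_edge_sketch {
	void (*action)(void *ctx);
	enum fsm_state_sketch next_state;
};

static enum fsm_state_sketch
fsm_step_sketch(const struct fsm_edge_sketch stm[FSM_ST_MAX][FSM_EV_MAX],
		enum fsm_state_sketch state, enum fsm_event_sketch event,
		void *ctx)
{
	stm[state][event].action(ctx);
	return stm[state][event].next_state;
}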
static void bnx2x_timer(unsigned long data)
{
	struct bnx2x *bp = (struct bnx2x *) data;

	if (!netif_running(bp->dev))
		return;

	if (atomic_read(&bp->intr_sem) != 0)
		goto timer_restart;

	if (poll) {
		struct bnx2x_fastpath *fp = &bp->fp[0];
		int rc;

		bnx2x_tx_int(fp);
		rc = bnx2x_rx_int(fp, 1000);
	}

	if (!BP_NOMCP(bp)) {
		int func = BP_FUNC(bp);
		u32 drv_pulse;
		u32 mcp_pulse;

		++bp->fw_drv_pulse_wr_seq;
		bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK;
		/* TBD - add SYSTEM_TIME */
		drv_pulse = bp->fw_drv_pulse_wr_seq;
		SHMEM_WR(bp, func_mb[func].drv_pulse_mb, drv_pulse);

		mcp_pulse = (SHMEM_RD(bp, func_mb[func].mcp_pulse_mb) &
			     MCP_PULSE_SEQ_MASK);
		/* The delta between driver pulse and mcp response
		 * should be 1 (before mcp response) or 0 (after mcp response)
		 */
		if ((drv_pulse != mcp_pulse) &&
		    (drv_pulse != ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK))) {
			/* someone lost a heartbeat... */
			BNX2X_ERR("drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
				  drv_pulse, mcp_pulse);
		}
	}

	if ((bp->state == BNX2X_STATE_OPEN) ||
	    (bp->state == BNX2X_STATE_DISABLED))
		bnx2x_stats_handle(bp, STATS_EVENT_UPDATE);

timer_restart:
	mod_timer(&bp->timer, jiffies + bp->current_interval);
}
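
/* The heartbeat check above as stand-alone arithmetic (illustrative):
 * the MCP's echoed pulse may lag the driver's by at most one, modulo a
 * mask assumed to be of the form 2^n - 1 like MCP_PULSE_SEQ_MASK.
 */
static int pulse_in_sync_sketch(u32 drv_pulse, u32 mcp_pulse, u32 seq_mask)
{
	drv_pulse &= seq_mask;
	mcp_pulse &= seq_mask;

	return (drv_pulse == mcp_pulse) ||
	       (drv_pulse == ((mcp_pulse + 1) & seq_mask));
}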
4223 /* end of Statistics */
4228 * nic init service functions
4231 static void bnx2x_zero_sb(struct bnx2x *bp, int sb_id)
4233 int port = BP_PORT(bp);
4235 bnx2x_init_fill(bp, USTORM_INTMEM_ADDR +
4236 USTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), 0,
4237 sizeof(struct ustorm_status_block)/4);
4238 bnx2x_init_fill(bp, CSTORM_INTMEM_ADDR +
4239 CSTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), 0,
4240 sizeof(struct cstorm_status_block)/4);
4243 static void bnx2x_init_sb(struct bnx2x *bp, struct host_status_block *sb,
4244 dma_addr_t mapping, int sb_id)
4246 int port = BP_PORT(bp);
4247 int func = BP_FUNC(bp);
4252 section = ((u64)mapping) + offsetof(struct host_status_block,
4254 sb->u_status_block.status_block_id = sb_id;
4256 REG_WR(bp, BAR_USTRORM_INTMEM +
4257 USTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id), U64_LO(section));
4258 REG_WR(bp, BAR_USTRORM_INTMEM +
4259 ((USTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id)) + 4),
4261 REG_WR8(bp, BAR_USTRORM_INTMEM + FP_USB_FUNC_OFF +
4262 USTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), func);
4264 for (index = 0; index < HC_USTORM_SB_NUM_INDICES; index++)
4265 REG_WR16(bp, BAR_USTRORM_INTMEM +
4266 USTORM_SB_HC_DISABLE_OFFSET(port, sb_id, index), 1);
4269 section = ((u64)mapping) + offsetof(struct host_status_block,
4271 sb->c_status_block.status_block_id = sb_id;
4273 REG_WR(bp, BAR_CSTRORM_INTMEM +
4274 CSTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id), U64_LO(section));
4275 REG_WR(bp, BAR_CSTRORM_INTMEM +
4276 ((CSTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id)) + 4),
4278 REG_WR8(bp, BAR_CSTRORM_INTMEM + FP_CSB_FUNC_OFF +
4279 CSTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), func);
4281 for (index = 0; index < HC_CSTORM_SB_NUM_INDICES; index++)
4282 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4283 CSTORM_SB_HC_DISABLE_OFFSET(port, sb_id, index), 1);
4285 bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
4288 static void bnx2x_zero_def_sb(struct bnx2x *bp)
4290 int func = BP_FUNC(bp);
4292 bnx2x_init_fill(bp, TSTORM_INTMEM_ADDR +
4293 TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4294 sizeof(struct tstorm_def_status_block)/4);
4295 bnx2x_init_fill(bp, USTORM_INTMEM_ADDR +
4296 USTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4297 sizeof(struct ustorm_def_status_block)/4);
4298 bnx2x_init_fill(bp, CSTORM_INTMEM_ADDR +
4299 CSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4300 sizeof(struct cstorm_def_status_block)/4);
4301 bnx2x_init_fill(bp, XSTORM_INTMEM_ADDR +
4302 XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4303 sizeof(struct xstorm_def_status_block)/4);
4306 static void bnx2x_init_def_sb(struct bnx2x *bp,
4307 struct host_def_status_block *def_sb,
4308 dma_addr_t mapping, int sb_id)
4310 int port = BP_PORT(bp);
4311 int func = BP_FUNC(bp);
4312 int index, val, reg_offset;
4316 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4317 atten_status_block);
4318 def_sb->atten_status_block.status_block_id = sb_id;
4322 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
4323 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
4325 for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
4326 bp->attn_group[index].sig[0] = REG_RD(bp,
4327 reg_offset + 0x10*index);
4328 bp->attn_group[index].sig[1] = REG_RD(bp,
4329 reg_offset + 0x4 + 0x10*index);
4330 bp->attn_group[index].sig[2] = REG_RD(bp,
4331 reg_offset + 0x8 + 0x10*index);
4332 bp->attn_group[index].sig[3] = REG_RD(bp,
4333 reg_offset + 0xc + 0x10*index);
4336 reg_offset = (port ? HC_REG_ATTN_MSG1_ADDR_L :
4337 HC_REG_ATTN_MSG0_ADDR_L);
4339 REG_WR(bp, reg_offset, U64_LO(section));
4340 REG_WR(bp, reg_offset + 4, U64_HI(section));
4342 reg_offset = (port ? HC_REG_ATTN_NUM_P1 : HC_REG_ATTN_NUM_P0);
4344 val = REG_RD(bp, reg_offset);
4346 REG_WR(bp, reg_offset, val);
4349 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4350 u_def_status_block);
4351 def_sb->u_def_status_block.status_block_id = sb_id;
4353 REG_WR(bp, BAR_USTRORM_INTMEM +
4354 USTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
4355 REG_WR(bp, BAR_USTRORM_INTMEM +
4356 ((USTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
4358 REG_WR8(bp, BAR_USTRORM_INTMEM + DEF_USB_FUNC_OFF +
4359 USTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4361 for (index = 0; index < HC_USTORM_DEF_SB_NUM_INDICES; index++)
4362 REG_WR16(bp, BAR_USTRORM_INTMEM +
4363 USTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
4366 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4367 c_def_status_block);
4368 def_sb->c_def_status_block.status_block_id = sb_id;
4370 REG_WR(bp, BAR_CSTRORM_INTMEM +
4371 CSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
4372 REG_WR(bp, BAR_CSTRORM_INTMEM +
4373 ((CSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
4375 REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_CSB_FUNC_OFF +
4376 CSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4378 for (index = 0; index < HC_CSTORM_DEF_SB_NUM_INDICES; index++)
4379 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4380 CSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
4383 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4384 t_def_status_block);
4385 def_sb->t_def_status_block.status_block_id = sb_id;
4387 REG_WR(bp, BAR_TSTRORM_INTMEM +
4388 TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
4389 REG_WR(bp, BAR_TSTRORM_INTMEM +
4390 ((TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
4392 REG_WR8(bp, BAR_TSTRORM_INTMEM + DEF_TSB_FUNC_OFF +
4393 TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4395 for (index = 0; index < HC_TSTORM_DEF_SB_NUM_INDICES; index++)
4396 REG_WR16(bp, BAR_TSTRORM_INTMEM +
4397 TSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
4400 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4401 x_def_status_block);
4402 def_sb->x_def_status_block.status_block_id = sb_id;
4404 REG_WR(bp, BAR_XSTRORM_INTMEM +
4405 XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
4406 REG_WR(bp, BAR_XSTRORM_INTMEM +
4407 ((XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
4409 REG_WR8(bp, BAR_XSTRORM_INTMEM + DEF_XSB_FUNC_OFF +
4410 XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4412 for (index = 0; index < HC_XSTORM_DEF_SB_NUM_INDICES; index++)
4413 REG_WR16(bp, BAR_XSTRORM_INTMEM +
4414 XSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
4416 bp->stats_pending = 0;
4417 bp->set_mac_pending = 0;
4419 bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
static void bnx2x_update_coalesce(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int i;

	for_each_queue(bp, i) {
		int sb_id = bp->fp[i].sb_id;

		/* HC_INDEX_U_ETH_RX_CQ_CONS */
		REG_WR8(bp, BAR_USTRORM_INTMEM +
			USTORM_SB_HC_TIMEOUT_OFFSET(port, sb_id,
						    U_SB_ETH_RX_CQ_INDEX),
			bp->rx_ticks/12);
		REG_WR16(bp, BAR_USTRORM_INTMEM +
			 USTORM_SB_HC_DISABLE_OFFSET(port, sb_id,
						     U_SB_ETH_RX_CQ_INDEX),
			 (bp->rx_ticks/12) ? 0 : 1);

		/* HC_INDEX_C_ETH_TX_CQ_CONS */
		REG_WR8(bp, BAR_CSTRORM_INTMEM +
			CSTORM_SB_HC_TIMEOUT_OFFSET(port, sb_id,
						    C_SB_ETH_TX_CQ_INDEX),
			bp->tx_ticks/12);
		REG_WR16(bp, BAR_CSTRORM_INTMEM +
			 CSTORM_SB_HC_DISABLE_OFFSET(port, sb_id,
						     C_SB_ETH_TX_CQ_INDEX),
			 (bp->tx_ticks/12) ? 0 : 1);
	}
}
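
/* Stand-alone sketch of the coalescing conversion above (illustrative):
 * the HC timeout registers are programmed in 12us units (hence the
 * ticks/12), and an index is disabled outright when the requested
 * interval is shorter than one unit.
 */
static void hc_coalesce_params_sketch(u32 ticks_us, u8 *timeout_12us,
				      u16 *hc_disable)
{
	*timeout_12us = ticks_us / 12;
	*hc_disable = (ticks_us / 12) ? 0 : 1;
}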
static inline void bnx2x_free_tpa_pool(struct bnx2x *bp,
				       struct bnx2x_fastpath *fp, int last)
{
	int i;

	for (i = 0; i < last; i++) {
		struct sw_rx_bd *rx_buf = &(fp->tpa_pool[i]);
		struct sk_buff *skb = rx_buf->skb;

		if (skb == NULL) {
			DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i);
			continue;
		}
		if (fp->tpa_state[i] == BNX2X_TPA_START)
			pci_unmap_single(bp->pdev,
					 pci_unmap_addr(rx_buf, mapping),
					 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
		dev_kfree_skb(skb);
		rx_buf->skb = NULL;
	}
}
4476 static void bnx2x_init_rx_rings(struct bnx2x *bp)
4478 int func = BP_FUNC(bp);
4479 int max_agg_queues = CHIP_IS_E1(bp) ? ETH_MAX_AGGREGATION_QUEUES_E1 :
4480 ETH_MAX_AGGREGATION_QUEUES_E1H;
4481 u16 ring_prod, cqe_ring_prod;
4484 bp->rx_buf_size = bp->dev->mtu + ETH_OVREHEAD + BNX2X_RX_ALIGN;
4486 "mtu %d rx_buf_size %d\n", bp->dev->mtu, bp->rx_buf_size);
4488 if (bp->flags & TPA_ENABLE_FLAG) {
4490 for_each_rx_queue(bp, j) {
4491 struct bnx2x_fastpath *fp = &bp->fp[j];
4493 for (i = 0; i < max_agg_queues; i++) {
4494 fp->tpa_pool[i].skb =
4495 netdev_alloc_skb(bp->dev, bp->rx_buf_size);
				if (!fp->tpa_pool[i].skb) {
					BNX2X_ERR("Failed to allocate TPA "
						  "skb pool for queue[%d] - "
						  "disabling TPA on this "
						  "queue!\n", j);
					bnx2x_free_tpa_pool(bp, fp, i);
					fp->disable_tpa = 1;
					break;
				}
				pci_unmap_addr_set((struct sw_rx_bd *)
							&bp->fp->tpa_pool[i],
						   mapping, 0);
				fp->tpa_state[i] = BNX2X_TPA_STOP;
			}
		}
	}
4513 for_each_rx_queue(bp, j) {
4514 struct bnx2x_fastpath *fp = &bp->fp[j];
4517 fp->rx_cons_sb = BNX2X_RX_SB_INDEX;
4518 fp->rx_bd_cons_sb = BNX2X_RX_SB_BD_INDEX;
4520 /* "next page" elements initialization */
4522 for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
4523 struct eth_rx_sge *sge;
4525 sge = &fp->rx_sge_ring[RX_SGE_CNT * i - 2];
4527 cpu_to_le32(U64_HI(fp->rx_sge_mapping +
4528 BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
4530 cpu_to_le32(U64_LO(fp->rx_sge_mapping +
4531 BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
4534 bnx2x_init_sge_ring_bit_mask(fp);
4537 for (i = 1; i <= NUM_RX_RINGS; i++) {
4538 struct eth_rx_bd *rx_bd;
4540 rx_bd = &fp->rx_desc_ring[RX_DESC_CNT * i - 2];
4542 cpu_to_le32(U64_HI(fp->rx_desc_mapping +
4543 BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
4545 cpu_to_le32(U64_LO(fp->rx_desc_mapping +
4546 BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
4550 for (i = 1; i <= NUM_RCQ_RINGS; i++) {
4551 struct eth_rx_cqe_next_page *nextpg;
4553 nextpg = (struct eth_rx_cqe_next_page *)
4554 &fp->rx_comp_ring[RCQ_DESC_CNT * i - 1];
4556 cpu_to_le32(U64_HI(fp->rx_comp_mapping +
4557 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
4559 cpu_to_le32(U64_LO(fp->rx_comp_mapping +
4560 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
4563 /* Allocate SGEs and initialize the ring elements */
4564 for (i = 0, ring_prod = 0;
4565 i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {
				if (bnx2x_alloc_rx_sge(bp, fp, ring_prod) < 0) {
					BNX2X_ERR("was only able to allocate "
						  "%d rx sges\n", i);
					BNX2X_ERR("disabling TPA for queue[%d]\n", j);
					/* Cleanup already allocated elements */
					bnx2x_free_rx_sge_range(bp, fp, ring_prod);
					bnx2x_free_tpa_pool(bp, fp, max_agg_queues);
					fp->disable_tpa = 1;
					ring_prod = 0;
					break;
				}
				ring_prod = NEXT_SGE_IDX(ring_prod);
			}
4580 fp->rx_sge_prod = ring_prod;
4582 /* Allocate BDs and initialize BD ring */
4583 fp->rx_comp_cons = 0;
4584 cqe_ring_prod = ring_prod = 0;
4585 for (i = 0; i < bp->rx_ring_size; i++) {
			if (bnx2x_alloc_rx_skb(bp, fp, ring_prod) < 0) {
				BNX2X_ERR("was only able to allocate "
					  "%d rx skbs on queue[%d]\n", i, j);
				fp->eth_q_stats.rx_skb_alloc_failed++;
				break;
			}
			ring_prod = NEXT_RX_IDX(ring_prod);
			cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
			WARN_ON(ring_prod <= i);
		}
4597 fp->rx_bd_prod = ring_prod;
		/* must not have more available CQEs than BDs */
		fp->rx_comp_prod = min((u16)(NUM_RCQ_RINGS*RCQ_DESC_CNT),
				       cqe_ring_prod);
4601 fp->rx_pkt = fp->rx_calls = 0;
		/* Warning!
		 * this will generate an interrupt (to the TSTORM)
		 * must only be done after chip is initialized
		 */
		bnx2x_update_rx_prod(bp, fp, ring_prod, fp->rx_comp_prod,
				     fp->rx_sge_prod);

		if (j != 0)
			continue;
4612 REG_WR(bp, BAR_USTRORM_INTMEM +
4613 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
4614 U64_LO(fp->rx_comp_mapping));
4615 REG_WR(bp, BAR_USTRORM_INTMEM +
4616 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
		       U64_HI(fp->rx_comp_mapping));
	}
}
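
/* Illustrative ring-link math (not driver code): each page of the BD
 * rings above reserves its trailing descriptor(s) as a "next page"
 * pointer, which is why the link element is taken at
 * RX_DESC_CNT * i - 2 and why the producer index must skip those
 * slots.  One possible formulation for a page whose last two entries
 * hold the link:
 */
static u16 next_bd_idx_sketch(u16 idx, u16 entries_per_page)
{
	u16 usable = entries_per_page - 2;	/* last 2 hold the link */

	idx++;
	if ((idx % entries_per_page) == usable)
		idx += 2;			/* hop over the link element */
	return idx;
}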
4621 static void bnx2x_init_tx_ring(struct bnx2x *bp)
4625 for_each_tx_queue(bp, j) {
4626 struct bnx2x_fastpath *fp = &bp->fp[j];
4628 for (i = 1; i <= NUM_TX_RINGS; i++) {
4629 struct eth_tx_bd *tx_bd =
4630 &fp->tx_desc_ring[TX_DESC_CNT * i - 1];
4633 cpu_to_le32(U64_HI(fp->tx_desc_mapping +
4634 BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
4636 cpu_to_le32(U64_LO(fp->tx_desc_mapping +
4637 BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
4640 fp->tx_pkt_prod = 0;
4641 fp->tx_pkt_cons = 0;
4644 fp->tx_cons_sb = BNX2X_TX_SB_INDEX;
4649 static void bnx2x_init_sp_ring(struct bnx2x *bp)
4651 int func = BP_FUNC(bp);
4653 spin_lock_init(&bp->spq_lock);
4655 bp->spq_left = MAX_SPQ_PENDING;
4656 bp->spq_prod_idx = 0;
4657 bp->dsb_sp_prod = BNX2X_SP_DSB_INDEX;
4658 bp->spq_prod_bd = bp->spq;
4659 bp->spq_last_bd = bp->spq_prod_bd + MAX_SP_DESC_CNT;
4661 REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func),
4662 U64_LO(bp->spq_mapping));
4664 XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func) + 4,
4665 U64_HI(bp->spq_mapping));
4667 REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PROD_OFFSET(func),
4671 static void bnx2x_init_context(struct bnx2x *bp)
4675 for_each_queue(bp, i) {
4676 struct eth_context *context = bnx2x_sp(bp, context[i].eth);
4677 struct bnx2x_fastpath *fp = &bp->fp[i];
4678 u8 cl_id = fp->cl_id;
4679 u8 sb_id = fp->sb_id;
4681 context->ustorm_st_context.common.sb_index_numbers =
4682 BNX2X_RX_SB_INDEX_NUM;
4683 context->ustorm_st_context.common.clientId = cl_id;
4684 context->ustorm_st_context.common.status_block_id = sb_id;
4685 context->ustorm_st_context.common.flags =
4686 (USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_MC_ALIGNMENT |
4687 USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_STATISTICS);
4688 context->ustorm_st_context.common.statistics_counter_id =
4690 context->ustorm_st_context.common.mc_alignment_log_size =
4691 BNX2X_RX_ALIGN_SHIFT;
4692 context->ustorm_st_context.common.bd_buff_size =
4694 context->ustorm_st_context.common.bd_page_base_hi =
4695 U64_HI(fp->rx_desc_mapping);
4696 context->ustorm_st_context.common.bd_page_base_lo =
4697 U64_LO(fp->rx_desc_mapping);
4698 if (!fp->disable_tpa) {
4699 context->ustorm_st_context.common.flags |=
4700 (USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_TPA |
4701 USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_SGE_RING);
4702 context->ustorm_st_context.common.sge_buff_size =
4703 (u16)min((u32)SGE_PAGE_SIZE*PAGES_PER_SGE,
4705 context->ustorm_st_context.common.sge_page_base_hi =
4706 U64_HI(fp->rx_sge_mapping);
4707 context->ustorm_st_context.common.sge_page_base_lo =
4708 U64_LO(fp->rx_sge_mapping);
4711 context->ustorm_ag_context.cdu_usage =
4712 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
4713 CDU_REGION_NUMBER_UCM_AG,
4714 ETH_CONNECTION_TYPE);
4716 context->xstorm_st_context.tx_bd_page_base_hi =
4717 U64_HI(fp->tx_desc_mapping);
4718 context->xstorm_st_context.tx_bd_page_base_lo =
4719 U64_LO(fp->tx_desc_mapping);
4720 context->xstorm_st_context.db_data_addr_hi =
4721 U64_HI(fp->tx_prods_mapping);
4722 context->xstorm_st_context.db_data_addr_lo =
4723 U64_LO(fp->tx_prods_mapping);
4724 context->xstorm_st_context.statistics_data = (cl_id |
4725 XSTORM_ETH_ST_CONTEXT_STATISTICS_ENABLE);
4726 context->cstorm_st_context.sb_index_number =
4727 C_SB_ETH_TX_CQ_INDEX;
4728 context->cstorm_st_context.status_block_id = sb_id;
4730 context->xstorm_ag_context.cdu_reserved =
4731 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
4732 CDU_REGION_NUMBER_XCM_AG,
4733 ETH_CONNECTION_TYPE);
4737 static void bnx2x_init_ind_table(struct bnx2x *bp)
4739 int func = BP_FUNC(bp);
	if (bp->multi_mode == ETH_RSS_MODE_DISABLED)
		return;

	DP(NETIF_MSG_IFUP,
	   "Initializing indirection table  multi_mode %d\n", bp->multi_mode);
4747 for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++)
4748 REG_WR8(bp, BAR_TSTRORM_INTMEM +
4749 TSTORM_INDIRECTION_TABLE_OFFSET(func) + i,
			bp->fp->cl_id + (i % bp->num_rx_queues));
}
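
/* Stand-alone sketch of the round-robin RSS indirection fill above
 * (illustrative): table slot i maps to client (base + i mod nqueues),
 * spreading flows evenly across the RX queues.
 */
static void fill_ind_table_sketch(u8 *table, int table_size,
				  u8 base_cl_id, int num_rx_queues)
{
	int i;

	for (i = 0; i < table_size; i++)
		table[i] = base_cl_id + (i % num_rx_queues);
}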
4753 static void bnx2x_set_client_config(struct bnx2x *bp)
4755 struct tstorm_eth_client_config tstorm_client = {0};
4756 int port = BP_PORT(bp);
4759 tstorm_client.mtu = bp->dev->mtu;
4760 tstorm_client.config_flags =
4761 (TSTORM_ETH_CLIENT_CONFIG_STATSITICS_ENABLE |
4762 TSTORM_ETH_CLIENT_CONFIG_E1HOV_REM_ENABLE);
4764 if (bp->rx_mode && bp->vlgrp && (bp->flags & HW_VLAN_RX_FLAG)) {
4765 tstorm_client.config_flags |=
4766 TSTORM_ETH_CLIENT_CONFIG_VLAN_REM_ENABLE;
4767 DP(NETIF_MSG_IFUP, "vlan removal enabled\n");
4771 if (bp->flags & TPA_ENABLE_FLAG) {
4772 tstorm_client.max_sges_for_packet =
4773 SGE_PAGE_ALIGN(tstorm_client.mtu) >> SGE_PAGE_SHIFT;
4774 tstorm_client.max_sges_for_packet =
4775 ((tstorm_client.max_sges_for_packet +
4776 PAGES_PER_SGE - 1) & (~(PAGES_PER_SGE - 1))) >>
4777 PAGES_PER_SGE_SHIFT;
4779 tstorm_client.config_flags |=
4780 TSTORM_ETH_CLIENT_CONFIG_ENABLE_SGE_RING;
4783 for_each_queue(bp, i) {
4784 tstorm_client.statistics_counter_id = bp->fp[i].cl_id;
4786 REG_WR(bp, BAR_TSTRORM_INTMEM +
4787 TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id),
4788 ((u32 *)&tstorm_client)[0]);
4789 REG_WR(bp, BAR_TSTRORM_INTMEM +
4790 TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id) + 4,
4791 ((u32 *)&tstorm_client)[1]);
4794 DP(BNX2X_MSG_OFF, "tstorm_client: 0x%08x 0x%08x\n",
4795 ((u32 *)&tstorm_client)[0], ((u32 *)&tstorm_client)[1]);
static void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
{
	struct tstorm_eth_mac_filter_config tstorm_mac_filter = {0};
	int mode = bp->rx_mode;
	int mask = (1 << BP_L_ID(bp));
	int func = BP_FUNC(bp);
	int i;

	DP(NETIF_MSG_IFUP, "rx mode %d  mask 0x%x\n", mode, mask);

	switch (mode) {
	case BNX2X_RX_MODE_NONE: /* no Rx */
		tstorm_mac_filter.ucast_drop_all = mask;
		tstorm_mac_filter.mcast_drop_all = mask;
		tstorm_mac_filter.bcast_drop_all = mask;
		break;

	case BNX2X_RX_MODE_NORMAL:
		tstorm_mac_filter.bcast_accept_all = mask;
		break;

	case BNX2X_RX_MODE_ALLMULTI:
		tstorm_mac_filter.mcast_accept_all = mask;
		tstorm_mac_filter.bcast_accept_all = mask;
		break;

	case BNX2X_RX_MODE_PROMISC:
		tstorm_mac_filter.ucast_accept_all = mask;
		tstorm_mac_filter.mcast_accept_all = mask;
		tstorm_mac_filter.bcast_accept_all = mask;
		break;

	default:
		BNX2X_ERR("BAD rx mode (%d)\n", mode);
		break;
	}

	for (i = 0; i < sizeof(struct tstorm_eth_mac_filter_config)/4; i++) {
		REG_WR(bp, BAR_TSTRORM_INTMEM +
		       TSTORM_MAC_FILTER_CONFIG_OFFSET(func) + i * 4,
		       ((u32 *)&tstorm_mac_filter)[i]);

/*		DP(NETIF_MSG_IFUP, "tstorm_mac_filter[%d]: 0x%08x\n", i,
		   ((u32 *)&tstorm_mac_filter)[i]); */
	}

	if (mode != BNX2X_RX_MODE_NONE)
		bnx2x_set_client_config(bp);
}
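
/* Stand-alone model of the per-function filter masks above
 * (illustrative): each function owns one bit (1 << BP_L_ID) in the
 * shared accept/drop fields, so several functions can compose their
 * RX modes in a single tstorm structure.
 */
static void rx_mode_to_masks_sketch(int promisc, int allmulti, u32 func_bit,
				    u32 *ucast_accept_all,
				    u32 *mcast_accept_all,
				    u32 *bcast_accept_all)
{
	if (promisc)
		*ucast_accept_all |= func_bit;
	if (promisc || allmulti)
		*mcast_accept_all |= func_bit;
	/* broadcast is accepted in every mode except NONE */
	*bcast_accept_all |= func_bit;
}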
4848 static void bnx2x_init_internal_common(struct bnx2x *bp)
4852 if (bp->flags & TPA_ENABLE_FLAG) {
4853 struct tstorm_eth_tpa_exist tpa = {0};
4857 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_TPA_EXIST_OFFSET,
4859 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_TPA_EXIST_OFFSET + 4,
4863 /* Zero this manually as its initialization is
4864 currently missing in the initTool */
4865 for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++)
4866 REG_WR(bp, BAR_USTRORM_INTMEM +
4867 USTORM_AGG_DATA_OFFSET + i * 4, 0);
4870 static void bnx2x_init_internal_port(struct bnx2x *bp)
4872 int port = BP_PORT(bp);
4874 REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
4875 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
4876 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
4877 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
/* Calculates the sum of vn_min_rates.
   It's needed for further normalizing of the min_rates.
   Returns:
     sum of vn_min_rates.
       or
     0 - if all the min_rates are 0.
   In the latter case the fairness algorithm should be deactivated.
   If not all min_rates are zero then those that are zeroes will be set to 1.
 */
static void bnx2x_calc_vn_weight_sum(struct bnx2x *bp)
{
	int all_zero = 1;
	int port = BP_PORT(bp);
	int vn;

	bp->vn_weight_sum = 0;
	for (vn = VN_0; vn < E1HVN_MAX; vn++) {
		int func = 2*vn + port;
		u32 vn_cfg =
			SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
		u32 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
				   FUNC_MF_CFG_MIN_BW_SHIFT) * 100;

		/* Skip hidden vns */
		if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE)
			continue;

		/* If min rate is zero - set it to 1 */
		if (!vn_min_rate)
			vn_min_rate = DEF_MIN_RATE;
		else
			all_zero = 0;

		bp->vn_weight_sum += vn_min_rate;
	}

	/* ... only if all min rates are zeros - disable fairness */
	if (all_zero)
		bp->vn_weight_sum = 0;
}
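
/* Worked stand-alone version of the weight-sum rule above
 * (illustrative): hidden VNs are skipped, zero min-rates are bumped to
 * a default so they still get some bandwidth, and if every configured
 * rate was zero the sum is forced back to 0 so fairness is disabled.
 */
static u32 vn_weight_sum_sketch(const u32 *min_rates, const int *hidden,
				int nvn, u32 def_min_rate)
{
	u32 sum = 0;
	int all_zero = 1;
	int vn;

	for (vn = 0; vn < nvn; vn++) {
		u32 rate = min_rates[vn];

		if (hidden[vn])
			continue;
		if (!rate)
			rate = def_min_rate;
		else
			all_zero = 0;
		sum += rate;
	}
	return all_zero ? 0 : sum;
}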
4921 static void bnx2x_init_internal_func(struct bnx2x *bp)
4923 struct tstorm_eth_function_common_config tstorm_config = {0};
4924 struct stats_indication_flags stats_flags = {0};
4925 int port = BP_PORT(bp);
4926 int func = BP_FUNC(bp);
4932 tstorm_config.config_flags = MULTI_FLAGS(bp);
4933 tstorm_config.rss_result_mask = MULTI_MASK;
4936 tstorm_config.config_flags |=
4937 TSTORM_ETH_FUNCTION_COMMON_CONFIG_E1HOV_IN_CAM;
4939 tstorm_config.leading_client_id = BP_L_ID(bp);
4941 REG_WR(bp, BAR_TSTRORM_INTMEM +
4942 TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(func),
4943 (*(u32 *)&tstorm_config));
4945 bp->rx_mode = BNX2X_RX_MODE_NONE; /* no rx until link is up */
4946 bnx2x_set_storm_rx_mode(bp);
4948 for_each_queue(bp, i) {
4949 u8 cl_id = bp->fp[i].cl_id;
4951 /* reset xstorm per client statistics */
4952 offset = BAR_XSTRORM_INTMEM +
4953 XSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
4955 j < sizeof(struct xstorm_per_client_stats) / 4; j++)
4956 REG_WR(bp, offset + j*4, 0);
4958 /* reset tstorm per client statistics */
4959 offset = BAR_TSTRORM_INTMEM +
4960 TSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
4962 j < sizeof(struct tstorm_per_client_stats) / 4; j++)
4963 REG_WR(bp, offset + j*4, 0);
4965 /* reset ustorm per client statistics */
4966 offset = BAR_USTRORM_INTMEM +
4967 USTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
4969 j < sizeof(struct ustorm_per_client_stats) / 4; j++)
4970 REG_WR(bp, offset + j*4, 0);
4973 /* Init statistics related context */
4974 stats_flags.collect_eth = 1;
4976 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func),
4977 ((u32 *)&stats_flags)[0]);
4978 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func) + 4,
4979 ((u32 *)&stats_flags)[1]);
4981 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func),
4982 ((u32 *)&stats_flags)[0]);
4983 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func) + 4,
4984 ((u32 *)&stats_flags)[1]);
4986 REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(func),
4987 ((u32 *)&stats_flags)[0]);
4988 REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(func) + 4,
4989 ((u32 *)&stats_flags)[1]);
4991 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func),
4992 ((u32 *)&stats_flags)[0]);
4993 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func) + 4,
4994 ((u32 *)&stats_flags)[1]);
4996 REG_WR(bp, BAR_XSTRORM_INTMEM +
4997 XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
4998 U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
4999 REG_WR(bp, BAR_XSTRORM_INTMEM +
5000 XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
5001 U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
5003 REG_WR(bp, BAR_TSTRORM_INTMEM +
5004 TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
5005 U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
5006 REG_WR(bp, BAR_TSTRORM_INTMEM +
5007 TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
5008 U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
5010 REG_WR(bp, BAR_USTRORM_INTMEM +
5011 USTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
5012 U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
5013 REG_WR(bp, BAR_USTRORM_INTMEM +
5014 USTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
5015 U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
5017 if (CHIP_IS_E1H(bp)) {
5018 REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNCTION_MODE_OFFSET,
5020 REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNCTION_MODE_OFFSET,
5022 REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNCTION_MODE_OFFSET,
5024 REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNCTION_MODE_OFFSET,
5027 REG_WR16(bp, BAR_XSTRORM_INTMEM + XSTORM_E1HOV_OFFSET(func),
5031 /* Init CQ ring mapping and aggregation size, the FW limit is 8 frags */
5033 min((u32)(min((u32)8, (u32)MAX_SKB_FRAGS) *
5034 SGE_PAGE_SIZE * PAGES_PER_SGE),
5036 for_each_rx_queue(bp, i) {
5037 struct bnx2x_fastpath *fp = &bp->fp[i];
5039 REG_WR(bp, BAR_USTRORM_INTMEM +
5040 USTORM_CQE_PAGE_BASE_OFFSET(port, fp->cl_id),
5041 U64_LO(fp->rx_comp_mapping));
5042 REG_WR(bp, BAR_USTRORM_INTMEM +
5043 USTORM_CQE_PAGE_BASE_OFFSET(port, fp->cl_id) + 4,
5044 U64_HI(fp->rx_comp_mapping));
5046 REG_WR16(bp, BAR_USTRORM_INTMEM +
5047 USTORM_MAX_AGG_SIZE_OFFSET(port, fp->cl_id),
5051 /* dropless flow control */
5052 if (CHIP_IS_E1H(bp)) {
5053 struct ustorm_eth_rx_pause_data_e1h rx_pause = {0};
5055 rx_pause.bd_thr_low = 250;
5056 rx_pause.cqe_thr_low = 250;
5058 rx_pause.sge_thr_low = 0;
5059 rx_pause.bd_thr_high = 350;
5060 rx_pause.cqe_thr_high = 350;
5061 rx_pause.sge_thr_high = 0;
5063 for_each_rx_queue(bp, i) {
5064 struct bnx2x_fastpath *fp = &bp->fp[i];
5066 if (!fp->disable_tpa) {
5067 rx_pause.sge_thr_low = 150;
5068 rx_pause.sge_thr_high = 250;
5072 offset = BAR_USTRORM_INTMEM +
5073 USTORM_ETH_RING_PAUSE_DATA_OFFSET(port,
5076 j < sizeof(struct ustorm_eth_rx_pause_data_e1h)/4;
5078 REG_WR(bp, offset + j*4,
5079 ((u32 *)&rx_pause)[j]);
5083 memset(&(bp->cmng), 0, sizeof(struct cmng_struct_per_port));
5085 /* Init rate shaping and fairness contexts */
5089 /* During init there is no active link
5090 Until link is up, set link rate to 10Gbps */
5091 bp->link_vars.line_speed = SPEED_10000;
5092 bnx2x_init_port_minmax(bp);
5094 bnx2x_calc_vn_weight_sum(bp);
5096 for (vn = VN_0; vn < E1HVN_MAX; vn++)
5097 bnx2x_init_vn_minmax(bp, 2*vn + port);
5099 /* Enable rate shaping and fairness */
5100 bp->cmng.flags.cmng_enables =
5101 CMNG_FLAGS_PER_PORT_RATE_SHAPING_VN;
5102 if (bp->vn_weight_sum)
5103 bp->cmng.flags.cmng_enables |=
5104 CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
5106 DP(NETIF_MSG_IFUP, "All MIN values are zeroes"
5107 " fairness will be disabled\n");
		/* rate shaping and fairness are disabled */
		DP(NETIF_MSG_IFUP,
		   "single function mode  minmax will be disabled\n");
5115 /* Store it to internal memory */
5117 for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++)
5118 REG_WR(bp, BAR_XSTRORM_INTMEM +
5119 XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i * 4,
			       ((u32 *)(&bp->cmng))[i]);
}
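
/* Sketch of the struct-to-internal-memory copy above (illustrative):
 * the per-port cmng context is written to the storm RAM as consecutive
 * 32-bit words.  reg_wr32 is a stand-in for REG_WR().
 */
static void copy_to_intmem_sketch(void (*reg_wr32)(void *bp, u32 addr,
						   u32 val),
				  void *bp, u32 base, const void *src,
				  size_t size_bytes)
{
	const u32 *words = src;
	size_t i;

	for (i = 0; i < size_bytes / 4; i++)
		reg_wr32(bp, base + i * 4, words[i]);
}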
5123 static void bnx2x_init_internal(struct bnx2x *bp, u32 load_code)
5125 switch (load_code) {
5126 case FW_MSG_CODE_DRV_LOAD_COMMON:
5127 bnx2x_init_internal_common(bp);
5130 case FW_MSG_CODE_DRV_LOAD_PORT:
5131 bnx2x_init_internal_port(bp);
5134 case FW_MSG_CODE_DRV_LOAD_FUNCTION:
5135 bnx2x_init_internal_func(bp);
5139 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
5144 static void bnx2x_nic_init(struct bnx2x *bp, u32 load_code)
5148 for_each_queue(bp, i) {
5149 struct bnx2x_fastpath *fp = &bp->fp[i];
5152 fp->state = BNX2X_FP_STATE_CLOSED;
5154 fp->cl_id = BP_L_ID(bp) + i;
5155 fp->sb_id = fp->cl_id;
5157 "queue[%d]: bnx2x_init_sb(%p,%p) cl_id %d sb %d\n",
5158 i, bp, fp->status_blk, fp->cl_id, fp->sb_id);
5159 bnx2x_init_sb(bp, fp->status_blk, fp->status_blk_mapping,
5161 bnx2x_update_fpsb_idx(fp);
5164 /* ensure status block indices were read */
5168 bnx2x_init_def_sb(bp, bp->def_status_blk, bp->def_status_blk_mapping,
5170 bnx2x_update_dsb_idx(bp);
5171 bnx2x_update_coalesce(bp);
5172 bnx2x_init_rx_rings(bp);
5173 bnx2x_init_tx_ring(bp);
5174 bnx2x_init_sp_ring(bp);
5175 bnx2x_init_context(bp);
5176 bnx2x_init_internal(bp, load_code);
5177 bnx2x_init_ind_table(bp);
5178 bnx2x_stats_init(bp);
5180 /* At this point, we are ready for interrupts */
5181 atomic_set(&bp->intr_sem, 0);
5183 /* flush all before enabling interrupts */
5187 bnx2x_int_enable(bp);
5189 /* Check for SPIO5 */
5190 bnx2x_attn_int_deasserted0(bp,
5191 REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + BP_PORT(bp)*4) &
5192 AEU_INPUTS_ATTN_BITS_SPIO5);
5195 /* end of nic init */
5198 * gzip service functions
static int bnx2x_gunzip_init(struct bnx2x *bp)
{
	bp->gunzip_buf = pci_alloc_consistent(bp->pdev, FW_BUF_SIZE,
					      &bp->gunzip_mapping);
	if (bp->gunzip_buf == NULL)
		goto gunzip_nomem1;

	bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL);
	if (bp->strm == NULL)
		goto gunzip_nomem2;

	bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(),
				      GFP_KERNEL);
	if (bp->strm->workspace == NULL)
		goto gunzip_nomem3;

	return 0;

gunzip_nomem3:
	kfree(bp->strm);
	bp->strm = NULL;
gunzip_nomem2:
	pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
			    bp->gunzip_mapping);
	bp->gunzip_buf = NULL;
gunzip_nomem1:
	printk(KERN_ERR PFX "%s: Cannot allocate firmware buffer for"
	       " un-compression\n", bp->dev->name);
	return -ENOMEM;
}
static void bnx2x_gunzip_end(struct bnx2x *bp)
{
	kfree(bp->strm->workspace);

	kfree(bp->strm);
	bp->strm = NULL;

	if (bp->gunzip_buf) {
		pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
				    bp->gunzip_mapping);
		bp->gunzip_buf = NULL;
	}
}
static int bnx2x_gunzip(struct bnx2x *bp, const u8 *zbuf, int len)
{
	int n, rc;

	/* check gzip header */
	if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED)) {
		BNX2X_ERR("Bad gzip header\n");
		return -EINVAL;
	}

	n = 10;

#define FNAME				0x8

	if (zbuf[3] & FNAME)
		while ((zbuf[n++] != 0) && (n < len));

	bp->strm->next_in = (typeof(bp->strm->next_in))zbuf + n;
	bp->strm->avail_in = len - n;
	bp->strm->next_out = bp->gunzip_buf;
	bp->strm->avail_out = FW_BUF_SIZE;

	rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
	if (rc != Z_OK)
		return rc;

	rc = zlib_inflate(bp->strm, Z_FINISH);
	if ((rc != Z_OK) && (rc != Z_STREAM_END))
		printk(KERN_ERR PFX "%s: Firmware decompression error: %s\n",
		       bp->dev->name, bp->strm->msg);

	bp->gunzip_outlen = (FW_BUF_SIZE - bp->strm->avail_out);
	if (bp->gunzip_outlen & 0x3)
		printk(KERN_ERR PFX "%s: Firmware decompression error:"
		       " gunzip_outlen (%d) not aligned\n",
		       bp->dev->name, bp->gunzip_outlen);
	bp->gunzip_outlen >>= 2;

	zlib_inflateEnd(bp->strm);

	if (rc == Z_STREAM_END)
		return 0;

	return rc;
}
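
/* Userspace sketch of the same decompression flow with ordinary zlib
 * (the kernel zlib_* calls above mirror it): skip the gzip header by
 * hand, then inflate the raw deflate stream with -MAX_WBITS.
 * Illustrative only; assumes <zlib.h> is available.
 */
static int gunzip_raw_sketch(unsigned char *zbuf, unsigned int hdr_len,
			     unsigned int len, unsigned char *out,
			     unsigned int out_len)
{
	z_stream strm = {0};
	int rc;

	strm.next_in = zbuf + hdr_len;	/* past the gzip header */
	strm.avail_in = len - hdr_len;
	strm.next_out = out;
	strm.avail_out = out_len;

	if (inflateInit2(&strm, -MAX_WBITS) != Z_OK)	/* raw deflate */
		return -1;
	rc = inflate(&strm, Z_FINISH);
	inflateEnd(&strm);

	return (rc == Z_STREAM_END) ? 0 : -1;
}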
5294 /* nic load/unload */
5297 * General service functions
5300 /* send a NIG loopback debug packet */
5301 static void bnx2x_lb_pckt(struct bnx2x *bp)
5305 /* Ethernet source and destination addresses */
5306 wb_write[0] = 0x55555555;
5307 wb_write[1] = 0x55555555;
5308 wb_write[2] = 0x20; /* SOP */
5309 REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
5311 /* NON-IP protocol */
5312 wb_write[0] = 0x09000000;
5313 wb_write[1] = 0x55555555;
5314 wb_write[2] = 0x10; /* EOP, eop_bvalid = 0 */
5315 REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
/* Some of the internal memories
 * are not directly readable from the driver.
 * To test them we send debug packets.
 */
5322 static int bnx2x_int_mem_test(struct bnx2x *bp)
5328 if (CHIP_REV_IS_FPGA(bp))
5330 else if (CHIP_REV_IS_EMUL(bp))
5335 DP(NETIF_MSG_HW, "start part1\n");
5337 /* Disable inputs of parser neighbor blocks */
5338 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
5339 REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
5340 REG_WR(bp, CFC_REG_DEBUG0, 0x1);
5341 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
5343 /* Write 0 to parser credits for CFC search request */
5344 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
5346 /* send Ethernet packet */
5349 /* TODO do i reset NIG statistic? */
5350 /* Wait until NIG register shows 1 packet of size 0x10 */
5351 count = 1000 * factor;
5354 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5355 val = *bnx2x_sp(bp, wb_data[0]);
5363 BNX2X_ERR("NIG timeout val = 0x%x\n", val);
5367 /* Wait until PRS register shows 1 packet */
5368 count = 1000 * factor;
5370 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5378 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
5382 /* Reset and init BRB, PRS */
5383 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
5385 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
5387 bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
5388 bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
5390 DP(NETIF_MSG_HW, "part2\n");
5392 /* Disable inputs of parser neighbor blocks */
5393 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
5394 REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
5395 REG_WR(bp, CFC_REG_DEBUG0, 0x1);
5396 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
5398 /* Write 0 to parser credits for CFC search request */
5399 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
5401 /* send 10 Ethernet packets */
5402 for (i = 0; i < 10; i++)
5405 /* Wait until NIG register shows 10 + 1
5406 packets of size 11*0x10 = 0xb0 */
5407 count = 1000 * factor;
5410 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5411 val = *bnx2x_sp(bp, wb_data[0]);
5419 BNX2X_ERR("NIG timeout val = 0x%x\n", val);
5423 /* Wait until PRS register shows 2 packets */
5424 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5426 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
5428 /* Write 1 to parser credits for CFC search request */
5429 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x1);
5431 /* Wait until PRS register shows 3 packets */
5432 msleep(10 * factor);
5433 /* Wait until NIG register shows 1 packet of size 0x10 */
5434 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5436 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
5438 /* clear NIG EOP FIFO */
5439 for (i = 0; i < 11; i++)
5440 REG_RD(bp, NIG_REG_INGRESS_EOP_LB_FIFO);
5441 val = REG_RD(bp, NIG_REG_INGRESS_EOP_LB_EMPTY);
5443 BNX2X_ERR("clear of NIG failed\n");
5447 /* Reset and init BRB, PRS, NIG */
5448 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
5450 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
5452 bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
5453 bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
5456 REG_WR(bp, PRS_REG_NIC_MODE, 1);
5459 /* Enable inputs of parser neighbor blocks */
5460 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x7fffffff);
5461 REG_WR(bp, TCM_REG_PRS_IFEN, 0x1);
5462 REG_WR(bp, CFC_REG_DEBUG0, 0x0);
5463 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x1);
5465 DP(NETIF_MSG_HW, "done\n");
5470 static void enable_blocks_attention(struct bnx2x *bp)
5472 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
5473 REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0);
5474 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
5475 REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
5476 REG_WR(bp, QM_REG_QM_INT_MASK, 0);
5477 REG_WR(bp, TM_REG_TM_INT_MASK, 0);
5478 REG_WR(bp, XSDM_REG_XSDM_INT_MASK_0, 0);
5479 REG_WR(bp, XSDM_REG_XSDM_INT_MASK_1, 0);
5480 REG_WR(bp, XCM_REG_XCM_INT_MASK, 0);
5481 /* REG_WR(bp, XSEM_REG_XSEM_INT_MASK_0, 0); */
5482 /* REG_WR(bp, XSEM_REG_XSEM_INT_MASK_1, 0); */
5483 REG_WR(bp, USDM_REG_USDM_INT_MASK_0, 0);
5484 REG_WR(bp, USDM_REG_USDM_INT_MASK_1, 0);
5485 REG_WR(bp, UCM_REG_UCM_INT_MASK, 0);
5486 /* REG_WR(bp, USEM_REG_USEM_INT_MASK_0, 0); */
5487 /* REG_WR(bp, USEM_REG_USEM_INT_MASK_1, 0); */
5488 REG_WR(bp, GRCBASE_UPB + PB_REG_PB_INT_MASK, 0);
5489 REG_WR(bp, CSDM_REG_CSDM_INT_MASK_0, 0);
5490 REG_WR(bp, CSDM_REG_CSDM_INT_MASK_1, 0);
5491 REG_WR(bp, CCM_REG_CCM_INT_MASK, 0);
5492 /* REG_WR(bp, CSEM_REG_CSEM_INT_MASK_0, 0); */
5493 /* REG_WR(bp, CSEM_REG_CSEM_INT_MASK_1, 0); */
5494 if (CHIP_REV_IS_FPGA(bp))
5495 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x580000);
5497 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x480000);
5498 REG_WR(bp, TSDM_REG_TSDM_INT_MASK_0, 0);
5499 REG_WR(bp, TSDM_REG_TSDM_INT_MASK_1, 0);
5500 REG_WR(bp, TCM_REG_TCM_INT_MASK, 0);
5501 /* REG_WR(bp, TSEM_REG_TSEM_INT_MASK_0, 0); */
5502 /* REG_WR(bp, TSEM_REG_TSEM_INT_MASK_1, 0); */
5503 REG_WR(bp, CDU_REG_CDU_INT_MASK, 0);
5504 REG_WR(bp, DMAE_REG_DMAE_INT_MASK, 0);
5505 /* REG_WR(bp, MISC_REG_MISC_INT_MASK, 0); */
5506 REG_WR(bp, PBF_REG_PBF_INT_MASK, 0X18); /* bit 3,4 masked */
5510 static void bnx2x_reset_common(struct bnx2x *bp)
5513 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
5515 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, 0x1403);
5519 static void bnx2x_setup_fan_failure_detection(struct bnx2x *bp)
5525 val = SHMEM_RD(bp, dev_info.shared_hw_config.config2) &
5526 SHARED_HW_CFG_FAN_FAILURE_MASK;
5528 if (val == SHARED_HW_CFG_FAN_FAILURE_ENABLED)
5532 * The fan failure mechanism is usually related to the PHY type since
5533 * the power consumption of the board is affected by the PHY. Currently,
5534 * fan is required for most designs with SFX7101, BCM8727 and BCM8481.
5536 else if (val == SHARED_HW_CFG_FAN_FAILURE_PHY_TYPE)
5537 for (port = PORT_0; port < PORT_MAX; port++) {
5539 SHMEM_RD(bp, dev_info.port_hw_config[port].
5540 external_phy_config) &
5541 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
5544 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101) ||
5546 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481));
5549 DP(NETIF_MSG_HW, "fan detection setting: %d\n", is_required);
5551 if (is_required == 0)
5554 /* Fan failure is indicated by SPIO 5 */
5555 bnx2x_set_spio(bp, MISC_REGISTERS_SPIO_5,
5556 MISC_REGISTERS_SPIO_INPUT_HI_Z);
5558 /* set to active low mode */
5559 val = REG_RD(bp, MISC_REG_SPIO_INT);
5560 val |= ((1 << MISC_REGISTERS_SPIO_5) <<
5561 MISC_REGISTERS_SPIO_INT_OLD_SET_POS);
5562 REG_WR(bp, MISC_REG_SPIO_INT, val);
5564 /* enable interrupt to signal the IGU */
5565 val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN);
5566 val |= (1 << MISC_REGISTERS_SPIO_5);
5567 REG_WR(bp, MISC_REG_SPIO_EVENT_EN, val);
5570 static int bnx2x_init_common(struct bnx2x *bp)
5574 DP(BNX2X_MSG_MCP, "starting common init func %d\n", BP_FUNC(bp));
5576 bnx2x_reset_common(bp);
5577 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0xffffffff);
5578 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, 0xfffc);
5580 bnx2x_init_block(bp, MISC_BLOCK, COMMON_STAGE);
5581 if (CHIP_IS_E1H(bp))
5582 REG_WR(bp, MISC_REG_E1HMF_MODE, IS_E1HMF(bp));
5584 REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x100);
5586 REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x0);
5588 bnx2x_init_block(bp, PXP_BLOCK, COMMON_STAGE);
5589 if (CHIP_IS_E1(bp)) {
5590 /* enable HW interrupt from PXP on USDM overflow
5591 bit 16 on INT_MASK_0 */
5592 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
5595 bnx2x_init_block(bp, PXP2_BLOCK, COMMON_STAGE);
5599 REG_WR(bp, PXP2_REG_RQ_QM_ENDIAN_M, 1);
5600 REG_WR(bp, PXP2_REG_RQ_TM_ENDIAN_M, 1);
5601 REG_WR(bp, PXP2_REG_RQ_SRC_ENDIAN_M, 1);
5602 REG_WR(bp, PXP2_REG_RQ_CDU_ENDIAN_M, 1);
5603 REG_WR(bp, PXP2_REG_RQ_DBG_ENDIAN_M, 1);
5604 /* make sure this value is 0 */
5605 REG_WR(bp, PXP2_REG_RQ_HC_ENDIAN_M, 0);
5607 /* REG_WR(bp, PXP2_REG_RD_PBF_SWAP_MODE, 1); */
5608 REG_WR(bp, PXP2_REG_RD_QM_SWAP_MODE, 1);
5609 REG_WR(bp, PXP2_REG_RD_TM_SWAP_MODE, 1);
5610 REG_WR(bp, PXP2_REG_RD_SRC_SWAP_MODE, 1);
5611 REG_WR(bp, PXP2_REG_RD_CDURD_SWAP_MODE, 1);
5614 REG_WR(bp, PXP2_REG_RQ_CDU_P_SIZE, 2);
5616 REG_WR(bp, PXP2_REG_RQ_TM_P_SIZE, 5);
5617 REG_WR(bp, PXP2_REG_RQ_QM_P_SIZE, 5);
5618 REG_WR(bp, PXP2_REG_RQ_SRC_P_SIZE, 5);
5621 if (CHIP_REV_IS_FPGA(bp) && CHIP_IS_E1H(bp))
5622 REG_WR(bp, PXP2_REG_PGL_TAGS_LIMIT, 0x1);
	/* let the HW do its magic ... */
	msleep(100);

	/* finish PXP init */
5627 val = REG_RD(bp, PXP2_REG_RQ_CFG_DONE);
5629 BNX2X_ERR("PXP2 CFG failed\n");
5632 val = REG_RD(bp, PXP2_REG_RD_INIT_DONE);
5634 BNX2X_ERR("PXP2 RD_INIT failed\n");
5638 REG_WR(bp, PXP2_REG_RQ_DISABLE_INPUTS, 0);
5639 REG_WR(bp, PXP2_REG_RD_DISABLE_INPUTS, 0);
5641 bnx2x_init_block(bp, DMAE_BLOCK, COMMON_STAGE);
5643 /* clean the DMAE memory */
5645 bnx2x_init_fill(bp, TSEM_REG_PRAM, 0, 8);
5647 bnx2x_init_block(bp, TCM_BLOCK, COMMON_STAGE);
5648 bnx2x_init_block(bp, UCM_BLOCK, COMMON_STAGE);
5649 bnx2x_init_block(bp, CCM_BLOCK, COMMON_STAGE);
5650 bnx2x_init_block(bp, XCM_BLOCK, COMMON_STAGE);
5652 bnx2x_read_dmae(bp, XSEM_REG_PASSIVE_BUFFER, 3);
5653 bnx2x_read_dmae(bp, CSEM_REG_PASSIVE_BUFFER, 3);
5654 bnx2x_read_dmae(bp, TSEM_REG_PASSIVE_BUFFER, 3);
5655 bnx2x_read_dmae(bp, USEM_REG_PASSIVE_BUFFER, 3);
5657 bnx2x_init_block(bp, QM_BLOCK, COMMON_STAGE);
5658 /* soft reset pulse */
5659 REG_WR(bp, QM_REG_SOFT_RESET, 1);
5660 REG_WR(bp, QM_REG_SOFT_RESET, 0);
5663 bnx2x_init_block(bp, TIMERS_BLOCK, COMMON_STAGE);
5666 bnx2x_init_block(bp, DQ_BLOCK, COMMON_STAGE);
5667 REG_WR(bp, DORQ_REG_DPM_CID_OFST, BCM_PAGE_SHIFT);
5668 if (!CHIP_REV_IS_SLOW(bp)) {
5669 /* enable hw interrupt from doorbell Q */
5670 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
5673 bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
5674 bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
5675 REG_WR(bp, PRS_REG_A_PRSU_20, 0xf);
5677 REG_WR(bp, PRS_REG_NIC_MODE, 1);
5678 if (CHIP_IS_E1H(bp))
5679 REG_WR(bp, PRS_REG_E1HOV_MODE, IS_E1HMF(bp));
5681 bnx2x_init_block(bp, TSDM_BLOCK, COMMON_STAGE);
5682 bnx2x_init_block(bp, CSDM_BLOCK, COMMON_STAGE);
5683 bnx2x_init_block(bp, USDM_BLOCK, COMMON_STAGE);
5684 bnx2x_init_block(bp, XSDM_BLOCK, COMMON_STAGE);
5686 bnx2x_init_fill(bp, TSTORM_INTMEM_ADDR, 0, STORM_INTMEM_SIZE(bp));
5687 bnx2x_init_fill(bp, USTORM_INTMEM_ADDR, 0, STORM_INTMEM_SIZE(bp));
5688 bnx2x_init_fill(bp, CSTORM_INTMEM_ADDR, 0, STORM_INTMEM_SIZE(bp));
5689 bnx2x_init_fill(bp, XSTORM_INTMEM_ADDR, 0, STORM_INTMEM_SIZE(bp));
5691 bnx2x_init_block(bp, TSEM_BLOCK, COMMON_STAGE);
5692 bnx2x_init_block(bp, USEM_BLOCK, COMMON_STAGE);
5693 bnx2x_init_block(bp, CSEM_BLOCK, COMMON_STAGE);
5694 bnx2x_init_block(bp, XSEM_BLOCK, COMMON_STAGE);
5697 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
5699 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
5702 bnx2x_init_block(bp, UPB_BLOCK, COMMON_STAGE);
5703 bnx2x_init_block(bp, XPB_BLOCK, COMMON_STAGE);
5704 bnx2x_init_block(bp, PBF_BLOCK, COMMON_STAGE);
5706 REG_WR(bp, SRC_REG_SOFT_RST, 1);
5707 for (i = SRC_REG_KEYRSS0_0; i <= SRC_REG_KEYRSS1_9; i += 4) {
5708 REG_WR(bp, i, 0xc0cac01a);
5709 /* TODO: replace with something meaningful */
5711 bnx2x_init_block(bp, SRCH_BLOCK, COMMON_STAGE);
5712 REG_WR(bp, SRC_REG_SOFT_RST, 0);
5714 if (sizeof(union cdu_context) != 1024)
5715 /* we currently assume that a context is 1024 bytes */
5716 printk(KERN_ALERT PFX "please adjust the size of"
5717 " cdu_context(%ld)\n", (long)sizeof(union cdu_context));
5719 bnx2x_init_block(bp, CDU_BLOCK, COMMON_STAGE);
5720 val = (4 << 24) + (0 << 12) + 1024;
5721 REG_WR(bp, CDU_REG_CDU_GLOBAL_PARAMS, val);
5722 if (CHIP_IS_E1(bp)) {
		/* !!! fix pxp client credit until excel update */
5724 REG_WR(bp, CDU_REG_CDU_DEBUG, 0x264);
5725 REG_WR(bp, CDU_REG_CDU_DEBUG, 0);
5728 bnx2x_init_block(bp, CFC_BLOCK, COMMON_STAGE);
5729 REG_WR(bp, CFC_REG_INIT_REG, 0x7FF);
5730 /* enable context validation interrupt from CFC */
5731 REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
5733 /* set the thresholds to prevent CFC/CDU race */
5734 REG_WR(bp, CFC_REG_DEBUG0, 0x20020000);
5736 bnx2x_init_block(bp, HC_BLOCK, COMMON_STAGE);
5737 bnx2x_init_block(bp, MISC_AEU_BLOCK, COMMON_STAGE);
5739 /* PXPCS COMMON comes here */
5740 bnx2x_init_block(bp, PXPCS_BLOCK, COMMON_STAGE);
5741 /* Reset PCIE errors for debug */
5742 REG_WR(bp, 0x2814, 0xffffffff);
5743 REG_WR(bp, 0x3820, 0xffffffff);
5745 /* EMAC0 COMMON comes here */
5746 bnx2x_init_block(bp, EMAC0_BLOCK, COMMON_STAGE);
5747 /* EMAC1 COMMON comes here */
5748 bnx2x_init_block(bp, EMAC1_BLOCK, COMMON_STAGE);
5749 /* DBU COMMON comes here */
5750 bnx2x_init_block(bp, DBU_BLOCK, COMMON_STAGE);
5751 /* DBG COMMON comes here */
5752 bnx2x_init_block(bp, DBG_BLOCK, COMMON_STAGE);
5754 bnx2x_init_block(bp, NIG_BLOCK, COMMON_STAGE);
5755 if (CHIP_IS_E1H(bp)) {
5756 REG_WR(bp, NIG_REG_LLH_MF_MODE, IS_E1HMF(bp));
5757 REG_WR(bp, NIG_REG_LLH_E1HOV_MODE, IS_E1HMF(bp));
5760 if (CHIP_REV_IS_SLOW(bp))
5763 /* finish CFC init */
5764 val = reg_poll(bp, CFC_REG_LL_INIT_DONE, 1, 100, 10);
5766 BNX2X_ERR("CFC LL_INIT failed\n");
5769 val = reg_poll(bp, CFC_REG_AC_INIT_DONE, 1, 100, 10);
5771 BNX2X_ERR("CFC AC_INIT failed\n");
5774 val = reg_poll(bp, CFC_REG_CAM_INIT_DONE, 1, 100, 10);
5776 BNX2X_ERR("CFC CAM_INIT failed\n");
5779 REG_WR(bp, CFC_REG_DEBUG0, 0);
5781 /* read NIG statistic
5782 to see if this is our first up since powerup */
5783 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5784 val = *bnx2x_sp(bp, wb_data[0]);
5786 /* do internal memory self test */
5787 if ((CHIP_IS_E1(bp)) && (val == 0) && bnx2x_int_mem_test(bp)) {
5788 BNX2X_ERR("internal mem self test failed\n");
5792 switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
5793 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
5794 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
5795 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
5796 bp->port.need_hw_lock = 1;
5803 bnx2x_setup_fan_failure_detection(bp);
5805 /* clear PXP2 attentions */
5806 REG_RD(bp, PXP2_REG_PXP2_INT_STS_CLR_0);
5808 enable_blocks_attention(bp);
5810 if (!BP_NOMCP(bp)) {
5811 bnx2x_acquire_phy_lock(bp);
5812 bnx2x_common_init_phy(bp, bp->common.shmem_base);
		bnx2x_release_phy_lock(bp);
	} else
		BNX2X_ERR("Bootcode is missing - can not initialize link\n");

	return 0;
}
5820 static int bnx2x_init_port(struct bnx2x *bp)
5822 int port = BP_PORT(bp);
5823 int init_stage = port ? PORT1_STAGE : PORT0_STAGE;
5827 DP(BNX2X_MSG_MCP, "starting port init port %x\n", port);
5829 REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
5831 /* Port PXP comes here */
5832 bnx2x_init_block(bp, PXP_BLOCK, init_stage);
5833 /* Port PXP2 comes here */
5834 bnx2x_init_block(bp, PXP2_BLOCK, init_stage);
5839 wb_write[0] = ONCHIP_ADDR1(bp->timers_mapping);
5840 wb_write[1] = ONCHIP_ADDR2(bp->timers_mapping);
5841 REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
5842 REG_WR(bp, PXP2_REG_PSWRQ_TM0_L2P + func*4, PXP_ONE_ILT(i));
5847 wb_write[0] = ONCHIP_ADDR1(bp->qm_mapping);
5848 wb_write[1] = ONCHIP_ADDR2(bp->qm_mapping);
5849 REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
5850 REG_WR(bp, PXP2_REG_PSWRQ_QM0_L2P + func*4, PXP_ONE_ILT(i));
5855 wb_write[0] = ONCHIP_ADDR1(bp->t1_mapping);
5856 wb_write[1] = ONCHIP_ADDR2(bp->t1_mapping);
5857 REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
5858 REG_WR(bp, PXP2_REG_PSWRQ_SRC0_L2P + func*4, PXP_ONE_ILT(i));
5860 /* Port CMs come here */
5861 bnx2x_init_block(bp, XCM_BLOCK, init_stage);
5863 /* Port QM comes here */
5865 REG_WR(bp, TM_REG_LIN0_SCAN_TIME + func*4, 1024/64*20);
5866 REG_WR(bp, TM_REG_LIN0_MAX_ACTIVE_CID + func*4, 31);
5868 bnx2x_init_block(bp, TIMERS_BLOCK, init_stage);
5870 /* Port DQ comes here */
5871 bnx2x_init_block(bp, DQ_BLOCK, init_stage);
5873 bnx2x_init_block(bp, BRB1_BLOCK, init_stage);
5874 if (CHIP_REV_IS_SLOW(bp) && !CHIP_IS_E1H(bp)) {
5875 /* no pause for emulation and FPGA */
5880 low = ((bp->flags & ONE_PORT_FLAG) ? 160 : 246);
5881 else if (bp->dev->mtu > 4096) {
5882 if (bp->flags & ONE_PORT_FLAG)
5886 /* (24*1024 + val*4)/256 */
5887 low = 96 + (val/64) + ((val % 64) ? 1 : 0);
5890 low = ((bp->flags & ONE_PORT_FLAG) ? 80 : 160);
5891 high = low + 56; /* 14*1024/256 */
5893 REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_0 + port*4, low);
5894 REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_0 + port*4, high);
5897 /* Port PRS comes here */
5898 bnx2x_init_block(bp, PRS_BLOCK, init_stage);
5899 /* Port TSDM comes here */
5900 bnx2x_init_block(bp, TSDM_BLOCK, init_stage);
5901 /* Port CSDM comes here */
5902 bnx2x_init_block(bp, CSDM_BLOCK, init_stage);
5903 /* Port USDM comes here */
5904 bnx2x_init_block(bp, USDM_BLOCK, init_stage);
5905 /* Port XSDM comes here */
5906 bnx2x_init_block(bp, XSDM_BLOCK, init_stage);
5908 bnx2x_init_block(bp, TSEM_BLOCK, init_stage);
5909 bnx2x_init_block(bp, USEM_BLOCK, init_stage);
5910 bnx2x_init_block(bp, CSEM_BLOCK, init_stage);
5911 bnx2x_init_block(bp, XSEM_BLOCK, init_stage);
5913 /* Port UPB comes here */
5914 bnx2x_init_block(bp, UPB_BLOCK, init_stage);
5915 /* Port XPB comes here */
5916 bnx2x_init_block(bp, XPB_BLOCK, init_stage);
5918 bnx2x_init_block(bp, PBF_BLOCK, init_stage);
5920 /* configure PBF to work without PAUSE mtu 9000 */
5921 REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 0);
5923 /* update threshold */
5924 REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, (9040/16));
5925 /* update init credit */
5926 REG_WR(bp, PBF_REG_P0_INIT_CRD + port*4, (9040/16) + 553 - 22);
5929 REG_WR(bp, PBF_REG_INIT_P0 + port*4, 1);
5931 REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0);
5934 /* tell the searcher where the T2 table is */
5935 REG_WR(bp, SRC_REG_COUNTFREE0 + func*4, 16*1024/64);
5937 wb_write[0] = U64_LO(bp->t2_mapping);
5938 wb_write[1] = U64_HI(bp->t2_mapping);
5939 REG_WR_DMAE(bp, SRC_REG_FIRSTFREE0 + func*4, wb_write, 2);
5940 wb_write[0] = U64_LO((u64)bp->t2_mapping + 16*1024 - 64);
5941 wb_write[1] = U64_HI((u64)bp->t2_mapping + 16*1024 - 64);
5942 REG_WR_DMAE(bp, SRC_REG_LASTFREE0 + func*4, wb_write, 2);
5944 REG_WR(bp, SRC_REG_NUMBER_HASH_BITS0 + func*4, 10);
5945 /* Port SRCH comes here */
5947 /* Port CDU comes here */
5948 bnx2x_init_block(bp, CDU_BLOCK, init_stage);
5949 /* Port CFC comes here */
5950 bnx2x_init_block(bp, CFC_BLOCK, init_stage);
5952 if (CHIP_IS_E1(bp)) {
5953 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
5954 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
5956 bnx2x_init_block(bp, HC_BLOCK, init_stage);
5958 bnx2x_init_block(bp, MISC_AEU_BLOCK, init_stage);
5959 /* init aeu_mask_attn_func_0/1:
5960 * - SF mode: bits 3-7 are masked. only bits 0-2 are in use
5961 * - MF mode: bit 3 is masked. bits 0-2 are in use as in SF
5962 * bits 4-7 are used for "per vn group attention" */
5963 REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4,
5964 (IS_E1HMF(bp) ? 0xF7 : 0x7));
5966 /* Port PXPCS comes here */
5967 bnx2x_init_block(bp, PXPCS_BLOCK, init_stage);
5968 /* Port EMAC0 comes here */
5969 bnx2x_init_block(bp, EMAC0_BLOCK, init_stage);
5970 /* Port EMAC1 comes here */
5971 bnx2x_init_block(bp, EMAC1_BLOCK, init_stage);
5972 /* Port DBU comes here */
5973 bnx2x_init_block(bp, DBU_BLOCK, init_stage);
5974 /* Port DBG comes here */
5975 bnx2x_init_block(bp, DBG_BLOCK, init_stage);
5977 bnx2x_init_block(bp, NIG_BLOCK, init_stage);
5979 REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1);
5981 if (CHIP_IS_E1H(bp)) {
5982 /* 0x2 disable e1hov, 0x1 enable */
5983 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK_MF + port*4,
5984 (IS_E1HMF(bp) ? 0x1 : 0x2));
5986 /* support pause requests from USDM, TSDM and BRB */
5987 REG_WR(bp, NIG_REG_LLFC_EGRESS_SRC_ENABLE_0 + port*4, 0x7);
5990 REG_WR(bp, NIG_REG_LLFC_ENABLE_0 + port*4, 0);
5991 REG_WR(bp, NIG_REG_LLFC_OUT_EN_0 + port*4, 0);
5992 REG_WR(bp, NIG_REG_PAUSE_ENABLE_0 + port*4, 1);
5996 /* Port MCP comes here */
5997 bnx2x_init_block(bp, MCP_BLOCK, init_stage);
5998 /* Port DMAE comes here */
5999 bnx2x_init_block(bp, DMAE_BLOCK, init_stage);
6001 switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
6002 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
6004 u32 swap_val, swap_override, aeu_gpio_mask, offset;
6006 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_3,
6007 MISC_REGISTERS_GPIO_INPUT_HI_Z, port);
6009 /* The GPIO should be swapped if the swap register is
6011 swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
6012 swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
6014 /* Select function upon port-swap configuration */
6016 offset = MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0;
6017 aeu_gpio_mask = (swap_val && swap_override) ?
6018 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1 :
6019 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0;
6021 offset = MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0;
6022 aeu_gpio_mask = (swap_val && swap_override) ?
6023 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 :
6024 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1;
6026 val = REG_RD(bp, offset);
6027 /* add GPIO3 to group */
6028 val |= aeu_gpio_mask;
6029 REG_WR(bp, offset, val);
6033 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
6034 /* add SPIO 5 to group 0 */
6035 val = REG_RD(bp, MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
6036 val |= AEU_INPUTS_ATTN_BITS_SPIO5;
6037 REG_WR(bp, MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0, val);
6044 bnx2x__link_reset(bp);
6049 #define ILT_PER_FUNC (768/2)
6050 #define FUNC_ILT_BASE(func) (func * ILT_PER_FUNC)
/* The phys address is shifted right 12 bits and has a
 * 1=valid bit added at the 53rd bit;
 * then, since this is a wide register(TM),
 * we split it into two 32-bit writes.
 */
6056 #define ONCHIP_ADDR1(x) ((u32)(((u64)x >> 12) & 0xFFFFFFFF))
6057 #define ONCHIP_ADDR2(x) ((u32)((1 << 20) | ((u64)x >> 44)))
6058 #define PXP_ONE_ILT(x) (((x) << 10) | x)
6059 #define PXP_ILT_RANGE(f, l) (((l) << 10) | f)
6061 #define CNIC_ILT_LINES 0
6063 static void bnx2x_ilt_wr(struct bnx2x *bp, u32 index, dma_addr_t addr)
6067 if (CHIP_IS_E1H(bp))
6068 reg = PXP2_REG_RQ_ONCHIP_AT_B0 + index*8;
6070 reg = PXP2_REG_RQ_ONCHIP_AT + index*8;
	bnx2x_wb_wr(bp, reg, ONCHIP_ADDR1(addr), ONCHIP_ADDR2(addr));
}
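
/* Sketch of the ONCHIP_ADDR split above (illustrative): the physical
 * address is shifted right 12 bits, the valid bit lands at bit 20 of
 * the high word (the "53rd bit" of the shifted value), and the result
 * is written as two 32-bit halves.
 */
static void onchip_addr_sketch(u64 phys, u32 *word1, u32 *word2)
{
	*word1 = (u32)((phys >> 12) & 0xffffffff);	/* ONCHIP_ADDR1() */
	*word2 = (u32)((1 << 20) | (u32)(phys >> 44));	/* ONCHIP_ADDR2() */
}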
6075 static int bnx2x_init_func(struct bnx2x *bp)
6077 int port = BP_PORT(bp);
6078 int func = BP_FUNC(bp);
6082 DP(BNX2X_MSG_MCP, "starting func init func %x\n", func);
6084 /* set MSI reconfigure capability */
6085 addr = (port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0);
6086 val = REG_RD(bp, addr);
6087 val |= HC_CONFIG_0_REG_MSI_ATTN_EN_0;
6088 REG_WR(bp, addr, val);
6090 i = FUNC_ILT_BASE(func);
6092 bnx2x_ilt_wr(bp, i, bnx2x_sp_mapping(bp, context));
6093 if (CHIP_IS_E1H(bp)) {
6094 REG_WR(bp, PXP2_REG_RQ_CDU_FIRST_ILT, i);
6095 REG_WR(bp, PXP2_REG_RQ_CDU_LAST_ILT, i + CNIC_ILT_LINES);
6097 REG_WR(bp, PXP2_REG_PSWRQ_CDU0_L2P + func*4,
6098 PXP_ILT_RANGE(i, i + CNIC_ILT_LINES));
6101 if (CHIP_IS_E1H(bp)) {
6102 for (i = 0; i < 9; i++)
6103 bnx2x_init_block(bp,
6104 cm_blocks[i], FUNC0_STAGE + func);
6106 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
6107 REG_WR(bp, NIG_REG_LLH0_FUNC_VLAN_ID + port*8, bp->e1hov);
6110 /* HC init per function */
6111 if (CHIP_IS_E1H(bp)) {
6112 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
6114 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
6115 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
6117 bnx2x_init_block(bp, HC_BLOCK, FUNC0_STAGE + func);
6119 /* Reset PCIE errors for debug */
6120 REG_WR(bp, 0x2114, 0xffffffff);
6121 REG_WR(bp, 0x2120, 0xffffffff);
6126 static int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
6130 DP(BNX2X_MSG_MCP, "function %d load_code %x\n",
6131 BP_FUNC(bp), load_code);
6134 mutex_init(&bp->dmae_mutex);
6135 bnx2x_gunzip_init(bp);
6137 switch (load_code) {
6138 case FW_MSG_CODE_DRV_LOAD_COMMON:
6139 rc = bnx2x_init_common(bp);
6144 case FW_MSG_CODE_DRV_LOAD_PORT:
6146 rc = bnx2x_init_port(bp);
6151 case FW_MSG_CODE_DRV_LOAD_FUNCTION:
6153 rc = bnx2x_init_func(bp);
6159 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
6163 if (!BP_NOMCP(bp)) {
6164 int func = BP_FUNC(bp);
6166 bp->fw_drv_pulse_wr_seq =
6167 (SHMEM_RD(bp, func_mb[func].drv_pulse_mb) &
6168 DRV_PULSE_SEQ_MASK);
6169 bp->func_stx = SHMEM_RD(bp, func_mb[func].fw_mb_param);
6170 DP(BNX2X_MSG_MCP, "drv_pulse 0x%x func_stx 0x%x\n",
6171 bp->fw_drv_pulse_wr_seq, bp->func_stx);
6175 /* this needs to be done before gunzip end */
6176 bnx2x_zero_def_sb(bp);
6177 for_each_queue(bp, i)
6178 bnx2x_zero_sb(bp, BP_L_ID(bp) + i);
6181 bnx2x_gunzip_end(bp);
6186 /* send the MCP a request, block until there is a reply */
6187 static u32 bnx2x_fw_command(struct bnx2x *bp, u32 command)
6189 int func = BP_FUNC(bp);
	u32 seq = ++bp->fw_seq;
	u32 rc = 0;
	u32 cnt = 1;
	u8 delay = CHIP_REV_IS_SLOW(bp) ? 100 : 10;
6195 SHMEM_WR(bp, func_mb[func].drv_mb_header, (command | seq));
6196 DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB\n", (command | seq));
	do {
		/* let the FW do its magic ... */
		msleep(delay);

		rc = SHMEM_RD(bp, func_mb[func].fw_mb_header);
6204 /* Give the FW up to 2 second (200*10ms) */
6205 } while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 200));
6207 DP(BNX2X_MSG_MCP, "[after %d ms] read (%x) seq is (%x) from FW MB\n",
6208 cnt*delay, rc, seq);
	/* is this a reply to our command? */
	if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK)) {
		rc &= FW_MSG_CODE_MASK;

	} else {
		/* FW BUG! */
		BNX2X_ERR("FW failed to respond!\n");
		bnx2x_fw_dump(bp);
		rc = 0;
	}

	return rc;
}
6224 static void bnx2x_free_mem(struct bnx2x *bp)
6227 #define BNX2X_PCI_FREE(x, y, size) \
6230 pci_free_consistent(bp->pdev, size, x, y); \
6236 #define BNX2X_FREE(x) \
6248 for_each_queue(bp, i) {
6251 BNX2X_PCI_FREE(bnx2x_fp(bp, i, status_blk),
6252 bnx2x_fp(bp, i, status_blk_mapping),
6253 sizeof(struct host_status_block) +
6254 sizeof(struct eth_tx_db_data));
6257 for_each_rx_queue(bp, i) {
6259 /* fastpath rx rings: rx_buf rx_desc rx_comp */
6260 BNX2X_FREE(bnx2x_fp(bp, i, rx_buf_ring));
6261 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_desc_ring),
6262 bnx2x_fp(bp, i, rx_desc_mapping),
6263 sizeof(struct eth_rx_bd) * NUM_RX_BD);
6265 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_comp_ring),
6266 bnx2x_fp(bp, i, rx_comp_mapping),
6267 sizeof(struct eth_fast_path_rx_cqe) *
6271 BNX2X_FREE(bnx2x_fp(bp, i, rx_page_ring));
6272 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_sge_ring),
6273 bnx2x_fp(bp, i, rx_sge_mapping),
6274 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
6277 for_each_tx_queue(bp, i) {
6279 /* fastpath tx rings: tx_buf tx_desc */
6280 BNX2X_FREE(bnx2x_fp(bp, i, tx_buf_ring));
6281 BNX2X_PCI_FREE(bnx2x_fp(bp, i, tx_desc_ring),
6282 bnx2x_fp(bp, i, tx_desc_mapping),
6283 sizeof(struct eth_tx_bd) * NUM_TX_BD);
6285 /* end of fastpath */
6287 BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping,
6288 sizeof(struct host_def_status_block));
6290 BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping,
6291 sizeof(struct bnx2x_slowpath));
6294 BNX2X_PCI_FREE(bp->t1, bp->t1_mapping, 64*1024);
6295 BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, 16*1024);
6296 BNX2X_PCI_FREE(bp->timers, bp->timers_mapping, 8*1024);
6297 BNX2X_PCI_FREE(bp->qm, bp->qm_mapping, 128*1024);
6299 BNX2X_PCI_FREE(bp->spq, bp->spq_mapping, BCM_PAGE_SIZE);
6301 #undef BNX2X_PCI_FREE
6305 static int bnx2x_alloc_mem(struct bnx2x *bp)
6308 #define BNX2X_PCI_ALLOC(x, y, size) \
6309 do { \
6310 x = pci_alloc_consistent(bp->pdev, size, y); \
6311 if (x == NULL) \
6312 goto alloc_mem_err; \
6313 memset(x, 0, size); \
6314 } while (0)
6316 #define BNX2X_ALLOC(x, size) \
6317 do { \
6318 x = vmalloc(size); \
6319 if (x == NULL) \
6320 goto alloc_mem_err; \
6321 memset(x, 0, size); \
6322 } while (0)
6328 for_each_queue(bp, i) {
6329 bnx2x_fp(bp, i, bp) = bp;
6332 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, status_blk),
6333 &bnx2x_fp(bp, i, status_blk_mapping),
6334 sizeof(struct host_status_block) +
6335 sizeof(struct eth_tx_db_data));
6338 for_each_rx_queue(bp, i) {
6340 /* fastpath rx rings: rx_buf rx_desc rx_comp */
6341 BNX2X_ALLOC(bnx2x_fp(bp, i, rx_buf_ring),
6342 sizeof(struct sw_rx_bd) * NUM_RX_BD);
6343 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_desc_ring),
6344 &bnx2x_fp(bp, i, rx_desc_mapping),
6345 sizeof(struct eth_rx_bd) * NUM_RX_BD);
6347 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_comp_ring),
6348 &bnx2x_fp(bp, i, rx_comp_mapping),
6349 sizeof(struct eth_fast_path_rx_cqe) *
6353 BNX2X_ALLOC(bnx2x_fp(bp, i, rx_page_ring),
6354 sizeof(struct sw_rx_page) * NUM_RX_SGE);
6355 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_sge_ring),
6356 &bnx2x_fp(bp, i, rx_sge_mapping),
6357 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
6360 for_each_tx_queue(bp, i) {
6362 bnx2x_fp(bp, i, hw_tx_prods) =
6363 (void *)(bnx2x_fp(bp, i, status_blk) + 1);
6365 bnx2x_fp(bp, i, tx_prods_mapping) =
6366 bnx2x_fp(bp, i, status_blk_mapping) +
6367 sizeof(struct host_status_block);
6369 /* fastpath tx rings: tx_buf tx_desc */
6370 BNX2X_ALLOC(bnx2x_fp(bp, i, tx_buf_ring),
6371 sizeof(struct sw_tx_bd) * NUM_TX_BD);
6372 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, tx_desc_ring),
6373 &bnx2x_fp(bp, i, tx_desc_mapping),
6374 sizeof(struct eth_tx_bd) * NUM_TX_BD);
6376 /* end of fastpath */
6378 BNX2X_PCI_ALLOC(bp->def_status_blk, &bp->def_status_blk_mapping,
6379 sizeof(struct host_def_status_block));
6381 BNX2X_PCI_ALLOC(bp->slowpath, &bp->slowpath_mapping,
6382 sizeof(struct bnx2x_slowpath));
6385 BNX2X_PCI_ALLOC(bp->t1, &bp->t1_mapping, 64*1024);
6388 for (i = 0; i < 64*1024; i += 64) {
6389 *(u64 *)((char *)bp->t1 + i + 56) = 0x0UL;
6390 *(u64 *)((char *)bp->t1 + i + 3) = 0x0UL;
6393 /* allocate the searcher T2 table;
6394 we allocate 1/4 of the allocation for T2
6395 (which is not entered into the ILT) */
6396 BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, 16*1024);
6399 for (i = 0; i < 16*1024; i += 64)
6400 *(u64 *)((char *)bp->t2 + i + 56) = bp->t2_mapping + i + 64;
6402 /* now fixup the last line in the block to point to the next block */
6403 *(u64 *)((char *)bp->t2 + 1024*16-8) = bp->t2_mapping;
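/* Illustrative sketch, not part of the driver build: the loop and fixup
 * above turn T2 into a circular singly linked list of 64-byte lines --
 * bytes 56..63 of each line hold the DMA address of the next line, and
 * the last line points back at the start.  A checker for that invariant:
 */
#if 0
static bool t2_list_is_circular(struct bnx2x *bp)
{
	int i;

	for (i = 0; i < 16*1024 - 64; i += 64)
		if (*(u64 *)((char *)bp->t2 + i + 56) !=
		    bp->t2_mapping + i + 64)
			return false;

	/* the final line must wrap around to the first one */
	return *(u64 *)((char *)bp->t2 + 16*1024 - 8) == bp->t2_mapping;
}
#endif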
6405 /* Timer block array (MAX_CONN*8), phys uncached; 1024 conns for now */
6406 BNX2X_PCI_ALLOC(bp->timers, &bp->timers_mapping, 8*1024);
6408 /* QM queues (128*MAX_CONN) */
6409 BNX2X_PCI_ALLOC(bp->qm, &bp->qm_mapping, 128*1024);
6412 /* Slow path ring */
6413 BNX2X_PCI_ALLOC(bp->spq, &bp->spq_mapping, BCM_PAGE_SIZE);
6421 #undef BNX2X_PCI_ALLOC
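/* Illustrative sketch, not part of the driver build: BNX2X_PCI_ALLOC and
 * BNX2X_ALLOC above rely on the classic goto-unwind idiom -- any failed
 * allocation jumps to a single error label that frees everything
 * allocated so far.  The bare pattern, with hypothetical buffers a and b:
 */
#if 0
static int goto_unwind_example(void)
{
	void *a = NULL, *b = NULL;

	a = vmalloc(4096);
	if (!a)
		goto err;
	b = vmalloc(4096);
	if (!b)
		goto err;
	return 0;

err:
	vfree(b);	/* vfree(NULL) is a no-op, so partial failure is fine */
	vfree(a);
	return -ENOMEM;
}
#endif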
6425 static void bnx2x_free_tx_skbs(struct bnx2x *bp)
6429 for_each_tx_queue(bp, i) {
6430 struct bnx2x_fastpath *fp = &bp->fp[i];
6432 u16 bd_cons = fp->tx_bd_cons;
6433 u16 sw_prod = fp->tx_pkt_prod;
6434 u16 sw_cons = fp->tx_pkt_cons;
6436 while (sw_cons != sw_prod) {
6437 bd_cons = bnx2x_free_tx_pkt(bp, fp, TX_BD(sw_cons));
6443 static void bnx2x_free_rx_skbs(struct bnx2x *bp)
6447 for_each_rx_queue(bp, j) {
6448 struct bnx2x_fastpath *fp = &bp->fp[j];
6450 for (i = 0; i < NUM_RX_BD; i++) {
6451 struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
6452 struct sk_buff *skb = rx_buf->skb;
6457 pci_unmap_single(bp->pdev,
6458 pci_unmap_addr(rx_buf, mapping),
6459 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
6464 if (!fp->disable_tpa)
6465 bnx2x_free_tpa_pool(bp, fp, CHIP_IS_E1(bp) ?
6466 ETH_MAX_AGGREGATION_QUEUES_E1 :
6467 ETH_MAX_AGGREGATION_QUEUES_E1H);
6471 static void bnx2x_free_skbs(struct bnx2x *bp)
6473 bnx2x_free_tx_skbs(bp);
6474 bnx2x_free_rx_skbs(bp);
6477 static void bnx2x_free_msix_irqs(struct bnx2x *bp)
6481 free_irq(bp->msix_table[0].vector, bp->dev);
6482 DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
6483 bp->msix_table[0].vector);
6485 for_each_queue(bp, i) {
6486 DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq "
6487 "state %x\n", i, bp->msix_table[i + offset].vector,
6488 bnx2x_fp(bp, i, state));
6490 free_irq(bp->msix_table[i + offset].vector, &bp->fp[i]);
6494 static void bnx2x_free_irq(struct bnx2x *bp)
6496 if (bp->flags & USING_MSIX_FLAG) {
6497 bnx2x_free_msix_irqs(bp);
6498 pci_disable_msix(bp->pdev);
6499 bp->flags &= ~USING_MSIX_FLAG;
6501 } else if (bp->flags & USING_MSI_FLAG) {
6502 free_irq(bp->pdev->irq, bp->dev);
6503 pci_disable_msi(bp->pdev);
6504 bp->flags &= ~USING_MSI_FLAG;
6507 free_irq(bp->pdev->irq, bp->dev);
6510 static int bnx2x_enable_msix(struct bnx2x *bp)
6512 int i, rc, offset = 1;
6515 bp->msix_table[0].entry = igu_vec;
6516 DP(NETIF_MSG_IFUP, "msix_table[0].entry = %d (slowpath)\n", igu_vec);
6518 for_each_queue(bp, i) {
6519 igu_vec = BP_L_ID(bp) + offset + i;
6520 bp->msix_table[i + offset].entry = igu_vec;
6521 DP(NETIF_MSG_IFUP, "msix_table[%d].entry = %d "
6522 "(fastpath #%u)\n", i + offset, igu_vec, i);
6525 rc = pci_enable_msix(bp->pdev, &bp->msix_table[0],
6526 BNX2X_NUM_QUEUES(bp) + offset);
6528 DP(NETIF_MSG_IFUP, "MSI-X is not attainable rc %d\n", rc);
6532 bp->flags |= USING_MSIX_FLAG;
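/* Illustrative sketch, not part of the driver build: the MSI-X table
 * built above always places the slowpath vector in entry 0 and one
 * fastpath vector per queue in entries 1..n, so n queues need n + 1
 * vectors:
 */
#if 0
static int msix_vectors_needed(struct bnx2x *bp)
{
	return BNX2X_NUM_QUEUES(bp) + 1;	/* +1 for slowpath entry 0 */
}
#endif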
6537 static int bnx2x_req_msix_irqs(struct bnx2x *bp)
6539 int i, rc, offset = 1;
6541 rc = request_irq(bp->msix_table[0].vector, bnx2x_msix_sp_int, 0,
6542 bp->dev->name, bp->dev);
6544 BNX2X_ERR("request sp irq failed\n");
6548 for_each_queue(bp, i) {
6549 struct bnx2x_fastpath *fp = &bp->fp[i];
6551 sprintf(fp->name, "%s.fp%d", bp->dev->name, i);
6552 rc = request_irq(bp->msix_table[i + offset].vector,
6553 bnx2x_msix_fp_int, 0, fp->name, fp);
6555 BNX2X_ERR("request fp #%d irq failed rc %d\n", i, rc);
6556 bnx2x_free_msix_irqs(bp);
6560 fp->state = BNX2X_FP_STATE_IRQ;
6563 i = BNX2X_NUM_QUEUES(bp);
6565 printk(KERN_INFO PFX
6566 "%s: using MSI-X IRQs: sp %d fp %d - %d\n",
6567 bp->dev->name, bp->msix_table[0].vector,
6568 bp->msix_table[offset].vector,
6569 bp->msix_table[offset + i - 1].vector);
6571 printk(KERN_INFO PFX "%s: using MSI-X IRQs: sp %d fp %d\n",
6572 bp->dev->name, bp->msix_table[0].vector,
6573 bp->msix_table[offset + i - 1].vector);
6578 static int bnx2x_enable_msi(struct bnx2x *bp)
6582 rc = pci_enable_msi(bp->pdev);
6584 DP(NETIF_MSG_IFUP, "MSI is not attainable\n");
6587 bp->flags |= USING_MSI_FLAG;
6592 static int bnx2x_req_irq(struct bnx2x *bp)
6594 unsigned long flags;
6597 if (bp->flags & USING_MSI_FLAG)
6600 flags = IRQF_SHARED;
6602 rc = request_irq(bp->pdev->irq, bnx2x_interrupt, flags,
6603 bp->dev->name, bp->dev);
6605 bnx2x_fp(bp, 0, state) = BNX2X_FP_STATE_IRQ;
6610 static void bnx2x_napi_enable(struct bnx2x *bp)
6614 for_each_rx_queue(bp, i)
6615 napi_enable(&bnx2x_fp(bp, i, napi));
6618 static void bnx2x_napi_disable(struct bnx2x *bp)
6622 for_each_rx_queue(bp, i)
6623 napi_disable(&bnx2x_fp(bp, i, napi));
6626 static void bnx2x_netif_start(struct bnx2x *bp)
6628 if (atomic_dec_and_test(&bp->intr_sem)) {
6629 if (netif_running(bp->dev)) {
6630 bnx2x_napi_enable(bp);
6631 bnx2x_int_enable(bp);
6632 if (bp->state == BNX2X_STATE_OPEN)
6633 netif_tx_wake_all_queues(bp->dev);
6638 static void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
6640 bnx2x_int_disable_sync(bp, disable_hw);
6641 bnx2x_napi_disable(bp);
6642 netif_tx_disable(bp->dev);
6643 bp->dev->trans_start = jiffies; /* prevent tx timeout */
6647 * Init service functions
6650 static void bnx2x_set_mac_addr_e1(struct bnx2x *bp, int set)
6652 struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
6653 int port = BP_PORT(bp);
6656 * unicasts 0-31:port0 32-63:port1
6657 * multicast 64-127:port0 128-191:port1
6659 config->hdr.length = 2;
6660 config->hdr.offset = port ? 32 : 0;
6661 config->hdr.client_id = bp->fp->cl_id;
6662 config->hdr.reserved1 = 0;
6665 config->config_table[0].cam_entry.msb_mac_addr =
6666 swab16(*(u16 *)&bp->dev->dev_addr[0]);
6667 config->config_table[0].cam_entry.middle_mac_addr =
6668 swab16(*(u16 *)&bp->dev->dev_addr[2]);
6669 config->config_table[0].cam_entry.lsb_mac_addr =
6670 swab16(*(u16 *)&bp->dev->dev_addr[4]);
6671 config->config_table[0].cam_entry.flags = cpu_to_le16(port);
6673 config->config_table[0].target_table_entry.flags = 0;
6675 CAM_INVALIDATE(config->config_table[0]);
6676 config->config_table[0].target_table_entry.client_id = 0;
6677 config->config_table[0].target_table_entry.vlan_id = 0;
6679 DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x)\n",
6680 (set ? "setting" : "clearing"),
6681 config->config_table[0].cam_entry.msb_mac_addr,
6682 config->config_table[0].cam_entry.middle_mac_addr,
6683 config->config_table[0].cam_entry.lsb_mac_addr);
6686 config->config_table[1].cam_entry.msb_mac_addr = cpu_to_le16(0xffff);
6687 config->config_table[1].cam_entry.middle_mac_addr = cpu_to_le16(0xffff);
6688 config->config_table[1].cam_entry.lsb_mac_addr = cpu_to_le16(0xffff);
6689 config->config_table[1].cam_entry.flags = cpu_to_le16(port);
6691 config->config_table[1].target_table_entry.flags =
6692 TSTORM_CAM_TARGET_TABLE_ENTRY_BROADCAST;
6694 CAM_INVALIDATE(config->config_table[1]);
6695 config->config_table[1].target_table_entry.client_id = 0;
6696 config->config_table[1].target_table_entry.vlan_id = 0;
6698 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
6699 U64_HI(bnx2x_sp_mapping(bp, mac_config)),
6700 U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
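/* Illustrative sketch, not part of the driver build: the CAM entry filled
 * in above stores the 6-byte MAC as three 16-bit words, each half loaded
 * from the address array and byte-swapped with swab16() into the ordering
 * the chip expects.  One word in isolation (word index 0, 1 or 2):
 */
#if 0
static u16 cam_mac_word(const u8 *addr, int word)
{
	return swab16(*(const u16 *)&addr[word * 2]);
}
#endif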
6703 static void bnx2x_set_mac_addr_e1h(struct bnx2x *bp, int set)
6705 struct mac_configuration_cmd_e1h *config =
6706 (struct mac_configuration_cmd_e1h *)bnx2x_sp(bp, mac_config);
6708 if (set && (bp->state != BNX2X_STATE_OPEN)) {
6709 DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
6713 /* CAM allocation for E1H
6714 * unicasts: by func number
6715 * multicast: 20+FUNC*20, 20 each
6717 config->hdr.length = 1;
6718 config->hdr.offset = BP_FUNC(bp);
6719 config->hdr.client_id = bp->fp->cl_id;
6720 config->hdr.reserved1 = 0;
6723 config->config_table[0].msb_mac_addr =
6724 swab16(*(u16 *)&bp->dev->dev_addr[0]);
6725 config->config_table[0].middle_mac_addr =
6726 swab16(*(u16 *)&bp->dev->dev_addr[2]);
6727 config->config_table[0].lsb_mac_addr =
6728 swab16(*(u16 *)&bp->dev->dev_addr[4]);
6729 config->config_table[0].client_id = BP_L_ID(bp);
6730 config->config_table[0].vlan_id = 0;
6731 config->config_table[0].e1hov_id = cpu_to_le16(bp->e1hov);
6733 config->config_table[0].flags = BP_PORT(bp);
6735 config->config_table[0].flags =
6736 MAC_CONFIGURATION_ENTRY_E1H_ACTION_TYPE;
6738 DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x) E1HOV %d CLID %d\n",
6739 (set ? "setting" : "clearing"),
6740 config->config_table[0].msb_mac_addr,
6741 config->config_table[0].middle_mac_addr,
6742 config->config_table[0].lsb_mac_addr, bp->e1hov, BP_L_ID(bp));
6744 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
6745 U64_HI(bnx2x_sp_mapping(bp, mac_config)),
6746 U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
6749 static int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
6750 int *state_p, int poll)
6752 /* can take a while if any port is running */
6755 DP(NETIF_MSG_IFUP, "%s for state to become %x on IDX [%d]\n",
6756 poll ? "polling" : "waiting", state, idx);
6761 bnx2x_rx_int(bp->fp, 10);
6762 /* if the index is non-zero,
6763 * the reply for some commands will
6764 * arrive on a non-default queue
6767 bnx2x_rx_int(&bp->fp[idx], 10);
6770 mb(); /* state is changed by bnx2x_sp_event() */
6771 if (*state_p == state) {
6772 #ifdef BNX2X_STOP_ON_ERROR
6773 DP(NETIF_MSG_IFUP, "exit (cnt %d)\n", 5000 - cnt);
6782 BNX2X_ERR("timeout %s for state %x on IDX [%d]\n",
6783 poll ? "polling" : "waiting", state, idx);
6784 #ifdef BNX2X_STOP_ON_ERROR
6791 static int bnx2x_setup_leading(struct bnx2x *bp)
6795 /* reset IGU state */
6796 bnx2x_ack_sb(bp, bp->fp[0].sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
6799 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_SETUP, 0, 0, 0, 0);
6801 /* Wait for completion */
6802 rc = bnx2x_wait_ramrod(bp, BNX2X_STATE_OPEN, 0, &(bp->state), 0);
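/* Illustrative sketch, not part of the driver build: bnx2x_wait_ramrod()
 * above is the usual bounded-poll pattern -- re-read a state flag that
 * another context (the sp-event path) updates, with a barrier before each
 * read and an iteration cap standing in for a timeout:
 */
#if 0
static int poll_for_state(int *state_p, int want, int max_iter)
{
	while (max_iter--) {
		mb();		/* state is written from another context */
		if (*state_p == want)
			return 0;
		msleep(1);
	}
	return -EBUSY;		/* timed out */
}
#endif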
6807 static int bnx2x_setup_multi(struct bnx2x *bp, int index)
6809 struct bnx2x_fastpath *fp = &bp->fp[index];
6811 /* reset IGU state */
6812 bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
6815 fp->state = BNX2X_FP_STATE_OPENING;
6816 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CLIENT_SETUP, index, 0,
6819 /* Wait for completion */
6820 return bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_OPEN, index,
6824 static int bnx2x_poll(struct napi_struct *napi, int budget);
6826 static void bnx2x_set_int_mode(struct bnx2x *bp)
6834 bp->num_rx_queues = num_queues;
6835 bp->num_tx_queues = num_queues;
6837 "set number of queues to %d\n", num_queues);
6842 if (bp->multi_mode == ETH_RSS_MODE_REGULAR)
6843 num_queues = min_t(u32, num_online_cpus(),
6844 BNX2X_MAX_QUEUES(bp));
6845 else
6846 num_queues = 1;
6847 bp->num_rx_queues = num_queues;
6848 bp->num_tx_queues = num_queues;
6849 DP(NETIF_MSG_IFUP, "set number of rx queues to %d"
6850 " number of tx queues to %d\n",
6851 bp->num_rx_queues, bp->num_tx_queues);
6852 /* if we can't use MSI-X we only need one fp,
6853 * so try to enable MSI-X with the requested number of fp's
6854 * and fall back to MSI or legacy INTx with one fp
6856 if (bnx2x_enable_msix(bp)) {
6857 /* failed to enable MSI-X */
6858 num_queues = 1;
6859 bp->num_rx_queues = num_queues;
6860 bp->num_tx_queues = num_queues;
6862 BNX2X_ERR("Multi requested but failed to "
6863 "enable MSI-X set number of "
6864 "queues to %d\n", num_queues);
6868 bp->dev->real_num_tx_queues = bp->num_tx_queues;
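/* Illustrative sketch, not part of the driver build: the queue-count
 * policy above, collapsed into one helper (ignoring the forced INTx/MSI
 * cases) -- a single queue unless regular RSS is requested, then one per
 * online CPU up to the chip maximum:
 */
#if 0
static int pick_num_queues(struct bnx2x *bp)
{
	if (bp->multi_mode != ETH_RSS_MODE_REGULAR)
		return 1;

	return min_t(u32, num_online_cpus(), BNX2X_MAX_QUEUES(bp));
}
#endif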
6871 static void bnx2x_set_rx_mode(struct net_device *dev);
6873 /* must be called with rtnl_lock */
6874 static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
6878 #ifdef BNX2X_STOP_ON_ERROR
6879 DP(NETIF_MSG_IFUP, "enter load_mode %d\n", load_mode);
6880 if (unlikely(bp->panic))
6884 bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;
6886 bnx2x_set_int_mode(bp);
6888 if (bnx2x_alloc_mem(bp))
6891 for_each_rx_queue(bp, i)
6892 bnx2x_fp(bp, i, disable_tpa) =
6893 ((bp->flags & TPA_ENABLE_FLAG) == 0);
6895 for_each_rx_queue(bp, i)
6896 netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
6899 #ifdef BNX2X_STOP_ON_ERROR
6900 for_each_rx_queue(bp, i) {
6901 struct bnx2x_fastpath *fp = &bp->fp[i];
6903 fp->poll_no_work = 0;
6905 fp->poll_max_calls = 0;
6906 fp->poll_complete = 0;
6910 bnx2x_napi_enable(bp);
6912 if (bp->flags & USING_MSIX_FLAG) {
6913 rc = bnx2x_req_msix_irqs(bp);
6915 pci_disable_msix(bp->pdev);
6919 if ((rc != -ENOMEM) && (int_mode != INT_MODE_INTx))
6920 bnx2x_enable_msi(bp);
6922 rc = bnx2x_req_irq(bp);
6924 BNX2X_ERR("IRQ request failed rc %d, aborting\n", rc);
6925 if (bp->flags & USING_MSI_FLAG)
6926 pci_disable_msi(bp->pdev);
6929 if (bp->flags & USING_MSI_FLAG) {
6930 bp->dev->irq = bp->pdev->irq;
6931 printk(KERN_INFO PFX "%s: using MSI IRQ %d\n",
6932 bp->dev->name, bp->pdev->irq);
6936 /* Send LOAD_REQUEST command to the MCP.
6937 The reply indicates the type of LOAD command:
6938 if this is the first port to be initialized,
6939 common blocks should be initialized as well; otherwise not
6941 if (!BP_NOMCP(bp)) {
6942 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ);
6944 BNX2X_ERR("MCP response failure, aborting\n");
6948 if (load_code == FW_MSG_CODE_DRV_LOAD_REFUSED) {
6949 rc = -EBUSY; /* other port in diagnostic mode */
6954 int port = BP_PORT(bp);
6956 DP(NETIF_MSG_IFUP, "NO MCP - load counts %d, %d, %d\n",
6957 load_count[0], load_count[1], load_count[2]);
6959 load_count[1 + port]++;
6960 DP(NETIF_MSG_IFUP, "NO MCP - new load counts %d, %d, %d\n",
6961 load_count[0], load_count[1], load_count[2]);
6962 if (load_count[0] == 1)
6963 load_code = FW_MSG_CODE_DRV_LOAD_COMMON;
6964 else if (load_count[1 + port] == 1)
6965 load_code = FW_MSG_CODE_DRV_LOAD_PORT;
6967 load_code = FW_MSG_CODE_DRV_LOAD_FUNCTION;
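/* Illustrative sketch, not part of the driver build: with no MCP the
 * driver keeps its own reference counts in load_count[] -- [0] for all
 * functions, [1 + port] per port -- and derives the load scope from them,
 * mirroring the MCP's answer.  (The load_count[0] increment is inferred
 * from the matching decrements in the unload path further below.)
 */
#if 0
static u32 no_mcp_load_code(int port)
{
	load_count[0]++;
	load_count[1 + port]++;
	if (load_count[0] == 1)			/* first function overall */
		return FW_MSG_CODE_DRV_LOAD_COMMON;
	if (load_count[1 + port] == 1)		/* first on this port */
		return FW_MSG_CODE_DRV_LOAD_PORT;
	return FW_MSG_CODE_DRV_LOAD_FUNCTION;
}
#endif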
6970 if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
6971 (load_code == FW_MSG_CODE_DRV_LOAD_PORT))
6975 DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
6978 rc = bnx2x_init_hw(bp, load_code);
6980 BNX2X_ERR("HW init failed, aborting\n");
6984 /* Setup NIC internals and enable interrupts */
6985 bnx2x_nic_init(bp, load_code);
6987 /* Send LOAD_DONE command to MCP */
6988 if (!BP_NOMCP(bp)) {
6989 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE);
6991 BNX2X_ERR("MCP response failure, aborting\n");
6997 bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;
6999 rc = bnx2x_setup_leading(bp);
7001 BNX2X_ERR("Setup leading failed!\n");
7005 if (CHIP_IS_E1H(bp))
7006 if (bp->mf_config & FUNC_MF_CFG_FUNC_DISABLED) {
7007 DP(NETIF_MSG_IFUP, "mf_cfg function disabled\n");
7008 bp->state = BNX2X_STATE_DISABLED;
7011 if (bp->state == BNX2X_STATE_OPEN)
7012 for_each_nondefault_queue(bp, i) {
7013 rc = bnx2x_setup_multi(bp, i);
7019 bnx2x_set_mac_addr_e1(bp, 1);
7021 bnx2x_set_mac_addr_e1h(bp, 1);
7024 bnx2x_initial_phy_init(bp, load_mode);
7026 /* Start fast path */
7027 switch (load_mode) {
7029 /* Tx queues should only be re-enabled */
7030 netif_tx_wake_all_queues(bp->dev);
7031 /* Initialize the receive filter. */
7032 bnx2x_set_rx_mode(bp->dev);
7036 netif_tx_start_all_queues(bp->dev);
7037 /* Initialize the receive filter. */
7038 bnx2x_set_rx_mode(bp->dev);
7042 /* Initialize the receive filter. */
7043 bnx2x_set_rx_mode(bp->dev);
7044 bp->state = BNX2X_STATE_DIAG;
7052 bnx2x__link_status_update(bp);
7054 /* start the timer */
7055 mod_timer(&bp->timer, jiffies + bp->current_interval);
7061 bnx2x_int_disable_sync(bp, 1);
7062 if (!BP_NOMCP(bp)) {
7063 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP);
7064 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
7067 /* Free SKBs, SGEs, TPA pool and driver internals */
7068 bnx2x_free_skbs(bp);
7069 for_each_rx_queue(bp, i)
7070 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
7075 bnx2x_napi_disable(bp);
7076 for_each_rx_queue(bp, i)
7077 netif_napi_del(&bnx2x_fp(bp, i, napi));
7083 static int bnx2x_stop_multi(struct bnx2x *bp, int index)
7085 struct bnx2x_fastpath *fp = &bp->fp[index];
7088 /* halt the connection */
7089 fp->state = BNX2X_FP_STATE_HALTING;
7090 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, index, 0, fp->cl_id, 0);
7092 /* Wait for completion */
7093 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, index,
7095 if (rc) /* timeout */
7098 /* delete cfc entry */
7099 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CFC_DEL, index, 0, 0, 1);
7101 /* Wait for completion */
7102 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_CLOSED, index,
7107 static int bnx2x_stop_leading(struct bnx2x *bp)
7109 __le16 dsb_sp_prod_idx;
7110 /* if the other port is handling traffic,
7111 this can take a lot of time */
7117 /* Send HALT ramrod */
7118 bp->fp[0].state = BNX2X_FP_STATE_HALTING;
7119 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, 0, 0, bp->fp->cl_id, 0);
7121 /* Wait for completion */
7122 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, 0,
7123 &(bp->fp[0].state), 1);
7124 if (rc) /* timeout */
7127 dsb_sp_prod_idx = *bp->dsb_sp_prod;
7129 /* Send PORT_DELETE ramrod */
7130 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_DEL, 0, 0, 0, 1);
7132 /* Wait for the completion to arrive on the default status block;
7133 we are going to reset the chip anyway,
7134 so there is not much to do if this times out
7136 while (dsb_sp_prod_idx == *bp->dsb_sp_prod) {
7138 DP(NETIF_MSG_IFDOWN, "timeout waiting for port del "
7139 "dsb_sp_prod 0x%x != dsb_sp_prod_idx 0x%x\n",
7140 *bp->dsb_sp_prod, dsb_sp_prod_idx);
7141 #ifdef BNX2X_STOP_ON_ERROR
7149 rmb(); /* Refresh the dsb_sp_prod */
7151 bp->state = BNX2X_STATE_CLOSING_WAIT4_UNLOAD;
7152 bp->fp[0].state = BNX2X_FP_STATE_CLOSED;
7157 static void bnx2x_reset_func(struct bnx2x *bp)
7159 int port = BP_PORT(bp);
7160 int func = BP_FUNC(bp);
7164 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
7165 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
7168 base = FUNC_ILT_BASE(func);
7169 for (i = base; i < base + ILT_PER_FUNC; i++)
7170 bnx2x_ilt_wr(bp, i, 0);
7173 static void bnx2x_reset_port(struct bnx2x *bp)
7175 int port = BP_PORT(bp);
7178 REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
7180 /* Do not rcv packets to BRB */
7181 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK + port*4, 0x0);
7182 /* Do not direct rcv packets that are not for MCP to the BRB */
7183 REG_WR(bp, (port ? NIG_REG_LLH1_BRB1_NOT_MCP :
7184 NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
7187 REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, 0);
7190 /* Check for BRB port occupancy */
7191 val = REG_RD(bp, BRB1_REG_PORT_NUM_OCC_BLOCKS_0 + port*4);
7193 DP(NETIF_MSG_IFDOWN,
7194 "BRB1 is not empty %d blocks are occupied\n", val);
7196 /* TODO: Close Doorbell port? */
7199 static void bnx2x_reset_chip(struct bnx2x *bp, u32 reset_code)
7201 DP(BNX2X_MSG_MCP, "function %d reset_code %x\n",
7202 BP_FUNC(bp), reset_code);
7204 switch (reset_code) {
7205 case FW_MSG_CODE_DRV_UNLOAD_COMMON:
7206 bnx2x_reset_port(bp);
7207 bnx2x_reset_func(bp);
7208 bnx2x_reset_common(bp);
7211 case FW_MSG_CODE_DRV_UNLOAD_PORT:
7212 bnx2x_reset_port(bp);
7213 bnx2x_reset_func(bp);
7216 case FW_MSG_CODE_DRV_UNLOAD_FUNCTION:
7217 bnx2x_reset_func(bp);
7221 BNX2X_ERR("Unknown reset_code (0x%x) from MCP\n", reset_code);
7226 /* must be called with rtnl_lock */
7227 static int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
7229 int port = BP_PORT(bp);
7233 bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
7235 bp->rx_mode = BNX2X_RX_MODE_NONE;
7236 bnx2x_set_storm_rx_mode(bp);
7238 bnx2x_netif_stop(bp, 1);
7240 del_timer_sync(&bp->timer);
7241 SHMEM_WR(bp, func_mb[BP_FUNC(bp)].drv_pulse_mb,
7242 (DRV_PULSE_ALWAYS_ALIVE | bp->fw_drv_pulse_wr_seq));
7243 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
7248 /* Wait until tx fastpath tasks complete */
7249 for_each_tx_queue(bp, i) {
7250 struct bnx2x_fastpath *fp = &bp->fp[i];
7253 while (bnx2x_has_tx_work_unload(fp)) {
7257 BNX2X_ERR("timeout waiting for queue[%d]\n",
7259 #ifdef BNX2X_STOP_ON_ERROR
7270 /* Give HW time to discard old tx messages */
7273 if (CHIP_IS_E1(bp)) {
7274 struct mac_configuration_cmd *config =
7275 bnx2x_sp(bp, mcast_config);
7277 bnx2x_set_mac_addr_e1(bp, 0);
7279 for (i = 0; i < config->hdr.length; i++)
7280 CAM_INVALIDATE(config->config_table[i]);
7282 config->hdr.length = i;
7283 if (CHIP_REV_IS_SLOW(bp))
7284 config->hdr.offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
7286 config->hdr.offset = BNX2X_MAX_MULTICAST*(1 + port);
7287 config->hdr.client_id = bp->fp->cl_id;
7288 config->hdr.reserved1 = 0;
7290 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
7291 U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
7292 U64_LO(bnx2x_sp_mapping(bp, mcast_config)), 0);
7295 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
7297 bnx2x_set_mac_addr_e1h(bp, 0);
7299 for (i = 0; i < MC_HASH_SIZE; i++)
7300 REG_WR(bp, MC_HASH_OFFSET(bp, i), 0);
7303 if (unload_mode == UNLOAD_NORMAL)
7304 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
7306 else if (bp->flags & NO_WOL_FLAG) {
7307 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP;
7308 if (CHIP_IS_E1H(bp))
7309 REG_WR(bp, MISC_REG_E1HMF_MODE, 0);
7311 } else if (bp->wol) {
7312 u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
7313 u8 *mac_addr = bp->dev->dev_addr;
7315 /* The MAC address is written to entries 1-4 to
7316 preserve entry 0, which is used by the PMF */
7317 u8 entry = (BP_E1HVN(bp) + 1)*8;
7319 val = (mac_addr[0] << 8) | mac_addr[1];
7320 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry, val);
7322 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
7323 (mac_addr[4] << 8) | mac_addr[5];
7324 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry + 4, val);
7326 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN;
7329 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
7331 /* Close the multi and leading connections;
7332 completions for these ramrods are collected synchronously */
7333 for_each_nondefault_queue(bp, i)
7334 if (bnx2x_stop_multi(bp, i))
7337 rc = bnx2x_stop_leading(bp);
7339 BNX2X_ERR("Stop leading failed!\n");
7340 #ifdef BNX2X_STOP_ON_ERROR
7349 reset_code = bnx2x_fw_command(bp, reset_code);
7351 DP(NETIF_MSG_IFDOWN, "NO MCP - load counts %d, %d, %d\n",
7352 load_count[0], load_count[1], load_count[2]);
7354 load_count[1 + port]--;
7355 DP(NETIF_MSG_IFDOWN, "NO MCP - new load counts %d, %d, %d\n",
7356 load_count[0], load_count[1], load_count[2]);
7357 if (load_count[0] == 0)
7358 reset_code = FW_MSG_CODE_DRV_UNLOAD_COMMON;
7359 else if (load_count[1 + port] == 0)
7360 reset_code = FW_MSG_CODE_DRV_UNLOAD_PORT;
7362 reset_code = FW_MSG_CODE_DRV_UNLOAD_FUNCTION;
7365 if ((reset_code == FW_MSG_CODE_DRV_UNLOAD_COMMON) ||
7366 (reset_code == FW_MSG_CODE_DRV_UNLOAD_PORT))
7367 bnx2x__link_reset(bp);
7369 /* Reset the chip */
7370 bnx2x_reset_chip(bp, reset_code);
7372 /* Report UNLOAD_DONE to MCP */
7374 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
7378 /* Free SKBs, SGEs, TPA pool and driver internals */
7379 bnx2x_free_skbs(bp);
7380 for_each_rx_queue(bp, i)
7381 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
7382 for_each_rx_queue(bp, i)
7383 netif_napi_del(&bnx2x_fp(bp, i, napi));
7386 bp->state = BNX2X_STATE_CLOSED;
7388 netif_carrier_off(bp->dev);
7393 static void bnx2x_reset_task(struct work_struct *work)
7395 struct bnx2x *bp = container_of(work, struct bnx2x, reset_task);
7397 #ifdef BNX2X_STOP_ON_ERROR
7398 BNX2X_ERR("reset task called but STOP_ON_ERROR defined"
7399 " so reset not done to allow debug dump,\n"
7400 KERN_ERR " you will need to reboot when done\n");
7406 if (!netif_running(bp->dev))
7407 goto reset_task_exit;
7409 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
7410 bnx2x_nic_load(bp, LOAD_NORMAL);
7416 /* end of nic load/unload */
7421 * Init service functions
7424 static inline u32 bnx2x_get_pretend_reg(struct bnx2x *bp, int func)
7427 case 0: return PXP2_REG_PGL_PRETEND_FUNC_F0;
7428 case 1: return PXP2_REG_PGL_PRETEND_FUNC_F1;
7429 case 2: return PXP2_REG_PGL_PRETEND_FUNC_F2;
7430 case 3: return PXP2_REG_PGL_PRETEND_FUNC_F3;
7431 case 4: return PXP2_REG_PGL_PRETEND_FUNC_F4;
7432 case 5: return PXP2_REG_PGL_PRETEND_FUNC_F5;
7433 case 6: return PXP2_REG_PGL_PRETEND_FUNC_F6;
7434 case 7: return PXP2_REG_PGL_PRETEND_FUNC_F7;
7436 BNX2X_ERR("Unsupported function index: %d\n", func);
7441 static void bnx2x_undi_int_disable_e1h(struct bnx2x *bp, int orig_func)
7443 u32 reg = bnx2x_get_pretend_reg(bp, orig_func), new_val;
7445 /* Flush all outstanding writes */
7448 /* Pretend to be function 0 */
7450 /* Flush the GRC transaction (in the chip) */
7451 new_val = REG_RD(bp, reg);
7453 BNX2X_ERR("Hmmm... Pretend register wasn't updated: (0,%d)!\n",
7458 /* From now we are in the "like-E1" mode */
7459 bnx2x_int_disable(bp);
7461 /* Flush all outstanding writes */
7464 /* Restore the original function settings */
7465 REG_WR(bp, reg, orig_func);
7466 new_val = REG_RD(bp, reg);
7467 if (new_val != orig_func) {
7468 BNX2X_ERR("Hmmm... Pretend register wasn't updated: (%d,%d)!\n",
7469 orig_func, new_val);
7474 static inline void bnx2x_undi_int_disable(struct bnx2x *bp, int func)
7476 if (CHIP_IS_E1H(bp))
7477 bnx2x_undi_int_disable_e1h(bp, func);
7479 bnx2x_int_disable(bp);
7482 static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
7486 /* Check if there is any driver already loaded */
7487 val = REG_RD(bp, MISC_REG_UNPREPARED);
7489 /* Check if it is the UNDI driver:
7490 * UNDI initializes the CID offset for the normal doorbell to 0x7
7492 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
7493 val = REG_RD(bp, DORQ_REG_NORM_CID_OFST);
7495 u32 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
7497 int func = BP_FUNC(bp);
7501 /* clear the UNDI indication */
7502 REG_WR(bp, DORQ_REG_NORM_CID_OFST, 0);
7504 BNX2X_DEV_INFO("UNDI is active! reset device\n");
7506 /* try unload UNDI on port 0 */
7507 bp->func = 0;
7508 bp->fw_seq =
7509 (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
7510 DRV_MSG_SEQ_NUMBER_MASK);
7511 reset_code = bnx2x_fw_command(bp, reset_code);
7513 /* if UNDI is loaded on the other port */
7514 if (reset_code != FW_MSG_CODE_DRV_UNLOAD_COMMON) {
7516 /* send "DONE" for previous unload */
7517 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
7519 /* unload UNDI on port 1 */
7520 bp->func = 1;
7521 bp->fw_seq =
7522 (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
7523 DRV_MSG_SEQ_NUMBER_MASK);
7524 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
7526 bnx2x_fw_command(bp, reset_code);
7529 /* now it's safe to release the lock */
7530 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
7532 bnx2x_undi_int_disable(bp, func);
7534 /* close input traffic and wait for it */
7535 /* Do not rcv packets to BRB */
7537 (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_DRV_MASK :
7538 NIG_REG_LLH0_BRB1_DRV_MASK), 0x0);
7539 /* Do not direct rcv packets that are not for MCP to the BRB */
7542 (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_NOT_MCP :
7543 NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
7546 (BP_PORT(bp) ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
7547 MISC_REG_AEU_MASK_ATTN_FUNC_0), 0);
7550 /* save NIG port swap info */
7551 swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
7552 swap_en = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
7555 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
7558 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
7560 /* take the NIG out of reset and restore swap values */
7562 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
7563 MISC_REGISTERS_RESET_REG_1_RST_NIG);
7564 REG_WR(bp, NIG_REG_PORT_SWAP, swap_val);
7565 REG_WR(bp, NIG_REG_STRAP_OVERRIDE, swap_en);
7567 /* send unload done to the MCP */
7568 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
7570 /* restore our func and fw_seq */
7571 bp->func = func;
7572 bp->fw_seq =
7573 (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
7574 DRV_MSG_SEQ_NUMBER_MASK);
7577 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
7581 static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
7583 u32 val, val2, val3, val4, id;
7586 /* Get the chip revision id and number. */
7587 /* chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3 */
7588 val = REG_RD(bp, MISC_REG_CHIP_NUM);
7589 id = ((val & 0xffff) << 16);
7590 val = REG_RD(bp, MISC_REG_CHIP_REV);
7591 id |= ((val & 0xf) << 12);
7592 val = REG_RD(bp, MISC_REG_CHIP_METAL);
7593 id |= ((val & 0xff) << 4);
7594 val = REG_RD(bp, MISC_REG_BOND_ID);
7596 bp->common.chip_id = id;
7597 bp->link_params.chip_id = bp->common.chip_id;
7598 BNX2X_DEV_INFO("chip ID is 0x%x\n", id);
7600 val = (REG_RD(bp, 0x2874) & 0x55);
7601 if ((bp->common.chip_id & 0x1) ||
7602 (CHIP_IS_E1(bp) && val) || (CHIP_IS_E1H(bp) && (val == 0x55))) {
7603 bp->flags |= ONE_PORT_FLAG;
7604 BNX2X_DEV_INFO("single port device\n");
7607 val = REG_RD(bp, MCP_REG_MCPR_NVM_CFG4);
7608 bp->common.flash_size = (NVRAM_1MB_SIZE <<
7609 (val & MCPR_NVM_CFG4_FLASH_SIZE));
7610 BNX2X_DEV_INFO("flash_size 0x%x (%d)\n",
7611 bp->common.flash_size, bp->common.flash_size);
7613 bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
7614 bp->link_params.shmem_base = bp->common.shmem_base;
7615 BNX2X_DEV_INFO("shmem offset is 0x%x\n", bp->common.shmem_base);
7617 if (!bp->common.shmem_base ||
7618 (bp->common.shmem_base < 0xA0000) ||
7619 (bp->common.shmem_base >= 0xC0000)) {
7620 BNX2X_DEV_INFO("MCP not active\n");
7621 bp->flags |= NO_MCP_FLAG;
7625 val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
7626 if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
7627 != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
7628 BNX2X_ERR("BAD MCP validity signature\n");
7630 bp->common.hw_config = SHMEM_RD(bp, dev_info.shared_hw_config.config);
7631 BNX2X_DEV_INFO("hw_config 0x%08x\n", bp->common.hw_config);
7633 bp->link_params.hw_led_mode = ((bp->common.hw_config &
7634 SHARED_HW_CFG_LED_MODE_MASK) >>
7635 SHARED_HW_CFG_LED_MODE_SHIFT);
7637 bp->link_params.feature_config_flags = 0;
7638 val = SHMEM_RD(bp, dev_info.shared_feature_config.config);
7639 if (val & SHARED_FEAT_CFG_OVERRIDE_PREEMPHASIS_CFG_ENABLED)
7640 bp->link_params.feature_config_flags |=
7641 FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
7643 bp->link_params.feature_config_flags &=
7644 ~FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
7646 val = SHMEM_RD(bp, dev_info.bc_rev) >> 8;
7647 bp->common.bc_ver = val;
7648 BNX2X_DEV_INFO("bc_ver %X\n", val);
7649 if (val < BNX2X_BC_VER) {
7650 /* for now only warn;
7651 * later we might need to enforce this */
7652 BNX2X_ERR("This driver needs bc_ver %X but found %X,"
7653 " please upgrade BC\n", BNX2X_BC_VER, val);
7656 if (BP_E1HVN(bp) == 0) {
7657 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_PMC, &pmc);
7658 bp->flags |= (pmc & PCI_PM_CAP_PME_D3cold) ? 0 : NO_WOL_FLAG;
7660 /* no WOL capability for E1HVN != 0 */
7661 bp->flags |= NO_WOL_FLAG;
7663 BNX2X_DEV_INFO("%sWoL capable\n",
7664 (bp->flags & NO_WOL_FLAG) ? "not " : "");
7666 val = SHMEM_RD(bp, dev_info.shared_hw_config.part_num);
7667 val2 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[4]);
7668 val3 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[8]);
7669 val4 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[12]);
7671 printk(KERN_INFO PFX "part number %X-%X-%X-%X\n",
7672 val, val2, val3, val4);
7675 static void __devinit bnx2x_link_settings_supported(struct bnx2x *bp,
7678 int port = BP_PORT(bp);
7681 switch (switch_cfg) {
7683 BNX2X_DEV_INFO("switch_cfg 0x%x (1G)\n", switch_cfg);
7685 ext_phy_type =
7686 SERDES_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
7687 switch (ext_phy_type) {
7688 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT:
7689 BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
7692 bp->port.supported |= (SUPPORTED_10baseT_Half |
7693 SUPPORTED_10baseT_Full |
7694 SUPPORTED_100baseT_Half |
7695 SUPPORTED_100baseT_Full |
7696 SUPPORTED_1000baseT_Full |
7697 SUPPORTED_2500baseX_Full |
7702 SUPPORTED_Asym_Pause);
7705 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_BCM5482:
7706 BNX2X_DEV_INFO("ext_phy_type 0x%x (5482)\n",
7709 bp->port.supported |= (SUPPORTED_10baseT_Half |
7710 SUPPORTED_10baseT_Full |
7711 SUPPORTED_100baseT_Half |
7712 SUPPORTED_100baseT_Full |
7713 SUPPORTED_1000baseT_Full |
7718 SUPPORTED_Asym_Pause);
7722 BNX2X_ERR("NVRAM config error. "
7723 "BAD SerDes ext_phy_config 0x%x\n",
7724 bp->link_params.ext_phy_config);
7728 bp->port.phy_addr = REG_RD(bp, NIG_REG_SERDES0_CTRL_PHY_ADDR +
7730 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
7733 case SWITCH_CFG_10G:
7734 BNX2X_DEV_INFO("switch_cfg 0x%x (10G)\n", switch_cfg);
7736 ext_phy_type =
7737 XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
7738 switch (ext_phy_type) {
7739 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
7740 BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
7743 bp->port.supported |= (SUPPORTED_10baseT_Half |
7744 SUPPORTED_10baseT_Full |
7745 SUPPORTED_100baseT_Half |
7746 SUPPORTED_100baseT_Full |
7747 SUPPORTED_1000baseT_Full |
7748 SUPPORTED_2500baseX_Full |
7749 SUPPORTED_10000baseT_Full |
7754 SUPPORTED_Asym_Pause);
7757 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
7758 BNX2X_DEV_INFO("ext_phy_type 0x%x (8072)\n",
7761 bp->port.supported |= (SUPPORTED_10000baseT_Full |
7762 SUPPORTED_1000baseT_Full |
7766 SUPPORTED_Asym_Pause);
7769 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
7770 BNX2X_DEV_INFO("ext_phy_type 0x%x (8073)\n",
7773 bp->port.supported |= (SUPPORTED_10000baseT_Full |
7774 SUPPORTED_2500baseX_Full |
7775 SUPPORTED_1000baseT_Full |
7779 SUPPORTED_Asym_Pause);
7782 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
7783 BNX2X_DEV_INFO("ext_phy_type 0x%x (8705)\n",
7786 bp->port.supported |= (SUPPORTED_10000baseT_Full |
7789 SUPPORTED_Asym_Pause);
7792 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
7793 BNX2X_DEV_INFO("ext_phy_type 0x%x (8706)\n",
7796 bp->port.supported |= (SUPPORTED_10000baseT_Full |
7797 SUPPORTED_1000baseT_Full |
7800 SUPPORTED_Asym_Pause);
7803 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
7804 BNX2X_DEV_INFO("ext_phy_type 0x%x (8726)\n",
7807 bp->port.supported |= (SUPPORTED_10000baseT_Full |
7808 SUPPORTED_1000baseT_Full |
7812 SUPPORTED_Asym_Pause);
7815 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
7816 BNX2X_DEV_INFO("ext_phy_type 0x%x (SFX7101)\n",
7819 bp->port.supported |= (SUPPORTED_10000baseT_Full |
7823 SUPPORTED_Asym_Pause);
7826 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481:
7827 BNX2X_DEV_INFO("ext_phy_type 0x%x (BCM8481)\n",
7830 bp->port.supported |= (SUPPORTED_10baseT_Half |
7831 SUPPORTED_10baseT_Full |
7832 SUPPORTED_100baseT_Half |
7833 SUPPORTED_100baseT_Full |
7834 SUPPORTED_1000baseT_Full |
7835 SUPPORTED_10000baseT_Full |
7839 SUPPORTED_Asym_Pause);
7842 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
7843 BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
7844 bp->link_params.ext_phy_config);
7848 BNX2X_ERR("NVRAM config error. "
7849 "BAD XGXS ext_phy_config 0x%x\n",
7850 bp->link_params.ext_phy_config);
7854 bp->port.phy_addr = REG_RD(bp, NIG_REG_XGXS0_CTRL_PHY_ADDR +
7856 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
7861 BNX2X_ERR("BAD switch_cfg link_config 0x%x\n",
7862 bp->port.link_config);
7865 bp->link_params.phy_addr = bp->port.phy_addr;
7867 /* mask what we support according to speed_cap_mask */
7868 if (!(bp->link_params.speed_cap_mask &
7869 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF))
7870 bp->port.supported &= ~SUPPORTED_10baseT_Half;
7872 if (!(bp->link_params.speed_cap_mask &
7873 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL))
7874 bp->port.supported &= ~SUPPORTED_10baseT_Full;
7876 if (!(bp->link_params.speed_cap_mask &
7877 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF))
7878 bp->port.supported &= ~SUPPORTED_100baseT_Half;
7880 if (!(bp->link_params.speed_cap_mask &
7881 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL))
7882 bp->port.supported &= ~SUPPORTED_100baseT_Full;
7884 if (!(bp->link_params.speed_cap_mask &
7885 PORT_HW_CFG_SPEED_CAPABILITY_D0_1G))
7886 bp->port.supported &= ~(SUPPORTED_1000baseT_Half |
7887 SUPPORTED_1000baseT_Full);
7889 if (!(bp->link_params.speed_cap_mask &
7890 PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G))
7891 bp->port.supported &= ~SUPPORTED_2500baseX_Full;
7893 if (!(bp->link_params.speed_cap_mask &
7894 PORT_HW_CFG_SPEED_CAPABILITY_D0_10G))
7895 bp->port.supported &= ~SUPPORTED_10000baseT_Full;
7897 BNX2X_DEV_INFO("supported 0x%x\n", bp->port.supported);
7900 static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
7902 bp->link_params.req_duplex = DUPLEX_FULL;
7904 switch (bp->port.link_config & PORT_FEATURE_LINK_SPEED_MASK) {
7905 case PORT_FEATURE_LINK_SPEED_AUTO:
7906 if (bp->port.supported & SUPPORTED_Autoneg) {
7907 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
7908 bp->port.advertising = bp->port.supported;
7909 } else {
7910 u32 ext_phy_type =
7911 XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
7913 if ((ext_phy_type ==
7914 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705) ||
7916 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706)) {
7917 /* force 10G, no AN */
7918 bp->link_params.req_line_speed = SPEED_10000;
7919 bp->port.advertising =
7920 (ADVERTISED_10000baseT_Full |
7924 BNX2X_ERR("NVRAM config error. "
7925 "Invalid link_config 0x%x"
7926 " Autoneg not supported\n",
7927 bp->port.link_config);
7932 case PORT_FEATURE_LINK_SPEED_10M_FULL:
7933 if (bp->port.supported & SUPPORTED_10baseT_Full) {
7934 bp->link_params.req_line_speed = SPEED_10;
7935 bp->port.advertising = (ADVERTISED_10baseT_Full |
7938 BNX2X_ERR("NVRAM config error. "
7939 "Invalid link_config 0x%x"
7940 " speed_cap_mask 0x%x\n",
7941 bp->port.link_config,
7942 bp->link_params.speed_cap_mask);
7947 case PORT_FEATURE_LINK_SPEED_10M_HALF:
7948 if (bp->port.supported & SUPPORTED_10baseT_Half) {
7949 bp->link_params.req_line_speed = SPEED_10;
7950 bp->link_params.req_duplex = DUPLEX_HALF;
7951 bp->port.advertising = (ADVERTISED_10baseT_Half |
7954 BNX2X_ERR("NVRAM config error. "
7955 "Invalid link_config 0x%x"
7956 " speed_cap_mask 0x%x\n",
7957 bp->port.link_config,
7958 bp->link_params.speed_cap_mask);
7963 case PORT_FEATURE_LINK_SPEED_100M_FULL:
7964 if (bp->port.supported & SUPPORTED_100baseT_Full) {
7965 bp->link_params.req_line_speed = SPEED_100;
7966 bp->port.advertising = (ADVERTISED_100baseT_Full |
7969 BNX2X_ERR("NVRAM config error. "
7970 "Invalid link_config 0x%x"
7971 " speed_cap_mask 0x%x\n",
7972 bp->port.link_config,
7973 bp->link_params.speed_cap_mask);
7978 case PORT_FEATURE_LINK_SPEED_100M_HALF:
7979 if (bp->port.supported & SUPPORTED_100baseT_Half) {
7980 bp->link_params.req_line_speed = SPEED_100;
7981 bp->link_params.req_duplex = DUPLEX_HALF;
7982 bp->port.advertising = (ADVERTISED_100baseT_Half |
7985 BNX2X_ERR("NVRAM config error. "
7986 "Invalid link_config 0x%x"
7987 " speed_cap_mask 0x%x\n",
7988 bp->port.link_config,
7989 bp->link_params.speed_cap_mask);
7994 case PORT_FEATURE_LINK_SPEED_1G:
7995 if (bp->port.supported & SUPPORTED_1000baseT_Full) {
7996 bp->link_params.req_line_speed = SPEED_1000;
7997 bp->port.advertising = (ADVERTISED_1000baseT_Full |
8000 BNX2X_ERR("NVRAM config error. "
8001 "Invalid link_config 0x%x"
8002 " speed_cap_mask 0x%x\n",
8003 bp->port.link_config,
8004 bp->link_params.speed_cap_mask);
8009 case PORT_FEATURE_LINK_SPEED_2_5G:
8010 if (bp->port.supported & SUPPORTED_2500baseX_Full) {
8011 bp->link_params.req_line_speed = SPEED_2500;
8012 bp->port.advertising = (ADVERTISED_2500baseX_Full |
8015 BNX2X_ERR("NVRAM config error. "
8016 "Invalid link_config 0x%x"
8017 " speed_cap_mask 0x%x\n",
8018 bp->port.link_config,
8019 bp->link_params.speed_cap_mask);
8024 case PORT_FEATURE_LINK_SPEED_10G_CX4:
8025 case PORT_FEATURE_LINK_SPEED_10G_KX4:
8026 case PORT_FEATURE_LINK_SPEED_10G_KR:
8027 if (bp->port.supported & SUPPORTED_10000baseT_Full) {
8028 bp->link_params.req_line_speed = SPEED_10000;
8029 bp->port.advertising = (ADVERTISED_10000baseT_Full |
8032 BNX2X_ERR("NVRAM config error. "
8033 "Invalid link_config 0x%x"
8034 " speed_cap_mask 0x%x\n",
8035 bp->port.link_config,
8036 bp->link_params.speed_cap_mask);
8042 BNX2X_ERR("NVRAM config error. "
8043 "BAD link speed link_config 0x%x\n",
8044 bp->port.link_config);
8045 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
8046 bp->port.advertising = bp->port.supported;
8050 bp->link_params.req_flow_ctrl = (bp->port.link_config &
8051 PORT_FEATURE_FLOW_CONTROL_MASK);
8052 if ((bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO) &&
8053 !(bp->port.supported & SUPPORTED_Autoneg))
8054 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_NONE;
8056 BNX2X_DEV_INFO("req_line_speed %d req_duplex %d req_flow_ctrl 0x%x"
8057 " advertising 0x%x\n",
8058 bp->link_params.req_line_speed,
8059 bp->link_params.req_duplex,
8060 bp->link_params.req_flow_ctrl, bp->port.advertising);
8063 static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp)
8065 int port = BP_PORT(bp);
8070 bp->link_params.bp = bp;
8071 bp->link_params.port = port;
8073 bp->link_params.lane_config =
8074 SHMEM_RD(bp, dev_info.port_hw_config[port].lane_config);
8075 bp->link_params.ext_phy_config =
8076 SHMEM_RD(bp,
8077 dev_info.port_hw_config[port].external_phy_config);
8078 bp->link_params.speed_cap_mask =
8079 SHMEM_RD(bp,
8080 dev_info.port_hw_config[port].speed_capability_mask);
8082 bp->port.link_config =
8083 SHMEM_RD(bp, dev_info.port_feature_config[port].link_config);
8085 /* Get the 4 lanes xgxs config rx and tx */
8086 for (i = 0; i < 2; i++) {
8087 val = SHMEM_RD(bp,
8088 dev_info.port_hw_config[port].xgxs_config_rx[i<<1]);
8089 bp->link_params.xgxs_config_rx[i << 1] = ((val>>16) & 0xffff);
8090 bp->link_params.xgxs_config_rx[(i << 1) + 1] = (val & 0xffff);
8092 val = SHMEM_RD(bp,
8093 dev_info.port_hw_config[port].xgxs_config_tx[i<<1]);
8094 bp->link_params.xgxs_config_tx[i << 1] = ((val>>16) & 0xffff);
8095 bp->link_params.xgxs_config_tx[(i << 1) + 1] = (val & 0xffff);
8098 config = SHMEM_RD(bp, dev_info.port_feature_config[port].config);
8099 if (config & PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_ENABLED)
8100 bp->link_params.feature_config_flags |=
8101 FEATURE_CONFIG_MODULE_ENFORCMENT_ENABLED;
8103 bp->link_params.feature_config_flags &=
8104 ~FEATURE_CONFIG_MODULE_ENFORCMENT_ENABLED;
8106 /* If the device is capable of WoL, set the default state according to the HW config */
8109 bp->wol = (!(bp->flags & NO_WOL_FLAG) &&
8110 (config & PORT_FEATURE_WOL_ENABLED));
8112 BNX2X_DEV_INFO("lane_config 0x%08x ext_phy_config 0x%08x"
8113 " speed_cap_mask 0x%08x link_config 0x%08x\n",
8114 bp->link_params.lane_config,
8115 bp->link_params.ext_phy_config,
8116 bp->link_params.speed_cap_mask, bp->port.link_config);
8118 bp->link_params.switch_cfg = (bp->port.link_config &
8119 PORT_FEATURE_CONNECTED_SWITCH_MASK);
8120 bnx2x_link_settings_supported(bp, bp->link_params.switch_cfg);
8122 bnx2x_link_settings_requested(bp);
8124 val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper);
8125 val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower);
8126 bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
8127 bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
8128 bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
8129 bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
8130 bp->dev->dev_addr[4] = (u8)(val >> 8 & 0xff);
8131 bp->dev->dev_addr[5] = (u8)(val & 0xff);
8132 memcpy(bp->link_params.mac_addr, bp->dev->dev_addr, ETH_ALEN);
8133 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
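/* Illustrative sketch, not part of the driver build: shmem returns the
 * MAC as two 32-bit words -- mac_upper carries bytes 0-1 in its low half,
 * mac_lower carries bytes 2-5 -- and the shifts above just unpack them:
 */
#if 0
static void mac_from_shmem(u32 upper, u32 lower, u8 *mac)
{
	mac[0] = (u8)(upper >> 8 & 0xff);
	mac[1] = (u8)(upper & 0xff);
	mac[2] = (u8)(lower >> 24 & 0xff);
	mac[3] = (u8)(lower >> 16 & 0xff);
	mac[4] = (u8)(lower >> 8 & 0xff);
	mac[5] = (u8)(lower & 0xff);
}
#endif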
8136 static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
8138 int func = BP_FUNC(bp);
8142 bnx2x_get_common_hwinfo(bp);
8146 if (CHIP_IS_E1H(bp)) {
8147 bp->mf_config =
8148 SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
8150 val = (SHMEM_RD(bp, mf_cfg.func_mf_config[func].e1hov_tag) &
8151 FUNC_MF_CFG_E1HOV_TAG_MASK);
8152 if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
8156 BNX2X_DEV_INFO("MF mode E1HOV for func %d is %d "
8158 func, bp->e1hov, bp->e1hov);
8160 BNX2X_DEV_INFO("single function mode\n");
8162 BNX2X_ERR("!!! No valid E1HOV for func %d,"
8163 " aborting\n", func);
8169 if (!BP_NOMCP(bp)) {
8170 bnx2x_get_port_hwinfo(bp);
8172 bp->fw_seq = (SHMEM_RD(bp, func_mb[func].drv_mb_header) &
8173 DRV_MSG_SEQ_NUMBER_MASK);
8174 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
8178 val2 = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_upper);
8179 val = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_lower);
8180 if ((val2 != FUNC_MF_CFG_UPPERMAC_DEFAULT) &&
8181 (val != FUNC_MF_CFG_LOWERMAC_DEFAULT)) {
8182 bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
8183 bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
8184 bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
8185 bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
8186 bp->dev->dev_addr[4] = (u8)(val >> 8 & 0xff);
8187 bp->dev->dev_addr[5] = (u8)(val & 0xff);
8188 memcpy(bp->link_params.mac_addr, bp->dev->dev_addr,
8190 memcpy(bp->dev->perm_addr, bp->dev->dev_addr,
8198 /* only supposed to happen on emulation/FPGA */
8199 BNX2X_ERR("warning random MAC workaround active\n");
8200 random_ether_addr(bp->dev->dev_addr);
8201 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
8207 static int __devinit bnx2x_init_bp(struct bnx2x *bp)
8209 int func = BP_FUNC(bp);
8213 /* Disable interrupt handling until HW is initialized */
8214 atomic_set(&bp->intr_sem, 1);
8216 mutex_init(&bp->port.phy_mutex);
8218 INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task);
8219 INIT_WORK(&bp->reset_task, bnx2x_reset_task);
8221 rc = bnx2x_get_hwinfo(bp);
8223 /* need to reset chip if undi was active */
8225 bnx2x_undi_unload(bp);
8227 if (CHIP_REV_IS_FPGA(bp))
8228 printk(KERN_ERR PFX "FPGA detected\n");
8230 if (BP_NOMCP(bp) && (func == 0))
8232 "MCP disabled, must load devices in order!\n");
8234 /* Set multi queue mode */
8235 if ((multi_mode != ETH_RSS_MODE_DISABLED) &&
8236 ((int_mode == INT_MODE_INTx) || (int_mode == INT_MODE_MSI))) {
8238 "Multi disabled since int_mode requested is not MSI-X\n");
8239 multi_mode = ETH_RSS_MODE_DISABLED;
8241 bp->multi_mode = multi_mode;
8246 bp->flags &= ~TPA_ENABLE_FLAG;
8247 bp->dev->features &= ~NETIF_F_LRO;
8249 bp->flags |= TPA_ENABLE_FLAG;
8250 bp->dev->features |= NETIF_F_LRO;
8255 bp->tx_ring_size = MAX_TX_AVAIL;
8256 bp->rx_ring_size = MAX_RX_AVAIL;
8263 timer_interval = (CHIP_REV_IS_SLOW(bp) ? 5*HZ : HZ);
8264 bp->current_interval = (poll ? poll : timer_interval);
8266 init_timer(&bp->timer);
8267 bp->timer.expires = jiffies + bp->current_interval;
8268 bp->timer.data = (unsigned long) bp;
8269 bp->timer.function = bnx2x_timer;
8275 * ethtool service functions
8278 /* All ethtool functions called with rtnl_lock */
8280 static int bnx2x_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
8282 struct bnx2x *bp = netdev_priv(dev);
8284 cmd->supported = bp->port.supported;
8285 cmd->advertising = bp->port.advertising;
8287 if (netif_carrier_ok(dev)) {
8288 cmd->speed = bp->link_vars.line_speed;
8289 cmd->duplex = bp->link_vars.duplex;
8291 cmd->speed = bp->link_params.req_line_speed;
8292 cmd->duplex = bp->link_params.req_duplex;
8297 vn_max_rate = ((bp->mf_config & FUNC_MF_CFG_MAX_BW_MASK) >>
8298 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
8299 if (vn_max_rate < cmd->speed)
8300 cmd->speed = vn_max_rate;
8303 if (bp->link_params.switch_cfg == SWITCH_CFG_10G) {
8304 u32 ext_phy_type =
8305 XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
8307 switch (ext_phy_type) {
8308 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
8309 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
8310 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
8311 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
8312 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
8313 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
8314 cmd->port = PORT_FIBRE;
8317 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
8318 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481:
8319 cmd->port = PORT_TP;
8322 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
8323 BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
8324 bp->link_params.ext_phy_config);
8328 DP(NETIF_MSG_LINK, "BAD XGXS ext_phy_config 0x%x\n",
8329 bp->link_params.ext_phy_config);
8333 cmd->port = PORT_TP;
8335 cmd->phy_address = bp->port.phy_addr;
8336 cmd->transceiver = XCVR_INTERNAL;
8338 if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
8339 cmd->autoneg = AUTONEG_ENABLE;
8341 cmd->autoneg = AUTONEG_DISABLE;
8346 DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
8347 DP_LEVEL " supported 0x%x advertising 0x%x speed %d\n"
8348 DP_LEVEL " duplex %d port %d phy_address %d transceiver %d\n"
8349 DP_LEVEL " autoneg %d maxtxpkt %d maxrxpkt %d\n",
8350 cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
8351 cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
8352 cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);
8357 static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
8359 struct bnx2x *bp = netdev_priv(dev);
8365 DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
8366 DP_LEVEL " supported 0x%x advertising 0x%x speed %d\n"
8367 DP_LEVEL " duplex %d port %d phy_address %d transceiver %d\n"
8368 DP_LEVEL " autoneg %d maxtxpkt %d maxrxpkt %d\n",
8369 cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
8370 cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
8371 cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);
8373 if (cmd->autoneg == AUTONEG_ENABLE) {
8374 if (!(bp->port.supported & SUPPORTED_Autoneg)) {
8375 DP(NETIF_MSG_LINK, "Autoneg not supported\n");
8379 /* advertise the requested speed and duplex if supported */
8380 cmd->advertising &= bp->port.supported;
8382 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
8383 bp->link_params.req_duplex = DUPLEX_FULL;
8384 bp->port.advertising |= (ADVERTISED_Autoneg |
8387 } else { /* forced speed */
8388 /* advertise the requested speed and duplex if supported */
8389 switch (cmd->speed) {
8391 if (cmd->duplex == DUPLEX_FULL) {
8392 if (!(bp->port.supported &
8393 SUPPORTED_10baseT_Full)) {
8395 "10M full not supported\n");
8399 advertising = (ADVERTISED_10baseT_Full |
8402 if (!(bp->port.supported &
8403 SUPPORTED_10baseT_Half)) {
8405 "10M half not supported\n");
8409 advertising = (ADVERTISED_10baseT_Half |
8415 if (cmd->duplex == DUPLEX_FULL) {
8416 if (!(bp->port.supported &
8417 SUPPORTED_100baseT_Full)) {
8419 "100M full not supported\n");
8423 advertising = (ADVERTISED_100baseT_Full |
8426 if (!(bp->port.supported &
8427 SUPPORTED_100baseT_Half)) {
8429 "100M half not supported\n");
8433 advertising = (ADVERTISED_100baseT_Half |
8439 if (cmd->duplex != DUPLEX_FULL) {
8440 DP(NETIF_MSG_LINK, "1G half not supported\n");
8444 if (!(bp->port.supported & SUPPORTED_1000baseT_Full)) {
8445 DP(NETIF_MSG_LINK, "1G full not supported\n");
8449 advertising = (ADVERTISED_1000baseT_Full |
8454 if (cmd->duplex != DUPLEX_FULL) {
8456 "2.5G half not supported\n");
8460 if (!(bp->port.supported & SUPPORTED_2500baseX_Full)) {
8462 "2.5G full not supported\n");
8466 advertising = (ADVERTISED_2500baseX_Full |
8471 if (cmd->duplex != DUPLEX_FULL) {
8472 DP(NETIF_MSG_LINK, "10G half not supported\n");
8476 if (!(bp->port.supported & SUPPORTED_10000baseT_Full)) {
8477 DP(NETIF_MSG_LINK, "10G full not supported\n");
8481 advertising = (ADVERTISED_10000baseT_Full |
8486 DP(NETIF_MSG_LINK, "Unsupported speed\n");
8490 bp->link_params.req_line_speed = cmd->speed;
8491 bp->link_params.req_duplex = cmd->duplex;
8492 bp->port.advertising = advertising;
8495 DP(NETIF_MSG_LINK, "req_line_speed %d\n"
8496 DP_LEVEL " req_duplex %d advertising 0x%x\n",
8497 bp->link_params.req_line_speed, bp->link_params.req_duplex,
8498 bp->port.advertising);
8500 if (netif_running(dev)) {
8501 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
8508 #define PHY_FW_VER_LEN 10
8510 static void bnx2x_get_drvinfo(struct net_device *dev,
8511 struct ethtool_drvinfo *info)
8513 struct bnx2x *bp = netdev_priv(dev);
8514 u8 phy_fw_ver[PHY_FW_VER_LEN];
8516 strcpy(info->driver, DRV_MODULE_NAME);
8517 strcpy(info->version, DRV_MODULE_VERSION);
8519 phy_fw_ver[0] = '\0';
8521 bnx2x_acquire_phy_lock(bp);
8522 bnx2x_get_ext_phy_fw_version(&bp->link_params,
8523 (bp->state != BNX2X_STATE_CLOSED),
8524 phy_fw_ver, PHY_FW_VER_LEN);
8525 bnx2x_release_phy_lock(bp);
8528 snprintf(info->fw_version, 32, "BC:%d.%d.%d%s%s",
8529 (bp->common.bc_ver & 0xff0000) >> 16,
8530 (bp->common.bc_ver & 0xff00) >> 8,
8531 (bp->common.bc_ver & 0xff),
8532 ((phy_fw_ver[0] != '\0') ? " PHY:" : ""), phy_fw_ver);
8533 strcpy(info->bus_info, pci_name(bp->pdev));
8534 info->n_stats = BNX2X_NUM_STATS;
8535 info->testinfo_len = BNX2X_NUM_TESTS;
8536 info->eedump_len = bp->common.flash_size;
8537 info->regdump_len = 0;
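/* Illustrative sketch, not part of the driver build: bc_ver holds one
 * version component per byte, which the snprintf above prints as
 * maj.min.fix (e.g. 0x070a04 -> "7.10.4").  The unpacking on its own:
 */
#if 0
static void bc_ver_unpack(u32 bc_ver, u8 *maj, u8 *min, u8 *fix)
{
	*maj = (bc_ver >> 16) & 0xff;
	*min = (bc_ver >> 8) & 0xff;
	*fix = bc_ver & 0xff;
}
#endif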
8540 #define IS_E1_ONLINE(info) (((info) & RI_E1_ONLINE) == RI_E1_ONLINE)
8541 #define IS_E1H_ONLINE(info) (((info) & RI_E1H_ONLINE) == RI_E1H_ONLINE)
8543 static int bnx2x_get_regs_len(struct net_device *dev)
8545 static u32 regdump_len;
8546 struct bnx2x *bp = netdev_priv(dev);
8552 if (CHIP_IS_E1(bp)) {
8553 for (i = 0; i < REGS_COUNT; i++)
8554 if (IS_E1_ONLINE(reg_addrs[i].info))
8555 regdump_len += reg_addrs[i].size;
8557 for (i = 0; i < WREGS_COUNT_E1; i++)
8558 if (IS_E1_ONLINE(wreg_addrs_e1[i].info))
8559 regdump_len += wreg_addrs_e1[i].size *
8560 (1 + wreg_addrs_e1[i].read_regs_count);
8563 for (i = 0; i < REGS_COUNT; i++)
8564 if (IS_E1H_ONLINE(reg_addrs[i].info))
8565 regdump_len += reg_addrs[i].size;
8567 for (i = 0; i < WREGS_COUNT_E1H; i++)
8568 if (IS_E1H_ONLINE(wreg_addrs_e1h[i].info))
8569 regdump_len += wreg_addrs_e1h[i].size *
8570 (1 + wreg_addrs_e1h[i].read_regs_count);
8573 regdump_len += sizeof(struct dump_hdr);
8578 static void bnx2x_get_regs(struct net_device *dev,
8579 struct ethtool_regs *regs, void *_p)
8582 struct bnx2x *bp = netdev_priv(dev);
8583 struct dump_hdr dump_hdr = {0};
8586 memset(p, 0, regs->len);
8588 if (!netif_running(bp->dev))
8591 dump_hdr.hdr_size = (sizeof(struct dump_hdr) / 4) - 1;
8592 dump_hdr.dump_sign = dump_sign_all;
8593 dump_hdr.xstorm_waitp = REG_RD(bp, XSTORM_WAITP_ADDR);
8594 dump_hdr.tstorm_waitp = REG_RD(bp, TSTORM_WAITP_ADDR);
8595 dump_hdr.ustorm_waitp = REG_RD(bp, USTORM_WAITP_ADDR);
8596 dump_hdr.cstorm_waitp = REG_RD(bp, CSTORM_WAITP_ADDR);
8597 dump_hdr.info = CHIP_IS_E1(bp) ? RI_E1_ONLINE : RI_E1H_ONLINE;
8599 memcpy(p, &dump_hdr, sizeof(struct dump_hdr));
8600 p += dump_hdr.hdr_size + 1;
8602 if (CHIP_IS_E1(bp)) {
8603 for (i = 0; i < REGS_COUNT; i++)
8604 if (IS_E1_ONLINE(reg_addrs[i].info))
8605 for (j = 0; j < reg_addrs[i].size; j++)
8607 reg_addrs[i].addr + j*4);
8610 for (i = 0; i < REGS_COUNT; i++)
8611 if (IS_E1H_ONLINE(reg_addrs[i].info))
8612 for (j = 0; j < reg_addrs[i].size; j++)
8614 reg_addrs[i].addr + j*4);
8618 static void bnx2x_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
8620 struct bnx2x *bp = netdev_priv(dev);
8622 if (bp->flags & NO_WOL_FLAG) {
8626 wol->supported = WAKE_MAGIC;
8628 wol->wolopts = WAKE_MAGIC;
8632 memset(&wol->sopass, 0, sizeof(wol->sopass));
8635 static int bnx2x_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
8637 struct bnx2x *bp = netdev_priv(dev);
8639 if (wol->wolopts & ~WAKE_MAGIC)
8642 if (wol->wolopts & WAKE_MAGIC) {
8643 if (bp->flags & NO_WOL_FLAG)
8653 static u32 bnx2x_get_msglevel(struct net_device *dev)
8655 struct bnx2x *bp = netdev_priv(dev);
8657 return bp->msglevel;
8660 static void bnx2x_set_msglevel(struct net_device *dev, u32 level)
8662 struct bnx2x *bp = netdev_priv(dev);
8664 if (capable(CAP_NET_ADMIN))
8665 bp->msglevel = level;
8668 static int bnx2x_nway_reset(struct net_device *dev)
8670 struct bnx2x *bp = netdev_priv(dev);
8675 if (netif_running(dev)) {
8676 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
8677 bnx2x_link_set(bp);
8678 }
8684 bnx2x_get_link(struct net_device *dev)
8686 struct bnx2x *bp = netdev_priv(dev);
8688 return bp->link_vars.link_up;
8691 static int bnx2x_get_eeprom_len(struct net_device *dev)
8693 struct bnx2x *bp = netdev_priv(dev);
8695 return bp->common.flash_size;
8698 static int bnx2x_acquire_nvram_lock(struct bnx2x *bp)
8700 int port = BP_PORT(bp);
8704 /* adjust timeout for emulation/FPGA */
8705 count = NVRAM_TIMEOUT_COUNT;
8706 if (CHIP_REV_IS_SLOW(bp))
8709 /* request access to nvram interface */
8710 REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
8711 (MCPR_NVM_SW_ARB_ARB_REQ_SET1 << port));
8713 for (i = 0; i < count*10; i++) {
8714 val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
8715 if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))
8721 if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))) {
8722 DP(BNX2X_MSG_NVM, "cannot get access to nvram interface\n");
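/* The SW arbitration register carries one request/grant bit pair per
 * port: port 0 uses MCPR_NVM_SW_ARB_ARB_REQ_SET1/ARB1 directly, and
 * port 1 the same masks shifted left by one.  The poll above simply
 * waits for this port's grant bit to appear before giving up.
 */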
8729 static int bnx2x_release_nvram_lock(struct bnx2x *bp)
8731 int port = BP_PORT(bp);
8735 /* adjust timeout for emulation/FPGA */
8736 count = NVRAM_TIMEOUT_COUNT;
8737 if (CHIP_REV_IS_SLOW(bp))
8740 /* relinquish nvram interface */
8741 REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
8742 (MCPR_NVM_SW_ARB_ARB_REQ_CLR1 << port));
8744 for (i = 0; i < count*10; i++) {
8745 val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
8746 if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)))
8752 if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)) {
8753 DP(BNX2X_MSG_NVM, "cannot free access to nvram interface\n");
8760 static void bnx2x_enable_nvram_access(struct bnx2x *bp)
8764 val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
8766 /* enable both bits, even on read */
8767 REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
8768 (val | MCPR_NVM_ACCESS_ENABLE_EN |
8769 MCPR_NVM_ACCESS_ENABLE_WR_EN));
8772 static void bnx2x_disable_nvram_access(struct bnx2x *bp)
8776 val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
8778 /* disable both bits, even after read */
8779 REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
8780 (val & ~(MCPR_NVM_ACCESS_ENABLE_EN |
8781 MCPR_NVM_ACCESS_ENABLE_WR_EN)));
8784 static int bnx2x_nvram_read_dword(struct bnx2x *bp, u32 offset, __be32 *ret_val,
8790 /* build the command word */
8791 cmd_flags |= MCPR_NVM_COMMAND_DOIT;
8793 /* need to clear DONE bit separately */
8794 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);
8796 /* address of the NVRAM to read from */
8797 REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
8798 (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));
8800 /* issue a read command */
8801 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);
8803 /* adjust timeout for emulation/FPGA */
8804 count = NVRAM_TIMEOUT_COUNT;
8805 if (CHIP_REV_IS_SLOW(bp))
8808 /* wait for completion */
8811 for (i = 0; i < count; i++) {
8813 val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
8815 if (val & MCPR_NVM_COMMAND_DONE) {
8816 val = REG_RD(bp, MCP_REG_MCPR_NVM_READ);
8817 /* we read nvram data in cpu order
8818 * but ethtool sees it as an array of bytes
8819 * converting to big-endian does the job */
8820 *ret_val = cpu_to_be32(val);
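/* For illustration: if the flash holds the bytes 0x11 0x22 0x33 0x44
 * at this offset, the register read yields the numeric value
 * 0x11223344 (first flash byte in the most significant position);
 * cpu_to_be32() then lays it out in memory as 0x11 0x22 0x33 0x44 on
 * any host, so ethtool always sees the flash byte order.
 */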
8829 static int bnx2x_nvram_read(struct bnx2x *bp, u32 offset, u8 *ret_buf,
8836 if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
8838 "Invalid parameter: offset 0x%x buf_size 0x%x\n",
8843 if (offset + buf_size > bp->common.flash_size) {
8844 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
8845 " buf_size (0x%x) > flash_size (0x%x)\n",
8846 offset, buf_size, bp->common.flash_size);
8850 /* request access to nvram interface */
8851 rc = bnx2x_acquire_nvram_lock(bp);
8855 /* enable access to nvram interface */
8856 bnx2x_enable_nvram_access(bp);
8858 /* read the first word(s) */
8859 cmd_flags = MCPR_NVM_COMMAND_FIRST;
8860 while ((buf_size > sizeof(u32)) && (rc == 0)) {
8861 rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
8862 memcpy(ret_buf, &val, 4);
8864 /* advance to the next dword */
8865 offset += sizeof(u32);
8866 ret_buf += sizeof(u32);
8867 buf_size -= sizeof(u32);
8868 cmd_flags = 0;
8869 }
8871 if (rc == 0) {
8872 cmd_flags |= MCPR_NVM_COMMAND_LAST;
8873 rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
8874 memcpy(ret_buf, &val, 4);
8877 /* disable access to nvram interface */
8878 bnx2x_disable_nvram_access(bp);
8879 bnx2x_release_nvram_lock(bp);
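/* A sketch of the command-flag protocol for a 12-byte read at an
 * aligned offset:
 *
 *	dword 0: cmd_flags = MCPR_NVM_COMMAND_FIRST
 *	dword 1: cmd_flags = 0
 *	dword 2: cmd_flags = MCPR_NVM_COMMAND_LAST
 *
 * FIRST opens the burst and LAST closes it; the lock/access-enable
 * bracket above guards the whole sequence.
 */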
8884 static int bnx2x_get_eeprom(struct net_device *dev,
8885 struct ethtool_eeprom *eeprom, u8 *eebuf)
8887 struct bnx2x *bp = netdev_priv(dev);
8890 if (!netif_running(dev))
8891 return -EAGAIN;
8893 DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
8894 DP_LEVEL " magic 0x%x offset 0x%x (%d) len 0x%x (%d)\n",
8895 eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
8896 eeprom->len, eeprom->len);
8898 /* parameters already validated in ethtool_get_eeprom */
8900 rc = bnx2x_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
8905 static int bnx2x_nvram_write_dword(struct bnx2x *bp, u32 offset, u32 val,
8910 /* build the command word */
8911 cmd_flags |= MCPR_NVM_COMMAND_DOIT | MCPR_NVM_COMMAND_WR;
8913 /* need to clear DONE bit separately */
8914 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);
8916 /* write the data */
8917 REG_WR(bp, MCP_REG_MCPR_NVM_WRITE, val);
8919 /* address of the NVRAM to write to */
8920 REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
8921 (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));
8923 /* issue the write command */
8924 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);
8926 /* adjust timeout for emulation/FPGA */
8927 count = NVRAM_TIMEOUT_COUNT;
8928 if (CHIP_REV_IS_SLOW(bp))
8931 /* wait for completion */
8933 for (i = 0; i < count; i++) {
8935 val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
8936 if (val & MCPR_NVM_COMMAND_DONE) {
8945 #define BYTE_OFFSET(offset) (8 * (offset & 0x03))
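/* BYTE_OFFSET() selects the bit position of a byte within its aligned
 * dword: offset 0x102, for example, gives (0x102 & 0x03) * 8 = 16,
 * i.e. the third byte lane.  bnx2x_nvram_write1() below uses it to
 * read-modify-write a single byte without disturbing its neighbours.
 */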
8947 static int bnx2x_nvram_write1(struct bnx2x *bp, u32 offset, u8 *data_buf,
8955 if (offset + buf_size > bp->common.flash_size) {
8956 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
8957 " buf_size (0x%x) > flash_size (0x%x)\n",
8958 offset, buf_size, bp->common.flash_size);
8962 /* request access to nvram interface */
8963 rc = bnx2x_acquire_nvram_lock(bp);
8967 /* enable access to nvram interface */
8968 bnx2x_enable_nvram_access(bp);
8970 cmd_flags = (MCPR_NVM_COMMAND_FIRST | MCPR_NVM_COMMAND_LAST);
8971 align_offset = (offset & ~0x03);
8972 rc = bnx2x_nvram_read_dword(bp, align_offset, &val, cmd_flags);
8975 val &= ~(0xff << BYTE_OFFSET(offset));
8976 val |= (*data_buf << BYTE_OFFSET(offset));
8978 /* nvram data is returned as an array of bytes
8979 * convert it back to cpu order */
8980 val = be32_to_cpu(val);
8982 rc = bnx2x_nvram_write_dword(bp, align_offset, val,
8986 /* disable access to nvram interface */
8987 bnx2x_disable_nvram_access(bp);
8988 bnx2x_release_nvram_lock(bp);
8993 static int bnx2x_nvram_write(struct bnx2x *bp, u32 offset, u8 *data_buf,
9001 if (buf_size == 1) /* ethtool */
9002 return bnx2x_nvram_write1(bp, offset, data_buf, buf_size);
9004 if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
9006 "Invalid parameter: offset 0x%x buf_size 0x%x\n",
9011 if (offset + buf_size > bp->common.flash_size) {
9012 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
9013 " buf_size (0x%x) > flash_size (0x%x)\n",
9014 offset, buf_size, bp->common.flash_size);
9018 /* request access to nvram interface */
9019 rc = bnx2x_acquire_nvram_lock(bp);
9023 /* enable access to nvram interface */
9024 bnx2x_enable_nvram_access(bp);
9027 cmd_flags = MCPR_NVM_COMMAND_FIRST;
9028 while ((written_so_far < buf_size) && (rc == 0)) {
9029 if (written_so_far == (buf_size - sizeof(u32)))
9030 cmd_flags |= MCPR_NVM_COMMAND_LAST;
9031 else if (((offset + 4) % NVRAM_PAGE_SIZE) == 0)
9032 cmd_flags |= MCPR_NVM_COMMAND_LAST;
9033 else if ((offset % NVRAM_PAGE_SIZE) == 0)
9034 cmd_flags |= MCPR_NVM_COMMAND_FIRST;
9036 memcpy(&val, data_buf, 4);
9038 rc = bnx2x_nvram_write_dword(bp, offset, val, cmd_flags);
9040 /* advance to the next dword */
9041 offset += sizeof(u32);
9042 data_buf += sizeof(u32);
9043 written_so_far += sizeof(u32);
9044 cmd_flags = 0;
9045 }
9047 /* disable access to nvram interface */
9048 bnx2x_disable_nvram_access(bp);
9049 bnx2x_release_nvram_lock(bp);
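/* The loop above raises MCPR_NVM_COMMAND_LAST on the final dword of
 * either the buffer or the current flash page, and re-arms FIRST when
 * a dword starts a new page.  Assuming, say, a 256-byte
 * NVRAM_PAGE_SIZE, a 264-byte write at offset 0 goes out as
 * FIRST..LAST for dwords 0-63 and FIRST..LAST again for dwords 64-65.
 */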
9054 static int bnx2x_set_eeprom(struct net_device *dev,
9055 struct ethtool_eeprom *eeprom, u8 *eebuf)
9057 struct bnx2x *bp = netdev_priv(dev);
9060 if (!netif_running(dev))
9061 return -EAGAIN;
9063 DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
9064 DP_LEVEL " magic 0x%x offset 0x%x (%d) len 0x%x (%d)\n",
9065 eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
9066 eeprom->len, eeprom->len);
9068 /* parameters already validated in ethtool_set_eeprom */
9070 /* If the magic number is PHY (0x00504859), upgrade the PHY FW */
9071 if (eeprom->magic == 0x00504859)
9074 bnx2x_acquire_phy_lock(bp);
9075 rc = bnx2x_flash_download(bp, BP_PORT(bp),
9076 bp->link_params.ext_phy_config,
9077 (bp->state != BNX2X_STATE_CLOSED),
9078 eebuf, eeprom->len);
9079 if ((bp->state == BNX2X_STATE_OPEN) ||
9080 (bp->state == BNX2X_STATE_DISABLED)) {
9081 rc |= bnx2x_link_reset(&bp->link_params,
9083 rc |= bnx2x_phy_init(&bp->link_params,
9086 bnx2x_release_phy_lock(bp);
9088 } else /* Only the PMF can access the PHY */
9091 rc = bnx2x_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
9096 static int bnx2x_get_coalesce(struct net_device *dev,
9097 struct ethtool_coalesce *coal)
9099 struct bnx2x *bp = netdev_priv(dev);
9101 memset(coal, 0, sizeof(struct ethtool_coalesce));
9103 coal->rx_coalesce_usecs = bp->rx_ticks;
9104 coal->tx_coalesce_usecs = bp->tx_ticks;
9109 static int bnx2x_set_coalesce(struct net_device *dev,
9110 struct ethtool_coalesce *coal)
9112 struct bnx2x *bp = netdev_priv(dev);
9114 bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
9115 if (bp->rx_ticks > BNX2X_MAX_COALESCE_TOUT)
9116 bp->rx_ticks = BNX2X_MAX_COALESCE_TOUT;
9118 bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
9119 if (bp->tx_ticks > BNX2X_MAX_COALESCE_TOUT)
9120 bp->tx_ticks = BNX2X_MAX_COALESCE_TOUT;
9122 if (netif_running(dev))
9123 bnx2x_update_coalesce(bp);
9128 static void bnx2x_get_ringparam(struct net_device *dev,
9129 struct ethtool_ringparam *ering)
9131 struct bnx2x *bp = netdev_priv(dev);
9133 ering->rx_max_pending = MAX_RX_AVAIL;
9134 ering->rx_mini_max_pending = 0;
9135 ering->rx_jumbo_max_pending = 0;
9137 ering->rx_pending = bp->rx_ring_size;
9138 ering->rx_mini_pending = 0;
9139 ering->rx_jumbo_pending = 0;
9141 ering->tx_max_pending = MAX_TX_AVAIL;
9142 ering->tx_pending = bp->tx_ring_size;
9145 static int bnx2x_set_ringparam(struct net_device *dev,
9146 struct ethtool_ringparam *ering)
9148 struct bnx2x *bp = netdev_priv(dev);
9151 if ((ering->rx_pending > MAX_RX_AVAIL) ||
9152 (ering->tx_pending > MAX_TX_AVAIL) ||
9153 (ering->tx_pending <= MAX_SKB_FRAGS + 4))
9154 return -EINVAL;
9156 bp->rx_ring_size = ering->rx_pending;
9157 bp->tx_ring_size = ering->tx_pending;
9159 if (netif_running(dev)) {
9160 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
9161 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
9167 static void bnx2x_get_pauseparam(struct net_device *dev,
9168 struct ethtool_pauseparam *epause)
9170 struct bnx2x *bp = netdev_priv(dev);
9172 epause->autoneg = (bp->link_params.req_flow_ctrl ==
9173 BNX2X_FLOW_CTRL_AUTO) &&
9174 (bp->link_params.req_line_speed == SPEED_AUTO_NEG);
9176 epause->rx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) ==
9177 BNX2X_FLOW_CTRL_RX);
9178 epause->tx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX) ==
9179 BNX2X_FLOW_CTRL_TX);
9181 DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
9182 DP_LEVEL " autoneg %d rx_pause %d tx_pause %d\n",
9183 epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
9186 static int bnx2x_set_pauseparam(struct net_device *dev,
9187 struct ethtool_pauseparam *epause)
9189 struct bnx2x *bp = netdev_priv(dev);
9194 DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
9195 DP_LEVEL " autoneg %d rx_pause %d tx_pause %d\n",
9196 epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
9198 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO;
9200 if (epause->rx_pause)
9201 bp->link_params.req_flow_ctrl |= BNX2X_FLOW_CTRL_RX;
9203 if (epause->tx_pause)
9204 bp->link_params.req_flow_ctrl |= BNX2X_FLOW_CTRL_TX;
9206 if (bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO)
9207 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_NONE;
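/* Net effect: AUTO doubles here as "nothing requested yet", so if
 * neither pause bit was OR'ed in the request collapses to NONE; the
 * autoneg branch below restores AUTO only when the line speed itself
 * is auto-negotiated.
 */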
9209 if (epause->autoneg) {
9210 if (!(bp->port.supported & SUPPORTED_Autoneg)) {
9211 DP(NETIF_MSG_LINK, "autoneg not supported\n");
9215 if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
9216 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO;
9220 "req_flow_ctrl 0x%x\n", bp->link_params.req_flow_ctrl);
9222 if (netif_running(dev)) {
9223 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
9224 bnx2x_link_set(bp);
9230 static int bnx2x_set_flags(struct net_device *dev, u32 data)
9232 struct bnx2x *bp = netdev_priv(dev);
9236 /* TPA requires Rx CSUM offloading */
9237 if ((data & ETH_FLAG_LRO) && bp->rx_csum) {
9238 if (!(dev->features & NETIF_F_LRO)) {
9239 dev->features |= NETIF_F_LRO;
9240 bp->flags |= TPA_ENABLE_FLAG;
9244 } else if (dev->features & NETIF_F_LRO) {
9245 dev->features &= ~NETIF_F_LRO;
9246 bp->flags &= ~TPA_ENABLE_FLAG;
9250 if (changed && netif_running(dev)) {
9251 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
9252 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
9258 static u32 bnx2x_get_rx_csum(struct net_device *dev)
9260 struct bnx2x *bp = netdev_priv(dev);
9265 static int bnx2x_set_rx_csum(struct net_device *dev, u32 data)
9267 struct bnx2x *bp = netdev_priv(dev);
9272 /* Disable TPA when Rx CSUM is disabled; otherwise all
9273 TPA'ed packets will be discarded due to a wrong TCP CSUM */
9275 u32 flags = ethtool_op_get_flags(dev);
9277 rc = bnx2x_set_flags(dev, (flags & ~ETH_FLAG_LRO));
9283 static int bnx2x_set_tso(struct net_device *dev, u32 data)
9286 dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
9287 dev->features |= NETIF_F_TSO6;
9289 dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO_ECN);
9290 dev->features &= ~NETIF_F_TSO6;
9296 static const struct {
9297 char string[ETH_GSTRING_LEN];
9298 } bnx2x_tests_str_arr[BNX2X_NUM_TESTS] = {
9299 { "register_test (offline)" },
9300 { "memory_test (offline)" },
9301 { "loopback_test (offline)" },
9302 { "nvram_test (online)" },
9303 { "interrupt_test (online)" },
9304 { "link_test (online)" },
9305 { "idle check (online)" }
9308 static int bnx2x_self_test_count(struct net_device *dev)
9310 return BNX2X_NUM_TESTS;
9313 static int bnx2x_test_registers(struct bnx2x *bp)
9315 int idx, i, rc = -ENODEV;
9317 int port = BP_PORT(bp);
9318 static const struct {
9323 /* 0 */ { BRB1_REG_PAUSE_LOW_THRESHOLD_0, 4, 0x000003ff },
9324 { DORQ_REG_DB_ADDR0, 4, 0xffffffff },
9325 { HC_REG_AGG_INT_0, 4, 0x000003ff },
9326 { PBF_REG_MAC_IF0_ENABLE, 4, 0x00000001 },
9327 { PBF_REG_P0_INIT_CRD, 4, 0x000007ff },
9328 { PRS_REG_CID_PORT_0, 4, 0x00ffffff },
9329 { PXP2_REG_PSWRQ_CDU0_L2P, 4, 0x000fffff },
9330 { PXP2_REG_RQ_CDU0_EFIRST_MEM_ADDR, 8, 0x0003ffff },
9331 { PXP2_REG_PSWRQ_TM0_L2P, 4, 0x000fffff },
9332 { PXP2_REG_RQ_USDM0_EFIRST_MEM_ADDR, 8, 0x0003ffff },
9333 /* 10 */ { PXP2_REG_PSWRQ_TSDM0_L2P, 4, 0x000fffff },
9334 { QM_REG_CONNNUM_0, 4, 0x000fffff },
9335 { TM_REG_LIN0_MAX_ACTIVE_CID, 4, 0x0003ffff },
9336 { SRC_REG_KEYRSS0_0, 40, 0xffffffff },
9337 { SRC_REG_KEYRSS0_7, 40, 0xffffffff },
9338 { XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD00, 4, 0x00000001 },
9339 { XCM_REG_WU_DA_CNT_CMD00, 4, 0x00000003 },
9340 { XCM_REG_GLB_DEL_ACK_MAX_CNT_0, 4, 0x000000ff },
9341 { NIG_REG_EGRESS_MNG0_FIFO, 20, 0xffffffff },
9342 { NIG_REG_LLH0_T_BIT, 4, 0x00000001 },
9343 /* 20 */ { NIG_REG_EMAC0_IN_EN, 4, 0x00000001 },
9344 { NIG_REG_BMAC0_IN_EN, 4, 0x00000001 },
9345 { NIG_REG_XCM0_OUT_EN, 4, 0x00000001 },
9346 { NIG_REG_BRB0_OUT_EN, 4, 0x00000001 },
9347 { NIG_REG_LLH0_XCM_MASK, 4, 0x00000007 },
9348 { NIG_REG_LLH0_ACPI_PAT_6_LEN, 68, 0x000000ff },
9349 { NIG_REG_LLH0_ACPI_PAT_0_CRC, 68, 0xffffffff },
9350 { NIG_REG_LLH0_DEST_MAC_0_0, 160, 0xffffffff },
9351 { NIG_REG_LLH0_DEST_IP_0_1, 160, 0xffffffff },
9352 { NIG_REG_LLH0_IPV4_IPV6_0, 160, 0x00000001 },
9353 /* 30 */ { NIG_REG_LLH0_DEST_UDP_0, 160, 0x0000ffff },
9354 { NIG_REG_LLH0_DEST_TCP_0, 160, 0x0000ffff },
9355 { NIG_REG_LLH0_VLAN_ID_0, 160, 0x00000fff },
9356 { NIG_REG_XGXS_SERDES0_MODE_SEL, 4, 0x00000001 },
9357 { NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0, 4, 0x00000001 },
9358 { NIG_REG_STATUS_INTERRUPT_PORT0, 4, 0x07ffffff },
9359 { NIG_REG_XGXS0_CTRL_EXTREMOTEMDIOST, 24, 0x00000001 },
9360 { NIG_REG_SERDES0_CTRL_PHY_ADDR, 16, 0x0000001f },
9362 { 0xffffffff, 0, 0x00000000 }
9365 if (!netif_running(bp->dev))
9366 return rc;
9368 /* Repeat the test twice:
9369 first by writing 0x00000000, then by writing 0xffffffff */
9370 for (idx = 0; idx < 2; idx++) {
9377 wr_val = 0xffffffff;
9381 for (i = 0; reg_tbl[i].offset0 != 0xffffffff; i++) {
9382 u32 offset, mask, save_val, val;
9384 offset = reg_tbl[i].offset0 + port*reg_tbl[i].offset1;
9385 mask = reg_tbl[i].mask;
9387 save_val = REG_RD(bp, offset);
9389 REG_WR(bp, offset, wr_val);
9390 val = REG_RD(bp, offset);
9392 /* Restore the original register's value */
9393 REG_WR(bp, offset, save_val);
9395 /* verify the value is as expected */
9396 if ((val & mask) != (wr_val & mask))
9407 static int bnx2x_test_memory(struct bnx2x *bp)
9409 int i, j, rc = -ENODEV;
9411 static const struct {
9415 { CCM_REG_XX_DESCR_TABLE, CCM_REG_XX_DESCR_TABLE_SIZE },
9416 { CFC_REG_ACTIVITY_COUNTER, CFC_REG_ACTIVITY_COUNTER_SIZE },
9417 { CFC_REG_LINK_LIST, CFC_REG_LINK_LIST_SIZE },
9418 { DMAE_REG_CMD_MEM, DMAE_REG_CMD_MEM_SIZE },
9419 { TCM_REG_XX_DESCR_TABLE, TCM_REG_XX_DESCR_TABLE_SIZE },
9420 { UCM_REG_XX_DESCR_TABLE, UCM_REG_XX_DESCR_TABLE_SIZE },
9421 { XCM_REG_XX_DESCR_TABLE, XCM_REG_XX_DESCR_TABLE_SIZE },
9425 static const struct {
9431 { "CCM_PRTY_STS", CCM_REG_CCM_PRTY_STS, 0x3ffc0, 0 },
9432 { "CFC_PRTY_STS", CFC_REG_CFC_PRTY_STS, 0x2, 0x2 },
9433 { "DMAE_PRTY_STS", DMAE_REG_DMAE_PRTY_STS, 0, 0 },
9434 { "TCM_PRTY_STS", TCM_REG_TCM_PRTY_STS, 0x3ffc0, 0 },
9435 { "UCM_PRTY_STS", UCM_REG_UCM_PRTY_STS, 0x3ffc0, 0 },
9436 { "XCM_PRTY_STS", XCM_REG_XCM_PRTY_STS, 0x3ffc1, 0 },
9438 { NULL, 0xffffffff, 0, 0 }
9441 if (!netif_running(bp->dev))
9442 return rc;
9444 /* Go through all the memories */
9445 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++)
9446 for (j = 0; j < mem_tbl[i].size; j++)
9447 REG_RD(bp, mem_tbl[i].offset + j*4);
9449 /* Check the parity status */
9450 for (i = 0; prty_tbl[i].offset != 0xffffffff; i++) {
9451 val = REG_RD(bp, prty_tbl[i].offset);
9452 if ((CHIP_IS_E1(bp) && (val & ~(prty_tbl[i].e1_mask))) ||
9453 (CHIP_IS_E1H(bp) && (val & ~(prty_tbl[i].e1h_mask)))) {
9455 "%s is 0x%x\n", prty_tbl[i].name, val);
9466 static void bnx2x_wait_for_link(struct bnx2x *bp, u8 link_up)
9471 while (bnx2x_link_test(bp) && cnt--)
9475 static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode, u8 link_up)
9477 unsigned int pkt_size, num_pkts, i;
9478 struct sk_buff *skb;
9479 unsigned char *packet;
9480 struct bnx2x_fastpath *fp = &bp->fp[0];
9481 u16 tx_start_idx, tx_idx;
9482 u16 rx_start_idx, rx_idx;
9484 struct sw_tx_bd *tx_buf;
9485 struct eth_tx_bd *tx_bd;
9487 union eth_rx_cqe *cqe;
9489 struct sw_rx_bd *rx_buf;
9493 /* check the loopback mode */
9494 switch (loopback_mode) {
9495 case BNX2X_PHY_LOOPBACK:
9496 if (bp->link_params.loopback_mode != LOOPBACK_XGXS_10)
9499 case BNX2X_MAC_LOOPBACK:
9500 bp->link_params.loopback_mode = LOOPBACK_BMAC;
9501 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
9507 /* prepare the loopback packet */
9508 pkt_size = (((bp->dev->mtu < ETH_MAX_PACKET_SIZE) ?
9509 bp->dev->mtu : ETH_MAX_PACKET_SIZE) + ETH_HLEN);
9510 skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
9513 goto test_loopback_exit;
9515 packet = skb_put(skb, pkt_size);
9516 memcpy(packet, bp->dev->dev_addr, ETH_ALEN);
9517 memset(packet + ETH_ALEN, 0, (ETH_HLEN - ETH_ALEN));
9518 for (i = ETH_HLEN; i < pkt_size; i++)
9519 packet[i] = (unsigned char) (i & 0xff);
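/* The test frame is thus addressed to ourselves: destination MAC set
 * to our own address, the rest of the Ethernet header zeroed, and the
 * payload filled with the pattern i & 0xff -- the same pattern the
 * receive side compares against byte-for-byte further down.
 */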
9521 /* send the loopback packet */
9523 tx_start_idx = le16_to_cpu(*fp->tx_cons_sb);
9524 rx_start_idx = le16_to_cpu(*fp->rx_cons_sb);
9526 pkt_prod = fp->tx_pkt_prod++;
9527 tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)];
9528 tx_buf->first_bd = fp->tx_bd_prod;
9531 tx_bd = &fp->tx_desc_ring[TX_BD(fp->tx_bd_prod)];
9532 mapping = pci_map_single(bp->pdev, skb->data,
9533 skb_headlen(skb), PCI_DMA_TODEVICE);
9534 tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
9535 tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
9536 tx_bd->nbd = cpu_to_le16(1);
9537 tx_bd->nbytes = cpu_to_le16(skb_headlen(skb));
9538 tx_bd->vlan = cpu_to_le16(pkt_prod);
9539 tx_bd->bd_flags.as_bitfield = (ETH_TX_BD_FLAGS_START_BD |
9540 ETH_TX_BD_FLAGS_END_BD);
9541 tx_bd->general_data = ((UNICAST_ADDRESS <<
9542 ETH_TX_BD_ETH_ADDR_TYPE_SHIFT) | 1);
9546 le16_add_cpu(&fp->hw_tx_prods->bds_prod, 1);
9547 mb(); /* FW restriction: must not reorder writing nbd and packets */
9548 le32_add_cpu(&fp->hw_tx_prods->packets_prod, 1);
9549 DOORBELL(bp, fp->index, 0);
9555 bp->dev->trans_start = jiffies;
9559 tx_idx = le16_to_cpu(*fp->tx_cons_sb);
9560 if (tx_idx != tx_start_idx + num_pkts)
9561 goto test_loopback_exit;
9563 rx_idx = le16_to_cpu(*fp->rx_cons_sb);
9564 if (rx_idx != rx_start_idx + num_pkts)
9565 goto test_loopback_exit;
9567 cqe = &fp->rx_comp_ring[RCQ_BD(fp->rx_comp_cons)];
9568 cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
9569 if (CQE_TYPE(cqe_fp_flags) || (cqe_fp_flags & ETH_RX_ERROR_FALGS))
9570 goto test_loopback_rx_exit;
9572 len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
9573 if (len != pkt_size)
9574 goto test_loopback_rx_exit;
9576 rx_buf = &fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)];
9578 skb_reserve(skb, cqe->fast_path_cqe.placement_offset);
9579 for (i = ETH_HLEN; i < pkt_size; i++)
9580 if (*(skb->data + i) != (unsigned char) (i & 0xff))
9581 goto test_loopback_rx_exit;
9585 test_loopback_rx_exit:
9587 fp->rx_bd_cons = NEXT_RX_IDX(fp->rx_bd_cons);
9588 fp->rx_bd_prod = NEXT_RX_IDX(fp->rx_bd_prod);
9589 fp->rx_comp_cons = NEXT_RCQ_IDX(fp->rx_comp_cons);
9590 fp->rx_comp_prod = NEXT_RCQ_IDX(fp->rx_comp_prod);
9592 /* Update producers */
9593 bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
9597 bp->link_params.loopback_mode = LOOPBACK_NONE;
9602 static int bnx2x_test_loopback(struct bnx2x *bp, u8 link_up)
9606 if (!netif_running(bp->dev))
9607 return BNX2X_LOOPBACK_FAILED;
9609 bnx2x_netif_stop(bp, 1);
9610 bnx2x_acquire_phy_lock(bp);
9612 res = bnx2x_run_loopback(bp, BNX2X_PHY_LOOPBACK, link_up);
9614 DP(NETIF_MSG_PROBE, " PHY loopback failed (res %d)\n", res);
9615 rc |= BNX2X_PHY_LOOPBACK_FAILED;
9618 res = bnx2x_run_loopback(bp, BNX2X_MAC_LOOPBACK, link_up);
9620 DP(NETIF_MSG_PROBE, " MAC loopback failed (res %d)\n", res);
9621 rc |= BNX2X_MAC_LOOPBACK_FAILED;
9624 bnx2x_release_phy_lock(bp);
9625 bnx2x_netif_start(bp);
9630 #define CRC32_RESIDUAL 0xdebb20e3
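/* Each nvram_tbl region below ends with the little-endian CRC32 of the
 * bytes before it; running ether_crc_le() across data-plus-CRC then
 * yields this well-known constant residual for any intact region, so
 * the checks need not locate the CRC field itself.
 */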
9632 static int bnx2x_test_nvram(struct bnx2x *bp)
9634 static const struct {
9638 { 0, 0x14 }, /* bootstrap */
9639 { 0x14, 0xec }, /* dir */
9640 { 0x100, 0x350 }, /* manuf_info */
9641 { 0x450, 0xf0 }, /* feature_info */
9642 { 0x640, 0x64 }, /* upgrade_key_info */
9644 { 0x708, 0x70 }, /* manuf_key_info */
9648 __be32 buf[0x350 / 4];
9649 u8 *data = (u8 *)buf;
9653 rc = bnx2x_nvram_read(bp, 0, data, 4);
9655 DP(NETIF_MSG_PROBE, "magic value read (rc %d)\n", rc);
9656 goto test_nvram_exit;
9659 magic = be32_to_cpu(buf[0]);
9660 if (magic != 0x669955aa) {
9661 DP(NETIF_MSG_PROBE, "magic value (0x%08x)\n", magic);
9663 goto test_nvram_exit;
9666 for (i = 0; nvram_tbl[i].size; i++) {
9668 rc = bnx2x_nvram_read(bp, nvram_tbl[i].offset, data,
9672 "nvram_tbl[%d] read data (rc %d)\n", i, rc);
9673 goto test_nvram_exit;
9676 csum = ether_crc_le(nvram_tbl[i].size, data);
9677 if (csum != CRC32_RESIDUAL) {
9679 "nvram_tbl[%d] csum value (0x%08x)\n", i, csum);
9681 goto test_nvram_exit;
9689 static int bnx2x_test_intr(struct bnx2x *bp)
9691 struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
9694 if (!netif_running(bp->dev))
9697 config->hdr.length = 0;
9698 if (CHIP_IS_E1(bp))
9699 config->hdr.offset = (BP_PORT(bp) ? 32 : 0);
9700 else
9701 config->hdr.offset = BP_FUNC(bp);
9702 config->hdr.client_id = bp->fp->cl_id;
9703 config->hdr.reserved1 = 0;
9705 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
9706 U64_HI(bnx2x_sp_mapping(bp, mac_config)),
9707 U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
9709 bp->set_mac_pending++;
9710 for (i = 0; i < 10; i++) {
9711 if (!bp->set_mac_pending)
9713 msleep_interruptible(10);
9722 static void bnx2x_self_test(struct net_device *dev,
9723 struct ethtool_test *etest, u64 *buf)
9725 struct bnx2x *bp = netdev_priv(dev);
9727 memset(buf, 0, sizeof(u64) * BNX2X_NUM_TESTS);
9729 if (!netif_running(dev))
9732 /* offline tests are not supported in MF mode */
9734 etest->flags &= ~ETH_TEST_FL_OFFLINE;
9736 if (etest->flags & ETH_TEST_FL_OFFLINE) {
9737 int port = BP_PORT(bp);
9741 /* save current value of input enable for TX port IF */
9742 val = REG_RD(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4);
9743 /* disable input for TX port IF */
9744 REG_WR(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4, 0);
9746 link_up = bp->link_vars.link_up;
9747 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
9748 bnx2x_nic_load(bp, LOAD_DIAG);
9749 /* wait until link state is restored */
9750 bnx2x_wait_for_link(bp, link_up);
9752 if (bnx2x_test_registers(bp) != 0) {
9754 etest->flags |= ETH_TEST_FL_FAILED;
9756 if (bnx2x_test_memory(bp) != 0) {
9758 etest->flags |= ETH_TEST_FL_FAILED;
9760 buf[2] = bnx2x_test_loopback(bp, link_up);
9762 etest->flags |= ETH_TEST_FL_FAILED;
9764 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
9766 /* restore input for TX port IF */
9767 REG_WR(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4, val);
9769 bnx2x_nic_load(bp, LOAD_NORMAL);
9770 /* wait until link state is restored */
9771 bnx2x_wait_for_link(bp, link_up);
9773 if (bnx2x_test_nvram(bp) != 0) {
9775 etest->flags |= ETH_TEST_FL_FAILED;
9777 if (bnx2x_test_intr(bp) != 0) {
9779 etest->flags |= ETH_TEST_FL_FAILED;
9782 if (bnx2x_link_test(bp) != 0) {
9784 etest->flags |= ETH_TEST_FL_FAILED;
9787 #ifdef BNX2X_EXTRA_DEBUG
9788 bnx2x_panic_dump(bp);
9792 static const struct {
9795 u8 string[ETH_GSTRING_LEN];
9796 } bnx2x_q_stats_arr[BNX2X_NUM_Q_STATS] = {
9797 /* 1 */ { Q_STATS_OFFSET32(total_bytes_received_hi), 8, "[%d]: rx_bytes" },
9798 { Q_STATS_OFFSET32(error_bytes_received_hi),
9799 8, "[%d]: rx_error_bytes" },
9800 { Q_STATS_OFFSET32(total_unicast_packets_received_hi),
9801 8, "[%d]: rx_ucast_packets" },
9802 { Q_STATS_OFFSET32(total_multicast_packets_received_hi),
9803 8, "[%d]: rx_mcast_packets" },
9804 { Q_STATS_OFFSET32(total_broadcast_packets_received_hi),
9805 8, "[%d]: rx_bcast_packets" },
9806 { Q_STATS_OFFSET32(no_buff_discard_hi), 8, "[%d]: rx_discards" },
9807 { Q_STATS_OFFSET32(rx_err_discard_pkt),
9808 4, "[%d]: rx_phy_ip_err_discards"},
9809 { Q_STATS_OFFSET32(rx_skb_alloc_failed),
9810 4, "[%d]: rx_skb_alloc_discard" },
9811 { Q_STATS_OFFSET32(hw_csum_err), 4, "[%d]: rx_csum_offload_errors" },
9813 /* 10 */{ Q_STATS_OFFSET32(total_bytes_transmitted_hi), 8, "[%d]: tx_bytes" },
9814 { Q_STATS_OFFSET32(total_unicast_packets_transmitted_hi),
9815 8, "[%d]: tx_packets" }
9818 static const struct {
9822 #define STATS_FLAGS_PORT 1
9823 #define STATS_FLAGS_FUNC 2
9824 #define STATS_FLAGS_BOTH (STATS_FLAGS_FUNC | STATS_FLAGS_PORT)
9825 u8 string[ETH_GSTRING_LEN];
9826 } bnx2x_stats_arr[BNX2X_NUM_STATS] = {
9827 /* 1 */ { STATS_OFFSET32(total_bytes_received_hi),
9828 8, STATS_FLAGS_BOTH, "rx_bytes" },
9829 { STATS_OFFSET32(error_bytes_received_hi),
9830 8, STATS_FLAGS_BOTH, "rx_error_bytes" },
9831 { STATS_OFFSET32(total_unicast_packets_received_hi),
9832 8, STATS_FLAGS_BOTH, "rx_ucast_packets" },
9833 { STATS_OFFSET32(total_multicast_packets_received_hi),
9834 8, STATS_FLAGS_BOTH, "rx_mcast_packets" },
9835 { STATS_OFFSET32(total_broadcast_packets_received_hi),
9836 8, STATS_FLAGS_BOTH, "rx_bcast_packets" },
9837 { STATS_OFFSET32(rx_stat_dot3statsfcserrors_hi),
9838 8, STATS_FLAGS_PORT, "rx_crc_errors" },
9839 { STATS_OFFSET32(rx_stat_dot3statsalignmenterrors_hi),
9840 8, STATS_FLAGS_PORT, "rx_align_errors" },
9841 { STATS_OFFSET32(rx_stat_etherstatsundersizepkts_hi),
9842 8, STATS_FLAGS_PORT, "rx_undersize_packets" },
9843 { STATS_OFFSET32(etherstatsoverrsizepkts_hi),
9844 8, STATS_FLAGS_PORT, "rx_oversize_packets" },
9845 /* 10 */{ STATS_OFFSET32(rx_stat_etherstatsfragments_hi),
9846 8, STATS_FLAGS_PORT, "rx_fragments" },
9847 { STATS_OFFSET32(rx_stat_etherstatsjabbers_hi),
9848 8, STATS_FLAGS_PORT, "rx_jabbers" },
9849 { STATS_OFFSET32(no_buff_discard_hi),
9850 8, STATS_FLAGS_BOTH, "rx_discards" },
9851 { STATS_OFFSET32(mac_filter_discard),
9852 4, STATS_FLAGS_PORT, "rx_filtered_packets" },
9853 { STATS_OFFSET32(xxoverflow_discard),
9854 4, STATS_FLAGS_PORT, "rx_fw_discards" },
9855 { STATS_OFFSET32(brb_drop_hi),
9856 8, STATS_FLAGS_PORT, "rx_brb_discard" },
9857 { STATS_OFFSET32(brb_truncate_hi),
9858 8, STATS_FLAGS_PORT, "rx_brb_truncate" },
9859 { STATS_OFFSET32(pause_frames_received_hi),
9860 8, STATS_FLAGS_PORT, "rx_pause_frames" },
9861 { STATS_OFFSET32(rx_stat_maccontrolframesreceived_hi),
9862 8, STATS_FLAGS_PORT, "rx_mac_ctrl_frames" },
9863 { STATS_OFFSET32(nig_timer_max),
9864 4, STATS_FLAGS_PORT, "rx_constant_pause_events" },
9865 /* 20 */{ STATS_OFFSET32(rx_err_discard_pkt),
9866 4, STATS_FLAGS_BOTH, "rx_phy_ip_err_discards"},
9867 { STATS_OFFSET32(rx_skb_alloc_failed),
9868 4, STATS_FLAGS_BOTH, "rx_skb_alloc_discard" },
9869 { STATS_OFFSET32(hw_csum_err),
9870 4, STATS_FLAGS_BOTH, "rx_csum_offload_errors" },
9872 { STATS_OFFSET32(total_bytes_transmitted_hi),
9873 8, STATS_FLAGS_BOTH, "tx_bytes" },
9874 { STATS_OFFSET32(tx_stat_ifhcoutbadoctets_hi),
9875 8, STATS_FLAGS_PORT, "tx_error_bytes" },
9876 { STATS_OFFSET32(total_unicast_packets_transmitted_hi),
9877 8, STATS_FLAGS_BOTH, "tx_packets" },
9878 { STATS_OFFSET32(tx_stat_dot3statsinternalmactransmiterrors_hi),
9879 8, STATS_FLAGS_PORT, "tx_mac_errors" },
9880 { STATS_OFFSET32(rx_stat_dot3statscarriersenseerrors_hi),
9881 8, STATS_FLAGS_PORT, "tx_carrier_errors" },
9882 { STATS_OFFSET32(tx_stat_dot3statssinglecollisionframes_hi),
9883 8, STATS_FLAGS_PORT, "tx_single_collisions" },
9884 { STATS_OFFSET32(tx_stat_dot3statsmultiplecollisionframes_hi),
9885 8, STATS_FLAGS_PORT, "tx_multi_collisions" },
9886 /* 30 */{ STATS_OFFSET32(tx_stat_dot3statsdeferredtransmissions_hi),
9887 8, STATS_FLAGS_PORT, "tx_deferred" },
9888 { STATS_OFFSET32(tx_stat_dot3statsexcessivecollisions_hi),
9889 8, STATS_FLAGS_PORT, "tx_excess_collisions" },
9890 { STATS_OFFSET32(tx_stat_dot3statslatecollisions_hi),
9891 8, STATS_FLAGS_PORT, "tx_late_collisions" },
9892 { STATS_OFFSET32(tx_stat_etherstatscollisions_hi),
9893 8, STATS_FLAGS_PORT, "tx_total_collisions" },
9894 { STATS_OFFSET32(tx_stat_etherstatspkts64octets_hi),
9895 8, STATS_FLAGS_PORT, "tx_64_byte_packets" },
9896 { STATS_OFFSET32(tx_stat_etherstatspkts65octetsto127octets_hi),
9897 8, STATS_FLAGS_PORT, "tx_65_to_127_byte_packets" },
9898 { STATS_OFFSET32(tx_stat_etherstatspkts128octetsto255octets_hi),
9899 8, STATS_FLAGS_PORT, "tx_128_to_255_byte_packets" },
9900 { STATS_OFFSET32(tx_stat_etherstatspkts256octetsto511octets_hi),
9901 8, STATS_FLAGS_PORT, "tx_256_to_511_byte_packets" },
9902 { STATS_OFFSET32(tx_stat_etherstatspkts512octetsto1023octets_hi),
9903 8, STATS_FLAGS_PORT, "tx_512_to_1023_byte_packets" },
9904 { STATS_OFFSET32(etherstatspkts1024octetsto1522octets_hi),
9905 8, STATS_FLAGS_PORT, "tx_1024_to_1522_byte_packets" },
9906 /* 40 */{ STATS_OFFSET32(etherstatspktsover1522octets_hi),
9907 8, STATS_FLAGS_PORT, "tx_1523_to_9022_byte_packets" },
9908 { STATS_OFFSET32(pause_frames_sent_hi),
9909 8, STATS_FLAGS_PORT, "tx_pause_frames" }
9912 #define IS_PORT_STAT(i) \
9913 ((bnx2x_stats_arr[i].flags & STATS_FLAGS_BOTH) == STATS_FLAGS_PORT)
9914 #define IS_FUNC_STAT(i) (bnx2x_stats_arr[i].flags & STATS_FLAGS_FUNC)
9915 #define IS_E1HMF_MODE_STAT(bp) \
9916 (IS_E1HMF(bp) && !(bp->msglevel & BNX2X_MSG_STATS))
9918 static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
9920 struct bnx2x *bp = netdev_priv(dev);
9923 switch (stringset) {
9927 for_each_queue(bp, i) {
9928 for (j = 0; j < BNX2X_NUM_Q_STATS; j++)
9929 sprintf(buf + (k + j)*ETH_GSTRING_LEN,
9930 bnx2x_q_stats_arr[j].string, i);
9931 k += BNX2X_NUM_Q_STATS;
9933 if (IS_E1HMF_MODE_STAT(bp))
9935 for (j = 0; j < BNX2X_NUM_STATS; j++)
9936 strcpy(buf + (k + j)*ETH_GSTRING_LEN,
9937 bnx2x_stats_arr[j].string);
9939 for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
9940 if (IS_E1HMF_MODE_STAT(bp) && IS_PORT_STAT(i))
9942 strcpy(buf + j*ETH_GSTRING_LEN,
9943 bnx2x_stats_arr[i].string);
9950 memcpy(buf, bnx2x_tests_str_arr, sizeof(bnx2x_tests_str_arr));
9955 static int bnx2x_get_stats_count(struct net_device *dev)
9957 struct bnx2x *bp = netdev_priv(dev);
9961 num_stats = BNX2X_NUM_Q_STATS * BNX2X_NUM_QUEUES(bp);
9962 if (!IS_E1HMF_MODE_STAT(bp))
9963 num_stats += BNX2X_NUM_STATS;
9965 if (IS_E1HMF_MODE_STAT(bp)) {
9967 for (i = 0; i < BNX2X_NUM_STATS; i++)
9968 if (IS_FUNC_STAT(i))
9971 num_stats = BNX2X_NUM_STATS;
9977 static void bnx2x_get_ethtool_stats(struct net_device *dev,
9978 struct ethtool_stats *stats, u64 *buf)
9980 struct bnx2x *bp = netdev_priv(dev);
9981 u32 *hw_stats, *offset;
9986 for_each_queue(bp, i) {
9987 hw_stats = (u32 *)&bp->fp[i].eth_q_stats;
9988 for (j = 0; j < BNX2X_NUM_Q_STATS; j++) {
9989 if (bnx2x_q_stats_arr[j].size == 0) {
9990 /* skip this counter */
9994 offset = (hw_stats +
9995 bnx2x_q_stats_arr[j].offset);
9996 if (bnx2x_q_stats_arr[j].size == 4) {
9997 /* 4-byte counter */
9998 buf[k + j] = (u64) *offset;
10001 /* 8-byte counter */
10002 buf[k + j] = HILO_U64(*offset, *(offset + 1));
10004 k += BNX2X_NUM_Q_STATS;
10006 if (IS_E1HMF_MODE_STAT(bp))
10008 hw_stats = (u32 *)&bp->eth_stats;
10009 for (j = 0; j < BNX2X_NUM_STATS; j++) {
10010 if (bnx2x_stats_arr[j].size == 0) {
10011 /* skip this counter */
10015 offset = (hw_stats + bnx2x_stats_arr[j].offset);
10016 if (bnx2x_stats_arr[j].size == 4) {
10017 /* 4-byte counter */
10018 buf[k + j] = (u64) *offset;
10021 /* 8-byte counter */
10022 buf[k + j] = HILO_U64(*offset, *(offset + 1));
10025 hw_stats = (u32 *)&bp->eth_stats;
10026 for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
10027 if (IS_E1HMF_MODE_STAT(bp) && IS_PORT_STAT(i))
10029 if (bnx2x_stats_arr[i].size == 0) {
10030 /* skip this counter */
10035 offset = (hw_stats + bnx2x_stats_arr[i].offset);
10036 if (bnx2x_stats_arr[i].size == 4) {
10037 /* 4-byte counter */
10038 buf[j] = (u64) *offset;
10042 /* 8-byte counter */
10043 buf[j] = HILO_U64(*offset, *(offset + 1));
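/* 8-byte counters are kept as two consecutive u32s, most significant
 * word first (the *_hi offsets above); HILO_U64() rebuilds the value,
 * e.g. hi = 0x1, lo = 0x2 becomes 0x0000000100000002.
 */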
10049 static int bnx2x_phys_id(struct net_device *dev, u32 data)
10051 struct bnx2x *bp = netdev_priv(dev);
10052 int port = BP_PORT(bp);
10055 if (!netif_running(dev))
10064 for (i = 0; i < (data * 2); i++) {
10065 if ((i % 2) == 0)
10066 bnx2x_set_led(bp, port, LED_MODE_OPER, SPEED_1000,
10067 bp->link_params.hw_led_mode,
10068 bp->link_params.chip_id);
10069 else
10070 bnx2x_set_led(bp, port, LED_MODE_OFF, 0,
10071 bp->link_params.hw_led_mode,
10072 bp->link_params.chip_id);
10074 msleep_interruptible(500);
10075 if (signal_pending(current))
10079 if (bp->link_vars.link_up)
10080 bnx2x_set_led(bp, port, LED_MODE_OPER,
10081 bp->link_vars.line_speed,
10082 bp->link_params.hw_led_mode,
10083 bp->link_params.chip_id);
10088 static struct ethtool_ops bnx2x_ethtool_ops = {
10089 .get_settings = bnx2x_get_settings,
10090 .set_settings = bnx2x_set_settings,
10091 .get_drvinfo = bnx2x_get_drvinfo,
10092 .get_regs_len = bnx2x_get_regs_len,
10093 .get_regs = bnx2x_get_regs,
10094 .get_wol = bnx2x_get_wol,
10095 .set_wol = bnx2x_set_wol,
10096 .get_msglevel = bnx2x_get_msglevel,
10097 .set_msglevel = bnx2x_set_msglevel,
10098 .nway_reset = bnx2x_nway_reset,
10099 .get_link = bnx2x_get_link,
10100 .get_eeprom_len = bnx2x_get_eeprom_len,
10101 .get_eeprom = bnx2x_get_eeprom,
10102 .set_eeprom = bnx2x_set_eeprom,
10103 .get_coalesce = bnx2x_get_coalesce,
10104 .set_coalesce = bnx2x_set_coalesce,
10105 .get_ringparam = bnx2x_get_ringparam,
10106 .set_ringparam = bnx2x_set_ringparam,
10107 .get_pauseparam = bnx2x_get_pauseparam,
10108 .set_pauseparam = bnx2x_set_pauseparam,
10109 .get_rx_csum = bnx2x_get_rx_csum,
10110 .set_rx_csum = bnx2x_set_rx_csum,
10111 .get_tx_csum = ethtool_op_get_tx_csum,
10112 .set_tx_csum = ethtool_op_set_tx_hw_csum,
10113 .set_flags = bnx2x_set_flags,
10114 .get_flags = ethtool_op_get_flags,
10115 .get_sg = ethtool_op_get_sg,
10116 .set_sg = ethtool_op_set_sg,
10117 .get_tso = ethtool_op_get_tso,
10118 .set_tso = bnx2x_set_tso,
10119 .self_test_count = bnx2x_self_test_count,
10120 .self_test = bnx2x_self_test,
10121 .get_strings = bnx2x_get_strings,
10122 .phys_id = bnx2x_phys_id,
10123 .get_stats_count = bnx2x_get_stats_count,
10124 .get_ethtool_stats = bnx2x_get_ethtool_stats,
10127 /* end of ethtool_ops */
10129 /****************************************************************************
10130 * General service functions
10131 ****************************************************************************/
10133 static int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
10137 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);
10141 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
10142 ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
10143 PCI_PM_CTRL_PME_STATUS));
10145 if (pmcsr & PCI_PM_CTRL_STATE_MASK)
10146 /* delay required during transition out of D3hot */
10151 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
10155 pmcsr |= PCI_PM_CTRL_PME_ENABLE;
10157 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
10160 /* No more memory access after this point until
10161 * device is brought back to D0.
10171 static inline int bnx2x_has_rx_work(struct bnx2x_fastpath *fp)
10175 /* Tell compiler that status block fields can change */
10177 rx_cons_sb = le16_to_cpu(*fp->rx_cons_sb);
10178 if ((rx_cons_sb & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
10180 return (fp->rx_comp_cons != rx_cons_sb);
10184 * net_device service functions
10187 static int bnx2x_poll(struct napi_struct *napi, int budget)
10189 struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
10191 struct bnx2x *bp = fp->bp;
10194 #ifdef BNX2X_STOP_ON_ERROR
10195 if (unlikely(bp->panic))
10199 prefetch(fp->tx_buf_ring[TX_BD(fp->tx_pkt_cons)].skb);
10200 prefetch(fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)].skb);
10201 prefetch((char *)(fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)].skb) + 256);
10203 bnx2x_update_fpsb_idx(fp);
10205 if (bnx2x_has_tx_work(fp))
10206 bnx2x_tx_int(fp);
10208 if (bnx2x_has_rx_work(fp)) {
10209 work_done = bnx2x_rx_int(fp, budget);
10211 /* must not complete if we consumed full budget */
10212 if (work_done >= budget)
10216 /* BNX2X_HAS_WORK() reads the status block, thus we need to
10217 * ensure that status block indices have been actually read
10218 * (bnx2x_update_fpsb_idx) prior to this check (BNX2X_HAS_WORK)
10219 * so that we won't write the "newer" value of the status block to IGU
10220 * (if there was a DMA right after BNX2X_HAS_WORK and
10221 * if there is no rmb, the memory reading (bnx2x_update_fpsb_idx)
10222 * may be postponed to right before bnx2x_ack_sb). In this case
10223 * there will never be another interrupt until there is another update
10224 * of the status block, while there is still unhandled work.
10228 if (!BNX2X_HAS_WORK(fp)) {
10229 #ifdef BNX2X_STOP_ON_ERROR
10232 napi_complete(napi);
10234 bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID,
10235 le16_to_cpu(fp->fp_u_idx), IGU_INT_NOP, 1);
10236 bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID,
10237 le16_to_cpu(fp->fp_c_idx), IGU_INT_ENABLE, 1);
10245 /* we split the first BD into headers and data BDs
10246 * to ease the pain of our fellow microcode engineers
10247 * we use one mapping for both BDs
10248 * So far this has only been observed to happen
10249 * in Other Operating Systems(TM)
10251 static noinline u16 bnx2x_tx_split(struct bnx2x *bp,
10252 struct bnx2x_fastpath *fp,
10253 struct eth_tx_bd **tx_bd, u16 hlen,
10254 u16 bd_prod, int nbd)
10256 struct eth_tx_bd *h_tx_bd = *tx_bd;
10257 struct eth_tx_bd *d_tx_bd;
10258 dma_addr_t mapping;
10259 int old_len = le16_to_cpu(h_tx_bd->nbytes);
10261 /* first fix first BD */
10262 h_tx_bd->nbd = cpu_to_le16(nbd);
10263 h_tx_bd->nbytes = cpu_to_le16(hlen);
10265 DP(NETIF_MSG_TX_QUEUED, "TSO split header size is %d "
10266 "(%x:%x) nbd %d\n", h_tx_bd->nbytes, h_tx_bd->addr_hi,
10267 h_tx_bd->addr_lo, h_tx_bd->nbd);
10269 /* now get a new data BD
10270 * (after the pbd) and fill it */
10271 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
10272 d_tx_bd = &fp->tx_desc_ring[bd_prod];
10274 mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi),
10275 le32_to_cpu(h_tx_bd->addr_lo)) + hlen;
10277 d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
10278 d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
10279 d_tx_bd->nbytes = cpu_to_le16(old_len - hlen);
10281 /* this marks the BD as one that has no individual mapping
10282 * the FW ignores this flag in a BD not marked start
10284 d_tx_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_SW_LSO;
10285 DP(NETIF_MSG_TX_QUEUED,
10286 "TSO split data size is %d (%x:%x)\n",
10287 d_tx_bd->nbytes, d_tx_bd->addr_hi, d_tx_bd->addr_lo);
10289 /* update tx_bd for marking the last BD flag */
10295 static inline u16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
10298 csum = (u16) ~csum_fold(csum_sub(csum,
10299 csum_partial(t_header - fix, fix, 0)));
10302 csum = (u16) ~csum_fold(csum_add(csum,
10303 csum_partial(t_header, -fix, 0)));
10305 return swab16(csum);
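/* What the arithmetic above does: the partial checksum handed down by
 * the stack covers a span starting "fix" bytes before (fix > 0) or
 * after (fix < 0) the transport header, so the missing or extra span's
 * partial sum is subtracted or added, folded back to 16 bits,
 * complemented, and byte-swapped into the order the parsing BD wants.
 */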
10308 static inline u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
10312 if (skb->ip_summed != CHECKSUM_PARTIAL)
10316 if (skb->protocol == htons(ETH_P_IPV6)) {
10318 if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
10319 rc |= XMIT_CSUM_TCP;
10323 if (ip_hdr(skb)->protocol == IPPROTO_TCP)
10324 rc |= XMIT_CSUM_TCP;
10328 if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
10329 rc |= XMIT_GSO_V4;
10331 else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
10332 rc |= XMIT_GSO_V6;
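/* The result is a small bitmask consumed by bnx2x_start_xmit(): an L3
 * checksum flavour (XMIT_CSUM_V4 is the bit tested below; an IPv6
 * counterpart presumably mirrors it), XMIT_CSUM_TCP for a TCP L4, and
 * XMIT_GSO_V4/V6 for LSO.
 */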
10337 #if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
10338 /* check if packet requires linearization (packet is too fragmented)
10339 no need to check fragmentation if page size > 8K (there will be no
10340 violation of FW restrictions) */
10341 static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
10346 int first_bd_sz = 0;
10348 /* 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */
10349 if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - 3)) {
10351 if (xmit_type & XMIT_GSO) {
10352 unsigned short lso_mss = skb_shinfo(skb)->gso_size;
10353 /* Check if LSO packet needs to be copied:
10354 3 = 1 (for headers BD) + 2 (for PBD and last BD) */
10355 int wnd_size = MAX_FETCH_BD - 3;
10356 /* Number of windows to check */
10357 int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size;
10362 /* Headers length */
10363 hlen = (int)(skb_transport_header(skb) - skb->data) +
10366 /* Amount of data (w/o headers) on linear part of SKB*/
10367 first_bd_sz = skb_headlen(skb) - hlen;
10369 wnd_sum = first_bd_sz;
10371 /* Calculate the first sum - it's special */
10372 for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++)
10374 skb_shinfo(skb)->frags[frag_idx].size;
10376 /* If there was data on linear skb data - check it */
10377 if (first_bd_sz > 0) {
10378 if (unlikely(wnd_sum < lso_mss)) {
10383 wnd_sum -= first_bd_sz;
10386 /* Others are easier: run through the frag list and
10387 check all windows */
10388 for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) {
10390 skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1].size;
10392 if (unlikely(wnd_sum < lso_mss)) {
10397 skb_shinfo(skb)->frags[wnd_idx].size;
10400 /* in non-LSO too fragmented packet should always
10401 be linearized */
10402 to_copy = 1;
10407 if (unlikely(to_copy))
10408 DP(NETIF_MSG_TX_QUEUED,
10409 "Linearization IS REQUIRED for %s packet. "
10410 "num_frags %d hlen %d first_bd_sz %d\n",
10411 (xmit_type & XMIT_GSO) ? "LSO" : "non-LSO",
10412 skb_shinfo(skb)->nr_frags, hlen, first_bd_sz);
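/* In other words: every wnd_size consecutive buffers must carry at
 * least one MSS worth of data, or the FW cannot fetch a full segment.
 * A sketch with assumed numbers: wnd_size = 10, ten 100-byte frags and
 * an MSS of 1460 gives a window sum of 1000 < 1460, so the skb must be
 * linearized.
 */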
10418 /* called with netif_tx_lock
10419 * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
10420 * netif_wake_queue()
10422 static int bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
10424 struct bnx2x *bp = netdev_priv(dev);
10425 struct bnx2x_fastpath *fp;
10426 struct netdev_queue *txq;
10427 struct sw_tx_bd *tx_buf;
10428 struct eth_tx_bd *tx_bd;
10429 struct eth_tx_parse_bd *pbd = NULL;
10430 u16 pkt_prod, bd_prod;
10432 dma_addr_t mapping;
10433 u32 xmit_type = bnx2x_xmit_type(bp, skb);
10434 int vlan_off = (bp->e1hov ? 4 : 0);
10438 #ifdef BNX2X_STOP_ON_ERROR
10439 if (unlikely(bp->panic))
10440 return NETDEV_TX_BUSY;
10443 fp_index = skb_get_queue_mapping(skb);
10444 txq = netdev_get_tx_queue(dev, fp_index);
10446 fp = &bp->fp[fp_index];
10448 if (unlikely(bnx2x_tx_avail(fp) < (skb_shinfo(skb)->nr_frags + 3))) {
10449 fp->eth_q_stats.driver_xoff++;
10450 netif_tx_stop_queue(txq);
10451 BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
10452 return NETDEV_TX_BUSY;
10455 DP(NETIF_MSG_TX_QUEUED, "SKB: summed %x protocol %x protocol(%x,%x)"
10456 " gso type %x xmit_type %x\n",
10457 skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
10458 ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type);
10460 #if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
10461 /* First, check if we need to linearize the skb (due to FW
10462 restrictions). No need to check fragmentation if page size > 8K
10463 (there will be no violation of FW restrictions) */
10464 if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) {
10465 /* Statistics of linearization */
10467 if (skb_linearize(skb) != 0) {
10468 DP(NETIF_MSG_TX_QUEUED, "SKB linearization failed - "
10469 "silently dropping this SKB\n");
10470 dev_kfree_skb_any(skb);
10471 return NETDEV_TX_OK;
10477 Please read carefully. First we use one BD which we mark as start,
10478 then for TSO or xsum we have a parsing info BD,
10479 and only then we have the rest of the TSO BDs.
10480 (don't forget to mark the last one as last,
10481 and to unmap only AFTER you write to the BD ...)
10482 And above all, all pbd sizes are in words - NOT DWORDS!
10485 pkt_prod = fp->tx_pkt_prod++;
10486 bd_prod = TX_BD(fp->tx_bd_prod);
10488 /* get a tx_buf and first BD */
10489 tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)];
10490 tx_bd = &fp->tx_desc_ring[bd_prod];
10492 tx_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
10493 tx_bd->general_data = (UNICAST_ADDRESS <<
10494 ETH_TX_BD_ETH_ADDR_TYPE_SHIFT);
10496 tx_bd->general_data |= (1 << ETH_TX_BD_HDR_NBDS_SHIFT);
10498 /* remember the first BD of the packet */
10499 tx_buf->first_bd = fp->tx_bd_prod;
10502 DP(NETIF_MSG_TX_QUEUED,
10503 "sending pkt %u @%p next_idx %u bd %u @%p\n",
10504 pkt_prod, tx_buf, fp->tx_pkt_prod, bd_prod, tx_bd);
10507 if ((bp->vlgrp != NULL) && vlan_tx_tag_present(skb) &&
10508 (bp->flags & HW_VLAN_TX_FLAG)) {
10509 tx_bd->vlan = cpu_to_le16(vlan_tx_tag_get(skb));
10510 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_VLAN_TAG;
10514 tx_bd->vlan = cpu_to_le16(pkt_prod);
10517 /* turn on parsing and get a BD */
10518 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
10519 pbd = (void *)&fp->tx_desc_ring[bd_prod];
10521 memset(pbd, 0, sizeof(struct eth_tx_parse_bd));
10524 if (xmit_type & XMIT_CSUM) {
10525 hlen = (skb_network_header(skb) - skb->data + vlan_off) / 2;
10527 /* for now NS flag is not used in Linux */
10529 (hlen | ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
10530 ETH_TX_PARSE_BD_LLC_SNAP_EN_SHIFT));
10532 pbd->ip_hlen = (skb_transport_header(skb) -
10533 skb_network_header(skb)) / 2;
10535 hlen += pbd->ip_hlen + tcp_hdrlen(skb) / 2;
10537 pbd->total_hlen = cpu_to_le16(hlen);
10538 hlen = hlen*2 - vlan_off;
10540 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_TCP_CSUM;
10542 if (xmit_type & XMIT_CSUM_V4)
10543 tx_bd->bd_flags.as_bitfield |=
10544 ETH_TX_BD_FLAGS_IP_CSUM;
10546 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IPV6;
10548 if (xmit_type & XMIT_CSUM_TCP) {
10549 pbd->tcp_pseudo_csum = swab16(tcp_hdr(skb)->check);
10552 s8 fix = SKB_CS_OFF(skb); /* signed! */
10554 pbd->global_data |= ETH_TX_PARSE_BD_CS_ANY_FLG;
10555 pbd->cs_offset = fix / 2;
10557 DP(NETIF_MSG_TX_QUEUED,
10558 "hlen %d offset %d fix %d csum before fix %x\n",
10559 le16_to_cpu(pbd->total_hlen), pbd->cs_offset, fix,
10562 /* HW bug: fixup the CSUM */
10563 pbd->tcp_pseudo_csum =
10564 bnx2x_csum_fix(skb_transport_header(skb),
10567 DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n",
10568 pbd->tcp_pseudo_csum);
10572 mapping = pci_map_single(bp->pdev, skb->data,
10573 skb_headlen(skb), PCI_DMA_TODEVICE);
10575 tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
10576 tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
10577 nbd = skb_shinfo(skb)->nr_frags + ((pbd == NULL) ? 1 : 2);
10578 tx_bd->nbd = cpu_to_le16(nbd);
10579 tx_bd->nbytes = cpu_to_le16(skb_headlen(skb));
10581 DP(NETIF_MSG_TX_QUEUED, "first bd @%p addr (%x:%x) nbd %d"
10582 " nbytes %d flags %x vlan %x\n",
10583 tx_bd, tx_bd->addr_hi, tx_bd->addr_lo, le16_to_cpu(tx_bd->nbd),
10584 le16_to_cpu(tx_bd->nbytes), tx_bd->bd_flags.as_bitfield,
10585 le16_to_cpu(tx_bd->vlan));
10587 if (xmit_type & XMIT_GSO) {
10589 DP(NETIF_MSG_TX_QUEUED,
10590 "TSO packet len %d hlen %d total len %d tso size %d\n",
10591 skb->len, hlen, skb_headlen(skb),
10592 skb_shinfo(skb)->gso_size);
10594 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;
10596 if (unlikely(skb_headlen(skb) > hlen))
10597 bd_prod = bnx2x_tx_split(bp, fp, &tx_bd, hlen,
10600 pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
10601 pbd->tcp_send_seq = swab32(tcp_hdr(skb)->seq);
10602 pbd->tcp_flags = pbd_tcp_flags(skb);
10604 if (xmit_type & XMIT_GSO_V4) {
10605 pbd->ip_id = swab16(ip_hdr(skb)->id);
10606 pbd->tcp_pseudo_csum =
10607 swab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
10608 ip_hdr(skb)->daddr,
10609 0, IPPROTO_TCP, 0));
10612 pbd->tcp_pseudo_csum =
10613 swab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
10614 &ipv6_hdr(skb)->daddr,
10615 0, IPPROTO_TCP, 0));
10617 pbd->global_data |= ETH_TX_PARSE_BD_PSEUDO_CS_WITHOUT_LEN;
10620 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
10621 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
10623 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
10624 tx_bd = &fp->tx_desc_ring[bd_prod];
10626 mapping = pci_map_page(bp->pdev, frag->page, frag->page_offset,
10627 frag->size, PCI_DMA_TODEVICE);
10629 tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
10630 tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
10631 tx_bd->nbytes = cpu_to_le16(frag->size);
10632 tx_bd->vlan = cpu_to_le16(pkt_prod);
10633 tx_bd->bd_flags.as_bitfield = 0;
10635 DP(NETIF_MSG_TX_QUEUED,
10636 "frag %d bd @%p addr (%x:%x) nbytes %d flags %x\n",
10637 i, tx_bd, tx_bd->addr_hi, tx_bd->addr_lo,
10638 le16_to_cpu(tx_bd->nbytes), tx_bd->bd_flags.as_bitfield);
10641 /* now at last mark the BD as the last BD */
10642 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_END_BD;
10644 DP(NETIF_MSG_TX_QUEUED, "last bd @%p flags %x\n",
10645 tx_bd, tx_bd->bd_flags.as_bitfield);
10647 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
10649 /* now send a tx doorbell, counting the next BD
10650 * if the packet contains or ends with it
10652 if (TX_BD_POFF(bd_prod) < nbd)
10653 nbd++;
10656 DP(NETIF_MSG_TX_QUEUED,
10657 "PBD @%p ip_data %x ip_hlen %u ip_id %u lso_mss %u"
10658 " tcp_flags %x xsum %x seq %u hlen %u\n",
10659 pbd, pbd->global_data, pbd->ip_hlen, pbd->ip_id,
10660 pbd->lso_mss, pbd->tcp_flags, pbd->tcp_pseudo_csum,
10661 pbd->tcp_send_seq, le16_to_cpu(pbd->total_hlen));
10663 DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d bd %u\n", nbd, bd_prod);
10666 * Make sure that the BD data is updated before updating the producer
10667 * since FW might read the BD right after the producer is updated.
10668 * This is only applicable for weak-ordered memory model archs such
10669 * as IA-64. The following barrier is also mandatory since FW will
10670 * assume packets must have BDs.
10674 le16_add_cpu(&fp->hw_tx_prods->bds_prod, nbd);
10675 mb(); /* FW restriction: must not reorder writing nbd and packets */
10676 le32_add_cpu(&fp->hw_tx_prods->packets_prod, 1);
10677 DOORBELL(bp, fp->index, 0);
10681 fp->tx_bd_prod += nbd;
10683 if (unlikely(bnx2x_tx_avail(fp) < MAX_SKB_FRAGS + 3)) {
10684 /* We want bnx2x_tx_int to "see" the updated tx_bd_prod
10685 if we put Tx into XOFF state. */
10687 netif_tx_stop_queue(txq);
10688 fp->eth_q_stats.driver_xoff++;
10689 if (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3)
10690 netif_tx_wake_queue(txq);
10694 return NETDEV_TX_OK;
10697 /* called with rtnl_lock */
10698 static int bnx2x_open(struct net_device *dev)
10700 struct bnx2x *bp = netdev_priv(dev);
10702 netif_carrier_off(dev);
10704 bnx2x_set_power_state(bp, PCI_D0);
10706 return bnx2x_nic_load(bp, LOAD_OPEN);
10709 /* called with rtnl_lock */
10710 static int bnx2x_close(struct net_device *dev)
10712 struct bnx2x *bp = netdev_priv(dev);
10714 /* Unload the driver, release IRQs */
10715 bnx2x_nic_unload(bp, UNLOAD_CLOSE);
10716 if (atomic_read(&bp->pdev->enable_cnt) == 1)
10717 if (!CHIP_REV_IS_SLOW(bp))
10718 bnx2x_set_power_state(bp, PCI_D3hot);
10723 /* called with netif_tx_lock from dev_mcast.c */
10724 static void bnx2x_set_rx_mode(struct net_device *dev)
10726 struct bnx2x *bp = netdev_priv(dev);
10727 u32 rx_mode = BNX2X_RX_MODE_NORMAL;
10728 int port = BP_PORT(bp);
10730 if (bp->state != BNX2X_STATE_OPEN) {
10731 DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
10735 DP(NETIF_MSG_IFUP, "dev->flags = %x\n", dev->flags);
10737 if (dev->flags & IFF_PROMISC)
10738 rx_mode = BNX2X_RX_MODE_PROMISC;
10740 else if ((dev->flags & IFF_ALLMULTI) ||
10741 ((dev->mc_count > BNX2X_MAX_MULTICAST) && CHIP_IS_E1(bp)))
10742 rx_mode = BNX2X_RX_MODE_ALLMULTI;
10744 else { /* some multicasts */
10745 if (CHIP_IS_E1(bp)) {
10746 int i, old, offset;
10747 struct dev_mc_list *mclist;
10748 struct mac_configuration_cmd *config =
10749 bnx2x_sp(bp, mcast_config);
10751 for (i = 0, mclist = dev->mc_list;
10752 mclist && (i < dev->mc_count);
10753 i++, mclist = mclist->next) {
10755 config->config_table[i].
10756 cam_entry.msb_mac_addr =
10757 swab16(*(u16 *)&mclist->dmi_addr[0]);
10758 config->config_table[i].
10759 cam_entry.middle_mac_addr =
10760 swab16(*(u16 *)&mclist->dmi_addr[2]);
10761 config->config_table[i].
10762 cam_entry.lsb_mac_addr =
10763 swab16(*(u16 *)&mclist->dmi_addr[4]);
10764 config->config_table[i].cam_entry.flags =
10766 config->config_table[i].
10767 target_table_entry.flags = 0;
10768 config->config_table[i].
10769 target_table_entry.client_id = 0;
10770 config->config_table[i].
10771 target_table_entry.vlan_id = 0;
10774 "setting MCAST[%d] (%04x:%04x:%04x)\n", i,
10775 config->config_table[i].
10776 cam_entry.msb_mac_addr,
10777 config->config_table[i].
10778 cam_entry.middle_mac_addr,
10779 config->config_table[i].
10780 cam_entry.lsb_mac_addr);
10782 old = config->hdr.length;
10784 for (; i < old; i++) {
10785 if (CAM_IS_INVALID(config->
10786 config_table[i])) {
10787 /* already invalidated */
10788 break;
10789 }
10790 /* invalidate */
10791 CAM_INVALIDATE(config->
10792 config_table[i]);
10796 if (CHIP_REV_IS_SLOW(bp))
10797 offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
10798 else
10799 offset = BNX2X_MAX_MULTICAST*(1 + port);
10801 config->hdr.length = i;
10802 config->hdr.offset = offset;
10803 config->hdr.client_id = bp->fp->cl_id;
10804 config->hdr.reserved1 = 0;
10806 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
10807 U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
10808 U64_LO(bnx2x_sp_mapping(bp, mcast_config)),
10811 /* Accept one or more multicasts */
10812 struct dev_mc_list *mclist;
10813 u32 mc_filter[MC_HASH_SIZE];
10814 u32 crc, bit, regidx;
10817 memset(mc_filter, 0, 4 * MC_HASH_SIZE);
10819 for (i = 0, mclist = dev->mc_list;
10820 mclist && (i < dev->mc_count);
10821 i++, mclist = mclist->next) {
10823 DP(NETIF_MSG_IFUP, "Adding mcast MAC: %pM\n",
10826 crc = crc32c_le(0, mclist->dmi_addr, ETH_ALEN);
10827 bit = (crc >> 24) & 0xff;
10828 regidx = bit >> 5;
10829 bit &= 0x1f;
10830 mc_filter[regidx] |= (1 << bit);
10833 for (i = 0; i < MC_HASH_SIZE; i++)
10834 REG_WR(bp, MC_HASH_OFFSET(bp, i),
10835 mc_filter[i]);
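/* Worked example of the hash: if crc32c of the MAC yields a top byte
 * of 0x9a (154), then regidx = 154 >> 5 = 4 and bit = 154 & 0x1f = 26,
 * so bit 26 of the fifth filter register is set -- a 256-bit imperfect
 * filter, assuming MC_HASH_SIZE is 8 u32 registers.
 */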
10839 bp->rx_mode = rx_mode;
10840 bnx2x_set_storm_rx_mode(bp);
10843 /* called with rtnl_lock */
10844 static int bnx2x_change_mac_addr(struct net_device *dev, void *p)
10846 struct sockaddr *addr = p;
10847 struct bnx2x *bp = netdev_priv(dev);
10849 if (!is_valid_ether_addr((u8 *)(addr->sa_data)))
10850 return -EINVAL;
10852 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
10853 if (netif_running(dev)) {
10854 if (CHIP_IS_E1(bp))
10855 bnx2x_set_mac_addr_e1(bp, 1);
10856 else
10857 bnx2x_set_mac_addr_e1h(bp, 1);
10863 /* called with rtnl_lock */
10864 static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
10866 struct mii_ioctl_data *data = if_mii(ifr);
10867 struct bnx2x *bp = netdev_priv(dev);
10868 int port = BP_PORT(bp);
10873 data->phy_id = bp->port.phy_addr;
10877 case SIOCGMIIREG: {
10880 if (!netif_running(dev))
10881 return -EAGAIN;
10883 mutex_lock(&bp->port.phy_mutex);
10884 err = bnx2x_cl45_read(bp, port, 0, bp->port.phy_addr,
10885 DEFAULT_PHY_DEV_ADDR,
10886 (data->reg_num & 0x1f), &mii_regval);
10887 data->val_out = mii_regval;
10888 mutex_unlock(&bp->port.phy_mutex);
10893 if (!capable(CAP_NET_ADMIN))
10896 if (!netif_running(dev))
10899 mutex_lock(&bp->port.phy_mutex);
10900 err = bnx2x_cl45_write(bp, port, 0, bp->port.phy_addr,
10901 DEFAULT_PHY_DEV_ADDR,
10902 (data->reg_num & 0x1f), data->val_in);
10903 mutex_unlock(&bp->port.phy_mutex);
10911 return -EOPNOTSUPP;
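/*
 * Usage sketch (hypothetical, not part of the driver): the MII ioctls
 * handled above are what a userspace tool such as mii-tool issues to
 * read a PHY register; "eth0" and the register number are examples.
 *
 *	struct ifreq ifr = { 0 };
 *	struct mii_ioctl_data *mii =
 *			(struct mii_ioctl_data *)&ifr.ifr_data;
 *
 *	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
 *	ioctl(fd, SIOCGMIIPHY, &ifr);	// fills mii->phy_id
 *	mii->reg_num = 1;		// PHY status register
 *	ioctl(fd, SIOCGMIIREG, &ifr);	// fills mii->val_out
 */
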
/* called with rtnl_lock */
static int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
{
	struct bnx2x *bp = netdev_priv(dev);
	int rc = 0;

	if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
	    ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE))
		return -EINVAL;

	/* This does not race with packet allocation
	 * because the actual alloc size is
	 * only updated as part of load
	 */
	dev->mtu = new_mtu;

	if (netif_running(dev)) {
		bnx2x_nic_unload(bp, UNLOAD_NORMAL);
		rc = bnx2x_nic_load(bp, LOAD_NORMAL);
	}

	return rc;
}

static void bnx2x_tx_timeout(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

#ifdef BNX2X_STOP_ON_ERROR
	if (!bp->panic)
		bnx2x_panic();
#endif

	/* This allows the netif to be shutdown gracefully before resetting */
	schedule_work(&bp->reset_task);
}

#ifdef BCM_VLAN
/* called with rtnl_lock */
static void bnx2x_vlan_rx_register(struct net_device *dev,
				   struct vlan_group *vlgrp)
{
	struct bnx2x *bp = netdev_priv(dev);

	bp->vlgrp = vlgrp;

	/* Set flags according to the required capabilities */
	bp->flags &= ~(HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG);

	if (dev->features & NETIF_F_HW_VLAN_TX)
		bp->flags |= HW_VLAN_TX_FLAG;

	if (dev->features & NETIF_F_HW_VLAN_RX)
		bp->flags |= HW_VLAN_RX_FLAG;

	if (netif_running(dev))
		bnx2x_set_client_config(bp);
}
#endif

#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
static void poll_bnx2x(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	disable_irq(bp->pdev->irq);
	bnx2x_interrupt(bp->pdev->irq, dev);
	enable_irq(bp->pdev->irq);
}
#endif

static const struct net_device_ops bnx2x_netdev_ops = {
	.ndo_open		= bnx2x_open,
	.ndo_stop		= bnx2x_close,
	.ndo_start_xmit		= bnx2x_start_xmit,
	.ndo_set_multicast_list	= bnx2x_set_rx_mode,
	.ndo_set_mac_address	= bnx2x_change_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_do_ioctl		= bnx2x_ioctl,
	.ndo_change_mtu		= bnx2x_change_mtu,
	.ndo_tx_timeout		= bnx2x_tx_timeout,
#ifdef BCM_VLAN
	.ndo_vlan_rx_register	= bnx2x_vlan_rx_register,
#endif
#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
	.ndo_poll_controller	= poll_bnx2x,
#endif
};

static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
				    struct net_device *dev)
{
	struct bnx2x *bp;
	int rc;

	SET_NETDEV_DEV(dev, &pdev->dev);
	bp = netdev_priv(dev);

	bp->dev = dev;
	bp->pdev = pdev;
	bp->flags = 0;
	bp->func = PCI_FUNC(pdev->devfn);

	rc = pci_enable_device(pdev);
	if (rc) {
		printk(KERN_ERR PFX "Cannot enable PCI device, aborting\n");
		goto err_out;
	}

	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
		printk(KERN_ERR PFX "Cannot find PCI device base address,"
		       " aborting\n");
		rc = -ENODEV;
		goto err_out_disable;
	}

	if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
		printk(KERN_ERR PFX "Cannot find second PCI device"
		       " base address, aborting\n");
		rc = -ENODEV;
		goto err_out_disable;
	}

	if (atomic_read(&pdev->enable_cnt) == 1) {
		rc = pci_request_regions(pdev, DRV_MODULE_NAME);
		if (rc) {
			printk(KERN_ERR PFX "Cannot obtain PCI resources,"
			       " aborting\n");
			goto err_out_disable;
		}

		pci_set_master(pdev);
		pci_save_state(pdev);
	}

	bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
	if (bp->pm_cap == 0) {
		printk(KERN_ERR PFX "Cannot find power management"
		       " capability, aborting\n");
		rc = -EIO;
		goto err_out_release;
	}

	bp->pcie_cap = pci_find_capability(pdev, PCI_CAP_ID_EXP);
	if (bp->pcie_cap == 0) {
		printk(KERN_ERR PFX "Cannot find PCI Express capability,"
		       " aborting\n");
		rc = -EIO;
		goto err_out_release;
	}

	if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) == 0) {
		bp->flags |= USING_DAC_FLAG;
		if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)) != 0) {
			printk(KERN_ERR PFX "pci_set_consistent_dma_mask"
			       " failed, aborting\n");
			rc = -EIO;
			goto err_out_release;
		}

	} else if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) {
		printk(KERN_ERR PFX "System does not support DMA,"
		       " aborting\n");
		rc = -EIO;
		goto err_out_release;
	}

	dev->mem_start = pci_resource_start(pdev, 0);
	dev->base_addr = dev->mem_start;
	dev->mem_end = pci_resource_end(pdev, 0);

	dev->irq = pdev->irq;

	bp->regview = pci_ioremap_bar(pdev, 0);
	if (!bp->regview) {
		printk(KERN_ERR PFX "Cannot map register space, aborting\n");
		rc = -ENOMEM;
		goto err_out_release;
	}

	bp->doorbells = ioremap_nocache(pci_resource_start(pdev, 2),
					min_t(u64, BNX2X_DB_SIZE,
					      pci_resource_len(pdev, 2)));
	if (!bp->doorbells) {
		printk(KERN_ERR PFX "Cannot map doorbell space, aborting\n");
		rc = -ENOMEM;
		goto err_out_unmap;
	}

	bnx2x_set_power_state(bp, PCI_D0);

	/* clean indirect addresses */
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);
	REG_WR(bp, PXP2_REG_PGL_ADDR_88_F0 + BP_PORT(bp)*16, 0);
	REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F0 + BP_PORT(bp)*16, 0);
	REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0 + BP_PORT(bp)*16, 0);
	REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0 + BP_PORT(bp)*16, 0);

	dev->watchdog_timeo = TX_TIMEOUT;

	dev->netdev_ops = &bnx2x_netdev_ops;
	dev->ethtool_ops = &bnx2x_ethtool_ops;
	dev->features |= NETIF_F_SG;
	dev->features |= NETIF_F_HW_CSUM;
	if (bp->flags & USING_DAC_FLAG)
		dev->features |= NETIF_F_HIGHDMA;
#ifdef BCM_VLAN
	dev->features |= (NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX);
	bp->flags |= (HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG);
#endif
	dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
	dev->features |= NETIF_F_TSO6;

	return 0;

err_out_unmap:
	if (bp->regview) {
		iounmap(bp->regview);
		bp->regview = NULL;
	}
	if (bp->doorbells) {
		iounmap(bp->doorbells);
		bp->doorbells = NULL;
	}

err_out_release:
	if (atomic_read(&pdev->enable_cnt) == 1)
		pci_release_regions(pdev);

err_out_disable:
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);

err_out:
	return rc;
}

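/*
 * Mapping note (added commentary): bnx2x_init_dev() above maps two PCI
 * BARs: BAR0 is the register window used by REG_RD/REG_WR
 * (bp->regview), and BAR2 is the doorbell area (bp->doorbells),
 * capped to BNX2X_DB_SIZE.
 */
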
static int __devinit bnx2x_get_pcie_width(struct bnx2x *bp)
{
	u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);

	val = (val & PCICFG_LINK_WIDTH) >> PCICFG_LINK_WIDTH_SHIFT;
	return val;
}

/* return value of 1=2.5GHz 2=5GHz */
static int __devinit bnx2x_get_pcie_speed(struct bnx2x *bp)
{
	u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);

	val = (val & PCICFG_LINK_SPEED) >> PCICFG_LINK_SPEED_SHIFT;
	return val;
}

static int __devinit bnx2x_check_firmware(struct bnx2x *bp)
{
	struct bnx2x_fw_file_hdr *fw_hdr;
	struct bnx2x_fw_file_section *sections;
	u16 *ops_offsets;
	u32 offset, len, num_ops;
	int i;
	const struct firmware *firmware = bp->firmware;
	const u8 *fw_ver;

	if (firmware->size < sizeof(struct bnx2x_fw_file_hdr))
		return -EINVAL;

	fw_hdr = (struct bnx2x_fw_file_hdr *)firmware->data;
	sections = (struct bnx2x_fw_file_section *)fw_hdr;

	/* Make sure none of the offsets and sizes make us read beyond
	 * the end of the firmware data */
	for (i = 0; i < sizeof(*fw_hdr) / sizeof(*sections); i++) {
		offset = be32_to_cpu(sections[i].offset);
		len = be32_to_cpu(sections[i].len);
		if (offset + len > firmware->size) {
			printk(KERN_ERR PFX "Section %d length is out of bounds\n", i);
			return -EINVAL;
		}
	}

	/* Likewise for the init_ops offsets */
	offset = be32_to_cpu(fw_hdr->init_ops_offsets.offset);
	ops_offsets = (u16 *)(firmware->data + offset);
	num_ops = be32_to_cpu(fw_hdr->init_ops.len) / sizeof(struct raw_op);

	for (i = 0; i < be32_to_cpu(fw_hdr->init_ops_offsets.len) / 2; i++) {
		if (be16_to_cpu(ops_offsets[i]) > num_ops) {
			printk(KERN_ERR PFX "Section offset %d is out of bounds\n", i);
			return -EINVAL;
		}
	}

	/* Check FW version */
	offset = be32_to_cpu(fw_hdr->fw_version.offset);
	fw_ver = firmware->data + offset;
	if ((fw_ver[0] != BCM_5710_FW_MAJOR_VERSION) ||
	    (fw_ver[1] != BCM_5710_FW_MINOR_VERSION) ||
	    (fw_ver[2] != BCM_5710_FW_REVISION_VERSION) ||
	    (fw_ver[3] != BCM_5710_FW_ENGINEERING_VERSION)) {
		printk(KERN_ERR PFX "Bad FW version:%d.%d.%d.%d."
		       " Should be %d.%d.%d.%d\n",
		       fw_ver[0], fw_ver[1], fw_ver[2],
		       fw_ver[3], BCM_5710_FW_MAJOR_VERSION,
		       BCM_5710_FW_MINOR_VERSION,
		       BCM_5710_FW_REVISION_VERSION,
		       BCM_5710_FW_ENGINEERING_VERSION);
		return -EINVAL;
	}

	return 0;
}

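/*
 * Layout sketch of the checked firmware file (illustrative, inferred
 * from the checks above): the file begins with a struct
 * bnx2x_fw_file_hdr, effectively an array of big-endian
 * { len, offset } section descriptors. Every descriptor must point
 * inside firmware->size, every init_ops offset must stay below the
 * number of ops, and the embedded version must match the driver's
 * BCM_5710_FW_* constants.
 */
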
static inline void be32_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
{
	u32 i;
	const __be32 *source = (const __be32 *)_source;
	u32 *target = (u32 *)_target;

	for (i = 0; i < n/4; i++)
		target[i] = be32_to_cpu(source[i]);
}

/*
 * Ops array is stored in the following format:
 * {op(8bit), offset(24bit, big endian), data(32bit, big endian)}
 */
static inline void bnx2x_prep_ops(const u8 *_source, u8 *_target, u32 n)
{
	u32 i, j, tmp;
	const __be32 *source = (const __be32 *)_source;
	struct raw_op *target = (struct raw_op *)_target;

	for (i = 0, j = 0; i < n/8; i++, j += 2) {
		tmp = be32_to_cpu(source[j]);
		target[i].op = (tmp >> 24) & 0xff;
		target[i].offset = tmp & 0xffffff;
		target[i].raw_data = be32_to_cpu(source[j + 1]);
	}
}

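/*
 * Worked decode example (illustrative, not in the original source):
 * if the two big-endian words of a record are 0x05001234 0xdeadbeef,
 * then after tmp = be32_to_cpu(source[j]):
 *
 *	target[i].op       = (tmp >> 24) & 0xff;	// 0x05
 *	target[i].offset   = tmp & 0xffffff;		// 0x001234
 *	target[i].raw_data = be32_to_cpu(source[j + 1]);	// 0xdeadbeef
 */
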
static inline void be16_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
{
	u32 i;
	u16 *target = (u16 *)_target;
	const __be16 *source = (const __be16 *)_source;

	for (i = 0; i < n/2; i++)
		target[i] = be16_to_cpu(source[i]);
}

#define BNX2X_ALLOC_AND_SET(arr, lbl, func) \
	do { \
		u32 len = be32_to_cpu(fw_hdr->arr.len); \
		bp->arr = kmalloc(len, GFP_KERNEL); \
		if (!bp->arr) { \
			printk(KERN_ERR PFX "Failed to allocate %d bytes for "#arr"\n", len); \
			goto lbl; \
		} \
		func(bp->firmware->data + \
		     be32_to_cpu(fw_hdr->arr.offset), \
		     (u8 *)bp->arr, len); \
	} while (0)

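/*
 * Expansion sketch (illustrative): BNX2X_ALLOC_AND_SET(init_ops,
 * init_ops_alloc_err, bnx2x_prep_ops) reads fw_hdr->init_ops.len,
 * kmalloc()s bp->init_ops of that size, jumps to init_ops_alloc_err
 * on allocation failure, and otherwise converts the raw big-endian
 * section into host order via bnx2x_prep_ops().
 */
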
static int __devinit bnx2x_init_firmware(struct bnx2x *bp, struct device *dev)
{
	char fw_file_name[40] = {0};
	int rc, offset;
	struct bnx2x_fw_file_hdr *fw_hdr;

	/* Create a FW file name */
	if (CHIP_IS_E1(bp))
		offset = sprintf(fw_file_name, FW_FILE_PREFIX_E1);
	else
		offset = sprintf(fw_file_name, FW_FILE_PREFIX_E1H);

	sprintf(fw_file_name + offset, "%d.%d.%d.%d.fw",
		BCM_5710_FW_MAJOR_VERSION,
		BCM_5710_FW_MINOR_VERSION,
		BCM_5710_FW_REVISION_VERSION,
		BCM_5710_FW_ENGINEERING_VERSION);

	printk(KERN_INFO PFX "Loading %s\n", fw_file_name);

	rc = request_firmware(&bp->firmware, fw_file_name, dev);
	if (rc) {
		printk(KERN_ERR PFX "Can't load firmware file %s\n", fw_file_name);
		goto request_firmware_exit;
	}

	rc = bnx2x_check_firmware(bp);
	if (rc) {
		printk(KERN_ERR PFX "Corrupt firmware file %s\n", fw_file_name);
		goto request_firmware_exit;
	}

	fw_hdr = (struct bnx2x_fw_file_hdr *)bp->firmware->data;

	/* Initialize the pointers to the init arrays */
	/* Blob */
	BNX2X_ALLOC_AND_SET(init_data, request_firmware_exit, be32_to_cpu_n);

	/* Opcodes */
	BNX2X_ALLOC_AND_SET(init_ops, init_ops_alloc_err, bnx2x_prep_ops);

	/* Offsets */
	BNX2X_ALLOC_AND_SET(init_ops_offsets, init_offsets_alloc_err, be16_to_cpu_n);

	/* STORMs firmware */
	bp->tsem_int_table_data = bp->firmware->data +
		be32_to_cpu(fw_hdr->tsem_int_table_data.offset);
	bp->tsem_pram_data = bp->firmware->data +
		be32_to_cpu(fw_hdr->tsem_pram_data.offset);
	bp->usem_int_table_data = bp->firmware->data +
		be32_to_cpu(fw_hdr->usem_int_table_data.offset);
	bp->usem_pram_data = bp->firmware->data +
		be32_to_cpu(fw_hdr->usem_pram_data.offset);
	bp->xsem_int_table_data = bp->firmware->data +
		be32_to_cpu(fw_hdr->xsem_int_table_data.offset);
	bp->xsem_pram_data = bp->firmware->data +
		be32_to_cpu(fw_hdr->xsem_pram_data.offset);
	bp->csem_int_table_data = bp->firmware->data +
		be32_to_cpu(fw_hdr->csem_int_table_data.offset);
	bp->csem_pram_data = bp->firmware->data +
		be32_to_cpu(fw_hdr->csem_pram_data.offset);

	return 0;

init_offsets_alloc_err:
	kfree(bp->init_ops);
init_ops_alloc_err:
	kfree(bp->init_data);
request_firmware_exit:
	release_firmware(bp->firmware);

	return rc;
}

static int __devinit bnx2x_init_one(struct pci_dev *pdev,
				    const struct pci_device_id *ent)
{
	static int version_printed;
	struct net_device *dev = NULL;
	struct bnx2x *bp;
	int rc;

	if (version_printed++ == 0)
		printk(KERN_INFO "%s", version);

	/* dev zeroed in init_etherdev */
	dev = alloc_etherdev_mq(sizeof(*bp), MAX_CONTEXT);
	if (!dev) {
		printk(KERN_ERR PFX "Cannot allocate net device\n");
		return -ENOMEM;
	}

	bp = netdev_priv(dev);
	bp->msglevel = debug;

	rc = bnx2x_init_dev(pdev, dev);
	if (rc < 0) {
		free_netdev(dev);
		return rc;
	}

	pci_set_drvdata(pdev, dev);

	rc = bnx2x_init_bp(bp);
	if (rc)
		goto init_one_exit;

	/* Set init arrays */
	rc = bnx2x_init_firmware(bp, &pdev->dev);
	if (rc) {
		printk(KERN_ERR PFX "Error loading firmware\n");
		goto init_one_exit;
	}

	rc = register_netdev(dev);
	if (rc) {
		dev_err(&pdev->dev, "Cannot register net device\n");
		goto init_one_exit;
	}

	printk(KERN_INFO "%s: %s (%c%d) PCI-E x%d %s found at mem %lx,"
	       " IRQ %d, ", dev->name, board_info[ent->driver_data].name,
	       (CHIP_REV(bp) >> 12) + 'A', (CHIP_METAL(bp) >> 4),
	       bnx2x_get_pcie_width(bp),
	       (bnx2x_get_pcie_speed(bp) == 2) ? "5GHz (Gen2)" : "2.5GHz",
	       dev->base_addr, bp->pdev->irq);
	printk(KERN_CONT "node addr %pM\n", dev->dev_addr);

	return 0;

init_one_exit:
	if (bp->regview)
		iounmap(bp->regview);

	if (bp->doorbells)
		iounmap(bp->doorbells);

	free_netdev(dev);

	if (atomic_read(&pdev->enable_cnt) == 1)
		pci_release_regions(pdev);

	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);

	return rc;
}

static void __devexit bnx2x_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp;

	if (!dev) {
		printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
		return;
	}
	bp = netdev_priv(dev);

	unregister_netdev(dev);

	kfree(bp->init_ops_offsets);
	kfree(bp->init_ops);
	kfree(bp->init_data);
	release_firmware(bp->firmware);

	if (bp->regview)
		iounmap(bp->regview);

	if (bp->doorbells)
		iounmap(bp->doorbells);

	free_netdev(dev);

	if (atomic_read(&pdev->enable_cnt) == 1)
		pci_release_regions(pdev);

	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
}

static int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp;

	if (!dev) {
		printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
		return -ENODEV;
	}
	bp = netdev_priv(dev);

	rtnl_lock();

	pci_save_state(pdev);

	if (!netif_running(dev)) {
		rtnl_unlock();
		return 0;
	}

	netif_device_detach(dev);

	bnx2x_nic_unload(bp, UNLOAD_CLOSE);

	bnx2x_set_power_state(bp, pci_choose_state(pdev, state));

	rtnl_unlock();

	return 0;
}

static int bnx2x_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp;
	int rc;

	if (!dev) {
		printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
		return -ENODEV;
	}
	bp = netdev_priv(dev);

	rtnl_lock();

	pci_restore_state(pdev);

	if (!netif_running(dev)) {
		rtnl_unlock();
		return 0;
	}

	bnx2x_set_power_state(bp, PCI_D0);
	netif_device_attach(dev);

	rc = bnx2x_nic_load(bp, LOAD_OPEN);

	rtnl_unlock();

	return rc;
}

static int bnx2x_eeh_nic_unload(struct bnx2x *bp)
{
	int i;

	bp->state = BNX2X_STATE_ERROR;

	bp->rx_mode = BNX2X_RX_MODE_NONE;

	bnx2x_netif_stop(bp, 0);

	del_timer_sync(&bp->timer);
	bp->stats_state = STATS_STATE_DISABLED;
	DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");

	/* Release IRQs */
	bnx2x_free_irq(bp);

	if (CHIP_IS_E1(bp)) {
		struct mac_configuration_cmd *config =
						bnx2x_sp(bp, mcast_config);

		for (i = 0; i < config->hdr.length; i++)
			CAM_INVALIDATE(config->config_table[i]);
	}

	/* Free SKBs, SGEs, TPA pool and driver internals */
	bnx2x_free_skbs(bp);
	for_each_rx_queue(bp, i)
		bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
	for_each_rx_queue(bp, i)
		netif_napi_del(&bnx2x_fp(bp, i, napi));
	bnx2x_free_mem(bp);

	bp->state = BNX2X_STATE_CLOSED;

	netif_carrier_off(bp->dev);

	return 0;
}

static void bnx2x_eeh_recover(struct bnx2x *bp)
{
	u32 val;

	mutex_init(&bp->port.phy_mutex);

	bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
	bp->link_params.shmem_base = bp->common.shmem_base;
	BNX2X_DEV_INFO("shmem offset is 0x%x\n", bp->common.shmem_base);

	if (!bp->common.shmem_base ||
	    (bp->common.shmem_base < 0xA0000) ||
	    (bp->common.shmem_base >= 0xC0000)) {
		BNX2X_DEV_INFO("MCP not active\n");
		bp->flags |= NO_MCP_FLAG;
		return;
	}

	val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
	if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
		!= (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
		BNX2X_ERR("BAD MCP validity signature\n");

	if (!BP_NOMCP(bp)) {
		bp->fw_seq = (SHMEM_RD(bp, func_mb[BP_FUNC(bp)].drv_mb_header)
			      & DRV_MSG_SEQ_NUMBER_MASK);
		BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
	}
}

/**
 * bnx2x_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t bnx2x_io_error_detected(struct pci_dev *pdev,
						pci_channel_state_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp = netdev_priv(dev);

	rtnl_lock();

	netif_device_detach(dev);

	if (netif_running(dev))
		bnx2x_eeh_nic_unload(bp);

	pci_disable_device(pdev);

	rtnl_unlock();

	/* Request a slot reset */
	return PCI_ERS_RESULT_NEED_RESET;
}

/**
 * bnx2x_io_slot_reset - called after the PCI bus has been reset
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot.
 */
static pci_ers_result_t bnx2x_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp = netdev_priv(dev);

	rtnl_lock();

	if (pci_enable_device(pdev)) {
		dev_err(&pdev->dev,
			"Cannot re-enable PCI device after reset\n");
		rtnl_unlock();
		return PCI_ERS_RESULT_DISCONNECT;
	}

	pci_set_master(pdev);
	pci_restore_state(pdev);

	if (netif_running(dev))
		bnx2x_set_power_state(bp, PCI_D0);

	rtnl_unlock();

	return PCI_ERS_RESULT_RECOVERED;
}

/**
 * bnx2x_io_resume - called when traffic can start flowing again
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells us that
 * it's OK to resume normal operation.
 */
static void bnx2x_io_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp = netdev_priv(dev);

	rtnl_lock();

	bnx2x_eeh_recover(bp);

	if (netif_running(dev))
		bnx2x_nic_load(bp, LOAD_NORMAL);

	netif_device_attach(dev);

	rtnl_unlock();
}

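/*
 * Recovery-flow note (added commentary): the PCI error-recovery core
 * drives the three callbacks above in order: error_detected() detaches
 * the netdev and unloads the NIC, slot_reset() re-enables the device
 * after the bus reset, and resume() recovers MCP state, reloads the
 * NIC and reattaches the netdev.
 */
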
static struct pci_error_handlers bnx2x_err_handler = {
	.error_detected = bnx2x_io_error_detected,
	.slot_reset     = bnx2x_io_slot_reset,
	.resume         = bnx2x_io_resume,
};

static struct pci_driver bnx2x_pci_driver = {
	.name        = DRV_MODULE_NAME,
	.id_table    = bnx2x_pci_tbl,
	.probe       = bnx2x_init_one,
	.remove      = __devexit_p(bnx2x_remove_one),
	.suspend     = bnx2x_suspend,
	.resume      = bnx2x_resume,
	.err_handler = &bnx2x_err_handler,
};

static int __init bnx2x_init(void)
{
	int ret;

	bnx2x_wq = create_singlethread_workqueue("bnx2x");
	if (bnx2x_wq == NULL) {
		printk(KERN_ERR PFX "Cannot create workqueue\n");
		return -ENOMEM;
	}

	ret = pci_register_driver(&bnx2x_pci_driver);
	if (ret) {
		printk(KERN_ERR PFX "Cannot register driver\n");
		destroy_workqueue(bnx2x_wq);
	}
	return ret;
}

static void __exit bnx2x_cleanup(void)
{
	pci_unregister_driver(&bnx2x_pci_driver);

	destroy_workqueue(bnx2x_wq);
}

module_init(bnx2x_init);
module_exit(bnx2x_cleanup);