/* bnx2x_main.c: Broadcom Everest network driver.
 *
 * Copyright (c) 2007-2009 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
 * Written by: Eliezer Tamir
 * Based on code from Michael Chan's bnx2 driver
 * UDP CSUM errata workaround by Arik Gendelman
 * Slowpath rework by Vladislav Zolotarov
 * Statistics and Link management by Yitchak Gertner
 *
 */
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/device.h>  /* for dev_info() */
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/dma-mapping.h>
#include <linux/bitops.h>
#include <linux/irq.h>
#include <linux/delay.h>
#include <asm/byteorder.h>
#include <linux/time.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/if_vlan.h>
#include <net/ip.h>
#include <net/tcp.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <linux/workqueue.h>
#include <linux/crc32.h>
#include <linux/crc32c.h>
#include <linux/prefetch.h>
#include <linux/zlib.h>
#include <linux/io.h>

#include "bnx2x.h"
#include "bnx2x_init.h"
#include "bnx2x_init_ops.h"
#include "bnx2x_dump.h"
#define DRV_MODULE_VERSION	"1.48.105-1"
#define DRV_MODULE_RELDATE	"2009/04/22"
#define BNX2X_BC_VER		0x040200

#include <linux/firmware.h>
#include "bnx2x_fw_file_hdr.h"
/* FW files */
#define FW_FILE_PREFIX_E1	"bnx2x-e1-"
#define FW_FILE_PREFIX_E1H	"bnx2x-e1h-"
/* Time in jiffies before concluding the transmitter is hung */
#define TX_TIMEOUT		(5*HZ)

static char version[] __devinitdata =
	"Broadcom NetXtreme II 5771x 10Gigabit Ethernet Driver "
	DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("Eliezer Tamir");
MODULE_DESCRIPTION("Broadcom NetXtreme II BCM57710/57711/57711E Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
static int multi_mode = 1;
module_param(multi_mode, int, 0);
MODULE_PARM_DESC(multi_mode, " Use per-CPU queues");

static int disable_tpa;
module_param(disable_tpa, int, 0);
MODULE_PARM_DESC(disable_tpa, " Disable the TPA (LRO) feature");

static int int_mode;
module_param(int_mode, int, 0);
MODULE_PARM_DESC(int_mode, " Force interrupt mode (1 INT#x; 2 MSI)");

static int poll;
module_param(poll, int, 0);
MODULE_PARM_DESC(poll, " Use polling (for debug)");

static int mrrs = -1;
module_param(mrrs, int, 0);
MODULE_PARM_DESC(mrrs, " Force Max Read Req Size (0..3) (for debug)");

static int debug;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, " Default debug msglevel");

static int load_count[3]; /* 0-common, 1-port0, 2-port1 */

static struct workqueue_struct *bnx2x_wq;
enum bnx2x_board_type {
	BCM57710 = 0,
	BCM57711 = 1,
	BCM57711E = 2,
};

/* indexed by board_type, above */
static struct {
	char *name;
} board_info[] __devinitdata = {
	{ "Broadcom NetXtreme II BCM57710 XGb" },
	{ "Broadcom NetXtreme II BCM57711 XGb" },
	{ "Broadcom NetXtreme II BCM57711E XGb" }
};

static const struct pci_device_id bnx2x_pci_tbl[] = {
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57710,
		PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57710 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57711,
		PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57711 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57711E,
		PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57711E },
	{ 0 }
};

MODULE_DEVICE_TABLE(pci, bnx2x_pci_tbl);
/****************************************************************************
* General service functions
****************************************************************************/

/* used only during init
 * locking is done by mcp
 */
static void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val)
{
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_DATA, val);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);
}
static u32 bnx2x_reg_rd_ind(struct bnx2x *bp, u32 addr)
{
	u32 val;

	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
	pci_read_config_dword(bp->pdev, PCICFG_GRC_DATA, &val);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);

	return val;
}
static const u32 dmae_reg_go_c[] = {
	DMAE_REG_GO_C0, DMAE_REG_GO_C1, DMAE_REG_GO_C2, DMAE_REG_GO_C3,
	DMAE_REG_GO_C4, DMAE_REG_GO_C5, DMAE_REG_GO_C6, DMAE_REG_GO_C7,
	DMAE_REG_GO_C8, DMAE_REG_GO_C9, DMAE_REG_GO_C10, DMAE_REG_GO_C11,
	DMAE_REG_GO_C12, DMAE_REG_GO_C13, DMAE_REG_GO_C14, DMAE_REG_GO_C15
};

/* copy command into DMAE command memory and set DMAE command go */
static void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae,
			    int idx)
{
	u32 cmd_offset;
	int i;

	cmd_offset = (DMAE_REG_CMD_MEM + sizeof(struct dmae_command) * idx);
	for (i = 0; i < (sizeof(struct dmae_command)/4); i++) {
		REG_WR(bp, cmd_offset + i*4, *(((u32 *)dmae) + i));

		DP(BNX2X_MSG_OFF, "DMAE cmd[%d].%d (0x%08x) : 0x%08x\n",
		   idx, i, cmd_offset + i*4, *(((u32 *)dmae) + i));
	}
	REG_WR(bp, dmae_reg_go_c[idx], 1);
}
void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
		      u32 len32)
{
	struct dmae_command *dmae = &bp->init_dmae;
	u32 *wb_comp = bnx2x_sp(bp, wb_comp);
	int cnt = 200;

	if (!bp->dmae_ready) {
		u32 *data = bnx2x_sp(bp, wb_data[0]);

		DP(BNX2X_MSG_OFF, "DMAE is not ready (dst_addr %08x len32 %d)"
		   " using indirect\n", dst_addr, len32);
		bnx2x_init_ind_wr(bp, dst_addr, data, len32);
		return;
	}

	mutex_lock(&bp->dmae_mutex);

	memset(dmae, 0, sizeof(struct dmae_command));

	dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
			DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
			DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
			DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
			DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
			(BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
			(BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
	dmae->src_addr_lo = U64_LO(dma_addr);
	dmae->src_addr_hi = U64_HI(dma_addr);
	dmae->dst_addr_lo = dst_addr >> 2;
	dmae->dst_addr_hi = 0;
	dmae->len = len32;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	DP(BNX2X_MSG_OFF, "DMAE: opcode 0x%08x\n"
	   DP_LEVEL "src_addr [%x:%08x] len [%d *4] "
		    "dst_addr [%x:%08x (%08x)]\n"
	   DP_LEVEL "comp_addr [%x:%08x] comp_val 0x%08x\n",
	   dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
	   dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo, dst_addr,
	   dmae->comp_addr_hi, dmae->comp_addr_lo, dmae->comp_val);
	DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
	   bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
	   bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);

	*wb_comp = 0;

	bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));

	udelay(5);

	while (*wb_comp != DMAE_COMP_VAL) {
		DP(BNX2X_MSG_OFF, "wb_comp 0x%08x\n", *wb_comp);

		if (!cnt) {
			BNX2X_ERR("DMAE timeout!\n");
			break;
		}
		cnt--;
		/* adjust delay for emulation/FPGA */
		if (CHIP_REV_IS_SLOW(bp))
			msleep(100);
		else
			udelay(5);
	}

	mutex_unlock(&bp->dmae_mutex);
}
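
/*
 * Completion model used by bnx2x_write_dmae()/bnx2x_read_dmae(): the DMAE
 * block writes DMAE_COMP_VAL to the wb_comp word in host memory once the
 * copy finishes, and the driver simply polls that word.  With cnt = 200
 * and a 5 us delay per iteration the wait is bounded to roughly 1 ms on
 * real silicon (200 * 5 us); on emulation/FPGA each step is stretched to
 * 100 ms instead.
 */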
void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
{
	struct dmae_command *dmae = &bp->init_dmae;
	u32 *wb_comp = bnx2x_sp(bp, wb_comp);
	int cnt = 200;

	if (!bp->dmae_ready) {
		u32 *data = bnx2x_sp(bp, wb_data[0]);
		int i;

		DP(BNX2X_MSG_OFF, "DMAE is not ready (src_addr %08x len32 %d)"
		   " using indirect\n", src_addr, len32);
		for (i = 0; i < len32; i++)
			data[i] = bnx2x_reg_rd_ind(bp, src_addr + i*4);
		return;
	}

	mutex_lock(&bp->dmae_mutex);

	memset(bnx2x_sp(bp, wb_data[0]), 0, sizeof(u32) * 4);
	memset(dmae, 0, sizeof(struct dmae_command));

	dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
			DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
			DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
			DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
			DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
			(BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
			(BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
	dmae->src_addr_lo = src_addr >> 2;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_data));
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_data));
	dmae->len = len32;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	DP(BNX2X_MSG_OFF, "DMAE: opcode 0x%08x\n"
	   DP_LEVEL "src_addr [%x:%08x] len [%d *4] "
		    "dst_addr [%x:%08x (%08x)]\n"
	   DP_LEVEL "comp_addr [%x:%08x] comp_val 0x%08x\n",
	   dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
	   dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo, src_addr,
	   dmae->comp_addr_hi, dmae->comp_addr_lo, dmae->comp_val);

	*wb_comp = 0;

	bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));

	udelay(5);

	while (*wb_comp != DMAE_COMP_VAL) {

		if (!cnt) {
			BNX2X_ERR("DMAE timeout!\n");
			break;
		}
		cnt--;
		/* adjust delay for emulation/FPGA */
		if (CHIP_REV_IS_SLOW(bp))
			msleep(100);
		else
			udelay(5);
	}
	DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
	   bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
	   bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);

	mutex_unlock(&bp->dmae_mutex);
}
/* used only for slowpath so not inlined */
static void bnx2x_wb_wr(struct bnx2x *bp, int reg, u32 val_hi, u32 val_lo)
{
	u32 wb_write[2];

	wb_write[0] = val_hi;
	wb_write[1] = val_lo;
	REG_WR_DMAE(bp, reg, wb_write, 2);
}

static u64 bnx2x_wb_rd(struct bnx2x *bp, int reg)
{
	u32 wb_data[2];

	REG_RD_DMAE(bp, reg, wb_data, 2);

	return HILO_U64(wb_data[0], wb_data[1]);
}
static int bnx2x_mc_assert(struct bnx2x *bp)
{
	char last_idx;
	int i, rc = 0;
	u32 row0, row1, row2, row3;

	/* XSTORM */
	last_idx = REG_RD8(bp, BAR_XSTRORM_INTMEM +
			   XSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("XSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("XSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* TSTORM */
	last_idx = REG_RD8(bp, BAR_TSTRORM_INTMEM +
			   TSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("TSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("TSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* CSTORM */
	last_idx = REG_RD8(bp, BAR_CSTRORM_INTMEM +
			   CSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("CSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("CSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* USTORM */
	last_idx = REG_RD8(bp, BAR_USTRORM_INTMEM +
			   USTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("USTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("USTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	return rc;
}
static void bnx2x_fw_dump(struct bnx2x *bp)
{
	u32 mark, offset;
	__be32 data[9];
	int word;

	mark = REG_RD(bp, MCP_REG_MCPR_SCRATCH + 0xf104);
	mark = ((mark + 0x3) & ~0x3);
	printk(KERN_ERR PFX "begin fw dump (mark 0x%x)\n" KERN_ERR, mark);

	for (offset = mark - 0x08000000; offset <= 0xF900; offset += 0x8*4) {
		for (word = 0; word < 8; word++)
			data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
						  offset + 4*word));
		data[8] = 0x0;
		printk(KERN_CONT "%s", (char *)data);
	}
	for (offset = 0xF108; offset <= mark - 0x08000000; offset += 0x8*4) {
		for (word = 0; word < 8; word++)
			data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
						  offset + 4*word));
		data[8] = 0x0;
		printk(KERN_CONT "%s", (char *)data);
	}
	printk("\n" KERN_ERR PFX "end of fw dump\n");
}
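
/*
 * Note on the dump layout above: the firmware keeps a circular text trace
 * in the MCP scratchpad.  The word at offset 0xf104 appears to hold the
 * current write mark (a GRC address, hence the 0x08000000 base that is
 * subtracted), so the buffer is printed from the mark to the end of the
 * trace area and then from its start (0xF108) back up to the mark.
 */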
static void bnx2x_panic_dump(struct bnx2x *bp)
{
	int i;
	u16 j, start, end;

	bp->stats_state = STATS_STATE_DISABLED;
	DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");

	BNX2X_ERR("begin crash dump -----------------\n");

	/* Indices */
	/* Common */
	BNX2X_ERR("def_c_idx(%u) def_u_idx(%u) def_x_idx(%u)"
		  " def_t_idx(%u) def_att_idx(%u) attn_state(%u)"
		  " spq_prod_idx(%u)\n",
		  bp->def_c_idx, bp->def_u_idx, bp->def_x_idx, bp->def_t_idx,
		  bp->def_att_idx, bp->attn_state, bp->spq_prod_idx);

	/* Rx */
	for_each_rx_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		BNX2X_ERR("fp%d: rx_bd_prod(%x) rx_bd_cons(%x)"
			  " *rx_bd_cons_sb(%x) rx_comp_prod(%x)"
			  " rx_comp_cons(%x) *rx_cons_sb(%x)\n",
			  i, fp->rx_bd_prod, fp->rx_bd_cons,
			  le16_to_cpu(*fp->rx_bd_cons_sb), fp->rx_comp_prod,
			  fp->rx_comp_cons, le16_to_cpu(*fp->rx_cons_sb));
		BNX2X_ERR("      rx_sge_prod(%x) last_max_sge(%x)"
			  " fp_u_idx(%x) *sb_u_idx(%x)\n",
			  fp->rx_sge_prod, fp->last_max_sge,
			  le16_to_cpu(fp->fp_u_idx),
			  fp->status_blk->u_status_block.status_block_index);
	}

	/* Tx */
	for_each_tx_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];
		struct eth_tx_db_data *hw_prods = fp->hw_tx_prods;

		BNX2X_ERR("fp%d: tx_pkt_prod(%x) tx_pkt_cons(%x)"
			  " tx_bd_prod(%x) tx_bd_cons(%x) *tx_cons_sb(%x)\n",
			  i, fp->tx_pkt_prod, fp->tx_pkt_cons, fp->tx_bd_prod,
			  fp->tx_bd_cons, le16_to_cpu(*fp->tx_cons_sb));
		BNX2X_ERR("      fp_c_idx(%x) *sb_c_idx(%x)"
			  " bd data(%x,%x)\n", le16_to_cpu(fp->fp_c_idx),
			  fp->status_blk->c_status_block.status_block_index,
			  hw_prods->packets_prod, hw_prods->bds_prod);
	}

	/* Rings */
	/* Rx */
	for_each_rx_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10);
		end = RX_BD(le16_to_cpu(*fp->rx_cons_sb) + 503);
		for (j = start; j != end; j = RX_BD(j + 1)) {
			u32 *rx_bd = (u32 *)&fp->rx_desc_ring[j];
			struct sw_rx_bd *sw_bd = &fp->rx_buf_ring[j];

			BNX2X_ERR("fp%d: rx_bd[%x]=[%x:%x] sw_bd=[%p]\n",
				  i, j, rx_bd[1], rx_bd[0], sw_bd->skb);
		}

		start = RX_SGE(fp->rx_sge_prod);
		end = RX_SGE(fp->last_max_sge);
		for (j = start; j != end; j = RX_SGE(j + 1)) {
			u32 *rx_sge = (u32 *)&fp->rx_sge_ring[j];
			struct sw_rx_page *sw_page = &fp->rx_page_ring[j];

			BNX2X_ERR("fp%d: rx_sge[%x]=[%x:%x] sw_page=[%p]\n",
				  i, j, rx_sge[1], rx_sge[0], sw_page->page);
		}

		start = RCQ_BD(fp->rx_comp_cons - 10);
		end = RCQ_BD(fp->rx_comp_cons + 503);
		for (j = start; j != end; j = RCQ_BD(j + 1)) {
			u32 *cqe = (u32 *)&fp->rx_comp_ring[j];

			BNX2X_ERR("fp%d: cqe[%x]=[%x:%x:%x:%x]\n",
				  i, j, cqe[0], cqe[1], cqe[2], cqe[3]);
		}
	}

	/* Tx */
	for_each_tx_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		start = TX_BD(le16_to_cpu(*fp->tx_cons_sb) - 10);
		end = TX_BD(le16_to_cpu(*fp->tx_cons_sb) + 245);
		for (j = start; j != end; j = TX_BD(j + 1)) {
			struct sw_tx_bd *sw_bd = &fp->tx_buf_ring[j];

			BNX2X_ERR("fp%d: packet[%x]=[%p,%x]\n",
				  i, j, sw_bd->skb, sw_bd->first_bd);
		}

		start = TX_BD(fp->tx_bd_cons - 10);
		end = TX_BD(fp->tx_bd_cons + 254);
		for (j = start; j != end; j = TX_BD(j + 1)) {
			u32 *tx_bd = (u32 *)&fp->tx_desc_ring[j];

			BNX2X_ERR("fp%d: tx_bd[%x]=[%x:%x:%x:%x]\n",
				  i, j, tx_bd[0], tx_bd[1], tx_bd[2], tx_bd[3]);
		}
	}

	bnx2x_fw_dump(bp);
	bnx2x_mc_assert(bp);
	BNX2X_ERR("end crash dump -----------------\n");
}
static void bnx2x_int_enable(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
	u32 val = REG_RD(bp, addr);
	int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
	int msi = (bp->flags & USING_MSI_FLAG) ? 1 : 0;

	if (msix) {
		val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			 HC_CONFIG_0_REG_INT_LINE_EN_0);
		val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);
	} else if (msi) {
		val &= ~HC_CONFIG_0_REG_INT_LINE_EN_0;
		val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);
	} else {
		val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_INT_LINE_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);

		DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
		   val, port, addr);

		REG_WR(bp, addr, val);

		val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0;
	}

	DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x) mode %s\n",
	   val, port, addr, (msix ? "MSI-X" : (msi ? "MSI" : "INTx")));

	REG_WR(bp, addr, val);
	/*
	 * Ensure that HC_CONFIG is written before leading/trailing edge config
	 */
	mmiowb();
	barrier();

	if (CHIP_IS_E1H(bp)) {
		/* init leading/trailing edge */
		if (IS_E1HMF(bp)) {
			val = (0xee0f | (1 << (BP_E1HVN(bp) + 4)));
			if (bp->port.pmf)
				/* enable nig and gpio3 attention */
				val |= 0x1100;
		} else
			val = 0xffff;

		REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
		REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
	}

	/* Make sure that interrupts are indeed enabled from here on */
	mmiowb();
}
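
/*
 * In the INTx fallback above the configuration value is written twice:
 * first with the MSI/MSIX enable bit also set, then again with that bit
 * cleared.  This matches the HC programming sequence the hardware seems
 * to expect when reverting to a pure INT#x configuration.
 */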
static void bnx2x_int_disable(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
	u32 val = REG_RD(bp, addr);

	val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
		 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
		 HC_CONFIG_0_REG_INT_LINE_EN_0 |
		 HC_CONFIG_0_REG_ATTN_BIT_EN_0);

	DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
	   val, port, addr);

	/* flush all outstanding writes */
	mmiowb();

	REG_WR(bp, addr, val);
	if (REG_RD(bp, addr) != val)
		BNX2X_ERR("BUG! proper val not read from IGU!\n");
}
static void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw)
{
	int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
	int i, offset;

	/* disable interrupt handling */
	atomic_inc(&bp->intr_sem);
	smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */

	if (disable_hw)
		/* prevent the HW from sending interrupts */
		bnx2x_int_disable(bp);

	/* make sure all ISRs are done */
	if (msix) {
		synchronize_irq(bp->msix_table[0].vector);
		offset = 1;
		for_each_queue(bp, i)
			synchronize_irq(bp->msix_table[i + offset].vector);
	} else
		synchronize_irq(bp->pdev->irq);

	/* make sure sp_task is not running */
	cancel_delayed_work(&bp->sp_task);
	flush_workqueue(bnx2x_wq);
}
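
/*
 * bp->intr_sem acts as a software interrupt gate: every ISR reads it and
 * returns immediately while it is non-zero (see bnx2x_interrupt() and
 * bnx2x_msix_fp_int() below), so after the atomic_inc()/smp_wmb() pair and
 * the synchronize_irq() calls above, no fast-path work can be in flight.
 */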
/* fast path */

/*
 * General service functions
 */

static inline void bnx2x_ack_sb(struct bnx2x *bp, u8 sb_id,
				u8 storm, u16 index, u8 op, u8 update)
{
	u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
		       COMMAND_REG_INT_ACK);
	struct igu_ack_register igu_ack;

	igu_ack.status_block_index = index;
	igu_ack.sb_id_and_flags =
			((sb_id << IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT) |
			 (storm << IGU_ACK_REGISTER_STORM_ID_SHIFT) |
			 (update << IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT) |
			 (op << IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT));

	DP(BNX2X_MSG_OFF, "write 0x%08x to HC addr 0x%x\n",
	   (*(u32 *)&igu_ack), hc_addr);
	REG_WR(bp, hc_addr, (*(u32 *)&igu_ack));

	/* Make sure that ACK is written */
	mmiowb();
	barrier();
}
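
/*
 * The IGU ack above packs the status-block id, storm id, new index and the
 * enable/disable op into a single igu_ack_register image and issues it as
 * one 32-bit write, so the index update and the interrupt-mode change
 * reach the IGU atomically.
 */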
static inline u16 bnx2x_update_fpsb_idx(struct bnx2x_fastpath *fp)
{
	struct host_status_block *fpsb = fp->status_blk;
	u16 rc = 0;

	barrier(); /* status block is written to by the chip */
	if (fp->fp_c_idx != fpsb->c_status_block.status_block_index) {
		fp->fp_c_idx = fpsb->c_status_block.status_block_index;
		rc |= 1;
	}
	if (fp->fp_u_idx != fpsb->u_status_block.status_block_index) {
		fp->fp_u_idx = fpsb->u_status_block.status_block_index;
		rc |= 2;
	}
	return rc;
}
static u16 bnx2x_ack_int(struct bnx2x *bp)
{
	u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
		       COMMAND_REG_SIMD_MASK);
	u32 result = REG_RD(bp, hc_addr);

	DP(BNX2X_MSG_OFF, "read 0x%08x from HC addr 0x%x\n",
	   result, hc_addr);

	return result;
}


/*
 * fast path service functions
 */
static inline int bnx2x_has_tx_work(struct bnx2x_fastpath *fp)
{
	u16 tx_cons_sb;

	/* Tell compiler that status block fields can change */
	barrier();
	tx_cons_sb = le16_to_cpu(*fp->tx_cons_sb);
	return (fp->tx_pkt_cons != tx_cons_sb);
}

static inline int bnx2x_has_tx_work_unload(struct bnx2x_fastpath *fp)
{
	/* Tell compiler that consumer and producer can change */
	barrier();
	return (fp->tx_pkt_prod != fp->tx_pkt_cons);
}
/* free skb in the packet ring at pos idx
 * return idx of last bd freed
 */
static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			     u16 idx)
{
	struct sw_tx_bd *tx_buf = &fp->tx_buf_ring[idx];
	struct eth_tx_bd *tx_bd;
	struct sk_buff *skb = tx_buf->skb;
	u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
	int nbd;

	DP(BNX2X_MSG_OFF, "pkt_idx %d buff @(%p)->skb %p\n",
	   idx, tx_buf, skb);

	/* unmap first bd */
	DP(BNX2X_MSG_OFF, "free bd_idx %d\n", bd_idx);
	tx_bd = &fp->tx_desc_ring[bd_idx];
	pci_unmap_single(bp->pdev, BD_UNMAP_ADDR(tx_bd),
			 BD_UNMAP_LEN(tx_bd), PCI_DMA_TODEVICE);

	nbd = le16_to_cpu(tx_bd->nbd) - 1;
	new_cons = nbd + tx_buf->first_bd;
#ifdef BNX2X_STOP_ON_ERROR
	if (nbd > (MAX_SKB_FRAGS + 2)) {
		BNX2X_ERR("BAD nbd!\n");
		bnx2x_panic();
	}
#endif

	/* Skip a parse bd and the TSO split header bd
	   since they have no mapping */
	if (nbd)
		bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));

	if (tx_bd->bd_flags.as_bitfield & (ETH_TX_BD_FLAGS_IP_CSUM |
					   ETH_TX_BD_FLAGS_TCP_CSUM |
					   ETH_TX_BD_FLAGS_SW_LSO)) {
		if (--nbd)
			bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
		tx_bd = &fp->tx_desc_ring[bd_idx];
		/* is this a TSO split header bd? */
		if (tx_bd->bd_flags.as_bitfield & ETH_TX_BD_FLAGS_SW_LSO) {
			if (--nbd)
				bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
		}
	}

	/* now free frags */
	while (nbd > 0) {

		DP(BNX2X_MSG_OFF, "free frag bd_idx %d\n", bd_idx);
		tx_bd = &fp->tx_desc_ring[bd_idx];
		pci_unmap_page(bp->pdev, BD_UNMAP_ADDR(tx_bd),
			       BD_UNMAP_LEN(tx_bd), PCI_DMA_TODEVICE);
		if (--nbd)
			bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
	}

	/* release skb */
	WARN_ON(!skb);
	dev_kfree_skb_any(skb);
	tx_buf->first_bd = 0;
	tx_buf->skb = NULL;

	return new_cons;
}
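
/*
 * BD chain layout handled by bnx2x_free_tx_pkt(): the first BD maps the
 * linear part of the skb and carries nbd (the total BD count for the
 * packet); it may be followed by a parse BD (for checksum/LSO offload)
 * and a TSO split-header BD, neither of which has a DMA mapping, and then
 * by one mapped BD per page fragment.
 */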
static inline u16 bnx2x_tx_avail(struct bnx2x_fastpath *fp)
{
	s16 used;
	u16 prod;
	u16 cons;

	barrier(); /* Tell compiler that prod and cons can change */
	prod = fp->tx_bd_prod;
	cons = fp->tx_bd_cons;

	/* NUM_TX_RINGS = number of "next-page" entries
	   It will be used as a threshold */
	used = SUB_S16(prod, cons) + (s16)NUM_TX_RINGS;

#ifdef BNX2X_STOP_ON_ERROR
	WARN_ON(used < 0);
	WARN_ON(used > fp->bp->tx_ring_size);
	WARN_ON((fp->bp->tx_ring_size - used) > MAX_TX_AVAIL);
#endif

	return (s16)(fp->bp->tx_ring_size) - used;
}
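
/*
 * Worked example for bnx2x_tx_avail(): the "next page" BDs are always
 * counted as used, so with prod == cons (an empty ring) used equals
 * NUM_TX_RINGS and the function reports tx_ring_size - NUM_TX_RINGS free
 * descriptors, never the raw ring size.
 */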
static void bnx2x_tx_int(struct bnx2x_fastpath *fp)
{
	struct bnx2x *bp = fp->bp;
	struct netdev_queue *txq;
	u16 hw_cons, sw_cons, bd_cons = fp->tx_bd_cons;
	int done = 0;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return;
#endif

	txq = netdev_get_tx_queue(bp->dev, fp->index);
	hw_cons = le16_to_cpu(*fp->tx_cons_sb);
	sw_cons = fp->tx_pkt_cons;

	while (sw_cons != hw_cons) {
		u16 pkt_cons;

		pkt_cons = TX_BD(sw_cons);

		/* prefetch(bp->tx_buf_ring[pkt_cons].skb); */

		DP(NETIF_MSG_TX_DONE, "hw_cons %u sw_cons %u pkt_cons %u\n",
		   hw_cons, sw_cons, pkt_cons);

/*		if (NEXT_TX_IDX(sw_cons) != hw_cons) {
			rmb();
			prefetch(fp->tx_buf_ring[NEXT_TX_IDX(sw_cons)].skb);
		}
*/
		bd_cons = bnx2x_free_tx_pkt(bp, fp, pkt_cons);
		sw_cons++;
		done++;
	}

	fp->tx_pkt_cons = sw_cons;
	fp->tx_bd_cons = bd_cons;

	/* TBD need a thresh? */
	if (unlikely(netif_tx_queue_stopped(txq))) {

		__netif_tx_lock(txq, smp_processor_id());

		/* Need to make the tx_bd_cons update visible to start_xmit()
		 * before checking for netif_tx_queue_stopped(). Without the
		 * memory barrier, there is a small possibility that
		 * start_xmit() will miss it and cause the queue to be stopped
		 * forever.
		 */
		smp_mb();

		if ((netif_tx_queue_stopped(txq)) &&
		    (bp->state == BNX2X_STATE_OPEN) &&
		    (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3))
			netif_tx_wake_queue(txq);

		__netif_tx_unlock(txq);
	}
}
static void bnx2x_sp_event(struct bnx2x_fastpath *fp,
			   union eth_rx_cqe *rr_cqe)
{
	struct bnx2x *bp = fp->bp;
	int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data);
	int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data);

	DP(BNX2X_MSG_SP,
	   "fp %d cid %d got ramrod #%d state is %x type is %d\n",
	   fp->index, cid, command, bp->state,
	   rr_cqe->ramrod_cqe.ramrod_type);

	bp->spq_left++;

	if (fp->index) {
		switch (command | fp->state) {
		case (RAMROD_CMD_ID_ETH_CLIENT_SETUP |
						BNX2X_FP_STATE_OPENING):
			DP(NETIF_MSG_IFUP, "got MULTI[%d] setup ramrod\n",
			   cid);
			fp->state = BNX2X_FP_STATE_OPEN;
			break;

		case (RAMROD_CMD_ID_ETH_HALT | BNX2X_FP_STATE_HALTING):
			DP(NETIF_MSG_IFDOWN, "got MULTI[%d] halt ramrod\n",
			   cid);
			fp->state = BNX2X_FP_STATE_HALTED;
			break;

		default:
			BNX2X_ERR("unexpected MC reply (%d) "
				  "fp->state is %x\n", command, fp->state);
			break;
		}
		mb(); /* force bnx2x_wait_ramrod() to see the change */
		return;
	}

	switch (command | bp->state) {
	case (RAMROD_CMD_ID_ETH_PORT_SETUP | BNX2X_STATE_OPENING_WAIT4_PORT):
		DP(NETIF_MSG_IFUP, "got setup ramrod\n");
		bp->state = BNX2X_STATE_OPEN;
		break;

	case (RAMROD_CMD_ID_ETH_HALT | BNX2X_STATE_CLOSING_WAIT4_HALT):
		DP(NETIF_MSG_IFDOWN, "got halt ramrod\n");
		bp->state = BNX2X_STATE_CLOSING_WAIT4_DELETE;
		fp->state = BNX2X_FP_STATE_HALTED;
		break;

	case (RAMROD_CMD_ID_ETH_CFC_DEL | BNX2X_STATE_CLOSING_WAIT4_HALT):
		DP(NETIF_MSG_IFDOWN, "got delete ramrod for MULTI[%d]\n", cid);
		bnx2x_fp(bp, cid, state) = BNX2X_FP_STATE_CLOSED;
		break;

	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_OPEN):
	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_DIAG):
		DP(NETIF_MSG_IFUP, "got set mac ramrod\n");
		bp->set_mac_pending = 0;
		break;

	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_CLOSING_WAIT4_HALT):
		DP(NETIF_MSG_IFDOWN, "got (un)set mac ramrod\n");
		break;

	default:
		BNX2X_ERR("unexpected MC reply (%d) bp->state is %x\n",
			  command, bp->state);
		break;
	}
	mb(); /* force bnx2x_wait_ramrod() to see the change */
}
static inline void bnx2x_free_rx_sge(struct bnx2x *bp,
				     struct bnx2x_fastpath *fp, u16 index)
{
	struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
	struct page *page = sw_buf->page;
	struct eth_rx_sge *sge = &fp->rx_sge_ring[index];

	/* Skip "next page" elements */
	if (!page)
		return;

	pci_unmap_page(bp->pdev, pci_unmap_addr(sw_buf, mapping),
		       SGE_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);
	__free_pages(page, PAGES_PER_SGE_SHIFT);

	sw_buf->page = NULL;
	sge->addr_hi = 0;
	sge->addr_lo = 0;
}

static inline void bnx2x_free_rx_sge_range(struct bnx2x *bp,
					   struct bnx2x_fastpath *fp, int last)
{
	int i;

	for (i = 0; i < last; i++)
		bnx2x_free_rx_sge(bp, fp, i);
}
static inline int bnx2x_alloc_rx_sge(struct bnx2x *bp,
				     struct bnx2x_fastpath *fp, u16 index)
{
	struct page *page = alloc_pages(GFP_ATOMIC, PAGES_PER_SGE_SHIFT);
	struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
	struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
	dma_addr_t mapping;

	if (unlikely(page == NULL))
		return -ENOMEM;

	mapping = pci_map_page(bp->pdev, page, 0, SGE_PAGE_SIZE*PAGES_PER_SGE,
			       PCI_DMA_FROMDEVICE);
	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
		__free_pages(page, PAGES_PER_SGE_SHIFT);
		return -ENOMEM;
	}

	sw_buf->page = page;
	pci_unmap_addr_set(sw_buf, mapping, mapping);

	sge->addr_hi = cpu_to_le32(U64_HI(mapping));
	sge->addr_lo = cpu_to_le32(U64_LO(mapping));

	return 0;
}
static inline int bnx2x_alloc_rx_skb(struct bnx2x *bp,
				     struct bnx2x_fastpath *fp, u16 index)
{
	struct sk_buff *skb;
	struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
	struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
	dma_addr_t mapping;

	skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
	if (unlikely(skb == NULL))
		return -ENOMEM;

	mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_size,
				 PCI_DMA_FROMDEVICE);
	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
		dev_kfree_skb(skb);
		return -ENOMEM;
	}

	rx_buf->skb = skb;
	pci_unmap_addr_set(rx_buf, mapping, mapping);

	rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

	return 0;
}
/* note that we are not allocating a new skb,
 * we are just moving one from cons to prod
 * we are not creating a new mapping,
 * so there is no need to check for dma_mapping_error().
 */
static void bnx2x_reuse_rx_skb(struct bnx2x_fastpath *fp,
			       struct sk_buff *skb, u16 cons, u16 prod)
{
	struct bnx2x *bp = fp->bp;
	struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
	struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
	struct eth_rx_bd *cons_bd = &fp->rx_desc_ring[cons];
	struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];

	pci_dma_sync_single_for_device(bp->pdev,
				       pci_unmap_addr(cons_rx_buf, mapping),
				       RX_COPY_THRESH, PCI_DMA_FROMDEVICE);

	prod_rx_buf->skb = cons_rx_buf->skb;
	pci_unmap_addr_set(prod_rx_buf, mapping,
			   pci_unmap_addr(cons_rx_buf, mapping));
	*prod_bd = *cons_bd;
}
static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
					     u16 idx)
{
	u16 last_max = fp->last_max_sge;

	if (SUB_S16(idx, last_max) > 0)
		fp->last_max_sge = idx;
}

static void bnx2x_clear_sge_mask_next_elems(struct bnx2x_fastpath *fp)
{
	int i, j;

	for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
		int idx = RX_SGE_CNT * i - 1;

		for (j = 0; j < 2; j++) {
			SGE_MASK_CLEAR_BIT(fp, idx);
			idx--;
		}
	}
}
static void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
				  struct eth_fast_path_rx_cqe *fp_cqe)
{
	struct bnx2x *bp = fp->bp;
	u16 sge_len = SGE_PAGE_ALIGN(le16_to_cpu(fp_cqe->pkt_len) -
				     le16_to_cpu(fp_cqe->len_on_bd)) >>
		      SGE_PAGE_SHIFT;
	u16 last_max, last_elem, first_elem;
	u16 delta = 0;
	u16 i;

	if (!sge_len)
		return;

	/* First mark all used pages */
	for (i = 0; i < sge_len; i++)
		SGE_MASK_CLEAR_BIT(fp, RX_SGE(le16_to_cpu(fp_cqe->sgl[i])));

	DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
	   sge_len - 1, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));

	/* Here we assume that the last SGE index is the biggest */
	prefetch((void *)(fp->sge_mask));
	bnx2x_update_last_max_sge(fp, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));

	last_max = RX_SGE(fp->last_max_sge);
	last_elem = last_max >> RX_SGE_MASK_ELEM_SHIFT;
	first_elem = RX_SGE(fp->rx_sge_prod) >> RX_SGE_MASK_ELEM_SHIFT;

	/* If ring is not full */
	if (last_elem + 1 != first_elem)
		last_elem++;

	/* Now update the prod */
	for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
		if (likely(fp->sge_mask[i]))
			break;

		fp->sge_mask[i] = RX_SGE_MASK_ELEM_ONE_MASK;
		delta += RX_SGE_MASK_ELEM_SZ;
	}

	if (delta > 0) {
		fp->rx_sge_prod += delta;
		/* clear page-end entries */
		bnx2x_clear_sge_mask_next_elems(fp);
	}

	DP(NETIF_MSG_RX_STATUS,
	   "fp->last_max_sge = %d fp->rx_sge_prod = %d\n",
	   fp->last_max_sge, fp->rx_sge_prod);
}
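
/*
 * The sge_mask bookkeeping above: each SGE entry owns one bit, set while
 * the entry has a buffer and cleared once the firmware consumes it.  The
 * producer is only advanced across 64-bit mask words that have gone
 * completely to zero; each such word is refilled to all ones and the
 * producer jumps forward by RX_SGE_MASK_ELEM_SZ entries, after which the
 * "next page" bits are cleared again.
 */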
static inline void bnx2x_init_sge_ring_bit_mask(struct bnx2x_fastpath *fp)
{
	/* Set the mask to all 1-s: it's faster to compare to 0 than to 0xf-s */
	memset(fp->sge_mask, 0xff,
	       (NUM_RX_SGE >> RX_SGE_MASK_ELEM_SHIFT)*sizeof(u64));

	/* Clear the two last indices in the page to 1:
	   these are the indices that correspond to the "next" element,
	   hence will never be indicated and should be removed from
	   the calculations. */
	bnx2x_clear_sge_mask_next_elems(fp);
}
static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
			    struct sk_buff *skb, u16 cons, u16 prod)
{
	struct bnx2x *bp = fp->bp;
	struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
	struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
	struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
	dma_addr_t mapping;

	/* move empty skb from pool to prod and map it */
	prod_rx_buf->skb = fp->tpa_pool[queue].skb;
	mapping = pci_map_single(bp->pdev, fp->tpa_pool[queue].skb->data,
				 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
	pci_unmap_addr_set(prod_rx_buf, mapping, mapping);

	/* move partial skb from cons to pool (don't unmap yet) */
	fp->tpa_pool[queue] = *cons_rx_buf;

	/* mark bin state as start - print error if current state != stop */
	if (fp->tpa_state[queue] != BNX2X_TPA_STOP)
		BNX2X_ERR("start of bin not in stop [%d]\n", queue);

	fp->tpa_state[queue] = BNX2X_TPA_START;

	/* point prod_bd to new skb */
	prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

#ifdef BNX2X_STOP_ON_ERROR
	fp->tpa_queue_used |= (1 << queue);
#ifdef __powerpc64__
	DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n",
#else
	DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
#endif
	   fp->tpa_queue_used);
#endif
}
static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			       struct sk_buff *skb,
			       struct eth_fast_path_rx_cqe *fp_cqe,
			       u16 cqe_idx)
{
	struct sw_rx_page *rx_pg, old_rx_pg;
	u16 len_on_bd = le16_to_cpu(fp_cqe->len_on_bd);
	u32 i, frag_len, frag_size, pages;
	int err;
	int j;

	frag_size = le16_to_cpu(fp_cqe->pkt_len) - len_on_bd;
	pages = SGE_PAGE_ALIGN(frag_size) >> SGE_PAGE_SHIFT;

	/* This is needed in order to enable forwarding support */
	if (frag_size)
		skb_shinfo(skb)->gso_size = min((u32)SGE_PAGE_SIZE,
					       max(frag_size, (u32)len_on_bd));

#ifdef BNX2X_STOP_ON_ERROR
	if (pages >
	    min((u32)8, (u32)MAX_SKB_FRAGS) * SGE_PAGE_SIZE * PAGES_PER_SGE) {
		BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
			  pages, cqe_idx);
		BNX2X_ERR("fp_cqe->pkt_len = %d fp_cqe->len_on_bd = %d\n",
			  fp_cqe->pkt_len, len_on_bd);
		bnx2x_panic();
		return -EINVAL;
	}
#endif

	/* Run through the SGL and compose the fragmented skb */
	for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
		u16 sge_idx = RX_SGE(le16_to_cpu(fp_cqe->sgl[j]));

		/* FW gives the indices of the SGE as if the ring is an array
		   (meaning that "next" element will consume 2 indices) */
		frag_len = min(frag_size, (u32)(SGE_PAGE_SIZE*PAGES_PER_SGE));
		rx_pg = &fp->rx_page_ring[sge_idx];
		old_rx_pg = *rx_pg;

		/* If we fail to allocate a substitute page, we simply stop
		   where we are and drop the whole packet */
		err = bnx2x_alloc_rx_sge(bp, fp, sge_idx);
		if (unlikely(err)) {
			fp->eth_q_stats.rx_skb_alloc_failed++;
			return err;
		}

		/* Unmap the page as we are going to pass it to the stack */
		pci_unmap_page(bp->pdev, pci_unmap_addr(&old_rx_pg, mapping),
			       SGE_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);

		/* Add one frag and update the appropriate fields in the skb */
		skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);

		skb->data_len += frag_len;
		skb->truesize += frag_len;
		skb->len += frag_len;

		frag_size -= frag_len;
	}

	return 0;
}
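
/*
 * gso_size is set above even though the skb is being received: a forwarded
 * LRO-aggregated skb must be re-segmented on output, and clamping the MSS
 * to min(SGE_PAGE_SIZE, max(frag_size, len_on_bd)) presumably keeps every
 * resulting segment within one SGE page while never reporting a zero MSS.
 */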
static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			   u16 queue, int pad, int len, union eth_rx_cqe *cqe,
			   u16 cqe_idx)
{
	struct sw_rx_bd *rx_buf = &fp->tpa_pool[queue];
	struct sk_buff *skb = rx_buf->skb;
	/* alloc new skb */
	struct sk_buff *new_skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);

	/* Unmap skb in the pool anyway, as we are going to change
	   pool entry status to BNX2X_TPA_STOP even if new skb allocation
	   fails. */
	pci_unmap_single(bp->pdev, pci_unmap_addr(rx_buf, mapping),
			 bp->rx_buf_size, PCI_DMA_FROMDEVICE);

	if (likely(new_skb)) {
		/* fix ip xsum and give it to the stack */
		/* (no need to map the new skb) */
#ifdef BCM_VLAN
		int is_vlan_cqe =
			(le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
			 PARSING_FLAGS_VLAN);
		int is_not_hwaccel_vlan_cqe =
			(is_vlan_cqe && (!(bp->flags & HW_VLAN_RX_FLAG)));
#endif

		prefetch(skb);
		prefetch(((char *)(skb)) + 128);

#ifdef BNX2X_STOP_ON_ERROR
		if (pad + len > bp->rx_buf_size) {
			BNX2X_ERR("skb_put is about to fail... "
				  "pad %d len %d rx_buf_size %d\n",
				  pad, len, bp->rx_buf_size);
			bnx2x_panic();
			return;
		}
#endif

		skb_reserve(skb, pad);
		skb_put(skb, len);

		skb->protocol = eth_type_trans(skb, bp->dev);
		skb->ip_summed = CHECKSUM_UNNECESSARY;

		{
			struct iphdr *iph;

			iph = (struct iphdr *)skb->data;
#ifdef BCM_VLAN
			/* If there is no Rx VLAN offloading -
			   take VLAN tag into an account */
			if (unlikely(is_not_hwaccel_vlan_cqe))
				iph = (struct iphdr *)((u8 *)iph + VLAN_HLEN);
#endif
			iph->check = 0;
			iph->check = ip_fast_csum((u8 *)iph, iph->ihl);
		}

		if (!bnx2x_fill_frag_skb(bp, fp, skb,
					 &cqe->fast_path_cqe, cqe_idx)) {
#ifdef BCM_VLAN
			if ((bp->vlgrp != NULL) && is_vlan_cqe &&
			    (!is_not_hwaccel_vlan_cqe))
				vlan_hwaccel_receive_skb(skb, bp->vlgrp,
						le16_to_cpu(cqe->fast_path_cqe.
							    vlan_tag));
			else
#endif
				netif_receive_skb(skb);
		} else {
			DP(NETIF_MSG_RX_STATUS, "Failed to allocate new pages"
			   " - dropping packet!\n");
			dev_kfree_skb(skb);
		}

		/* put new skb in bin */
		fp->tpa_pool[queue].skb = new_skb;

	} else {
		/* else drop the packet and keep the buffer in the bin */
		DP(NETIF_MSG_RX_STATUS,
		   "Failed to allocate new skb - dropping packet!\n");
		fp->eth_q_stats.rx_skb_alloc_failed++;
	}

	fp->tpa_state[queue] = BNX2X_TPA_STOP;
}
static inline void bnx2x_update_rx_prod(struct bnx2x *bp,
					struct bnx2x_fastpath *fp,
					u16 bd_prod, u16 rx_comp_prod,
					u16 rx_sge_prod)
{
	struct ustorm_eth_rx_producers rx_prods = {0};
	int i;

	/* Update producers */
	rx_prods.bd_prod = bd_prod;
	rx_prods.cqe_prod = rx_comp_prod;
	rx_prods.sge_prod = rx_sge_prod;

	/*
	 * Make sure that the BD and SGE data is updated before updating the
	 * producers since FW might read the BD/SGE right after the producer
	 * is updated.
	 * This is only applicable for weak-ordered memory model archs such
	 * as IA-64. The following barrier is also mandatory since FW
	 * assumes BDs must have buffers.
	 */
	wmb();

	for (i = 0; i < sizeof(struct ustorm_eth_rx_producers)/4; i++)
		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_RX_PRODS_OFFSET(BP_PORT(bp), fp->cl_id) + i*4,
		       ((u32 *)&rx_prods)[i]);

	mmiowb(); /* keep prod updates ordered */

	DP(NETIF_MSG_RX_STATUS,
	   "queue[%d]: wrote bd_prod %u cqe_prod %u sge_prod %u\n",
	   fp->index, bd_prod, rx_comp_prod, rx_sge_prod);
}
static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
{
	struct bnx2x *bp = fp->bp;
	u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
	u16 hw_comp_cons, sw_comp_cons, sw_comp_prod;
	int rx_pkt = 0;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return 0;
#endif

	/* CQ "next element" is of the size of the regular element,
	   that's why it's ok here */
	hw_comp_cons = le16_to_cpu(*fp->rx_cons_sb);
	if ((hw_comp_cons & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
		hw_comp_cons++;

	bd_cons = fp->rx_bd_cons;
	bd_prod = fp->rx_bd_prod;
	bd_prod_fw = bd_prod;
	sw_comp_cons = fp->rx_comp_cons;
	sw_comp_prod = fp->rx_comp_prod;

	/* Memory barrier necessary as speculative reads of the rx
	 * buffer can be ahead of the index in the status block
	 */
	rmb();

	DP(NETIF_MSG_RX_STATUS,
	   "queue[%d]: hw_comp_cons %u sw_comp_cons %u\n",
	   fp->index, hw_comp_cons, sw_comp_cons);

	while (sw_comp_cons != hw_comp_cons) {
		struct sw_rx_bd *rx_buf = NULL;
		struct sk_buff *skb;
		union eth_rx_cqe *cqe;
		u8 cqe_fp_flags;
		u16 len, pad;

		comp_ring_cons = RCQ_BD(sw_comp_cons);
		bd_prod = RX_BD(bd_prod);
		bd_cons = RX_BD(bd_cons);

		cqe = &fp->rx_comp_ring[comp_ring_cons];
		cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;

		DP(NETIF_MSG_RX_STATUS, "CQE type %x err %x status %x"
		   " queue %x vlan %x len %u\n", CQE_TYPE(cqe_fp_flags),
		   cqe_fp_flags, cqe->fast_path_cqe.status_flags,
		   le32_to_cpu(cqe->fast_path_cqe.rss_hash_result),
		   le16_to_cpu(cqe->fast_path_cqe.vlan_tag),
		   le16_to_cpu(cqe->fast_path_cqe.pkt_len));

		/* is this a slowpath msg? */
		if (unlikely(CQE_TYPE(cqe_fp_flags))) {
			bnx2x_sp_event(fp, cqe);
			goto next_cqe;

		/* this is an rx packet */
		} else {
			rx_buf = &fp->rx_buf_ring[bd_cons];
			skb = rx_buf->skb;
			len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
			pad = cqe->fast_path_cqe.placement_offset;

			/* If CQE is marked both TPA_START and TPA_END
			   it is a non-TPA CQE */
			if ((!fp->disable_tpa) &&
			    (TPA_TYPE(cqe_fp_flags) !=
			     (TPA_TYPE_START | TPA_TYPE_END))) {
				u16 queue = cqe->fast_path_cqe.queue_index;

				if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_START) {
					DP(NETIF_MSG_RX_STATUS,
					   "calling tpa_start on queue %d\n",
					   queue);

					bnx2x_tpa_start(fp, queue, skb,
							bd_cons, bd_prod);
					goto next_rx;
				}

				if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_END) {
					DP(NETIF_MSG_RX_STATUS,
					   "calling tpa_stop on queue %d\n",
					   queue);

					if (!BNX2X_RX_SUM_FIX(cqe))
						BNX2X_ERR("STOP on non-TCP "
							  "data\n");

					/* This is a size of the linear data
					   on this skb */
					len = le16_to_cpu(cqe->fast_path_cqe.
								len_on_bd);
					bnx2x_tpa_stop(bp, fp, queue, pad,
						    len, cqe, comp_ring_cons);
#ifdef BNX2X_STOP_ON_ERROR
					if (bp->panic)
						return 0;
#endif

					bnx2x_update_sge_prod(fp,
							&cqe->fast_path_cqe);
					goto next_cqe;
				}
			}

			pci_dma_sync_single_for_device(bp->pdev,
					pci_unmap_addr(rx_buf, mapping),
						       pad + RX_COPY_THRESH,
						       PCI_DMA_FROMDEVICE);
			prefetch(skb);
			prefetch(((char *)(skb)) + 128);

			/* is this an error packet? */
			if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
				DP(NETIF_MSG_RX_ERR,
				   "ERROR flags %x rx packet %u\n",
				   cqe_fp_flags, sw_comp_cons);
				fp->eth_q_stats.rx_err_discard_pkt++;
				goto reuse_rx;
			}

			/* Since we don't have a jumbo ring
			 * copy small packets if mtu > 1500
			 */
			if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
			    (len <= RX_COPY_THRESH)) {
				struct sk_buff *new_skb;

				new_skb = netdev_alloc_skb(bp->dev,
							   len + pad);
				if (new_skb == NULL) {
					DP(NETIF_MSG_RX_ERR,
					   "ERROR packet dropped "
					   "because of alloc failure\n");
					fp->eth_q_stats.rx_skb_alloc_failed++;
					goto reuse_rx;
				}

				/* aligned copy */
				skb_copy_from_linear_data_offset(skb, pad,
						    new_skb->data + pad, len);
				skb_reserve(new_skb, pad);
				skb_put(new_skb, len);

				bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);

				skb = new_skb;

			} else if (bnx2x_alloc_rx_skb(bp, fp, bd_prod) == 0) {
				pci_unmap_single(bp->pdev,
					pci_unmap_addr(rx_buf, mapping),
						 bp->rx_buf_size,
						 PCI_DMA_FROMDEVICE);
				skb_reserve(skb, pad);
				skb_put(skb, len);

			} else {
				DP(NETIF_MSG_RX_ERR,
				   "ERROR packet dropped because "
				   "of alloc failure\n");
				fp->eth_q_stats.rx_skb_alloc_failed++;
reuse_rx:
				bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
				goto next_rx;
			}

			skb->protocol = eth_type_trans(skb, bp->dev);

			skb->ip_summed = CHECKSUM_NONE;
			if (bp->rx_csum) {
				if (likely(BNX2X_RX_CSUM_OK(cqe)))
					skb->ip_summed = CHECKSUM_UNNECESSARY;
				else
					fp->eth_q_stats.hw_csum_err++;
			}
		}

		skb_record_rx_queue(skb, fp->index);
#ifdef BCM_VLAN
		if ((bp->vlgrp != NULL) && (bp->flags & HW_VLAN_RX_FLAG) &&
		    (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
		     PARSING_FLAGS_VLAN))
			vlan_hwaccel_receive_skb(skb, bp->vlgrp,
				le16_to_cpu(cqe->fast_path_cqe.vlan_tag));
		else
#endif
			netif_receive_skb(skb);

next_rx:
		rx_buf->skb = NULL;

		bd_cons = NEXT_RX_IDX(bd_cons);
		bd_prod = NEXT_RX_IDX(bd_prod);
		bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
		rx_pkt++;
next_cqe:
		sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
		sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);

		if (rx_pkt == budget)
			break;
	} /* while */

	fp->rx_bd_cons = bd_cons;
	fp->rx_bd_prod = bd_prod_fw;
	fp->rx_comp_cons = sw_comp_cons;
	fp->rx_comp_prod = sw_comp_prod;

	/* Update producers */
	bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
			     fp->rx_sge_prod);

	fp->rx_pkt += rx_pkt;
	fp->rx_calls++;

	return rx_pkt;
}
static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
{
	struct bnx2x_fastpath *fp = fp_cookie;
	struct bnx2x *bp = fp->bp;
	int index = fp->index;

	/* Return here if interrupt is disabled */
	if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
		DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
		return IRQ_HANDLED;
	}

	DP(BNX2X_MSG_FP, "got an MSI-X interrupt on IDX:SB [%d:%d]\n",
	   index, fp->sb_id);
	bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return IRQ_HANDLED;
#endif

	prefetch(fp->rx_cons_sb);
	prefetch(fp->tx_cons_sb);
	prefetch(&fp->status_blk->c_status_block.status_block_index);
	prefetch(&fp->status_blk->u_status_block.status_block_index);

	napi_schedule(&bnx2x_fp(bp, index, napi));

	return IRQ_HANDLED;
}
static irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
{
	struct bnx2x *bp = netdev_priv(dev_instance);
	u16 status = bnx2x_ack_int(bp);
	u16 mask;

	/* Return here if interrupt is shared and it's not for us */
	if (unlikely(status == 0)) {
		DP(NETIF_MSG_INTR, "not our interrupt!\n");
		return IRQ_NONE;
	}
	DP(NETIF_MSG_INTR, "got an interrupt status 0x%x\n", status);

	/* Return here if interrupt is disabled */
	if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
		DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
		return IRQ_HANDLED;
	}

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return IRQ_HANDLED;
#endif

	mask = 0x2 << bp->fp[0].sb_id;
	if (status & mask) {
		struct bnx2x_fastpath *fp = &bp->fp[0];

		prefetch(fp->rx_cons_sb);
		prefetch(fp->tx_cons_sb);
		prefetch(&fp->status_blk->c_status_block.status_block_index);
		prefetch(&fp->status_blk->u_status_block.status_block_index);

		napi_schedule(&bnx2x_fp(bp, 0, napi));

		status &= ~mask;
	}

	if (unlikely(status & 0x1)) {
		queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);

		status &= ~0x1;
		if (!status)
			return IRQ_HANDLED;
	}

	if (status)
		DP(NETIF_MSG_INTR, "got an unknown interrupt! (status %u)\n",
		   status);

	return IRQ_HANDLED;
}

/* end of fast path */

static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event);

/* Link */

/*
 * General service functions
 */
static int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource)
{
	u32 lock_status;
	u32 resource_bit = (1 << resource);
	int func = BP_FUNC(bp);
	u32 hw_lock_control_reg;
	int cnt;

	/* Validating that the resource is within range */
	if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
		DP(NETIF_MSG_HW,
		   "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
		   resource, HW_LOCK_MAX_RESOURCE_VALUE);
		return -EINVAL;
	}

	if (func <= 5) {
		hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
	} else {
		hw_lock_control_reg =
				(MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
	}

	/* Validating that the resource is not already taken */
	lock_status = REG_RD(bp, hw_lock_control_reg);
	if (lock_status & resource_bit) {
		DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n",
		   lock_status, resource_bit);
		return -EEXIST;
	}

	/* Try for 5 second every 5ms */
	for (cnt = 0; cnt < 1000; cnt++) {
		/* Try to acquire the lock */
		REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
		lock_status = REG_RD(bp, hw_lock_control_reg);
		if (lock_status & resource_bit)
			return 0;

		msleep(5);
	}
	DP(NETIF_MSG_HW, "Timeout\n");
	return -EAGAIN;
}
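
/*
 * The HW lock registers come in set/clear pairs: writing the resource bit
 * at hw_lock_control_reg + 4 requests the lock, and reading the base
 * register back shows who holds it, so a successful acquire is confirmed
 * by seeing the bit set.  1000 attempts at 5 ms each give the advertised
 * 5 second timeout.
 */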
static int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource)
{
	u32 lock_status;
	u32 resource_bit = (1 << resource);
	int func = BP_FUNC(bp);
	u32 hw_lock_control_reg;

	/* Validating that the resource is within range */
	if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
		DP(NETIF_MSG_HW,
		   "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
		   resource, HW_LOCK_MAX_RESOURCE_VALUE);
		return -EINVAL;
	}

	if (func <= 5) {
		hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
	} else {
		hw_lock_control_reg =
				(MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
	}

	/* Validating that the resource is currently taken */
	lock_status = REG_RD(bp, hw_lock_control_reg);
	if (!(lock_status & resource_bit)) {
		DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n",
		   lock_status, resource_bit);
		return -EFAULT;
	}

	REG_WR(bp, hw_lock_control_reg, resource_bit);
	return 0;
}
/* HW Lock for shared dual port PHYs */
static void bnx2x_acquire_phy_lock(struct bnx2x *bp)
{
	mutex_lock(&bp->port.phy_mutex);

	if (bp->port.need_hw_lock)
		bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
}

static void bnx2x_release_phy_lock(struct bnx2x *bp)
{
	if (bp->port.need_hw_lock)
		bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);

	mutex_unlock(&bp->port.phy_mutex);
}
int bnx2x_get_gpio(struct bnx2x *bp, int gpio_num, u8 port)
{
	/* The GPIO should be swapped if swap register is set and active */
	int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
			 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
	int gpio_shift = gpio_num +
			(gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
	u32 gpio_mask = (1 << gpio_shift);
	u32 gpio_reg;
	int value;

	if (gpio_num > MISC_REGISTERS_GPIO_3) {
		BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
		return -EINVAL;
	}

	/* read GPIO value */
	gpio_reg = REG_RD(bp, MISC_REG_GPIO);

	/* get the requested pin value */
	if ((gpio_reg & gpio_mask) == gpio_mask)
		value = 1;
	else
		value = 0;

	DP(NETIF_MSG_LINK, "pin %d value 0x%x\n", gpio_num, value);

	return value;
}
int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
{
	/* The GPIO should be swapped if swap register is set and active */
	int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
			 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
	int gpio_shift = gpio_num +
			(gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
	u32 gpio_mask = (1 << gpio_shift);
	u32 gpio_reg;

	if (gpio_num > MISC_REGISTERS_GPIO_3) {
		BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
		return -EINVAL;
	}

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
	/* read GPIO and mask except the float bits */
	gpio_reg = (REG_RD(bp, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT);

	switch (mode) {
	case MISC_REGISTERS_GPIO_OUTPUT_LOW:
		DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output low\n",
		   gpio_num, gpio_shift);
		/* clear FLOAT and set CLR */
		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
		gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS);
		break;

	case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
		DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output high\n",
		   gpio_num, gpio_shift);
		/* clear FLOAT and set SET */
		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
		gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_SET_POS);
		break;

	case MISC_REGISTERS_GPIO_INPUT_HI_Z:
		DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> input\n",
		   gpio_num, gpio_shift);
		/* set FLOAT */
		gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
		break;

	default:
		break;
	}

	REG_WR(bp, MISC_REG_GPIO, gpio_reg);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);

	return 0;
}
int bnx2x_set_gpio_int(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
{
	/* The GPIO should be swapped if swap register is set and active */
	int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
			 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
	int gpio_shift = gpio_num +
			(gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
	u32 gpio_mask = (1 << gpio_shift);
	u32 gpio_reg;

	if (gpio_num > MISC_REGISTERS_GPIO_3) {
		BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
		return -EINVAL;
	}

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
	/* read GPIO int */
	gpio_reg = REG_RD(bp, MISC_REG_GPIO_INT);

	switch (mode) {
	case MISC_REGISTERS_GPIO_INT_OUTPUT_CLR:
		DP(NETIF_MSG_LINK, "Clear GPIO INT %d (shift %d) -> "
				   "output low\n", gpio_num, gpio_shift);
		/* clear SET and set CLR */
		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
		gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
		break;

	case MISC_REGISTERS_GPIO_INT_OUTPUT_SET:
		DP(NETIF_MSG_LINK, "Set GPIO INT %d (shift %d) -> "
				   "output high\n", gpio_num, gpio_shift);
		/* clear CLR and set SET */
		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
		gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
		break;

	default:
		break;
	}

	REG_WR(bp, MISC_REG_GPIO_INT, gpio_reg);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);

	return 0;
}
static int bnx2x_set_spio(struct bnx2x *bp, int spio_num, u32 mode)
{
	u32 spio_mask = (1 << spio_num);
	u32 spio_reg;

	if ((spio_num < MISC_REGISTERS_SPIO_4) ||
	    (spio_num > MISC_REGISTERS_SPIO_7)) {
		BNX2X_ERR("Invalid SPIO %d\n", spio_num);
		return -EINVAL;
	}

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
	/* read SPIO and mask except the float bits */
	spio_reg = (REG_RD(bp, MISC_REG_SPIO) & MISC_REGISTERS_SPIO_FLOAT);

	switch (mode) {
	case MISC_REGISTERS_SPIO_OUTPUT_LOW:
		DP(NETIF_MSG_LINK, "Set SPIO %d -> output low\n", spio_num);
		/* clear FLOAT and set CLR */
		spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
		spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_CLR_POS);
		break;

	case MISC_REGISTERS_SPIO_OUTPUT_HIGH:
		DP(NETIF_MSG_LINK, "Set SPIO %d -> output high\n", spio_num);
		/* clear FLOAT and set SET */
		spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
		spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_SET_POS);
		break;

	case MISC_REGISTERS_SPIO_INPUT_HI_Z:
		DP(NETIF_MSG_LINK, "Set SPIO %d -> input\n", spio_num);
		/* set FLOAT */
		spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
		break;

	default:
		break;
	}

	REG_WR(bp, MISC_REG_SPIO, spio_reg);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);

	return 0;
}
static void bnx2x_calc_fc_adv(struct bnx2x *bp)
{
	switch (bp->link_vars.ieee_fc &
		MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK) {
	case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE:
		bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
					  ADVERTISED_Pause);
		break;

	case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH:
		bp->port.advertising |= (ADVERTISED_Asym_Pause |
					 ADVERTISED_Pause);
		break;

	case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC:
		bp->port.advertising |= ADVERTISED_Asym_Pause;
		break;

	default:
		bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
					  ADVERTISED_Pause);
		break;
	}
}
static void bnx2x_link_report(struct bnx2x *bp)
{
	if (bp->link_vars.link_up) {
		if (bp->state == BNX2X_STATE_OPEN)
			netif_carrier_on(bp->dev);
		printk(KERN_INFO PFX "%s NIC Link is Up, ", bp->dev->name);

		printk("%d Mbps ", bp->link_vars.line_speed);

		if (bp->link_vars.duplex == DUPLEX_FULL)
			printk("full duplex");
		else
			printk("half duplex");

		if (bp->link_vars.flow_ctrl != BNX2X_FLOW_CTRL_NONE) {
			if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) {
				printk(", receive ");
				if (bp->link_vars.flow_ctrl &
				    BNX2X_FLOW_CTRL_TX)
					printk("& transmit ");
			} else {
				printk(", transmit ");
			}
			printk("flow control ON");
		}
		printk("\n");

	} else { /* link_down */
		netif_carrier_off(bp->dev);
		printk(KERN_ERR PFX "%s NIC Link is Down\n", bp->dev->name);
	}
}
static u8 bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode)
{
	if (!BP_NOMCP(bp)) {
		u8 rc;

		/* Initialize link parameters structure variables */
		/* It is recommended to turn off RX FC for jumbo frames
		   for better performance */
		if (IS_E1HMF(bp))
			bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;
		else if (bp->dev->mtu > 5000)
			bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_TX;
		else
			bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;

		bnx2x_acquire_phy_lock(bp);

		if (load_mode == LOAD_DIAG)
			bp->link_params.loopback_mode = LOOPBACK_XGXS_10;

		rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars);

		bnx2x_release_phy_lock(bp);

		bnx2x_calc_fc_adv(bp);

		if (CHIP_REV_IS_SLOW(bp) && bp->link_vars.link_up) {
			bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
			bnx2x_link_report(bp);
		}

		return rc;
	}
	BNX2X_ERR("Bootcode is missing - can not initialize link\n");
	return -EINVAL;
}
static void bnx2x_link_set(struct bnx2x *bp)
{
	if (!BP_NOMCP(bp)) {
		bnx2x_acquire_phy_lock(bp);
		bnx2x_phy_init(&bp->link_params, &bp->link_vars);
		bnx2x_release_phy_lock(bp);

		bnx2x_calc_fc_adv(bp);
	} else
		BNX2X_ERR("Bootcode is missing - can not set link\n");
}

static void bnx2x__link_reset(struct bnx2x *bp)
{
	if (!BP_NOMCP(bp)) {
		bnx2x_acquire_phy_lock(bp);
		bnx2x_link_reset(&bp->link_params, &bp->link_vars, 1);
		bnx2x_release_phy_lock(bp);
	} else
		BNX2X_ERR("Bootcode is missing - can not reset link\n");
}

static u8 bnx2x_link_test(struct bnx2x *bp)
{
	u8 rc;

	bnx2x_acquire_phy_lock(bp);
	rc = bnx2x_test_link(&bp->link_params, &bp->link_vars);
	bnx2x_release_phy_lock(bp);

	return rc;
}
static void bnx2x_init_port_minmax(struct bnx2x *bp)
{
	u32 r_param = bp->link_vars.line_speed / 8;
	u32 fair_periodic_timeout_usec;
	u32 t_fair;

	memset(&(bp->cmng.rs_vars), 0,
	       sizeof(struct rate_shaping_vars_per_port));
	memset(&(bp->cmng.fair_vars), 0, sizeof(struct fairness_vars_per_port));

	/* 100 usec in SDM ticks = 25 since each tick is 4 usec */
	bp->cmng.rs_vars.rs_periodic_timeout = RS_PERIODIC_TIMEOUT_USEC / 4;

	/* this is the threshold below which no timer arming will occur
	   1.25 coefficient is for the threshold to be a little bigger
	   than the real time, to compensate for timer inaccuracy */
	bp->cmng.rs_vars.rs_threshold =
				(RS_PERIODIC_TIMEOUT_USEC * r_param * 5) / 4;

	/* resolution of fairness timer */
	fair_periodic_timeout_usec = QM_ARB_BYTES / r_param;
	/* for 10G it is 1000usec. for 1G it is 10000usec. */
	t_fair = T_FAIR_COEF / bp->link_vars.line_speed;

	/* this is the threshold below which we won't arm the timer anymore */
	bp->cmng.fair_vars.fair_threshold = QM_ARB_BYTES;

	/* we multiply by 1e3/8 to get bytes/msec.
	   We don't want the credits to pass a credit
	   of the t_fair*FAIR_MEM (algorithm resolution) */
	bp->cmng.fair_vars.upper_bound = r_param * t_fair * FAIR_MEM;
	/* since each tick is 4 usec */
	bp->cmng.fair_vars.fairness_timeout = fair_periodic_timeout_usec / 4;
}
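
/*
 * Unit sanity-check for the calculations above: line_speed is in Mbps, so
 * r_param = line_speed / 8 is bytes per microsecond (1250 for 10G).  SDM
 * timer ticks are 4 us long, hence the "/ 4" conversions, and
 * t_fair = T_FAIR_COEF / line_speed matches the 1000 us (10G) and
 * 10000 us (1G) figures quoted in the comment.
 */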
2215 static void bnx2x_init_vn_minmax(struct bnx2x *bp, int func)
2217 struct rate_shaping_vars_per_vn m_rs_vn;
2218 struct fairness_vars_per_vn m_fair_vn;
2219 u32 vn_cfg = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
2220 u16 vn_min_rate, vn_max_rate;
2223 /* If function is hidden - set min and max to zeroes */
2224 if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) {
2229 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
2230 FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
2231 /* If fairness is enabled (not all min rates are zeroes) and
2232 if current min rate is zero - set it to 1.
2233 This is a requirement of the algorithm. */
2234 if (bp->vn_weight_sum && (vn_min_rate == 0))
2235 vn_min_rate = DEF_MIN_RATE;
2236 vn_max_rate = ((vn_cfg & FUNC_MF_CFG_MAX_BW_MASK) >>
2237 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
2238 }
2241 "func %d: vn_min_rate=%d vn_max_rate=%d vn_weight_sum=%d\n",
2242 func, vn_min_rate, vn_max_rate, bp->vn_weight_sum);
2244 memset(&m_rs_vn, 0, sizeof(struct rate_shaping_vars_per_vn));
2245 memset(&m_fair_vn, 0, sizeof(struct fairness_vars_per_vn));
2247 /* global vn counter - maximal Mbps for this vn */
2248 m_rs_vn.vn_counter.rate = vn_max_rate;
2250 /* quota - number of bytes transmitted in this period */
2251 m_rs_vn.vn_counter.quota =
2252 (vn_max_rate * RS_PERIODIC_TIMEOUT_USEC) / 8;
2254 if (bp->vn_weight_sum) {
2255 /* credit for each period of the fairness algorithm:
2256 number of bytes in T_FAIR (the vn share the port rate).
2257 vn_weight_sum should not be larger than 10000, thus
2258 T_FAIR_COEF / (8 * vn_weight_sum) will always be greater
2259 than zero */
2260 m_fair_vn.vn_credit_delta =
2261 max((u32)(vn_min_rate * (T_FAIR_COEF /
2262 (8 * bp->vn_weight_sum))),
2263 (u32)(bp->cmng.fair_vars.fair_threshold * 2));
2264 DP(NETIF_MSG_IFUP, "m_fair_vn.vn_credit_delta=%d\n",
2265 m_fair_vn.vn_credit_delta);
2266 }
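/* Rough illustration (not part of the original source; assumes
 * T_FAIR_COEF == 10000 * 1000, consistent with t_fair being 1000 usec
 * at 10G): a vn with vn_min_rate = 1000 and vn_weight_sum = 10000 gets
 * vn_credit_delta = max(1000 * (10000000 / 80000), 2 * fair_threshold)
 * = max(125000, 2 * fair_threshold) bytes per fairness period. */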
2268 /* Store it to internal memory */
2269 for (i = 0; i < sizeof(struct rate_shaping_vars_per_vn)/4; i++)
2270 REG_WR(bp, BAR_XSTRORM_INTMEM +
2271 XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func) + i * 4,
2272 ((u32 *)(&m_rs_vn))[i]);
2274 for (i = 0; i < sizeof(struct fairness_vars_per_vn)/4; i++)
2275 REG_WR(bp, BAR_XSTRORM_INTMEM +
2276 XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func) + i * 4,
2277 ((u32 *)(&m_fair_vn))[i]);
2281 /* This function is called upon link interrupt */
2282 static void bnx2x_link_attn(struct bnx2x *bp)
2284 /* Make sure that we are synced with the current statistics */
2285 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2287 bnx2x_link_update(&bp->link_params, &bp->link_vars);
2289 if (bp->link_vars.link_up) {
2291 /* dropless flow control */
2292 if (CHIP_IS_E1H(bp)) {
2293 int port = BP_PORT(bp);
2294 u32 pause_enabled = 0;
2296 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
2297 pause_enabled = 1;
2299 REG_WR(bp, BAR_USTRORM_INTMEM +
2300 USTORM_PAUSE_ENABLED_OFFSET(port),
2301 pause_enabled);
2302 }
2304 if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
2305 struct host_port_stats *pstats;
2307 pstats = bnx2x_sp(bp, port_stats);
2308 /* reset old bmac stats */
2309 memset(&(pstats->mac_stx[0]), 0,
2310 sizeof(struct mac_stx));
2312 if ((bp->state == BNX2X_STATE_OPEN) ||
2313 (bp->state == BNX2X_STATE_DISABLED))
2314 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2317 /* indicate link status */
2318 bnx2x_link_report(bp);
2319 }
2320 if (IS_E1HMF(bp)) {
2321 int port = BP_PORT(bp);
2322 int func;
2323 int vn;
2325 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
2326 if (vn == BP_E1HVN(bp))
2327 continue;
2329 func = ((vn << 1) | port);
2331 /* Set the attention towards other drivers
2332 on the same port */
2333 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
2334 (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
2337 if (bp->link_vars.link_up) {
2340 /* Init rate shaping and fairness contexts */
2341 bnx2x_init_port_minmax(bp);
2343 for (vn = VN_0; vn < E1HVN_MAX; vn++)
2344 bnx2x_init_vn_minmax(bp, 2*vn + port);
2346 /* Store it to internal memory */
2347 for (i = 0;
2348 i < sizeof(struct cmng_struct_per_port) / 4; i++)
2349 REG_WR(bp, BAR_XSTRORM_INTMEM +
2350 XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i*4,
2351 ((u32 *)(&bp->cmng))[i]);
2356 static void bnx2x__link_status_update(struct bnx2x *bp)
2358 if (bp->state != BNX2X_STATE_OPEN)
2359 return;
2361 bnx2x_link_status_update(&bp->link_params, &bp->link_vars);
2363 if (bp->link_vars.link_up)
2364 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2365 else
2366 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2368 /* indicate link status */
2369 bnx2x_link_report(bp);
2372 static void bnx2x_pmf_update(struct bnx2x *bp)
2374 int port = BP_PORT(bp);
2375 u32 val;
2377 bp->port.pmf = 1;
2378 DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
2380 /* enable nig attention */
2381 val = (0xff0f | (1 << (BP_E1HVN(bp) + 4)));
2382 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
2383 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
2385 bnx2x_stats_handle(bp, STATS_EVENT_PMF);
2386 }
2392 /*
2393 * General service functions
2394 */
2396 /* the slow path queue is odd since completions arrive on the fastpath ring */
2397 static int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
2398 u32 data_hi, u32 data_lo, int common)
2400 int func = BP_FUNC(bp);
2402 DP(BNX2X_MSG_SP/*NETIF_MSG_TIMER*/,
2403 "SPQE (%x:%x) command %d hw_cid %x data (%x:%x) left %x\n",
2404 (u32)U64_HI(bp->spq_mapping), (u32)(U64_LO(bp->spq_mapping) +
2405 (void *)bp->spq_prod_bd - (void *)bp->spq), command,
2406 HW_CID(bp, cid), data_hi, data_lo, bp->spq_left);
2408 #ifdef BNX2X_STOP_ON_ERROR
2409 if (unlikely(bp->panic))
2410 return -EIO;
2411 #endif
2413 spin_lock_bh(&bp->spq_lock);
2415 if (!bp->spq_left) {
2416 BNX2X_ERR("BUG! SPQ ring full!\n");
2417 spin_unlock_bh(&bp->spq_lock);
2418 bnx2x_panic();
2419 return -EBUSY;
2420 }
2422 /* CID needs port number to be encoded in it */
2423 bp->spq_prod_bd->hdr.conn_and_cmd_data =
2424 cpu_to_le32(((command << SPE_HDR_CMD_ID_SHIFT) |
2425 HW_CID(bp, cid)));
2426 bp->spq_prod_bd->hdr.type = cpu_to_le16(ETH_CONNECTION_TYPE);
2427 if (common)
2428 bp->spq_prod_bd->hdr.type |=
2429 cpu_to_le16((1 << SPE_HDR_COMMON_RAMROD_SHIFT));
2431 bp->spq_prod_bd->data.mac_config_addr.hi = cpu_to_le32(data_hi);
2432 bp->spq_prod_bd->data.mac_config_addr.lo = cpu_to_le32(data_lo);
2434 bp->spq_left--;
2436 if (bp->spq_prod_bd == bp->spq_last_bd) {
2437 bp->spq_prod_bd = bp->spq;
2438 bp->spq_prod_idx = 0;
2439 DP(NETIF_MSG_TIMER, "end of spq\n");
2441 } else {
2442 bp->spq_prod_bd++;
2443 bp->spq_prod_idx++;
2444 }
2446 /* Make sure that BD data is updated before writing the producer */
2447 wmb();
2449 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func),
2450 bp->spq_prod_idx);
2452 mmiowb();
2454 spin_unlock_bh(&bp->spq_lock);
2455 return 0;
2456 }
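/* Usage sketch (not part of the original source): callers post a ramrod
 * and pick up its completion on the fastpath ring, e.g. the statistics
 * query further down issues
 * bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_STAT_QUERY, 0,
 * ((u32 *)&ramrod_data)[1], ((u32 *)&ramrod_data)[0], 0);
 * with common == 0, so the COMMON_RAMROD bit stays clear in hdr.type. */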
2458 /* acquire split MCP access lock register */
2459 static int bnx2x_acquire_alr(struct bnx2x *bp)
2460 {
2461 u32 i, j, val;
2462 int rc = 0;
2464 might_sleep();
2465 i = 100;
2466 for (j = 0; j < i*10; j++) {
2467 val = (1UL << 31);
2468 REG_WR(bp, GRCBASE_MCP + 0x9c, val);
2469 val = REG_RD(bp, GRCBASE_MCP + 0x9c);
2470 if (val & (1L << 31))
2471 break;
2473 msleep(5);
2474 }
2475 if (!(val & (1L << 31))) {
2476 BNX2X_ERR("Cannot acquire MCP access lock register\n");
2477 rc = -EBUSY;
2478 }
2480 return rc;
2481 }
2483 /* release split MCP access lock register */
2484 static void bnx2x_release_alr(struct bnx2x *bp)
2485 {
2486 u32 val = 0;
2488 REG_WR(bp, GRCBASE_MCP + 0x9c, val);
2489 }
2491 static inline u16 bnx2x_update_dsb_idx(struct bnx2x *bp)
2492 {
2493 struct host_def_status_block *def_sb = bp->def_status_blk;
2494 u16 rc = 0;
2496 barrier(); /* status block is written to by the chip */
2497 if (bp->def_att_idx != def_sb->atten_status_block.attn_bits_index) {
2498 bp->def_att_idx = def_sb->atten_status_block.attn_bits_index;
2499 rc |= 1;
2500 }
2501 if (bp->def_c_idx != def_sb->c_def_status_block.status_block_index) {
2502 bp->def_c_idx = def_sb->c_def_status_block.status_block_index;
2503 rc |= 2;
2504 }
2505 if (bp->def_u_idx != def_sb->u_def_status_block.status_block_index) {
2506 bp->def_u_idx = def_sb->u_def_status_block.status_block_index;
2507 rc |= 4;
2508 }
2509 if (bp->def_x_idx != def_sb->x_def_status_block.status_block_index) {
2510 bp->def_x_idx = def_sb->x_def_status_block.status_block_index;
2511 rc |= 8;
2512 }
2513 if (bp->def_t_idx != def_sb->t_def_status_block.status_block_index) {
2514 bp->def_t_idx = def_sb->t_def_status_block.status_block_index;
2515 rc |= 16;
2516 }
2518 return rc;
2519 }
2520 /*
2521 * slow path service functions
2522 */
2524 static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
2526 int port = BP_PORT(bp);
2527 u32 hc_addr = (HC_REG_COMMAND_REG + port*32 +
2528 COMMAND_REG_ATTN_BITS_SET);
2529 u32 aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
2530 MISC_REG_AEU_MASK_ATTN_FUNC_0;
2531 u32 nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 :
2532 NIG_REG_MASK_INTERRUPT_PORT0;
2533 u32 aeu_mask;
2534 u32 nig_mask = 0;
2536 if (bp->attn_state & asserted)
2537 BNX2X_ERR("IGU ERROR\n");
2539 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2540 aeu_mask = REG_RD(bp, aeu_addr);
2542 DP(NETIF_MSG_HW, "aeu_mask %x newly asserted %x\n",
2543 aeu_mask, asserted);
2544 aeu_mask &= ~(asserted & 0xff);
2545 DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
2547 REG_WR(bp, aeu_addr, aeu_mask);
2548 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2550 DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
2551 bp->attn_state |= asserted;
2552 DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
2554 if (asserted & ATTN_HARD_WIRED_MASK) {
2555 if (asserted & ATTN_NIG_FOR_FUNC) {
2557 bnx2x_acquire_phy_lock(bp);
2559 /* save nig interrupt mask */
2560 nig_mask = REG_RD(bp, nig_int_mask_addr);
2561 REG_WR(bp, nig_int_mask_addr, 0);
2563 bnx2x_link_attn(bp);
2565 /* handle unicore attn? */
2567 if (asserted & ATTN_SW_TIMER_4_FUNC)
2568 DP(NETIF_MSG_HW, "ATTN_SW_TIMER_4_FUNC!\n");
2570 if (asserted & GPIO_2_FUNC)
2571 DP(NETIF_MSG_HW, "GPIO_2_FUNC!\n");
2573 if (asserted & GPIO_3_FUNC)
2574 DP(NETIF_MSG_HW, "GPIO_3_FUNC!\n");
2576 if (asserted & GPIO_4_FUNC)
2577 DP(NETIF_MSG_HW, "GPIO_4_FUNC!\n");
2580 if (asserted & ATTN_GENERAL_ATTN_1) {
2581 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_1!\n");
2582 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_1, 0x0);
2584 if (asserted & ATTN_GENERAL_ATTN_2) {
2585 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_2!\n");
2586 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_2, 0x0);
2588 if (asserted & ATTN_GENERAL_ATTN_3) {
2589 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_3!\n");
2590 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_3, 0x0);
2593 if (asserted & ATTN_GENERAL_ATTN_4) {
2594 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_4!\n");
2595 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_4, 0x0);
2597 if (asserted & ATTN_GENERAL_ATTN_5) {
2598 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_5!\n");
2599 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_5, 0x0);
2601 if (asserted & ATTN_GENERAL_ATTN_6) {
2602 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_6!\n");
2603 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_6, 0x0);
2607 } /* if hardwired */
2609 DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
2610 asserted, hc_addr);
2611 REG_WR(bp, hc_addr, asserted);
2613 /* now set back the mask */
2614 if (asserted & ATTN_NIG_FOR_FUNC) {
2615 REG_WR(bp, nig_int_mask_addr, nig_mask);
2616 bnx2x_release_phy_lock(bp);
2620 static inline void bnx2x_fan_failure(struct bnx2x *bp)
2622 int port = BP_PORT(bp);
2624 /* mark the failure */
2625 bp->link_params.ext_phy_config &= ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
2626 bp->link_params.ext_phy_config |= PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE;
2627 SHMEM_WR(bp, dev_info.port_hw_config[port].external_phy_config,
2628 bp->link_params.ext_phy_config);
2630 /* log the failure */
2631 printk(KERN_ERR PFX "Fan Failure on Network Controller %s has caused"
2632 " the driver to shut down the card to prevent permanent"
2633 " damage. Please contact Dell Support for assistance\n",
2634 bp->dev->name);
2635 }
2636 static inline void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
2638 int port = BP_PORT(bp);
2639 int reg_offset;
2640 u32 val;
2642 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
2643 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
2645 if (attn & AEU_INPUTS_ATTN_BITS_SPIO5) {
2647 val = REG_RD(bp, reg_offset);
2648 val &= ~AEU_INPUTS_ATTN_BITS_SPIO5;
2649 REG_WR(bp, reg_offset, val);
2651 BNX2X_ERR("SPIO5 hw attention\n");
2653 /* Fan failure attention */
2654 switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
2655 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
2656 /* Low power mode is controlled by GPIO 2 */
2657 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
2658 MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
2659 /* The PHY reset is controlled by GPIO 1 */
2660 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
2661 MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
2662 break;
2664 default:
2665 break;
2666 }
2667 bnx2x_fan_failure(bp);
2668 }
2670 if (attn & (AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 |
2671 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1)) {
2672 bnx2x_acquire_phy_lock(bp);
2673 bnx2x_handle_module_detect_int(&bp->link_params);
2674 bnx2x_release_phy_lock(bp);
2677 if (attn & HW_INTERRUT_ASSERT_SET_0) {
2679 val = REG_RD(bp, reg_offset);
2680 val &= ~(attn & HW_INTERRUT_ASSERT_SET_0);
2681 REG_WR(bp, reg_offset, val);
2683 BNX2X_ERR("FATAL HW block attention set0 0x%x\n",
2684 (attn & HW_INTERRUT_ASSERT_SET_0));
2689 static inline void bnx2x_attn_int_deasserted1(struct bnx2x *bp, u32 attn)
2693 if (attn & AEU_INPUTS_ATTN_BITS_DOORBELLQ_HW_INTERRUPT) {
2695 val = REG_RD(bp, DORQ_REG_DORQ_INT_STS_CLR);
2696 BNX2X_ERR("DB hw attention 0x%x\n", val);
2697 /* DORQ discard attention */
2699 BNX2X_ERR("FATAL error from DORQ\n");
2702 if (attn & HW_INTERRUT_ASSERT_SET_1) {
2704 int port = BP_PORT(bp);
2705 int reg_offset;
2707 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 :
2708 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1);
2710 val = REG_RD(bp, reg_offset);
2711 val &= ~(attn & HW_INTERRUT_ASSERT_SET_1);
2712 REG_WR(bp, reg_offset, val);
2714 BNX2X_ERR("FATAL HW block attention set1 0x%x\n",
2715 (attn & HW_INTERRUT_ASSERT_SET_1));
2720 static inline void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn)
2724 if (attn & AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) {
2726 val = REG_RD(bp, CFC_REG_CFC_INT_STS_CLR);
2727 BNX2X_ERR("CFC hw attention 0x%x\n", val);
2728 /* CFC error attention */
2730 BNX2X_ERR("FATAL error from CFC\n");
2733 if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) {
2735 val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_0);
2736 BNX2X_ERR("PXP hw attention 0x%x\n", val);
2737 /* RQ_USDMDP_FIFO_OVERFLOW */
2739 BNX2X_ERR("FATAL error from PXP\n");
2742 if (attn & HW_INTERRUT_ASSERT_SET_2) {
2744 int port = BP_PORT(bp);
2745 int reg_offset;
2747 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 :
2748 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2);
2750 val = REG_RD(bp, reg_offset);
2751 val &= ~(attn & HW_INTERRUT_ASSERT_SET_2);
2752 REG_WR(bp, reg_offset, val);
2754 BNX2X_ERR("FATAL HW block attention set2 0x%x\n",
2755 (attn & HW_INTERRUT_ASSERT_SET_2));
2760 static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
2764 if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) {
2766 if (attn & BNX2X_PMF_LINK_ASSERT) {
2767 int func = BP_FUNC(bp);
2769 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
2770 bnx2x__link_status_update(bp);
2771 if (SHMEM_RD(bp, func_mb[func].drv_status) &
2772 DRV_STATUS_PMF)
2773 bnx2x_pmf_update(bp);
2775 } else if (attn & BNX2X_MC_ASSERT_BITS) {
2777 BNX2X_ERR("MC assert!\n");
2778 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_10, 0);
2779 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_9, 0);
2780 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_8, 0);
2781 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_7, 0);
2784 } else if (attn & BNX2X_MCP_ASSERT) {
2786 BNX2X_ERR("MCP assert!\n");
2787 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_11, 0);
2791 BNX2X_ERR("Unknown HW assert! (attn 0x%x)\n", attn);
2794 if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) {
2795 BNX2X_ERR("LATCHED attention 0x%08x (masked)\n", attn);
2796 if (attn & BNX2X_GRC_TIMEOUT) {
2797 val = CHIP_IS_E1H(bp) ?
2798 REG_RD(bp, MISC_REG_GRC_TIMEOUT_ATTN) : 0;
2799 BNX2X_ERR("GRC time-out 0x%08x\n", val);
2801 if (attn & BNX2X_GRC_RSV) {
2802 val = CHIP_IS_E1H(bp) ?
2803 REG_RD(bp, MISC_REG_GRC_RSV_ATTN) : 0;
2804 BNX2X_ERR("GRC reserved 0x%08x\n", val);
2806 REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff);
2810 static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
2812 struct attn_route attn;
2813 struct attn_route group_mask;
2814 int port = BP_PORT(bp);
2815 int index;
2816 u32 reg_addr;
2817 u32 val;
2818 u32 aeu_mask;
2820 /* need to take HW lock because MCP or other port might also
2821 try to handle this event */
2822 bnx2x_acquire_alr(bp);
2824 attn.sig[0] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
2825 attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
2826 attn.sig[2] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
2827 attn.sig[3] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
2828 DP(NETIF_MSG_HW, "attn: %08x %08x %08x %08x\n",
2829 attn.sig[0], attn.sig[1], attn.sig[2], attn.sig[3]);
2831 for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
2832 if (deasserted & (1 << index)) {
2833 group_mask = bp->attn_group[index];
2835 DP(NETIF_MSG_HW, "group[%d]: %08x %08x %08x %08x\n",
2836 index, group_mask.sig[0], group_mask.sig[1],
2837 group_mask.sig[2], group_mask.sig[3]);
2839 bnx2x_attn_int_deasserted3(bp,
2840 attn.sig[3] & group_mask.sig[3]);
2841 bnx2x_attn_int_deasserted1(bp,
2842 attn.sig[1] & group_mask.sig[1]);
2843 bnx2x_attn_int_deasserted2(bp,
2844 attn.sig[2] & group_mask.sig[2]);
2845 bnx2x_attn_int_deasserted0(bp,
2846 attn.sig[0] & group_mask.sig[0]);
2848 if ((attn.sig[0] & group_mask.sig[0] &
2849 HW_PRTY_ASSERT_SET_0) ||
2850 (attn.sig[1] & group_mask.sig[1] &
2851 HW_PRTY_ASSERT_SET_1) ||
2852 (attn.sig[2] & group_mask.sig[2] &
2853 HW_PRTY_ASSERT_SET_2))
2854 BNX2X_ERR("FATAL HW block parity attention\n");
2858 bnx2x_release_alr(bp);
2860 reg_addr = (HC_REG_COMMAND_REG + port*32 + COMMAND_REG_ATTN_BITS_CLR);
2863 DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
2865 REG_WR(bp, reg_addr, val);
2867 if (~bp->attn_state & deasserted)
2868 BNX2X_ERR("IGU ERROR\n");
2870 reg_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
2871 MISC_REG_AEU_MASK_ATTN_FUNC_0;
2873 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2874 aeu_mask = REG_RD(bp, reg_addr);
2876 DP(NETIF_MSG_HW, "aeu_mask %x newly deasserted %x\n",
2877 aeu_mask, deasserted);
2878 aeu_mask |= (deasserted & 0xff);
2879 DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
2881 REG_WR(bp, reg_addr, aeu_mask);
2882 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2884 DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
2885 bp->attn_state &= ~deasserted;
2886 DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
2889 static void bnx2x_attn_int(struct bnx2x *bp)
2891 /* read local copy of bits */
2892 u32 attn_bits = le32_to_cpu(bp->def_status_blk->atten_status_block.
2893 attn_bits);
2894 u32 attn_ack = le32_to_cpu(bp->def_status_blk->atten_status_block.
2895 attn_bits_ack);
2896 u32 attn_state = bp->attn_state;
2898 /* look for changed bits */
2899 u32 asserted = attn_bits & ~attn_ack & ~attn_state;
2900 u32 deasserted = ~attn_bits & attn_ack & attn_state;
2903 "attn_bits %x attn_ack %x asserted %x deasserted %x\n",
2904 attn_bits, attn_ack, asserted, deasserted);
2906 if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state))
2907 BNX2X_ERR("BAD attention state\n");
2909 /* handle bits that were raised */
2910 if (asserted)
2911 bnx2x_attn_int_asserted(bp, asserted);
2913 if (deasserted)
2914 bnx2x_attn_int_deasserted(bp, deasserted);
2915 }
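/* Worked example (not part of the original source): with
 * attn_bits = 0x5, attn_ack = 0x1 and attn_state = 0x1:
 * asserted = 0x5 & ~0x1 & ~0x1 = 0x4 (bit 2 newly raised),
 * deasserted = ~0x5 & 0x1 & 0x1 = 0 (nothing cleared).
 * The BAD-state check above fires for bits where attn_bits and
 * attn_ack agree but attn_state disagrees. */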
2917 static void bnx2x_sp_task(struct work_struct *work)
2919 struct bnx2x *bp = container_of(work, struct bnx2x, sp_task.work);
2920 u16 status;
2923 /* Return here if interrupt is disabled */
2924 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
2925 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
2926 return;
2927 }
2929 status = bnx2x_update_dsb_idx(bp);
2930 /* if (status == 0) */
2931 /* BNX2X_ERR("spurious slowpath interrupt!\n"); */
2933 DP(NETIF_MSG_INTR, "got a slowpath interrupt (updated %x)\n", status);
2939 bnx2x_ack_sb(bp, DEF_SB_ID, ATTENTION_ID, le16_to_cpu(bp->def_att_idx),
2940 IGU_INT_NOP, 1);
2941 bnx2x_ack_sb(bp, DEF_SB_ID, USTORM_ID, le16_to_cpu(bp->def_u_idx),
2942 IGU_INT_NOP, 1);
2943 bnx2x_ack_sb(bp, DEF_SB_ID, CSTORM_ID, le16_to_cpu(bp->def_c_idx),
2944 IGU_INT_NOP, 1);
2945 bnx2x_ack_sb(bp, DEF_SB_ID, XSTORM_ID, le16_to_cpu(bp->def_x_idx),
2946 IGU_INT_NOP, 1);
2947 bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, le16_to_cpu(bp->def_t_idx),
2948 IGU_INT_ENABLE, 1);
2949 }
2952 static irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
2954 struct net_device *dev = dev_instance;
2955 struct bnx2x *bp = netdev_priv(dev);
2957 /* Return here if interrupt is disabled */
2958 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
2959 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
2960 return IRQ_HANDLED;
2961 }
2963 bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, 0, IGU_INT_DISABLE, 0);
2965 #ifdef BNX2X_STOP_ON_ERROR
2966 if (unlikely(bp->panic))
2967 return IRQ_HANDLED;
2968 #endif
2970 queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
2972 return IRQ_HANDLED;
2973 }
2975 /* end of slow path */
2979 /****************************************************************************
2980 * Macros
2981 ****************************************************************************/
2983 /* sum[hi:lo] += add[hi:lo] */
2984 #define ADD_64(s_hi, a_hi, s_lo, a_lo) \
2985 do { \
2986 s_lo += a_lo; \
2987 s_hi += a_hi + ((s_lo < a_lo) ? 1 : 0); \
2988 } while (0)
2990 /* difference = minuend - subtrahend */
2991 #define DIFF_64(d_hi, m_hi, s_hi, d_lo, m_lo, s_lo) \
2992 do { \
2993 if (m_lo < s_lo) { \
2994 /* underflow */ \
2995 d_hi = m_hi - s_hi; \
2996 if (d_hi > 0) { \
2997 /* we can 'loan' 1 */ \
2998 d_hi--; \
2999 d_lo = m_lo + (UINT_MAX - s_lo) + 1; \
3000 } else { \
3001 /* m_hi <= s_hi */ \
3002 d_hi = 0; \
3003 d_lo = 0; \
3004 } \
3005 } else { \
3006 /* m_lo >= s_lo */ \
3007 if (m_hi < s_hi) { \
3008 d_hi = 0; \
3009 d_lo = 0; \
3010 } else { \
3011 /* m_hi >= s_hi */ \
3012 d_hi = m_hi - s_hi; \
3013 d_lo = m_lo - s_lo; \
3014 } \
3015 } \
3016 } while (0)
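/* Worked example (not part of the original source): subtracting
 * 0x00000000:00000001 from 0x00000001:00000000 takes the m_lo < s_lo
 * branch; d_hi = 1 - 0 is positive, so one is loaned:
 * d_hi-- gives 0 and d_lo = 0 + (UINT_MAX - 1) + 1 = 0xffffffff,
 * i.e. the correct 64-bit result 0x00000000:ffffffff. When the
 * subtrahend is larger overall, both halves clamp to zero. */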
3018 #define UPDATE_STAT64(s, t) \
3019 do { \
3020 DIFF_64(diff.hi, new->s##_hi, pstats->mac_stx[0].t##_hi, \
3021 diff.lo, new->s##_lo, pstats->mac_stx[0].t##_lo); \
3022 pstats->mac_stx[0].t##_hi = new->s##_hi; \
3023 pstats->mac_stx[0].t##_lo = new->s##_lo; \
3024 ADD_64(pstats->mac_stx[1].t##_hi, diff.hi, \
3025 pstats->mac_stx[1].t##_lo, diff.lo); \
3026 } while (0)
3028 #define UPDATE_STAT64_NIG(s, t) \
3029 do { \
3030 DIFF_64(diff.hi, new->s##_hi, old->s##_hi, \
3031 diff.lo, new->s##_lo, old->s##_lo); \
3032 ADD_64(estats->t##_hi, diff.hi, \
3033 estats->t##_lo, diff.lo); \
3034 } while (0)
3036 /* sum[hi:lo] += add */
3037 #define ADD_EXTEND_64(s_hi, s_lo, a) \
3038 do { \
3039 s_lo += a; \
3040 s_hi += (s_lo < a) ? 1 : 0; \
3041 } while (0)
3043 #define UPDATE_EXTEND_STAT(s) \
3044 do { \
3045 ADD_EXTEND_64(pstats->mac_stx[1].s##_hi, \
3046 pstats->mac_stx[1].s##_lo, \
3047 new->s); \
3048 } while (0)
3050 #define UPDATE_EXTEND_TSTAT(s, t) \
3051 do { \
3052 diff = le32_to_cpu(tclient->s) - le32_to_cpu(old_tclient->s); \
3053 old_tclient->s = tclient->s; \
3054 ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3055 } while (0)
3057 #define UPDATE_EXTEND_USTAT(s, t) \
3058 do { \
3059 diff = le32_to_cpu(uclient->s) - le32_to_cpu(old_uclient->s); \
3060 old_uclient->s = uclient->s; \
3061 ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3062 } while (0)
3064 #define UPDATE_EXTEND_XSTAT(s, t) \
3065 do { \
3066 diff = le32_to_cpu(xclient->s) - le32_to_cpu(old_xclient->s); \
3067 old_xclient->s = xclient->s; \
3068 ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3069 } while (0)
3071 /* minuend -= subtrahend */
3072 #define SUB_64(m_hi, s_hi, m_lo, s_lo) \
3073 do { \
3074 DIFF_64(m_hi, m_hi, s_hi, m_lo, m_lo, s_lo); \
3075 } while (0)
3077 /* minuend[hi:lo] -= subtrahend */
3078 #define SUB_EXTEND_64(m_hi, m_lo, s) \
3079 do { \
3080 SUB_64(m_hi, 0, m_lo, s); \
3081 } while (0)
3083 #define SUB_EXTEND_USTAT(s, t) \
3084 do { \
3085 diff = le32_to_cpu(uclient->s) - le32_to_cpu(old_uclient->s); \
3086 SUB_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3087 } while (0)
3089 /*
3090 * General service functions
3091 */
3093 static inline long bnx2x_hilo(u32 *hiref)
3094 {
3095 u32 lo = *(hiref + 1);
3096 #if (BITS_PER_LONG == 64)
3097 u32 hi = *hiref;
3099 return HILO_U64(hi, lo);
3100 #else
3101 return lo;
3102 #endif
3103 }
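/* Illustration (not part of the original source): firmware counters are
 * kept as {hi, lo} u32 pairs with hi first, so for
 * u32 ctr[2] = { 0x1, 0x80000000 }, bnx2x_hilo(ctr) yields
 * 0x180000000 when long is 64 bits and falls back to the low half,
 * 0x80000000, on 32-bit kernels. */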
3105 /*
3106 * Init service functions
3107 */
3109 static void bnx2x_storm_stats_post(struct bnx2x *bp)
3111 if (!bp->stats_pending) {
3112 struct eth_query_ramrod_data ramrod_data = {0};
3113 int i, rc;
3115 ramrod_data.drv_counter = bp->stats_counter++;
3116 ramrod_data.collect_port = bp->port.pmf ? 1 : 0;
3117 for_each_queue(bp, i)
3118 ramrod_data.ctr_id_vector |= (1 << bp->fp[i].cl_id);
3120 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_STAT_QUERY, 0,
3121 ((u32 *)&ramrod_data)[1],
3122 ((u32 *)&ramrod_data)[0], 0);
3123 if (rc == 0) {
3124 /* stats ramrod has its own slot on the spq */
3125 bp->spq_left++;
3126 bp->stats_pending = 1;
3127 }
3128 }
3129 }
3131 static void bnx2x_stats_init(struct bnx2x *bp)
3133 int port = BP_PORT(bp);
3134 int i;
3136 bp->stats_pending = 0;
3137 bp->executer_idx = 0;
3138 bp->stats_counter = 0;
3141 if (!BP_NOMCP(bp))
3142 bp->port.port_stx = SHMEM_RD(bp, port_mb[port].port_stx);
3143 else
3144 bp->port.port_stx = 0;
3145 DP(BNX2X_MSG_STATS, "port_stx 0x%x\n", bp->port.port_stx);
3147 memset(&(bp->port.old_nig_stats), 0, sizeof(struct nig_stats));
3148 bp->port.old_nig_stats.brb_discard =
3149 REG_RD(bp, NIG_REG_STAT0_BRB_DISCARD + port*0x38);
3150 bp->port.old_nig_stats.brb_truncate =
3151 REG_RD(bp, NIG_REG_STAT0_BRB_TRUNCATE + port*0x38);
3152 REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT0 + port*0x50,
3153 &(bp->port.old_nig_stats.egress_mac_pkt0_lo), 2);
3154 REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT1 + port*0x50,
3155 &(bp->port.old_nig_stats.egress_mac_pkt1_lo), 2);
3157 /* function stats */
3158 for_each_queue(bp, i) {
3159 struct bnx2x_fastpath *fp = &bp->fp[i];
3161 memset(&fp->old_tclient, 0,
3162 sizeof(struct tstorm_per_client_stats));
3163 memset(&fp->old_uclient, 0,
3164 sizeof(struct ustorm_per_client_stats));
3165 memset(&fp->old_xclient, 0,
3166 sizeof(struct xstorm_per_client_stats));
3167 memset(&fp->eth_q_stats, 0, sizeof(struct bnx2x_eth_q_stats));
3170 memset(&bp->dev->stats, 0, sizeof(struct net_device_stats));
3171 memset(&bp->eth_stats, 0, sizeof(struct bnx2x_eth_stats));
3173 bp->stats_state = STATS_STATE_DISABLED;
3174 if (IS_E1HMF(bp) && bp->port.pmf && bp->port.port_stx)
3175 bnx2x_stats_handle(bp, STATS_EVENT_PMF);
3178 static void bnx2x_hw_stats_post(struct bnx2x *bp)
3180 struct dmae_command *dmae = &bp->stats_dmae;
3181 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3183 *stats_comp = DMAE_COMP_VAL;
3184 if (CHIP_REV_IS_SLOW(bp))
3185 return;
3188 if (bp->executer_idx) {
3189 int loader_idx = PMF_DMAE_C(bp);
3191 memset(dmae, 0, sizeof(struct dmae_command));
3193 dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3194 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3195 DMAE_CMD_DST_RESET |
3196 #ifdef __BIG_ENDIAN
3197 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3198 #else
3199 DMAE_CMD_ENDIANITY_DW_SWAP |
3200 #endif
3201 (BP_PORT(bp) ? DMAE_CMD_PORT_1 :
3202 DMAE_CMD_PORT_0) |
3203 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3204 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, dmae[0]));
3205 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, dmae[0]));
3206 dmae->dst_addr_lo = (DMAE_REG_CMD_MEM +
3207 sizeof(struct dmae_command) *
3208 (loader_idx + 1)) >> 2;
3209 dmae->dst_addr_hi = 0;
3210 dmae->len = sizeof(struct dmae_command) >> 2;
3211 if (CHIP_IS_E1(bp))
3212 dmae->len--;
3213 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx + 1] >> 2;
3214 dmae->comp_addr_hi = 0;
3215 dmae->comp_val = 1;
3217 *stats_comp = 0;
3218 bnx2x_post_dmae(bp, dmae, loader_idx);
3220 } else if (bp->func_stx) {
3221 *stats_comp = 0;
3222 bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
3223 }
3224 }
3226 static int bnx2x_stats_comp(struct bnx2x *bp)
3227 {
3228 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3229 int cnt = 10;
3231 might_sleep();
3232 while (*stats_comp != DMAE_COMP_VAL) {
3233 if (!cnt) {
3234 BNX2X_ERR("timeout waiting for stats finished\n");
3235 break;
3236 }
3237 cnt--;
3238 msleep(1);
3239 }
3240 return 1;
3241 }
3243 /*
3244 * Statistics service functions
3245 */
3247 static void bnx2x_stats_pmf_update(struct bnx2x *bp)
3249 struct dmae_command *dmae;
3250 u32 opcode;
3251 int loader_idx = PMF_DMAE_C(bp);
3252 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3255 if (!IS_E1HMF(bp) || !bp->port.pmf || !bp->port.port_stx) {
3256 BNX2X_ERR("BUG!\n");
3257 return;
3258 }
3260 bp->executer_idx = 0;
3262 opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3264 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3265 #ifdef __BIG_ENDIAN
3266 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3267 #else
3268 DMAE_CMD_ENDIANITY_DW_SWAP |
3269 #endif
3270 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3271 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3273 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3274 dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
3275 dmae->src_addr_lo = bp->port.port_stx >> 2;
3276 dmae->src_addr_hi = 0;
3277 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3278 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3279 dmae->len = DMAE_LEN32_RD_MAX;
3280 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3281 dmae->comp_addr_hi = 0;
3284 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3285 dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
3286 dmae->src_addr_lo = (bp->port.port_stx >> 2) + DMAE_LEN32_RD_MAX;
3287 dmae->src_addr_hi = 0;
3288 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats) +
3289 DMAE_LEN32_RD_MAX * 4);
3290 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats) +
3291 DMAE_LEN32_RD_MAX * 4);
3292 dmae->len = (sizeof(struct host_port_stats) >> 2) - DMAE_LEN32_RD_MAX;
3293 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3294 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3295 dmae->comp_val = DMAE_COMP_VAL;
3297 *stats_comp = 0;
3298 bnx2x_hw_stats_post(bp);
3299 bnx2x_stats_comp(bp);
3300 }
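/* Design note (not part of the original source): one DMAE command may
 * read at most DMAE_LEN32_RD_MAX dwords, so the port stats block is
 * pulled in two chained commands: the first completes to GRC, which
 * kicks the loader for the second; the second completes to PCI and
 * writes DMAE_COMP_VAL to stats_comp, which bnx2x_stats_comp() polls. */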
3302 static void bnx2x_port_stats_init(struct bnx2x *bp)
3304 struct dmae_command *dmae;
3305 int port = BP_PORT(bp);
3306 int vn = BP_E1HVN(bp);
3307 u32 opcode;
3308 int loader_idx = PMF_DMAE_C(bp);
3309 u32 mac_addr;
3310 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3313 if (!bp->link_vars.link_up || !bp->port.pmf) {
3314 BNX2X_ERR("BUG!\n");
3318 bp->executer_idx = 0;
3321 opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3322 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3323 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3324 #ifdef __BIG_ENDIAN
3325 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3326 #else
3327 DMAE_CMD_ENDIANITY_DW_SWAP |
3328 #endif
3329 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3330 (vn << DMAE_CMD_E1HVN_SHIFT));
3332 if (bp->port.port_stx) {
3334 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3335 dmae->opcode = opcode;
3336 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3337 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3338 dmae->dst_addr_lo = bp->port.port_stx >> 2;
3339 dmae->dst_addr_hi = 0;
3340 dmae->len = sizeof(struct host_port_stats) >> 2;
3341 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3342 dmae->comp_addr_hi = 0;
3343 dmae->comp_val = 1;
3344 }
3346 if (bp->func_stx) {
3348 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3349 dmae->opcode = opcode;
3350 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
3351 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
3352 dmae->dst_addr_lo = bp->func_stx >> 2;
3353 dmae->dst_addr_hi = 0;
3354 dmae->len = sizeof(struct host_func_stats) >> 2;
3355 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3356 dmae->comp_addr_hi = 0;
3357 dmae->comp_val = 1;
3358 }
3360 /* MAC */
3361 opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3362 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3363 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3364 #ifdef __BIG_ENDIAN
3365 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3366 #else
3367 DMAE_CMD_ENDIANITY_DW_SWAP |
3368 #endif
3369 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3370 (vn << DMAE_CMD_E1HVN_SHIFT));
3372 if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
3374 mac_addr = (port ? NIG_REG_INGRESS_BMAC1_MEM :
3375 NIG_REG_INGRESS_BMAC0_MEM);
3377 /* BIGMAC_REGISTER_TX_STAT_GTPKT ..
3378 BIGMAC_REGISTER_TX_STAT_GTBYT */
3379 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3380 dmae->opcode = opcode;
3381 dmae->src_addr_lo = (mac_addr +
3382 BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
3383 dmae->src_addr_hi = 0;
3384 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
3385 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
3386 dmae->len = (8 + BIGMAC_REGISTER_TX_STAT_GTBYT -
3387 BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
3388 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3389 dmae->comp_addr_hi = 0;
3392 /* BIGMAC_REGISTER_RX_STAT_GR64 ..
3393 BIGMAC_REGISTER_RX_STAT_GRIPJ */
3394 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3395 dmae->opcode = opcode;
3396 dmae->src_addr_lo = (mac_addr +
3397 BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
3398 dmae->src_addr_hi = 0;
3399 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
3400 offsetof(struct bmac_stats, rx_stat_gr64_lo));
3401 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
3402 offsetof(struct bmac_stats, rx_stat_gr64_lo));
3403 dmae->len = (8 + BIGMAC_REGISTER_RX_STAT_GRIPJ -
3404 BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
3405 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3406 dmae->comp_addr_hi = 0;
3409 } else if (bp->link_vars.mac_type == MAC_TYPE_EMAC) {
3411 mac_addr = (port ? GRCBASE_EMAC1 : GRCBASE_EMAC0);
3413 /* EMAC_REG_EMAC_RX_STAT_AC (EMAC_REG_EMAC_RX_STAT_AC_COUNT)*/
3414 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3415 dmae->opcode = opcode;
3416 dmae->src_addr_lo = (mac_addr +
3417 EMAC_REG_EMAC_RX_STAT_AC) >> 2;
3418 dmae->src_addr_hi = 0;
3419 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
3420 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
3421 dmae->len = EMAC_REG_EMAC_RX_STAT_AC_COUNT;
3422 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3423 dmae->comp_addr_hi = 0;
3426 /* EMAC_REG_EMAC_RX_STAT_AC_28 */
3427 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3428 dmae->opcode = opcode;
3429 dmae->src_addr_lo = (mac_addr +
3430 EMAC_REG_EMAC_RX_STAT_AC_28) >> 2;
3431 dmae->src_addr_hi = 0;
3432 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
3433 offsetof(struct emac_stats, rx_stat_falsecarriererrors));
3434 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
3435 offsetof(struct emac_stats, rx_stat_falsecarriererrors));
3436 dmae->len = 1;
3437 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3438 dmae->comp_addr_hi = 0;
3441 /* EMAC_REG_EMAC_TX_STAT_AC (EMAC_REG_EMAC_TX_STAT_AC_COUNT)*/
3442 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3443 dmae->opcode = opcode;
3444 dmae->src_addr_lo = (mac_addr +
3445 EMAC_REG_EMAC_TX_STAT_AC) >> 2;
3446 dmae->src_addr_hi = 0;
3447 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
3448 offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
3449 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
3450 offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
3451 dmae->len = EMAC_REG_EMAC_TX_STAT_AC_COUNT;
3452 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3453 dmae->comp_addr_hi = 0;
3454 dmae->comp_val = 1;
3455 }
3457 /* NIG */
3458 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3459 dmae->opcode = opcode;
3460 dmae->src_addr_lo = (port ? NIG_REG_STAT1_BRB_DISCARD :
3461 NIG_REG_STAT0_BRB_DISCARD) >> 2;
3462 dmae->src_addr_hi = 0;
3463 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats));
3464 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats));
3465 dmae->len = (sizeof(struct nig_stats) - 4*sizeof(u32)) >> 2;
3466 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3467 dmae->comp_addr_hi = 0;
3470 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3471 dmae->opcode = opcode;
3472 dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT0 :
3473 NIG_REG_STAT0_EGRESS_MAC_PKT0) >> 2;
3474 dmae->src_addr_hi = 0;
3475 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
3476 offsetof(struct nig_stats, egress_mac_pkt0_lo));
3477 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
3478 offsetof(struct nig_stats, egress_mac_pkt0_lo));
3479 dmae->len = (2*sizeof(u32)) >> 2;
3480 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3481 dmae->comp_addr_hi = 0;
3484 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3485 dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3486 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
3487 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3488 #ifdef __BIG_ENDIAN
3489 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3490 #else
3491 DMAE_CMD_ENDIANITY_DW_SWAP |
3492 #endif
3493 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3494 (vn << DMAE_CMD_E1HVN_SHIFT));
3495 dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT1 :
3496 NIG_REG_STAT0_EGRESS_MAC_PKT1) >> 2;
3497 dmae->src_addr_hi = 0;
3498 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
3499 offsetof(struct nig_stats, egress_mac_pkt1_lo));
3500 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
3501 offsetof(struct nig_stats, egress_mac_pkt1_lo));
3502 dmae->len = (2*sizeof(u32)) >> 2;
3503 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3504 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3505 dmae->comp_val = DMAE_COMP_VAL;
3507 *stats_comp = 0;
3508 }
3510 static void bnx2x_func_stats_init(struct bnx2x *bp)
3512 struct dmae_command *dmae = &bp->stats_dmae;
3513 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3516 if (!bp->func_stx) {
3517 BNX2X_ERR("BUG!\n");
3521 bp->executer_idx = 0;
3522 memset(dmae, 0, sizeof(struct dmae_command));
3524 dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3525 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
3526 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3527 #ifdef __BIG_ENDIAN
3528 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3529 #else
3530 DMAE_CMD_ENDIANITY_DW_SWAP |
3531 #endif
3532 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3533 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3534 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
3535 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
3536 dmae->dst_addr_lo = bp->func_stx >> 2;
3537 dmae->dst_addr_hi = 0;
3538 dmae->len = sizeof(struct host_func_stats) >> 2;
3539 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3540 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3541 dmae->comp_val = DMAE_COMP_VAL;
3543 *stats_comp = 0;
3544 }
3546 static void bnx2x_stats_start(struct bnx2x *bp)
3547 {
3548 if (bp->port.pmf)
3549 bnx2x_port_stats_init(bp);
3551 else if (bp->func_stx)
3552 bnx2x_func_stats_init(bp);
3554 bnx2x_hw_stats_post(bp);
3555 bnx2x_storm_stats_post(bp);
3558 static void bnx2x_stats_pmf_start(struct bnx2x *bp)
3560 bnx2x_stats_comp(bp);
3561 bnx2x_stats_pmf_update(bp);
3562 bnx2x_stats_start(bp);
3565 static void bnx2x_stats_restart(struct bnx2x *bp)
3567 bnx2x_stats_comp(bp);
3568 bnx2x_stats_start(bp);
3571 static void bnx2x_bmac_stats_update(struct bnx2x *bp)
3573 struct bmac_stats *new = bnx2x_sp(bp, mac_stats.bmac_stats);
3574 struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3575 struct bnx2x_eth_stats *estats = &bp->eth_stats;
3576 struct {
3577 u32 lo;
3578 u32 hi;
3579 } diff;
3581 UPDATE_STAT64(rx_stat_grerb, rx_stat_ifhcinbadoctets);
3582 UPDATE_STAT64(rx_stat_grfcs, rx_stat_dot3statsfcserrors);
3583 UPDATE_STAT64(rx_stat_grund, rx_stat_etherstatsundersizepkts);
3584 UPDATE_STAT64(rx_stat_grovr, rx_stat_dot3statsframestoolong);
3585 UPDATE_STAT64(rx_stat_grfrg, rx_stat_etherstatsfragments);
3586 UPDATE_STAT64(rx_stat_grjbr, rx_stat_etherstatsjabbers);
3587 UPDATE_STAT64(rx_stat_grxcf, rx_stat_maccontrolframesreceived);
3588 UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffstateentered);
3589 UPDATE_STAT64(rx_stat_grxpf, rx_stat_bmac_xpf);
3590 UPDATE_STAT64(tx_stat_gtxpf, tx_stat_outxoffsent);
3591 UPDATE_STAT64(tx_stat_gtxpf, tx_stat_flowcontroldone);
3592 UPDATE_STAT64(tx_stat_gt64, tx_stat_etherstatspkts64octets);
3593 UPDATE_STAT64(tx_stat_gt127,
3594 tx_stat_etherstatspkts65octetsto127octets);
3595 UPDATE_STAT64(tx_stat_gt255,
3596 tx_stat_etherstatspkts128octetsto255octets);
3597 UPDATE_STAT64(tx_stat_gt511,
3598 tx_stat_etherstatspkts256octetsto511octets);
3599 UPDATE_STAT64(tx_stat_gt1023,
3600 tx_stat_etherstatspkts512octetsto1023octets);
3601 UPDATE_STAT64(tx_stat_gt1518,
3602 tx_stat_etherstatspkts1024octetsto1522octets);
3603 UPDATE_STAT64(tx_stat_gt2047, tx_stat_bmac_2047);
3604 UPDATE_STAT64(tx_stat_gt4095, tx_stat_bmac_4095);
3605 UPDATE_STAT64(tx_stat_gt9216, tx_stat_bmac_9216);
3606 UPDATE_STAT64(tx_stat_gt16383, tx_stat_bmac_16383);
3607 UPDATE_STAT64(tx_stat_gterr,
3608 tx_stat_dot3statsinternalmactransmiterrors);
3609 UPDATE_STAT64(tx_stat_gtufl, tx_stat_bmac_ufl);
3611 estats->pause_frames_received_hi =
3612 pstats->mac_stx[1].rx_stat_bmac_xpf_hi;
3613 estats->pause_frames_received_lo =
3614 pstats->mac_stx[1].rx_stat_bmac_xpf_lo;
3616 estats->pause_frames_sent_hi =
3617 pstats->mac_stx[1].tx_stat_outxoffsent_hi;
3618 estats->pause_frames_sent_lo =
3619 pstats->mac_stx[1].tx_stat_outxoffsent_lo;
3622 static void bnx2x_emac_stats_update(struct bnx2x *bp)
3624 struct emac_stats *new = bnx2x_sp(bp, mac_stats.emac_stats);
3625 struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3626 struct bnx2x_eth_stats *estats = &bp->eth_stats;
3628 UPDATE_EXTEND_STAT(rx_stat_ifhcinbadoctets);
3629 UPDATE_EXTEND_STAT(tx_stat_ifhcoutbadoctets);
3630 UPDATE_EXTEND_STAT(rx_stat_dot3statsfcserrors);
3631 UPDATE_EXTEND_STAT(rx_stat_dot3statsalignmenterrors);
3632 UPDATE_EXTEND_STAT(rx_stat_dot3statscarriersenseerrors);
3633 UPDATE_EXTEND_STAT(rx_stat_falsecarriererrors);
3634 UPDATE_EXTEND_STAT(rx_stat_etherstatsundersizepkts);
3635 UPDATE_EXTEND_STAT(rx_stat_dot3statsframestoolong);
3636 UPDATE_EXTEND_STAT(rx_stat_etherstatsfragments);
3637 UPDATE_EXTEND_STAT(rx_stat_etherstatsjabbers);
3638 UPDATE_EXTEND_STAT(rx_stat_maccontrolframesreceived);
3639 UPDATE_EXTEND_STAT(rx_stat_xoffstateentered);
3640 UPDATE_EXTEND_STAT(rx_stat_xonpauseframesreceived);
3641 UPDATE_EXTEND_STAT(rx_stat_xoffpauseframesreceived);
3642 UPDATE_EXTEND_STAT(tx_stat_outxonsent);
3643 UPDATE_EXTEND_STAT(tx_stat_outxoffsent);
3644 UPDATE_EXTEND_STAT(tx_stat_flowcontroldone);
3645 UPDATE_EXTEND_STAT(tx_stat_etherstatscollisions);
3646 UPDATE_EXTEND_STAT(tx_stat_dot3statssinglecollisionframes);
3647 UPDATE_EXTEND_STAT(tx_stat_dot3statsmultiplecollisionframes);
3648 UPDATE_EXTEND_STAT(tx_stat_dot3statsdeferredtransmissions);
3649 UPDATE_EXTEND_STAT(tx_stat_dot3statsexcessivecollisions);
3650 UPDATE_EXTEND_STAT(tx_stat_dot3statslatecollisions);
3651 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts64octets);
3652 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts65octetsto127octets);
3653 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts128octetsto255octets);
3654 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts256octetsto511octets);
3655 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts512octetsto1023octets);
3656 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts1024octetsto1522octets);
3657 UPDATE_EXTEND_STAT(tx_stat_etherstatspktsover1522octets);
3658 UPDATE_EXTEND_STAT(tx_stat_dot3statsinternalmactransmiterrors);
3660 estats->pause_frames_received_hi =
3661 pstats->mac_stx[1].rx_stat_xonpauseframesreceived_hi;
3662 estats->pause_frames_received_lo =
3663 pstats->mac_stx[1].rx_stat_xonpauseframesreceived_lo;
3664 ADD_64(estats->pause_frames_received_hi,
3665 pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_hi,
3666 estats->pause_frames_received_lo,
3667 pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_lo);
3669 estats->pause_frames_sent_hi =
3670 pstats->mac_stx[1].tx_stat_outxonsent_hi;
3671 estats->pause_frames_sent_lo =
3672 pstats->mac_stx[1].tx_stat_outxonsent_lo;
3673 ADD_64(estats->pause_frames_sent_hi,
3674 pstats->mac_stx[1].tx_stat_outxoffsent_hi,
3675 estats->pause_frames_sent_lo,
3676 pstats->mac_stx[1].tx_stat_outxoffsent_lo);
3679 static int bnx2x_hw_stats_update(struct bnx2x *bp)
3681 struct nig_stats *new = bnx2x_sp(bp, nig_stats);
3682 struct nig_stats *old = &(bp->port.old_nig_stats);
3683 struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3684 struct bnx2x_eth_stats *estats = &bp->eth_stats;
3685 struct {
3686 u32 lo;
3687 u32 hi;
3688 } diff;
3689 u32 nig_timer_max;
3691 if (bp->link_vars.mac_type == MAC_TYPE_BMAC)
3692 bnx2x_bmac_stats_update(bp);
3694 else if (bp->link_vars.mac_type == MAC_TYPE_EMAC)
3695 bnx2x_emac_stats_update(bp);
3697 else { /* unreached */
3698 BNX2X_ERR("stats updated by DMAE but no MAC active\n");
3702 ADD_EXTEND_64(pstats->brb_drop_hi, pstats->brb_drop_lo,
3703 new->brb_discard - old->brb_discard);
3704 ADD_EXTEND_64(estats->brb_truncate_hi, estats->brb_truncate_lo,
3705 new->brb_truncate - old->brb_truncate);
3707 UPDATE_STAT64_NIG(egress_mac_pkt0,
3708 etherstatspkts1024octetsto1522octets);
3709 UPDATE_STAT64_NIG(egress_mac_pkt1, etherstatspktsover1522octets);
3711 memcpy(old, new, sizeof(struct nig_stats));
3713 memcpy(&(estats->rx_stat_ifhcinbadoctets_hi), &(pstats->mac_stx[1]),
3714 sizeof(struct mac_stx));
3715 estats->brb_drop_hi = pstats->brb_drop_hi;
3716 estats->brb_drop_lo = pstats->brb_drop_lo;
3718 pstats->host_port_stats_start = ++pstats->host_port_stats_end;
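/* Design note (not part of the original source): start/end act as a
 * consistency marker for the snapshot that is DMAEd to the management
 * firmware. Both are bumped to the same value here, so a reader that
 * sees host_port_stats_start != host_port_stats_end knows it caught a
 * torn copy and must retry. */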
3720 nig_timer_max = SHMEM_RD(bp, port_mb[BP_PORT(bp)].stat_nig_timer);
3721 if (nig_timer_max != estats->nig_timer_max) {
3722 estats->nig_timer_max = nig_timer_max;
3723 BNX2X_ERR("NIG timer max (%u)\n", estats->nig_timer_max);
3729 static int bnx2x_storm_stats_update(struct bnx2x *bp)
3731 struct eth_stats_query *stats = bnx2x_sp(bp, fw_stats);
3732 struct tstorm_per_port_stats *tport =
3733 &stats->tstorm_common.port_statistics;
3734 struct host_func_stats *fstats = bnx2x_sp(bp, func_stats);
3735 struct bnx2x_eth_stats *estats = &bp->eth_stats;
3736 int i;
3738 memset(&(fstats->total_bytes_received_hi), 0,
3739 sizeof(struct host_func_stats) - 2*sizeof(u32));
3740 estats->error_bytes_received_hi = 0;
3741 estats->error_bytes_received_lo = 0;
3742 estats->etherstatsoverrsizepkts_hi = 0;
3743 estats->etherstatsoverrsizepkts_lo = 0;
3744 estats->no_buff_discard_hi = 0;
3745 estats->no_buff_discard_lo = 0;
3747 for_each_queue(bp, i) {
3748 struct bnx2x_fastpath *fp = &bp->fp[i];
3749 int cl_id = fp->cl_id;
3750 struct tstorm_per_client_stats *tclient =
3751 &stats->tstorm_common.client_statistics[cl_id];
3752 struct tstorm_per_client_stats *old_tclient = &fp->old_tclient;
3753 struct ustorm_per_client_stats *uclient =
3754 &stats->ustorm_common.client_statistics[cl_id];
3755 struct ustorm_per_client_stats *old_uclient = &fp->old_uclient;
3756 struct xstorm_per_client_stats *xclient =
3757 &stats->xstorm_common.client_statistics[cl_id];
3758 struct xstorm_per_client_stats *old_xclient = &fp->old_xclient;
3759 struct bnx2x_eth_q_stats *qstats = &fp->eth_q_stats;
3760 u32 diff;
3762 /* are storm stats valid? */
3763 if ((u16)(le16_to_cpu(xclient->stats_counter) + 1) !=
3764 bp->stats_counter) {
3765 DP(BNX2X_MSG_STATS, "[%d] stats not updated by xstorm"
3766 " xstorm counter (%d) != stats_counter (%d)\n",
3767 i, xclient->stats_counter, bp->stats_counter);
3768 return -1;
3769 }
3770 if ((u16)(le16_to_cpu(tclient->stats_counter) + 1) !=
3771 bp->stats_counter) {
3772 DP(BNX2X_MSG_STATS, "[%d] stats not updated by tstorm"
3773 " tstorm counter (%d) != stats_counter (%d)\n",
3774 i, tclient->stats_counter, bp->stats_counter);
3775 return -1;
3776 }
3777 if ((u16)(le16_to_cpu(uclient->stats_counter) + 1) !=
3778 bp->stats_counter) {
3779 DP(BNX2X_MSG_STATS, "[%d] stats not updated by ustorm"
3780 " ustorm counter (%d) != stats_counter (%d)\n",
3781 i, uclient->stats_counter, bp->stats_counter);
3782 return -1;
3783 }
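/* Worked example (not part of the original source): the driver bumps
 * bp->stats_counter for every posted query and the storms echo the
 * counter they served. With stats_counter == 7 only a client counter
 * of 6 passes, since (u16)(6 + 1) == 7; the u16 cast keeps this right
 * across wraparound, e.g. (u16)(0xffff + 1) == 0. */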
3785 qstats->total_bytes_received_hi =
3786 qstats->valid_bytes_received_hi =
3787 le32_to_cpu(tclient->total_rcv_bytes.hi);
3788 qstats->total_bytes_received_lo =
3789 qstats->valid_bytes_received_lo =
3790 le32_to_cpu(tclient->total_rcv_bytes.lo);
3792 qstats->error_bytes_received_hi =
3793 le32_to_cpu(tclient->rcv_error_bytes.hi);
3794 qstats->error_bytes_received_lo =
3795 le32_to_cpu(tclient->rcv_error_bytes.lo);
3797 ADD_64(qstats->total_bytes_received_hi,
3798 qstats->error_bytes_received_hi,
3799 qstats->total_bytes_received_lo,
3800 qstats->error_bytes_received_lo);
3802 UPDATE_EXTEND_TSTAT(rcv_unicast_pkts,
3803 total_unicast_packets_received);
3804 UPDATE_EXTEND_TSTAT(rcv_multicast_pkts,
3805 total_multicast_packets_received);
3806 UPDATE_EXTEND_TSTAT(rcv_broadcast_pkts,
3807 total_broadcast_packets_received);
3808 UPDATE_EXTEND_TSTAT(packets_too_big_discard,
3809 etherstatsoverrsizepkts);
3810 UPDATE_EXTEND_TSTAT(no_buff_discard, no_buff_discard);
3812 SUB_EXTEND_USTAT(ucast_no_buff_pkts,
3813 total_unicast_packets_received);
3814 SUB_EXTEND_USTAT(mcast_no_buff_pkts,
3815 total_multicast_packets_received);
3816 SUB_EXTEND_USTAT(bcast_no_buff_pkts,
3817 total_broadcast_packets_received);
3818 UPDATE_EXTEND_USTAT(ucast_no_buff_pkts, no_buff_discard);
3819 UPDATE_EXTEND_USTAT(mcast_no_buff_pkts, no_buff_discard);
3820 UPDATE_EXTEND_USTAT(bcast_no_buff_pkts, no_buff_discard);
3822 qstats->total_bytes_transmitted_hi =
3823 le32_to_cpu(xclient->total_sent_bytes.hi);
3824 qstats->total_bytes_transmitted_lo =
3825 le32_to_cpu(xclient->total_sent_bytes.lo);
3827 UPDATE_EXTEND_XSTAT(unicast_pkts_sent,
3828 total_unicast_packets_transmitted);
3829 UPDATE_EXTEND_XSTAT(multicast_pkts_sent,
3830 total_multicast_packets_transmitted);
3831 UPDATE_EXTEND_XSTAT(broadcast_pkts_sent,
3832 total_broadcast_packets_transmitted);
3834 old_tclient->checksum_discard = tclient->checksum_discard;
3835 old_tclient->ttl0_discard = tclient->ttl0_discard;
3837 ADD_64(fstats->total_bytes_received_hi,
3838 qstats->total_bytes_received_hi,
3839 fstats->total_bytes_received_lo,
3840 qstats->total_bytes_received_lo);
3841 ADD_64(fstats->total_bytes_transmitted_hi,
3842 qstats->total_bytes_transmitted_hi,
3843 fstats->total_bytes_transmitted_lo,
3844 qstats->total_bytes_transmitted_lo);
3845 ADD_64(fstats->total_unicast_packets_received_hi,
3846 qstats->total_unicast_packets_received_hi,
3847 fstats->total_unicast_packets_received_lo,
3848 qstats->total_unicast_packets_received_lo);
3849 ADD_64(fstats->total_multicast_packets_received_hi,
3850 qstats->total_multicast_packets_received_hi,
3851 fstats->total_multicast_packets_received_lo,
3852 qstats->total_multicast_packets_received_lo);
3853 ADD_64(fstats->total_broadcast_packets_received_hi,
3854 qstats->total_broadcast_packets_received_hi,
3855 fstats->total_broadcast_packets_received_lo,
3856 qstats->total_broadcast_packets_received_lo);
3857 ADD_64(fstats->total_unicast_packets_transmitted_hi,
3858 qstats->total_unicast_packets_transmitted_hi,
3859 fstats->total_unicast_packets_transmitted_lo,
3860 qstats->total_unicast_packets_transmitted_lo);
3861 ADD_64(fstats->total_multicast_packets_transmitted_hi,
3862 qstats->total_multicast_packets_transmitted_hi,
3863 fstats->total_multicast_packets_transmitted_lo,
3864 qstats->total_multicast_packets_transmitted_lo);
3865 ADD_64(fstats->total_broadcast_packets_transmitted_hi,
3866 qstats->total_broadcast_packets_transmitted_hi,
3867 fstats->total_broadcast_packets_transmitted_lo,
3868 qstats->total_broadcast_packets_transmitted_lo);
3869 ADD_64(fstats->valid_bytes_received_hi,
3870 qstats->valid_bytes_received_hi,
3871 fstats->valid_bytes_received_lo,
3872 qstats->valid_bytes_received_lo);
3874 ADD_64(estats->error_bytes_received_hi,
3875 qstats->error_bytes_received_hi,
3876 estats->error_bytes_received_lo,
3877 qstats->error_bytes_received_lo);
3878 ADD_64(estats->etherstatsoverrsizepkts_hi,
3879 qstats->etherstatsoverrsizepkts_hi,
3880 estats->etherstatsoverrsizepkts_lo,
3881 qstats->etherstatsoverrsizepkts_lo);
3882 ADD_64(estats->no_buff_discard_hi, qstats->no_buff_discard_hi,
3883 estats->no_buff_discard_lo, qstats->no_buff_discard_lo);
3886 ADD_64(fstats->total_bytes_received_hi,
3887 estats->rx_stat_ifhcinbadoctets_hi,
3888 fstats->total_bytes_received_lo,
3889 estats->rx_stat_ifhcinbadoctets_lo);
3891 memcpy(estats, &(fstats->total_bytes_received_hi),
3892 sizeof(struct host_func_stats) - 2*sizeof(u32));
3894 ADD_64(estats->etherstatsoverrsizepkts_hi,
3895 estats->rx_stat_dot3statsframestoolong_hi,
3896 estats->etherstatsoverrsizepkts_lo,
3897 estats->rx_stat_dot3statsframestoolong_lo);
3898 ADD_64(estats->error_bytes_received_hi,
3899 estats->rx_stat_ifhcinbadoctets_hi,
3900 estats->error_bytes_received_lo,
3901 estats->rx_stat_ifhcinbadoctets_lo);
3904 estats->mac_filter_discard =
3905 le32_to_cpu(tport->mac_filter_discard);
3906 estats->xxoverflow_discard =
3907 le32_to_cpu(tport->xxoverflow_discard);
3908 estats->brb_truncate_discard =
3909 le32_to_cpu(tport->brb_truncate_discard);
3910 estats->mac_discard = le32_to_cpu(tport->mac_discard);
3913 fstats->host_func_stats_start = ++fstats->host_func_stats_end;
3915 bp->stats_pending = 0;
3917 return 0;
3918 }
3920 static void bnx2x_net_stats_update(struct bnx2x *bp)
3922 struct bnx2x_eth_stats *estats = &bp->eth_stats;
3923 struct net_device_stats *nstats = &bp->dev->stats;
3924 int i;
3926 nstats->rx_packets =
3927 bnx2x_hilo(&estats->total_unicast_packets_received_hi) +
3928 bnx2x_hilo(&estats->total_multicast_packets_received_hi) +
3929 bnx2x_hilo(&estats->total_broadcast_packets_received_hi);
3931 nstats->tx_packets =
3932 bnx2x_hilo(&estats->total_unicast_packets_transmitted_hi) +
3933 bnx2x_hilo(&estats->total_multicast_packets_transmitted_hi) +
3934 bnx2x_hilo(&estats->total_broadcast_packets_transmitted_hi);
3936 nstats->rx_bytes = bnx2x_hilo(&estats->total_bytes_received_hi);
3938 nstats->tx_bytes = bnx2x_hilo(&estats->total_bytes_transmitted_hi);
3940 nstats->rx_dropped = estats->mac_discard;
3941 for_each_queue(bp, i)
3942 nstats->rx_dropped +=
3943 le32_to_cpu(bp->fp[i].old_tclient.checksum_discard);
3945 nstats->tx_dropped = 0;
3948 bnx2x_hilo(&estats->total_multicast_packets_received_hi);
3950 nstats->collisions =
3951 bnx2x_hilo(&estats->tx_stat_etherstatscollisions_hi);
3953 nstats->rx_length_errors =
3954 bnx2x_hilo(&estats->rx_stat_etherstatsundersizepkts_hi) +
3955 bnx2x_hilo(&estats->etherstatsoverrsizepkts_hi);
3956 nstats->rx_over_errors = bnx2x_hilo(&estats->brb_drop_hi) +
3957 bnx2x_hilo(&estats->brb_truncate_hi);
3958 nstats->rx_crc_errors =
3959 bnx2x_hilo(&estats->rx_stat_dot3statsfcserrors_hi);
3960 nstats->rx_frame_errors =
3961 bnx2x_hilo(&estats->rx_stat_dot3statsalignmenterrors_hi);
3962 nstats->rx_fifo_errors = bnx2x_hilo(&estats->no_buff_discard_hi);
3963 nstats->rx_missed_errors = estats->xxoverflow_discard;
3965 nstats->rx_errors = nstats->rx_length_errors +
3966 nstats->rx_over_errors +
3967 nstats->rx_crc_errors +
3968 nstats->rx_frame_errors +
3969 nstats->rx_fifo_errors +
3970 nstats->rx_missed_errors;
3972 nstats->tx_aborted_errors =
3973 bnx2x_hilo(&estats->tx_stat_dot3statslatecollisions_hi) +
3974 bnx2x_hilo(&estats->tx_stat_dot3statsexcessivecollisions_hi);
3975 nstats->tx_carrier_errors =
3976 bnx2x_hilo(&estats->rx_stat_dot3statscarriersenseerrors_hi);
3977 nstats->tx_fifo_errors = 0;
3978 nstats->tx_heartbeat_errors = 0;
3979 nstats->tx_window_errors = 0;
3981 nstats->tx_errors = nstats->tx_aborted_errors +
3982 nstats->tx_carrier_errors +
3983 bnx2x_hilo(&estats->tx_stat_dot3statsinternalmactransmiterrors_hi);
3986 static void bnx2x_drv_stats_update(struct bnx2x *bp)
3988 struct bnx2x_eth_stats *estats = &bp->eth_stats;
3989 int i;
3991 estats->driver_xoff = 0;
3992 estats->rx_err_discard_pkt = 0;
3993 estats->rx_skb_alloc_failed = 0;
3994 estats->hw_csum_err = 0;
3995 for_each_queue(bp, i) {
3996 struct bnx2x_eth_q_stats *qstats = &bp->fp[i].eth_q_stats;
3998 estats->driver_xoff += qstats->driver_xoff;
3999 estats->rx_err_discard_pkt += qstats->rx_err_discard_pkt;
4000 estats->rx_skb_alloc_failed += qstats->rx_skb_alloc_failed;
4001 estats->hw_csum_err += qstats->hw_csum_err;
4005 static void bnx2x_stats_update(struct bnx2x *bp)
4007 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
4009 if (*stats_comp != DMAE_COMP_VAL)
4010 return;
4012 if (bp->port.pmf)
4013 bnx2x_hw_stats_update(bp);
4015 if (bnx2x_storm_stats_update(bp) && (bp->stats_pending++ == 3)) {
4016 BNX2X_ERR("storm stats were not updated for 3 times\n");
4021 bnx2x_net_stats_update(bp);
4022 bnx2x_drv_stats_update(bp);
4024 if (bp->msglevel & NETIF_MSG_TIMER) {
4025 struct tstorm_per_client_stats *old_tclient =
4026 &bp->fp->old_tclient;
4027 struct bnx2x_eth_q_stats *qstats = &bp->fp->eth_q_stats;
4028 struct bnx2x_eth_stats *estats = &bp->eth_stats;
4029 struct net_device_stats *nstats = &bp->dev->stats;
4030 int i;
4032 printk(KERN_DEBUG "%s:\n", bp->dev->name);
4033 printk(KERN_DEBUG " tx avail (%4x) tx hc idx (%x)"
4035 bnx2x_tx_avail(bp->fp),
4036 le16_to_cpu(*bp->fp->tx_cons_sb), nstats->tx_packets);
4037 printk(KERN_DEBUG " rx usage (%4x) rx hc idx (%x)"
4039 (u16)(le16_to_cpu(*bp->fp->rx_cons_sb) -
4040 bp->fp->rx_comp_cons),
4041 le16_to_cpu(*bp->fp->rx_cons_sb), nstats->rx_packets);
4042 printk(KERN_DEBUG " %s (Xoff events %u) brb drops %u "
4043 "brb truncate %u\n",
4044 (netif_queue_stopped(bp->dev) ? "Xoff" : "Xon"),
4045 qstats->driver_xoff,
4046 estats->brb_drop_lo, estats->brb_truncate_lo);
4047 printk(KERN_DEBUG "tstats: checksum_discard %u "
4048 "packets_too_big_discard %lu no_buff_discard %lu "
4049 "mac_discard %u mac_filter_discard %u "
4050 "xxovrflow_discard %u brb_truncate_discard %u "
4051 "ttl0_discard %u\n",
4052 le32_to_cpu(old_tclient->checksum_discard),
4053 bnx2x_hilo(&qstats->etherstatsoverrsizepkts_hi),
4054 bnx2x_hilo(&qstats->no_buff_discard_hi),
4055 estats->mac_discard, estats->mac_filter_discard,
4056 estats->xxoverflow_discard, estats->brb_truncate_discard,
4057 le32_to_cpu(old_tclient->ttl0_discard));
4059 for_each_queue(bp, i) {
4060 printk(KERN_DEBUG "[%d]: %lu\t%lu\t%lu\n", i,
4061 bnx2x_fp(bp, i, tx_pkt),
4062 bnx2x_fp(bp, i, rx_pkt),
4063 bnx2x_fp(bp, i, rx_calls));
4067 bnx2x_hw_stats_post(bp);
4068 bnx2x_storm_stats_post(bp);
4071 static void bnx2x_port_stats_stop(struct bnx2x *bp)
4073 struct dmae_command *dmae;
4074 u32 opcode;
4075 int loader_idx = PMF_DMAE_C(bp);
4076 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
4078 bp->executer_idx = 0;
4080 opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
4081 DMAE_CMD_C_ENABLE |
4082 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
4083 #ifdef __BIG_ENDIAN
4084 DMAE_CMD_ENDIANITY_B_DW_SWAP |
4085 #else
4086 DMAE_CMD_ENDIANITY_DW_SWAP |
4087 #endif
4088 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
4089 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
4091 if (bp->port.port_stx) {
4093 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4094 if (bp->func_stx)
4095 dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
4096 else
4097 dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
4098 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
4099 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
4100 dmae->dst_addr_lo = bp->port.port_stx >> 2;
4101 dmae->dst_addr_hi = 0;
4102 dmae->len = sizeof(struct host_port_stats) >> 2;
4103 if (bp->func_stx) {
4104 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
4105 dmae->comp_addr_hi = 0;
4106 dmae->comp_val = 1;
4107 } else {
4108 dmae->comp_addr_lo =
4109 U64_LO(bnx2x_sp_mapping(bp, stats_comp));
4110 dmae->comp_addr_hi =
4111 U64_HI(bnx2x_sp_mapping(bp, stats_comp));
4112 dmae->comp_val = DMAE_COMP_VAL;
4114 *stats_comp = 0;
4115 }
4116 }
4119 if (bp->func_stx) {
4120 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4121 dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
4122 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
4123 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
4124 dmae->dst_addr_lo = bp->func_stx >> 2;
4125 dmae->dst_addr_hi = 0;
4126 dmae->len = sizeof(struct host_func_stats) >> 2;
4127 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
4128 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
4129 dmae->comp_val = DMAE_COMP_VAL;
4131 *stats_comp = 0;
4132 }
4133 }
4135 static void bnx2x_stats_stop(struct bnx2x *bp)
4136 {
4137 int update = 0;
4139 bnx2x_stats_comp(bp);
4141 if (bp->port.pmf)
4142 update = (bnx2x_hw_stats_update(bp) == 0);
4144 update |= (bnx2x_storm_stats_update(bp) == 0);
4146 if (update) {
4147 bnx2x_net_stats_update(bp);
4149 if (bp->port.pmf)
4150 bnx2x_port_stats_stop(bp);
4152 bnx2x_hw_stats_post(bp);
4153 bnx2x_stats_comp(bp);
4154 }
4155 }
4157 static void bnx2x_stats_do_nothing(struct bnx2x *bp)
4158 {
4159 }
4161 static const struct {
4162 void (*action)(struct bnx2x *bp);
4163 enum bnx2x_stats_state next_state;
4164 } bnx2x_stats_stm[STATS_STATE_MAX][STATS_EVENT_MAX] = {
4165 /* state event */
4166 {
4167 /* DISABLED PMF */ {bnx2x_stats_pmf_update, STATS_STATE_DISABLED},
4168 /* LINK_UP */ {bnx2x_stats_start, STATS_STATE_ENABLED},
4169 /* UPDATE */ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED},
4170 /* STOP */ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED}
4171 },
4172 {
4173 /* ENABLED PMF */ {bnx2x_stats_pmf_start, STATS_STATE_ENABLED},
4174 /* LINK_UP */ {bnx2x_stats_restart, STATS_STATE_ENABLED},
4175 /* UPDATE */ {bnx2x_stats_update, STATS_STATE_ENABLED},
4176 /* STOP */ {bnx2x_stats_stop, STATS_STATE_DISABLED}
4177 }
4178 };
4180 static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event)
4182 enum bnx2x_stats_state state = bp->stats_state;
4184 bnx2x_stats_stm[state][event].action(bp);
4185 bp->stats_state = bnx2x_stats_stm[state][event].next_state;
4187 if ((event != STATS_EVENT_UPDATE) || (bp->msglevel & NETIF_MSG_TIMER))
4188 DP(BNX2X_MSG_STATS, "state %d -> event %d -> state %d\n",
4189 state, event, bp->stats_state);
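4190 /* Periodic driver timer: polls the rings in poll mode, maintains the
4191 driver/MCP heartbeat pulse and triggers a statistics update */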
4192 static void bnx2x_timer(unsigned long data)
4194 struct bnx2x *bp = (struct bnx2x *) data;
4196 if (!netif_running(bp->dev))
4199 if (atomic_read(&bp->intr_sem) != 0)
4203 struct bnx2x_fastpath *fp = &bp->fp[0];
4207 rc = bnx2x_rx_int(fp, 1000);
4210 if (!BP_NOMCP(bp)) {
4211 int func = BP_FUNC(bp);
4215 ++bp->fw_drv_pulse_wr_seq;
4216 bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK;
4217 /* TBD - add SYSTEM_TIME */
4218 drv_pulse = bp->fw_drv_pulse_wr_seq;
4219 SHMEM_WR(bp, func_mb[func].drv_pulse_mb, drv_pulse);
4221 mcp_pulse = (SHMEM_RD(bp, func_mb[func].mcp_pulse_mb) &
4222 MCP_PULSE_SEQ_MASK);
4223 /* The delta between driver pulse and mcp response should be 1 (before
4224 * mcp response) or 0 (after mcp response) */
4226 if ((drv_pulse != mcp_pulse) &&
4227 (drv_pulse != ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK))) {
4228 /* someone lost a heartbeat... */
4229 BNX2X_ERR("drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
4230 drv_pulse, mcp_pulse);
4234 if ((bp->state == BNX2X_STATE_OPEN) ||
4235 (bp->state == BNX2X_STATE_DISABLED))
4236 bnx2x_stats_handle(bp, STATS_EVENT_UPDATE);
4239 mod_timer(&bp->timer, jiffies + bp->current_interval);
4242 /* end of Statistics */
4247 * nic init service functions
4250 static void bnx2x_zero_sb(struct bnx2x *bp, int sb_id)
4252 int port = BP_PORT(bp);
4254 bnx2x_init_fill(bp, USTORM_INTMEM_ADDR +
4255 USTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), 0,
4256 sizeof(struct ustorm_status_block)/4);
4257 bnx2x_init_fill(bp, CSTORM_INTMEM_ADDR +
4258 CSTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), 0,
4259 sizeof(struct cstorm_status_block)/4);
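4260 /* Program the USTORM/CSTORM views of a fastpath status block and enable
4261 it in the IGU; every HC index starts disabled until coalescing is set */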
4262 static void bnx2x_init_sb(struct bnx2x *bp, struct host_status_block *sb,
4263 dma_addr_t mapping, int sb_id)
4265 int port = BP_PORT(bp);
4266 int func = BP_FUNC(bp);
4271 section = ((u64)mapping) + offsetof(struct host_status_block,
4273 sb->u_status_block.status_block_id = sb_id;
4275 REG_WR(bp, BAR_USTRORM_INTMEM +
4276 USTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id), U64_LO(section));
4277 REG_WR(bp, BAR_USTRORM_INTMEM +
4278 ((USTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id)) + 4),
4280 REG_WR8(bp, BAR_USTRORM_INTMEM + FP_USB_FUNC_OFF +
4281 USTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), func);
4283 for (index = 0; index < HC_USTORM_SB_NUM_INDICES; index++)
4284 REG_WR16(bp, BAR_USTRORM_INTMEM +
4285 USTORM_SB_HC_DISABLE_OFFSET(port, sb_id, index), 1);
4288 section = ((u64)mapping) + offsetof(struct host_status_block,
4290 sb->c_status_block.status_block_id = sb_id;
4292 REG_WR(bp, BAR_CSTRORM_INTMEM +
4293 CSTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id), U64_LO(section));
4294 REG_WR(bp, BAR_CSTRORM_INTMEM +
4295 ((CSTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id)) + 4),
4297 REG_WR8(bp, BAR_CSTRORM_INTMEM + FP_CSB_FUNC_OFF +
4298 CSTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), func);
4300 for (index = 0; index < HC_CSTORM_SB_NUM_INDICES; index++)
4301 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4302 CSTORM_SB_HC_DISABLE_OFFSET(port, sb_id, index), 1);
4304 bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
4307 static void bnx2x_zero_def_sb(struct bnx2x *bp)
4309 int func = BP_FUNC(bp);
4311 bnx2x_init_fill(bp, TSTORM_INTMEM_ADDR +
4312 TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4313 sizeof(struct tstorm_def_status_block)/4);
4314 bnx2x_init_fill(bp, USTORM_INTMEM_ADDR +
4315 USTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4316 sizeof(struct ustorm_def_status_block)/4);
4317 bnx2x_init_fill(bp, CSTORM_INTMEM_ADDR +
4318 CSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4319 sizeof(struct cstorm_def_status_block)/4);
4320 bnx2x_init_fill(bp, XSTORM_INTMEM_ADDR +
4321 XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4322 sizeof(struct xstorm_def_status_block)/4);
4325 static void bnx2x_init_def_sb(struct bnx2x *bp,
4326 struct host_def_status_block *def_sb,
4327 dma_addr_t mapping, int sb_id)
4329 int port = BP_PORT(bp);
4330 int func = BP_FUNC(bp);
4331 int index, val, reg_offset;
4335 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4336 atten_status_block);
4337 def_sb->atten_status_block.status_block_id = sb_id;
4341 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
4342 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
4344 for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
4345 bp->attn_group[index].sig[0] = REG_RD(bp,
4346 reg_offset + 0x10*index);
4347 bp->attn_group[index].sig[1] = REG_RD(bp,
4348 reg_offset + 0x4 + 0x10*index);
4349 bp->attn_group[index].sig[2] = REG_RD(bp,
4350 reg_offset + 0x8 + 0x10*index);
4351 bp->attn_group[index].sig[3] = REG_RD(bp,
4352 reg_offset + 0xc + 0x10*index);
4355 reg_offset = (port ? HC_REG_ATTN_MSG1_ADDR_L :
4356 HC_REG_ATTN_MSG0_ADDR_L);
4358 REG_WR(bp, reg_offset, U64_LO(section));
4359 REG_WR(bp, reg_offset + 4, U64_HI(section));
4361 reg_offset = (port ? HC_REG_ATTN_NUM_P1 : HC_REG_ATTN_NUM_P0);
4363 val = REG_RD(bp, reg_offset);
4365 REG_WR(bp, reg_offset, val);
4368 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4369 u_def_status_block);
4370 def_sb->u_def_status_block.status_block_id = sb_id;
4372 REG_WR(bp, BAR_USTRORM_INTMEM +
4373 USTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
4374 REG_WR(bp, BAR_USTRORM_INTMEM +
4375 ((USTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
4377 REG_WR8(bp, BAR_USTRORM_INTMEM + DEF_USB_FUNC_OFF +
4378 USTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4380 for (index = 0; index < HC_USTORM_DEF_SB_NUM_INDICES; index++)
4381 REG_WR16(bp, BAR_USTRORM_INTMEM +
4382 USTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
4385 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4386 c_def_status_block);
4387 def_sb->c_def_status_block.status_block_id = sb_id;
4389 REG_WR(bp, BAR_CSTRORM_INTMEM +
4390 CSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
4391 REG_WR(bp, BAR_CSTRORM_INTMEM +
4392 ((CSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
4394 REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_CSB_FUNC_OFF +
4395 CSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4397 for (index = 0; index < HC_CSTORM_DEF_SB_NUM_INDICES; index++)
4398 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4399 CSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
4402 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4403 t_def_status_block);
4404 def_sb->t_def_status_block.status_block_id = sb_id;
4406 REG_WR(bp, BAR_TSTRORM_INTMEM +
4407 TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
4408 REG_WR(bp, BAR_TSTRORM_INTMEM +
4409 ((TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
4411 REG_WR8(bp, BAR_TSTRORM_INTMEM + DEF_TSB_FUNC_OFF +
4412 TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4414 for (index = 0; index < HC_TSTORM_DEF_SB_NUM_INDICES; index++)
4415 REG_WR16(bp, BAR_TSTRORM_INTMEM +
4416 TSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
4419 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4420 x_def_status_block);
4421 def_sb->x_def_status_block.status_block_id = sb_id;
4423 REG_WR(bp, BAR_XSTRORM_INTMEM +
4424 XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
4425 REG_WR(bp, BAR_XSTRORM_INTMEM +
4426 ((XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
4428 REG_WR8(bp, BAR_XSTRORM_INTMEM + DEF_XSB_FUNC_OFF +
4429 XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4431 for (index = 0; index < HC_XSTORM_DEF_SB_NUM_INDICES; index++)
4432 REG_WR16(bp, BAR_XSTRORM_INTMEM +
4433 XSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
4435 bp->stats_pending = 0;
4436 bp->set_mac_pending = 0;
4438 bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
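4439 /* Write per-queue Rx/Tx coalescing timeouts (in units of 12us HC ticks);
4440 an index is disabled in the HC when its timeout is below one tick */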
4441 static void bnx2x_update_coalesce(struct bnx2x *bp)
4443 int port = BP_PORT(bp);
4446 for_each_queue(bp, i) {
4447 int sb_id = bp->fp[i].sb_id;
4449 /* HC_INDEX_U_ETH_RX_CQ_CONS */
4450 REG_WR8(bp, BAR_USTRORM_INTMEM +
4451 USTORM_SB_HC_TIMEOUT_OFFSET(port, sb_id,
4452 U_SB_ETH_RX_CQ_INDEX), bp->rx_ticks/12);
4454 REG_WR16(bp, BAR_USTRORM_INTMEM +
4455 USTORM_SB_HC_DISABLE_OFFSET(port, sb_id,
4456 U_SB_ETH_RX_CQ_INDEX),
4457 (bp->rx_ticks/12) ? 0 : 1);
4459 /* HC_INDEX_C_ETH_TX_CQ_CONS */
4460 REG_WR8(bp, BAR_CSTRORM_INTMEM +
4461 CSTORM_SB_HC_TIMEOUT_OFFSET(port, sb_id,
4462 C_SB_ETH_TX_CQ_INDEX), bp->tx_ticks/12);
4464 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4465 CSTORM_SB_HC_DISABLE_OFFSET(port, sb_id,
4466 C_SB_ETH_TX_CQ_INDEX),
4467 (bp->tx_ticks/12) ? 0 : 1);
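4468 /* Free the first 'last' skbs of a fastpath TPA pool; bins still in the
4469 START state also hold a DMA mapping that must be unmapped first */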
4471 static inline void bnx2x_free_tpa_pool(struct bnx2x *bp,
4472 struct bnx2x_fastpath *fp, int last)
4476 for (i = 0; i < last; i++) {
4477 struct sw_rx_bd *rx_buf = &(fp->tpa_pool[i]);
4478 struct sk_buff *skb = rx_buf->skb;
4481 DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i);
4485 if (fp->tpa_state[i] == BNX2X_TPA_START)
4486 pci_unmap_single(bp->pdev,
4487 pci_unmap_addr(rx_buf, mapping),
4488 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
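4489 /* Set up all Rx rings: pre-allocate the TPA pool, chain the BD, SGE and
4490 CQE pages through their "next page" elements, fill the rings with
4491 buffers and publish the initial producers to the chip */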
4495 static void bnx2x_init_rx_rings(struct bnx2x *bp)
4497 int func = BP_FUNC(bp);
4498 int max_agg_queues = CHIP_IS_E1(bp) ? ETH_MAX_AGGREGATION_QUEUES_E1 :
4499 ETH_MAX_AGGREGATION_QUEUES_E1H;
4500 u16 ring_prod, cqe_ring_prod;
4503 bp->rx_buf_size = bp->dev->mtu + ETH_OVREHEAD + BNX2X_RX_ALIGN;
4505 "mtu %d rx_buf_size %d\n", bp->dev->mtu, bp->rx_buf_size);
4507 if (bp->flags & TPA_ENABLE_FLAG) {
4509 for_each_rx_queue(bp, j) {
4510 struct bnx2x_fastpath *fp = &bp->fp[j];
4512 for (i = 0; i < max_agg_queues; i++) {
4513 fp->tpa_pool[i].skb =
4514 netdev_alloc_skb(bp->dev, bp->rx_buf_size);
4515 if (!fp->tpa_pool[i].skb) {
4516 BNX2X_ERR("Failed to allocate TPA "
4517 "skb pool for queue[%d] - "
4518 "disabling TPA on this "
4520 bnx2x_free_tpa_pool(bp, fp, i);
4521 fp->disable_tpa = 1;
4524 pci_unmap_addr_set((struct sw_rx_bd *)
4525 &bp->fp->tpa_pool[i],
4527 fp->tpa_state[i] = BNX2X_TPA_STOP;
4532 for_each_rx_queue(bp, j) {
4533 struct bnx2x_fastpath *fp = &bp->fp[j];
4536 fp->rx_cons_sb = BNX2X_RX_SB_INDEX;
4537 fp->rx_bd_cons_sb = BNX2X_RX_SB_BD_INDEX;
4539 /* "next page" elements initialization */
4541 for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
4542 struct eth_rx_sge *sge;
4544 sge = &fp->rx_sge_ring[RX_SGE_CNT * i - 2];
4546 cpu_to_le32(U64_HI(fp->rx_sge_mapping +
4547 BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
4549 cpu_to_le32(U64_LO(fp->rx_sge_mapping +
4550 BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
4553 bnx2x_init_sge_ring_bit_mask(fp);
4556 for (i = 1; i <= NUM_RX_RINGS; i++) {
4557 struct eth_rx_bd *rx_bd;
4559 rx_bd = &fp->rx_desc_ring[RX_DESC_CNT * i - 2];
4561 cpu_to_le32(U64_HI(fp->rx_desc_mapping +
4562 BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
4564 cpu_to_le32(U64_LO(fp->rx_desc_mapping +
4565 BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
4569 for (i = 1; i <= NUM_RCQ_RINGS; i++) {
4570 struct eth_rx_cqe_next_page *nextpg;
4572 nextpg = (struct eth_rx_cqe_next_page *)
4573 &fp->rx_comp_ring[RCQ_DESC_CNT * i - 1];
4575 cpu_to_le32(U64_HI(fp->rx_comp_mapping +
4576 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
4578 cpu_to_le32(U64_LO(fp->rx_comp_mapping +
4579 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
4582 /* Allocate SGEs and initialize the ring elements */
4583 for (i = 0, ring_prod = 0;
4584 i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {
4586 if (bnx2x_alloc_rx_sge(bp, fp, ring_prod) < 0) {
4587 BNX2X_ERR("was only able to allocate "
4589 BNX2X_ERR("disabling TPA for queue[%d]\n", j);
4590 /* Cleanup already allocated elements */
4591 bnx2x_free_rx_sge_range(bp, fp, ring_prod);
4592 bnx2x_free_tpa_pool(bp, fp, max_agg_queues);
4593 fp->disable_tpa = 1;
4597 ring_prod = NEXT_SGE_IDX(ring_prod);
4599 fp->rx_sge_prod = ring_prod;
4601 /* Allocate BDs and initialize BD ring */
4602 fp->rx_comp_cons = 0;
4603 cqe_ring_prod = ring_prod = 0;
4604 for (i = 0; i < bp->rx_ring_size; i++) {
4605 if (bnx2x_alloc_rx_skb(bp, fp, ring_prod) < 0) {
4606 BNX2X_ERR("was only able to allocate "
4607 "%d rx skbs on queue[%d]\n", i, j);
4608 fp->eth_q_stats.rx_skb_alloc_failed++;
4611 ring_prod = NEXT_RX_IDX(ring_prod);
4612 cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
4613 WARN_ON(ring_prod <= i);
4616 fp->rx_bd_prod = ring_prod;
4617 /* must not have more available CQEs than BDs */
4618 fp->rx_comp_prod = min((u16)(NUM_RCQ_RINGS*RCQ_DESC_CNT),
4620 fp->rx_pkt = fp->rx_calls = 0;
4623 * this will generate an interrupt (to the TSTORM);
4624 * it must only be done after the chip is initialized */
4626 bnx2x_update_rx_prod(bp, fp, ring_prod, fp->rx_comp_prod,
4631 REG_WR(bp, BAR_USTRORM_INTMEM +
4632 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
4633 U64_LO(fp->rx_comp_mapping));
4634 REG_WR(bp, BAR_USTRORM_INTMEM +
4635 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
4636 U64_HI(fp->rx_comp_mapping));
4640 static void bnx2x_init_tx_ring(struct bnx2x *bp)
4644 for_each_tx_queue(bp, j) {
4645 struct bnx2x_fastpath *fp = &bp->fp[j];
4647 for (i = 1; i <= NUM_TX_RINGS; i++) {
4648 struct eth_tx_bd *tx_bd =
4649 &fp->tx_desc_ring[TX_DESC_CNT * i - 1];
4652 cpu_to_le32(U64_HI(fp->tx_desc_mapping +
4653 BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
4655 cpu_to_le32(U64_LO(fp->tx_desc_mapping +
4656 BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
4659 fp->tx_pkt_prod = 0;
4660 fp->tx_pkt_cons = 0;
4663 fp->tx_cons_sb = BNX2X_TX_SB_INDEX;
4668 static void bnx2x_init_sp_ring(struct bnx2x *bp)
4670 int func = BP_FUNC(bp);
4672 spin_lock_init(&bp->spq_lock);
4674 bp->spq_left = MAX_SPQ_PENDING;
4675 bp->spq_prod_idx = 0;
4676 bp->dsb_sp_prod = BNX2X_SP_DSB_INDEX;
4677 bp->spq_prod_bd = bp->spq;
4678 bp->spq_last_bd = bp->spq_prod_bd + MAX_SP_DESC_CNT;
4680 REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func),
4681 U64_LO(bp->spq_mapping));
4683 XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func) + 4,
4684 U64_HI(bp->spq_mapping));
4686 REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PROD_OFFSET(func),
4690 static void bnx2x_init_context(struct bnx2x *bp)
4694 for_each_queue(bp, i) {
4695 struct eth_context *context = bnx2x_sp(bp, context[i].eth);
4696 struct bnx2x_fastpath *fp = &bp->fp[i];
4697 u8 cl_id = fp->cl_id;
4698 u8 sb_id = fp->sb_id;
4700 context->ustorm_st_context.common.sb_index_numbers =
4701 BNX2X_RX_SB_INDEX_NUM;
4702 context->ustorm_st_context.common.clientId = cl_id;
4703 context->ustorm_st_context.common.status_block_id = sb_id;
4704 context->ustorm_st_context.common.flags =
4705 (USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_MC_ALIGNMENT |
4706 USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_STATISTICS);
4707 context->ustorm_st_context.common.statistics_counter_id =
4709 context->ustorm_st_context.common.mc_alignment_log_size =
4710 BNX2X_RX_ALIGN_SHIFT;
4711 context->ustorm_st_context.common.bd_buff_size =
4713 context->ustorm_st_context.common.bd_page_base_hi =
4714 U64_HI(fp->rx_desc_mapping);
4715 context->ustorm_st_context.common.bd_page_base_lo =
4716 U64_LO(fp->rx_desc_mapping);
4717 if (!fp->disable_tpa) {
4718 context->ustorm_st_context.common.flags |=
4719 (USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_TPA |
4720 USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_SGE_RING);
4721 context->ustorm_st_context.common.sge_buff_size =
4722 (u16)min((u32)SGE_PAGE_SIZE*PAGES_PER_SGE,
4724 context->ustorm_st_context.common.sge_page_base_hi =
4725 U64_HI(fp->rx_sge_mapping);
4726 context->ustorm_st_context.common.sge_page_base_lo =
4727 U64_LO(fp->rx_sge_mapping);
4730 context->ustorm_ag_context.cdu_usage =
4731 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
4732 CDU_REGION_NUMBER_UCM_AG,
4733 ETH_CONNECTION_TYPE);
4735 context->xstorm_st_context.tx_bd_page_base_hi =
4736 U64_HI(fp->tx_desc_mapping);
4737 context->xstorm_st_context.tx_bd_page_base_lo =
4738 U64_LO(fp->tx_desc_mapping);
4739 context->xstorm_st_context.db_data_addr_hi =
4740 U64_HI(fp->tx_prods_mapping);
4741 context->xstorm_st_context.db_data_addr_lo =
4742 U64_LO(fp->tx_prods_mapping);
4743 context->xstorm_st_context.statistics_data = (cl_id |
4744 XSTORM_ETH_ST_CONTEXT_STATISTICS_ENABLE);
4745 context->cstorm_st_context.sb_index_number =
4746 C_SB_ETH_TX_CQ_INDEX;
4747 context->cstorm_st_context.status_block_id = sb_id;
4749 context->xstorm_ag_context.cdu_reserved =
4750 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
4751 CDU_REGION_NUMBER_XCM_AG,
4752 ETH_CONNECTION_TYPE);
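4753 /* Fill the TSTORM RSS indirection table, spreading the Rx queue client
4754 IDs across the table in round-robin order */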
4756 static void bnx2x_init_ind_table(struct bnx2x *bp)
4758 int func = BP_FUNC(bp);
4761 if (bp->multi_mode == ETH_RSS_MODE_DISABLED)
4765 "Initializing indirection table multi_mode %d\n", bp->multi_mode);
4766 for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++)
4767 REG_WR8(bp, BAR_TSTRORM_INTMEM +
4768 TSTORM_INDIRECTION_TABLE_OFFSET(func) + i,
4769 bp->fp->cl_id + (i % bp->num_rx_queues));
4772 static void bnx2x_set_client_config(struct bnx2x *bp)
4774 struct tstorm_eth_client_config tstorm_client = {0};
4775 int port = BP_PORT(bp);
4778 tstorm_client.mtu = bp->dev->mtu;
4779 tstorm_client.config_flags =
4780 (TSTORM_ETH_CLIENT_CONFIG_STATSITICS_ENABLE |
4781 TSTORM_ETH_CLIENT_CONFIG_E1HOV_REM_ENABLE);
4783 if (bp->rx_mode && bp->vlgrp && (bp->flags & HW_VLAN_RX_FLAG)) {
4784 tstorm_client.config_flags |=
4785 TSTORM_ETH_CLIENT_CONFIG_VLAN_REM_ENABLE;
4786 DP(NETIF_MSG_IFUP, "vlan removal enabled\n");
4790 if (bp->flags & TPA_ENABLE_FLAG) {
4791 tstorm_client.max_sges_for_packet =
4792 SGE_PAGE_ALIGN(tstorm_client.mtu) >> SGE_PAGE_SHIFT;
4793 tstorm_client.max_sges_for_packet =
4794 ((tstorm_client.max_sges_for_packet +
4795 PAGES_PER_SGE - 1) & (~(PAGES_PER_SGE - 1))) >>
4796 PAGES_PER_SGE_SHIFT;
4798 tstorm_client.config_flags |=
4799 TSTORM_ETH_CLIENT_CONFIG_ENABLE_SGE_RING;
4802 for_each_queue(bp, i) {
4803 tstorm_client.statistics_counter_id = bp->fp[i].cl_id;
4805 REG_WR(bp, BAR_TSTRORM_INTMEM +
4806 TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id),
4807 ((u32 *)&tstorm_client)[0]);
4808 REG_WR(bp, BAR_TSTRORM_INTMEM +
4809 TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id) + 4,
4810 ((u32 *)&tstorm_client)[1]);
4813 DP(BNX2X_MSG_OFF, "tstorm_client: 0x%08x 0x%08x\n",
4814 ((u32 *)&tstorm_client)[0], ((u32 *)&tstorm_client)[1]);
4817 static void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
4819 struct tstorm_eth_mac_filter_config tstorm_mac_filter = {0};
4820 int mode = bp->rx_mode;
4821 int mask = (1 << BP_L_ID(bp));
4822 int func = BP_FUNC(bp);
4825 DP(NETIF_MSG_IFUP, "rx mode %d mask 0x%x\n", mode, mask);
4828 case BNX2X_RX_MODE_NONE: /* no Rx */
4829 tstorm_mac_filter.ucast_drop_all = mask;
4830 tstorm_mac_filter.mcast_drop_all = mask;
4831 tstorm_mac_filter.bcast_drop_all = mask;
4834 case BNX2X_RX_MODE_NORMAL:
4835 tstorm_mac_filter.bcast_accept_all = mask;
4838 case BNX2X_RX_MODE_ALLMULTI:
4839 tstorm_mac_filter.mcast_accept_all = mask;
4840 tstorm_mac_filter.bcast_accept_all = mask;
4843 case BNX2X_RX_MODE_PROMISC:
4844 tstorm_mac_filter.ucast_accept_all = mask;
4845 tstorm_mac_filter.mcast_accept_all = mask;
4846 tstorm_mac_filter.bcast_accept_all = mask;
4850 BNX2X_ERR("BAD rx mode (%d)\n", mode);
4854 for (i = 0; i < sizeof(struct tstorm_eth_mac_filter_config)/4; i++) {
4855 REG_WR(bp, BAR_TSTRORM_INTMEM +
4856 TSTORM_MAC_FILTER_CONFIG_OFFSET(func) + i * 4,
4857 ((u32 *)&tstorm_mac_filter)[i]);
4859 /* DP(NETIF_MSG_IFUP, "tstorm_mac_filter[%d]: 0x%08x\n", i,
4860 ((u32 *)&tstorm_mac_filter)[i]); */
4863 if (mode != BNX2X_RX_MODE_NONE)
4864 bnx2x_set_client_config(bp);
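4865 /* Chip-wide internal memory init: advertise TPA existence to the TSTORM
4866 and zero the USTORM aggregation data area */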
4867 static void bnx2x_init_internal_common(struct bnx2x *bp)
4871 if (bp->flags & TPA_ENABLE_FLAG) {
4872 struct tstorm_eth_tpa_exist tpa = {0};
4876 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_TPA_EXIST_OFFSET,
4878 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_TPA_EXIST_OFFSET + 4,
4882 /* Zero this manually as its initialization is
4883 currently missing in the initTool */
4884 for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++)
4885 REG_WR(bp, BAR_USTRORM_INTMEM +
4886 USTORM_AGG_DATA_OFFSET + i * 4, 0);
4889 static void bnx2x_init_internal_port(struct bnx2x *bp)
4891 int port = BP_PORT(bp);
4893 REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
4894 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
4895 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
4896 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
4899 /* Calculates the sum of vn_min_rates.
4900 It's needed for further normalizing of the min_rates.
4902 Returns: the sum of vn_min_rates, or
4904 0 - if all the min_rates are 0.
4905 In the latter case the fairness algorithm should be deactivated.
4906 If not all min_rates are zero, those that are zero will be set to 1. */
4908 static void bnx2x_calc_vn_weight_sum(struct bnx2x *bp)
4911 int port = BP_PORT(bp);
4914 bp->vn_weight_sum = 0;
4915 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
4916 int func = 2*vn + port;
4918 SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
4919 u32 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
4920 FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
4922 /* Skip hidden vns */
4923 if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE)
4926 /* If min rate is zero - set it to 1 */
4928 vn_min_rate = DEF_MIN_RATE;
4932 bp->vn_weight_sum += vn_min_rate;
4935 /* ... only if all min rates are zero - disable fairness */
4937 bp->vn_weight_sum = 0;
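4938 /* Per-function internal memory init: RSS config, per-client statistics,
4939 CQE page bases, dropless-FC thresholds and rate shaping/fairness */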
4940 static void bnx2x_init_internal_func(struct bnx2x *bp)
4942 struct tstorm_eth_function_common_config tstorm_config = {0};
4943 struct stats_indication_flags stats_flags = {0};
4944 int port = BP_PORT(bp);
4945 int func = BP_FUNC(bp);
4951 tstorm_config.config_flags = MULTI_FLAGS(bp);
4952 tstorm_config.rss_result_mask = MULTI_MASK;
4955 tstorm_config.config_flags |=
4956 TSTORM_ETH_FUNCTION_COMMON_CONFIG_E1HOV_IN_CAM;
4958 tstorm_config.leading_client_id = BP_L_ID(bp);
4960 REG_WR(bp, BAR_TSTRORM_INTMEM +
4961 TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(func),
4962 (*(u32 *)&tstorm_config));
4964 bp->rx_mode = BNX2X_RX_MODE_NONE; /* no rx until link is up */
4965 bnx2x_set_storm_rx_mode(bp);
4967 for_each_queue(bp, i) {
4968 u8 cl_id = bp->fp[i].cl_id;
4970 /* reset xstorm per client statistics */
4971 offset = BAR_XSTRORM_INTMEM +
4972 XSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
4974 j < sizeof(struct xstorm_per_client_stats) / 4; j++)
4975 REG_WR(bp, offset + j*4, 0);
4977 /* reset tstorm per client statistics */
4978 offset = BAR_TSTRORM_INTMEM +
4979 TSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
4981 j < sizeof(struct tstorm_per_client_stats) / 4; j++)
4982 REG_WR(bp, offset + j*4, 0);
4984 /* reset ustorm per client statistics */
4985 offset = BAR_USTRORM_INTMEM +
4986 USTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
4988 j < sizeof(struct ustorm_per_client_stats) / 4; j++)
4989 REG_WR(bp, offset + j*4, 0);
4992 /* Init statistics related context */
4993 stats_flags.collect_eth = 1;
4995 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func),
4996 ((u32 *)&stats_flags)[0]);
4997 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func) + 4,
4998 ((u32 *)&stats_flags)[1]);
5000 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func),
5001 ((u32 *)&stats_flags)[0]);
5002 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func) + 4,
5003 ((u32 *)&stats_flags)[1]);
5005 REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(func),
5006 ((u32 *)&stats_flags)[0]);
5007 REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(func) + 4,
5008 ((u32 *)&stats_flags)[1]);
5010 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func),
5011 ((u32 *)&stats_flags)[0]);
5012 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func) + 4,
5013 ((u32 *)&stats_flags)[1]);
5015 REG_WR(bp, BAR_XSTRORM_INTMEM +
5016 XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
5017 U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
5018 REG_WR(bp, BAR_XSTRORM_INTMEM +
5019 XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
5020 U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
5022 REG_WR(bp, BAR_TSTRORM_INTMEM +
5023 TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
5024 U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
5025 REG_WR(bp, BAR_TSTRORM_INTMEM +
5026 TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
5027 U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
5029 REG_WR(bp, BAR_USTRORM_INTMEM +
5030 USTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
5031 U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
5032 REG_WR(bp, BAR_USTRORM_INTMEM +
5033 USTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
5034 U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
5036 if (CHIP_IS_E1H(bp)) {
5037 REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNCTION_MODE_OFFSET,
5039 REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNCTION_MODE_OFFSET,
5041 REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNCTION_MODE_OFFSET,
5043 REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNCTION_MODE_OFFSET,
5046 REG_WR16(bp, BAR_XSTRORM_INTMEM + XSTORM_E1HOV_OFFSET(func),
5050 /* Init CQ ring mapping and aggregation size, the FW limit is 8 frags */
5052 min((u32)(min((u32)8, (u32)MAX_SKB_FRAGS) *
5053 SGE_PAGE_SIZE * PAGES_PER_SGE),
5055 for_each_rx_queue(bp, i) {
5056 struct bnx2x_fastpath *fp = &bp->fp[i];
5058 REG_WR(bp, BAR_USTRORM_INTMEM +
5059 USTORM_CQE_PAGE_BASE_OFFSET(port, fp->cl_id),
5060 U64_LO(fp->rx_comp_mapping));
5061 REG_WR(bp, BAR_USTRORM_INTMEM +
5062 USTORM_CQE_PAGE_BASE_OFFSET(port, fp->cl_id) + 4,
5063 U64_HI(fp->rx_comp_mapping));
5065 REG_WR16(bp, BAR_USTRORM_INTMEM +
5066 USTORM_MAX_AGG_SIZE_OFFSET(port, fp->cl_id),
5070 /* dropless flow control */
5071 if (CHIP_IS_E1H(bp)) {
5072 struct ustorm_eth_rx_pause_data_e1h rx_pause = {0};
5074 rx_pause.bd_thr_low = 250;
5075 rx_pause.cqe_thr_low = 250;
5077 rx_pause.sge_thr_low = 0;
5078 rx_pause.bd_thr_high = 350;
5079 rx_pause.cqe_thr_high = 350;
5080 rx_pause.sge_thr_high = 0;
5082 for_each_rx_queue(bp, i) {
5083 struct bnx2x_fastpath *fp = &bp->fp[i];
5085 if (!fp->disable_tpa) {
5086 rx_pause.sge_thr_low = 150;
5087 rx_pause.sge_thr_high = 250;
5091 offset = BAR_USTRORM_INTMEM +
5092 USTORM_ETH_RING_PAUSE_DATA_OFFSET(port,
5095 j < sizeof(struct ustorm_eth_rx_pause_data_e1h)/4;
5097 REG_WR(bp, offset + j*4,
5098 ((u32 *)&rx_pause)[j]);
5102 memset(&(bp->cmng), 0, sizeof(struct cmng_struct_per_port));
5104 /* Init rate shaping and fairness contexts */
5108 /* During init there is no active link;
5109 until the link is up, set the link rate to 10Gbps */
5110 bp->link_vars.line_speed = SPEED_10000;
5111 bnx2x_init_port_minmax(bp);
5113 bnx2x_calc_vn_weight_sum(bp);
5115 for (vn = VN_0; vn < E1HVN_MAX; vn++)
5116 bnx2x_init_vn_minmax(bp, 2*vn + port);
5118 /* Enable rate shaping and fairness */
5119 bp->cmng.flags.cmng_enables =
5120 CMNG_FLAGS_PER_PORT_RATE_SHAPING_VN;
5121 if (bp->vn_weight_sum)
5122 bp->cmng.flags.cmng_enables |=
5123 CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
5125 DP(NETIF_MSG_IFUP, "All MIN values are zeroes"
5126 " fairness will be disabled\n");
5128 /* rate shaping and fairness are disabled */
5130 "single function mode minmax will be disabled\n");
5134 /* Store it to internal memory */
5136 for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++)
5137 REG_WR(bp, BAR_XSTRORM_INTMEM +
5138 XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i * 4,
5139 ((u32 *)(&bp->cmng))[i]);
5142 static void bnx2x_init_internal(struct bnx2x *bp, u32 load_code)
5144 switch (load_code) {
5145 case FW_MSG_CODE_DRV_LOAD_COMMON:
5146 bnx2x_init_internal_common(bp);
5149 case FW_MSG_CODE_DRV_LOAD_PORT:
5150 bnx2x_init_internal_port(bp);
5153 case FW_MSG_CODE_DRV_LOAD_FUNCTION:
5154 bnx2x_init_internal_func(bp);
5158 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
5163 static void bnx2x_nic_init(struct bnx2x *bp, u32 load_code)
5167 for_each_queue(bp, i) {
5168 struct bnx2x_fastpath *fp = &bp->fp[i];
5171 fp->state = BNX2X_FP_STATE_CLOSED;
5173 fp->cl_id = BP_L_ID(bp) + i;
5174 fp->sb_id = fp->cl_id;
5176 "queue[%d]: bnx2x_init_sb(%p,%p) cl_id %d sb %d\n",
5177 i, bp, fp->status_blk, fp->cl_id, fp->sb_id);
5178 bnx2x_init_sb(bp, fp->status_blk, fp->status_blk_mapping,
5180 bnx2x_update_fpsb_idx(fp);
5183 /* ensure status block indices were read */
5187 bnx2x_init_def_sb(bp, bp->def_status_blk, bp->def_status_blk_mapping,
5189 bnx2x_update_dsb_idx(bp);
5190 bnx2x_update_coalesce(bp);
5191 bnx2x_init_rx_rings(bp);
5192 bnx2x_init_tx_ring(bp);
5193 bnx2x_init_sp_ring(bp);
5194 bnx2x_init_context(bp);
5195 bnx2x_init_internal(bp, load_code);
5196 bnx2x_init_ind_table(bp);
5197 bnx2x_stats_init(bp);
5199 /* At this point, we are ready for interrupts */
5200 atomic_set(&bp->intr_sem, 0);
5202 /* flush all before enabling interrupts */
5206 bnx2x_int_enable(bp);
5208 /* Check for SPIO5 */
5209 bnx2x_attn_int_deasserted0(bp,
5210 REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + BP_PORT(bp)*4) &
5211 AEU_INPUTS_ATTN_BITS_SPIO5);
5214 /* end of nic init */
5217 * gzip service functions
5220 static int bnx2x_gunzip_init(struct bnx2x *bp)
5222 bp->gunzip_buf = pci_alloc_consistent(bp->pdev, FW_BUF_SIZE,
5223 &bp->gunzip_mapping);
5224 if (bp->gunzip_buf == NULL)
5227 bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL);
5228 if (bp->strm == NULL)
5231 bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(),
5233 if (bp->strm->workspace == NULL)
5243 pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
5244 bp->gunzip_mapping);
5245 bp->gunzip_buf = NULL;
5248 printk(KERN_ERR PFX "%s: Cannot allocate firmware buffer for"
5249 " un-compression\n", bp->dev->name);
5253 static void bnx2x_gunzip_end(struct bnx2x *bp)
5255 kfree(bp->strm->workspace);
5260 if (bp->gunzip_buf) {
5261 pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
5262 bp->gunzip_mapping);
5263 bp->gunzip_buf = NULL;
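5264 /* Inflate a gzip-wrapped firmware image into bp->gunzip_buf: validate
5265 the gzip magic and deflate method, skip the optional file-name field
5266 and run zlib_inflate() with a raw (-MAX_WBITS) window */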
5267 static int bnx2x_gunzip(struct bnx2x *bp, const u8 *zbuf, int len)
5271 /* check gzip header */
5272 if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED)) {
5273 BNX2X_ERR("Bad gzip header\n");
5280 n = 10; /* skip the fixed-size gzip header */
5281 if (zbuf[3] & FNAME)
5282 while ((zbuf[n++] != 0) && (n < len));
5284 bp->strm->next_in = (typeof(bp->strm->next_in))zbuf + n;
5285 bp->strm->avail_in = len - n;
5286 bp->strm->next_out = bp->gunzip_buf;
5287 bp->strm->avail_out = FW_BUF_SIZE;
5289 rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
5293 rc = zlib_inflate(bp->strm, Z_FINISH);
5294 if ((rc != Z_OK) && (rc != Z_STREAM_END))
5295 printk(KERN_ERR PFX "%s: Firmware decompression error: %s\n",
5296 bp->dev->name, bp->strm->msg);
5298 bp->gunzip_outlen = (FW_BUF_SIZE - bp->strm->avail_out);
5299 if (bp->gunzip_outlen & 0x3)
5300 printk(KERN_ERR PFX "%s: Firmware decompression error:"
5301 " gunzip_outlen (%d) not aligned\n",
5302 bp->dev->name, bp->gunzip_outlen);
5303 bp->gunzip_outlen >>= 2;
5305 zlib_inflateEnd(bp->strm);
5307 if (rc == Z_STREAM_END)
5313 /* nic load/unload */
5316 * General service functions
5319 /* send a NIG loopback debug packet */
5320 static void bnx2x_lb_pckt(struct bnx2x *bp)
5324 /* Ethernet source and destination addresses */
5325 wb_write[0] = 0x55555555;
5326 wb_write[1] = 0x55555555;
5327 wb_write[2] = 0x20; /* SOP */
5328 REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
5330 /* NON-IP protocol */
5331 wb_write[0] = 0x09000000;
5332 wb_write[1] = 0x55555555;
5333 wb_write[2] = 0x10; /* EOP, eop_bvalid = 0 */
5334 REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
5337 /* Some of the internal memories are not directly readable from the
5338 * driver; to test them we send debug packets
5339 */
5341 static int bnx2x_int_mem_test(struct bnx2x *bp)
5347 if (CHIP_REV_IS_FPGA(bp))
5349 else if (CHIP_REV_IS_EMUL(bp))
5354 DP(NETIF_MSG_HW, "start part1\n");
5356 /* Disable inputs of parser neighbor blocks */
5357 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
5358 REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
5359 REG_WR(bp, CFC_REG_DEBUG0, 0x1);
5360 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
5362 /* Write 0 to parser credits for CFC search request */
5363 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
5365 /* send Ethernet packet */
5368 /* TODO: should the NIG statistics be reset here? */
5369 /* Wait until NIG register shows 1 packet of size 0x10 */
5370 count = 1000 * factor;
5373 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5374 val = *bnx2x_sp(bp, wb_data[0]);
5382 BNX2X_ERR("NIG timeout val = 0x%x\n", val);
5386 /* Wait until PRS register shows 1 packet */
5387 count = 1000 * factor;
5389 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5397 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
5401 /* Reset and init BRB, PRS */
5402 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
5404 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
5406 bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
5407 bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
5409 DP(NETIF_MSG_HW, "part2\n");
5411 /* Disable inputs of parser neighbor blocks */
5412 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
5413 REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
5414 REG_WR(bp, CFC_REG_DEBUG0, 0x1);
5415 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
5417 /* Write 0 to parser credits for CFC search request */
5418 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
5420 /* send 10 Ethernet packets */
5421 for (i = 0; i < 10; i++)
5424 /* Wait until NIG register shows 10 + 1
5425 packets of size 11*0x10 = 0xb0 */
5426 count = 1000 * factor;
5429 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5430 val = *bnx2x_sp(bp, wb_data[0]);
5438 BNX2X_ERR("NIG timeout val = 0x%x\n", val);
5442 /* Wait until PRS register shows 2 packets */
5443 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5445 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
5447 /* Write 1 to parser credits for CFC search request */
5448 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x1);
5450 /* Wait until PRS register shows 3 packets */
5451 msleep(10 * factor);
5452 /* Wait until NIG register shows 1 packet of size 0x10 */
5453 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5455 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
5457 /* clear NIG EOP FIFO */
5458 for (i = 0; i < 11; i++)
5459 REG_RD(bp, NIG_REG_INGRESS_EOP_LB_FIFO);
5460 val = REG_RD(bp, NIG_REG_INGRESS_EOP_LB_EMPTY);
5462 BNX2X_ERR("clear of NIG failed\n");
5466 /* Reset and init BRB, PRS, NIG */
5467 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
5469 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
5471 bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
5472 bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
5475 REG_WR(bp, PRS_REG_NIC_MODE, 1);
5478 /* Enable inputs of parser neighbor blocks */
5479 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x7fffffff);
5480 REG_WR(bp, TCM_REG_PRS_IFEN, 0x1);
5481 REG_WR(bp, CFC_REG_DEBUG0, 0x0);
5482 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x1);
5484 DP(NETIF_MSG_HW, "done\n");
5489 static void enable_blocks_attention(struct bnx2x *bp)
5491 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
5492 REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0);
5493 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
5494 REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
5495 REG_WR(bp, QM_REG_QM_INT_MASK, 0);
5496 REG_WR(bp, TM_REG_TM_INT_MASK, 0);
5497 REG_WR(bp, XSDM_REG_XSDM_INT_MASK_0, 0);
5498 REG_WR(bp, XSDM_REG_XSDM_INT_MASK_1, 0);
5499 REG_WR(bp, XCM_REG_XCM_INT_MASK, 0);
5500 /* REG_WR(bp, XSEM_REG_XSEM_INT_MASK_0, 0); */
5501 /* REG_WR(bp, XSEM_REG_XSEM_INT_MASK_1, 0); */
5502 REG_WR(bp, USDM_REG_USDM_INT_MASK_0, 0);
5503 REG_WR(bp, USDM_REG_USDM_INT_MASK_1, 0);
5504 REG_WR(bp, UCM_REG_UCM_INT_MASK, 0);
5505 /* REG_WR(bp, USEM_REG_USEM_INT_MASK_0, 0); */
5506 /* REG_WR(bp, USEM_REG_USEM_INT_MASK_1, 0); */
5507 REG_WR(bp, GRCBASE_UPB + PB_REG_PB_INT_MASK, 0);
5508 REG_WR(bp, CSDM_REG_CSDM_INT_MASK_0, 0);
5509 REG_WR(bp, CSDM_REG_CSDM_INT_MASK_1, 0);
5510 REG_WR(bp, CCM_REG_CCM_INT_MASK, 0);
5511 /* REG_WR(bp, CSEM_REG_CSEM_INT_MASK_0, 0); */
5512 /* REG_WR(bp, CSEM_REG_CSEM_INT_MASK_1, 0); */
5513 if (CHIP_REV_IS_FPGA(bp))
5514 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x580000);
5516 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x480000);
5517 REG_WR(bp, TSDM_REG_TSDM_INT_MASK_0, 0);
5518 REG_WR(bp, TSDM_REG_TSDM_INT_MASK_1, 0);
5519 REG_WR(bp, TCM_REG_TCM_INT_MASK, 0);
5520 /* REG_WR(bp, TSEM_REG_TSEM_INT_MASK_0, 0); */
5521 /* REG_WR(bp, TSEM_REG_TSEM_INT_MASK_1, 0); */
5522 REG_WR(bp, CDU_REG_CDU_INT_MASK, 0);
5523 REG_WR(bp, DMAE_REG_DMAE_INT_MASK, 0);
5524 /* REG_WR(bp, MISC_REG_MISC_INT_MASK, 0); */
5525 REG_WR(bp, PBF_REG_PBF_INT_MASK, 0x18); /* bits 3,4 masked */
5529 static void bnx2x_reset_common(struct bnx2x *bp)
5532 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
5534 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, 0x1403);
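5535 /* Configure SPIO5 as the fan failure input on boards that require it
5536 and route it to the IGU as an attention signal */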
5538 static void bnx2x_setup_fan_failure_detection(struct bnx2x *bp)
5544 val = SHMEM_RD(bp, dev_info.shared_hw_config.config2) &
5545 SHARED_HW_CFG_FAN_FAILURE_MASK;
5547 if (val == SHARED_HW_CFG_FAN_FAILURE_ENABLED)
5551 * The fan failure mechanism is usually related to the PHY type since
5552 * the power consumption of the board is affected by the PHY. Currently,
5553 * fan is required for most designs with SFX7101, BCM8727 and BCM8481.
5555 else if (val == SHARED_HW_CFG_FAN_FAILURE_PHY_TYPE)
5556 for (port = PORT_0; port < PORT_MAX; port++) {
5558 SHMEM_RD(bp, dev_info.port_hw_config[port].
5559 external_phy_config) &
5560 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
5563 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101) ||
5565 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481));
5568 DP(NETIF_MSG_HW, "fan detection setting: %d\n", is_required);
5570 if (is_required == 0)
5573 /* Fan failure is indicated by SPIO 5 */
5574 bnx2x_set_spio(bp, MISC_REGISTERS_SPIO_5,
5575 MISC_REGISTERS_SPIO_INPUT_HI_Z);
5577 /* set to active low mode */
5578 val = REG_RD(bp, MISC_REG_SPIO_INT);
5579 val |= ((1 << MISC_REGISTERS_SPIO_5) <<
5580 MISC_REGISTERS_SPIO_INT_OLD_SET_POS);
5581 REG_WR(bp, MISC_REG_SPIO_INT, val);
5583 /* enable interrupt to signal the IGU */
5584 val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN);
5585 val |= (1 << MISC_REGISTERS_SPIO_5);
5586 REG_WR(bp, MISC_REG_SPIO_EVENT_EN, val);
5589 static int bnx2x_init_common(struct bnx2x *bp)
5593 DP(BNX2X_MSG_MCP, "starting common init func %d\n", BP_FUNC(bp));
5595 bnx2x_reset_common(bp);
5596 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0xffffffff);
5597 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, 0xfffc);
5599 bnx2x_init_block(bp, MISC_BLOCK, COMMON_STAGE);
5600 if (CHIP_IS_E1H(bp))
5601 REG_WR(bp, MISC_REG_E1HMF_MODE, IS_E1HMF(bp));
5603 REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x100);
5605 REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x0);
5607 bnx2x_init_block(bp, PXP_BLOCK, COMMON_STAGE);
5608 if (CHIP_IS_E1(bp)) {
5609 /* enable HW interrupt from PXP on USDM overflow
5610 bit 16 on INT_MASK_0 */
5611 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
5614 bnx2x_init_block(bp, PXP2_BLOCK, COMMON_STAGE);
5618 REG_WR(bp, PXP2_REG_RQ_QM_ENDIAN_M, 1);
5619 REG_WR(bp, PXP2_REG_RQ_TM_ENDIAN_M, 1);
5620 REG_WR(bp, PXP2_REG_RQ_SRC_ENDIAN_M, 1);
5621 REG_WR(bp, PXP2_REG_RQ_CDU_ENDIAN_M, 1);
5622 REG_WR(bp, PXP2_REG_RQ_DBG_ENDIAN_M, 1);
5623 /* make sure this value is 0 */
5624 REG_WR(bp, PXP2_REG_RQ_HC_ENDIAN_M, 0);
5626 /* REG_WR(bp, PXP2_REG_RD_PBF_SWAP_MODE, 1); */
5627 REG_WR(bp, PXP2_REG_RD_QM_SWAP_MODE, 1);
5628 REG_WR(bp, PXP2_REG_RD_TM_SWAP_MODE, 1);
5629 REG_WR(bp, PXP2_REG_RD_SRC_SWAP_MODE, 1);
5630 REG_WR(bp, PXP2_REG_RD_CDURD_SWAP_MODE, 1);
5633 REG_WR(bp, PXP2_REG_RQ_CDU_P_SIZE, 2);
5635 REG_WR(bp, PXP2_REG_RQ_TM_P_SIZE, 5);
5636 REG_WR(bp, PXP2_REG_RQ_QM_P_SIZE, 5);
5637 REG_WR(bp, PXP2_REG_RQ_SRC_P_SIZE, 5);
5640 if (CHIP_REV_IS_FPGA(bp) && CHIP_IS_E1H(bp))
5641 REG_WR(bp, PXP2_REG_PGL_TAGS_LIMIT, 0x1);
5643 /* let the HW do its magic ... */
5645 /* finish PXP init */
5646 val = REG_RD(bp, PXP2_REG_RQ_CFG_DONE);
5648 BNX2X_ERR("PXP2 CFG failed\n");
5651 val = REG_RD(bp, PXP2_REG_RD_INIT_DONE);
5653 BNX2X_ERR("PXP2 RD_INIT failed\n");
5657 REG_WR(bp, PXP2_REG_RQ_DISABLE_INPUTS, 0);
5658 REG_WR(bp, PXP2_REG_RD_DISABLE_INPUTS, 0);
5660 bnx2x_init_block(bp, DMAE_BLOCK, COMMON_STAGE);
5662 /* clean the DMAE memory */
5664 bnx2x_init_fill(bp, TSEM_REG_PRAM, 0, 8);
5666 bnx2x_init_block(bp, TCM_BLOCK, COMMON_STAGE);
5667 bnx2x_init_block(bp, UCM_BLOCK, COMMON_STAGE);
5668 bnx2x_init_block(bp, CCM_BLOCK, COMMON_STAGE);
5669 bnx2x_init_block(bp, XCM_BLOCK, COMMON_STAGE);
5671 bnx2x_read_dmae(bp, XSEM_REG_PASSIVE_BUFFER, 3);
5672 bnx2x_read_dmae(bp, CSEM_REG_PASSIVE_BUFFER, 3);
5673 bnx2x_read_dmae(bp, TSEM_REG_PASSIVE_BUFFER, 3);
5674 bnx2x_read_dmae(bp, USEM_REG_PASSIVE_BUFFER, 3);
5676 bnx2x_init_block(bp, QM_BLOCK, COMMON_STAGE);
5677 /* soft reset pulse */
5678 REG_WR(bp, QM_REG_SOFT_RESET, 1);
5679 REG_WR(bp, QM_REG_SOFT_RESET, 0);
5682 bnx2x_init_block(bp, TIMERS_BLOCK, COMMON_STAGE);
5685 bnx2x_init_block(bp, DQ_BLOCK, COMMON_STAGE);
5686 REG_WR(bp, DORQ_REG_DPM_CID_OFST, BCM_PAGE_SHIFT);
5687 if (!CHIP_REV_IS_SLOW(bp)) {
5688 /* enable hw interrupt from doorbell Q */
5689 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
5692 bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
5693 bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
5694 REG_WR(bp, PRS_REG_A_PRSU_20, 0xf);
5696 REG_WR(bp, PRS_REG_NIC_MODE, 1);
5697 if (CHIP_IS_E1H(bp))
5698 REG_WR(bp, PRS_REG_E1HOV_MODE, IS_E1HMF(bp));
5700 bnx2x_init_block(bp, TSDM_BLOCK, COMMON_STAGE);
5701 bnx2x_init_block(bp, CSDM_BLOCK, COMMON_STAGE);
5702 bnx2x_init_block(bp, USDM_BLOCK, COMMON_STAGE);
5703 bnx2x_init_block(bp, XSDM_BLOCK, COMMON_STAGE);
5705 bnx2x_init_fill(bp, TSTORM_INTMEM_ADDR, 0, STORM_INTMEM_SIZE(bp));
5706 bnx2x_init_fill(bp, USTORM_INTMEM_ADDR, 0, STORM_INTMEM_SIZE(bp));
5707 bnx2x_init_fill(bp, CSTORM_INTMEM_ADDR, 0, STORM_INTMEM_SIZE(bp));
5708 bnx2x_init_fill(bp, XSTORM_INTMEM_ADDR, 0, STORM_INTMEM_SIZE(bp));
5710 bnx2x_init_block(bp, TSEM_BLOCK, COMMON_STAGE);
5711 bnx2x_init_block(bp, USEM_BLOCK, COMMON_STAGE);
5712 bnx2x_init_block(bp, CSEM_BLOCK, COMMON_STAGE);
5713 bnx2x_init_block(bp, XSEM_BLOCK, COMMON_STAGE);
5716 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
5718 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
5721 bnx2x_init_block(bp, UPB_BLOCK, COMMON_STAGE);
5722 bnx2x_init_block(bp, XPB_BLOCK, COMMON_STAGE);
5723 bnx2x_init_block(bp, PBF_BLOCK, COMMON_STAGE);
5725 REG_WR(bp, SRC_REG_SOFT_RST, 1);
5726 for (i = SRC_REG_KEYRSS0_0; i <= SRC_REG_KEYRSS1_9; i += 4) {
5727 REG_WR(bp, i, 0xc0cac01a);
5728 /* TODO: replace with something meaningful */
5730 bnx2x_init_block(bp, SRCH_BLOCK, COMMON_STAGE);
5731 REG_WR(bp, SRC_REG_SOFT_RST, 0);
5733 if (sizeof(union cdu_context) != 1024)
5734 /* we currently assume that a context is 1024 bytes */
5735 printk(KERN_ALERT PFX "please adjust the size of"
5736 " cdu_context(%ld)\n", (long)sizeof(union cdu_context));
5738 bnx2x_init_block(bp, CDU_BLOCK, COMMON_STAGE);
5739 val = (4 << 24) + (0 << 12) + 1024;
5740 REG_WR(bp, CDU_REG_CDU_GLOBAL_PARAMS, val);
5741 if (CHIP_IS_E1(bp)) {
5742 /* !!! fix PXP client credit until excel update */
5743 REG_WR(bp, CDU_REG_CDU_DEBUG, 0x264);
5744 REG_WR(bp, CDU_REG_CDU_DEBUG, 0);
5747 bnx2x_init_block(bp, CFC_BLOCK, COMMON_STAGE);
5748 REG_WR(bp, CFC_REG_INIT_REG, 0x7FF);
5749 /* enable context validation interrupt from CFC */
5750 REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
5752 /* set the thresholds to prevent CFC/CDU race */
5753 REG_WR(bp, CFC_REG_DEBUG0, 0x20020000);
5755 bnx2x_init_block(bp, HC_BLOCK, COMMON_STAGE);
5756 bnx2x_init_block(bp, MISC_AEU_BLOCK, COMMON_STAGE);
5758 /* PXPCS COMMON comes here */
5759 bnx2x_init_block(bp, PXPCS_BLOCK, COMMON_STAGE);
5760 /* Reset PCIE errors for debug */
5761 REG_WR(bp, 0x2814, 0xffffffff);
5762 REG_WR(bp, 0x3820, 0xffffffff);
5764 /* EMAC0 COMMON comes here */
5765 bnx2x_init_block(bp, EMAC0_BLOCK, COMMON_STAGE);
5766 /* EMAC1 COMMON comes here */
5767 bnx2x_init_block(bp, EMAC1_BLOCK, COMMON_STAGE);
5768 /* DBU COMMON comes here */
5769 bnx2x_init_block(bp, DBU_BLOCK, COMMON_STAGE);
5770 /* DBG COMMON comes here */
5771 bnx2x_init_block(bp, DBG_BLOCK, COMMON_STAGE);
5773 bnx2x_init_block(bp, NIG_BLOCK, COMMON_STAGE);
5774 if (CHIP_IS_E1H(bp)) {
5775 REG_WR(bp, NIG_REG_LLH_MF_MODE, IS_E1HMF(bp));
5776 REG_WR(bp, NIG_REG_LLH_E1HOV_MODE, IS_E1HMF(bp));
5779 if (CHIP_REV_IS_SLOW(bp))
5782 /* finish CFC init */
5783 val = reg_poll(bp, CFC_REG_LL_INIT_DONE, 1, 100, 10);
5785 BNX2X_ERR("CFC LL_INIT failed\n");
5788 val = reg_poll(bp, CFC_REG_AC_INIT_DONE, 1, 100, 10);
5790 BNX2X_ERR("CFC AC_INIT failed\n");
5793 val = reg_poll(bp, CFC_REG_CAM_INIT_DONE, 1, 100, 10);
5795 BNX2X_ERR("CFC CAM_INIT failed\n");
5798 REG_WR(bp, CFC_REG_DEBUG0, 0);
5800 /* read the NIG statistic
5801 to see if this is our first bring-up since power-up */
5802 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5803 val = *bnx2x_sp(bp, wb_data[0]);
5805 /* do internal memory self test */
5806 if ((CHIP_IS_E1(bp)) && (val == 0) && bnx2x_int_mem_test(bp)) {
5807 BNX2X_ERR("internal mem self test failed\n");
5811 switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
5812 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
5813 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
5814 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
5815 bp->port.need_hw_lock = 1;
5822 bnx2x_setup_fan_failure_detection(bp);
5824 /* clear PXP2 attentions */
5825 REG_RD(bp, PXP2_REG_PXP2_INT_STS_CLR_0);
5827 enable_blocks_attention(bp);
5829 if (!BP_NOMCP(bp)) {
5830 bnx2x_acquire_phy_lock(bp);
5831 bnx2x_common_init_phy(bp, bp->common.shmem_base);
5832 bnx2x_release_phy_lock(bp);
5834 BNX2X_ERR("Bootcode is missing - can not initialize link\n");
5839 static int bnx2x_init_port(struct bnx2x *bp)
5841 int port = BP_PORT(bp);
5842 int init_stage = port ? PORT1_STAGE : PORT0_STAGE;
5846 DP(BNX2X_MSG_MCP, "starting port init port %x\n", port);
5848 REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
5850 /* Port PXP comes here */
5851 bnx2x_init_block(bp, PXP_BLOCK, init_stage);
5852 /* Port PXP2 comes here */
5853 bnx2x_init_block(bp, PXP2_BLOCK, init_stage);
5858 wb_write[0] = ONCHIP_ADDR1(bp->timers_mapping);
5859 wb_write[1] = ONCHIP_ADDR2(bp->timers_mapping);
5860 REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
5861 REG_WR(bp, PXP2_REG_PSWRQ_TM0_L2P + func*4, PXP_ONE_ILT(i));
5866 wb_write[0] = ONCHIP_ADDR1(bp->qm_mapping);
5867 wb_write[1] = ONCHIP_ADDR2(bp->qm_mapping);
5868 REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
5869 REG_WR(bp, PXP2_REG_PSWRQ_QM0_L2P + func*4, PXP_ONE_ILT(i));
5874 wb_write[0] = ONCHIP_ADDR1(bp->t1_mapping);
5875 wb_write[1] = ONCHIP_ADDR2(bp->t1_mapping);
5876 REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
5877 REG_WR(bp, PXP2_REG_PSWRQ_SRC0_L2P + func*4, PXP_ONE_ILT(i));
5879 /* Port CMs come here */
5880 bnx2x_init_block(bp, XCM_BLOCK, init_stage);
5882 /* Port QM comes here */
5884 REG_WR(bp, TM_REG_LIN0_SCAN_TIME + func*4, 1024/64*20);
5885 REG_WR(bp, TM_REG_LIN0_MAX_ACTIVE_CID + func*4, 31);
5887 bnx2x_init_block(bp, TIMERS_BLOCK, init_stage);
5889 /* Port DQ comes here */
5890 bnx2x_init_block(bp, DQ_BLOCK, init_stage);
5892 bnx2x_init_block(bp, BRB1_BLOCK, init_stage);
5893 if (CHIP_REV_IS_SLOW(bp) && !CHIP_IS_E1H(bp)) {
5894 /* no pause for emulation and FPGA */
5899 low = ((bp->flags & ONE_PORT_FLAG) ? 160 : 246);
5900 else if (bp->dev->mtu > 4096) {
5901 if (bp->flags & ONE_PORT_FLAG)
5905 /* (24*1024 + val*4)/256 */
5906 low = 96 + (val/64) + ((val % 64) ? 1 : 0);
5909 low = ((bp->flags & ONE_PORT_FLAG) ? 80 : 160);
5910 high = low + 56; /* 14*1024/256 */
5912 REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_0 + port*4, low);
5913 REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_0 + port*4, high);
5916 /* Port PRS comes here */
5917 bnx2x_init_block(bp, PRS_BLOCK, init_stage);
5918 /* Port TSDM comes here */
5919 bnx2x_init_block(bp, TSDM_BLOCK, init_stage);
5920 /* Port CSDM comes here */
5921 bnx2x_init_block(bp, CSDM_BLOCK, init_stage);
5922 /* Port USDM comes here */
5923 bnx2x_init_block(bp, USDM_BLOCK, init_stage);
5924 /* Port XSDM comes here */
5925 bnx2x_init_block(bp, XSDM_BLOCK, init_stage);
5927 bnx2x_init_block(bp, TSEM_BLOCK, init_stage);
5928 bnx2x_init_block(bp, USEM_BLOCK, init_stage);
5929 bnx2x_init_block(bp, CSEM_BLOCK, init_stage);
5930 bnx2x_init_block(bp, XSEM_BLOCK, init_stage);
5932 /* Port UPB comes here */
5933 bnx2x_init_block(bp, UPB_BLOCK, init_stage);
5934 /* Port XPB comes here */
5935 bnx2x_init_block(bp, XPB_BLOCK, init_stage);
5937 bnx2x_init_block(bp, PBF_BLOCK, init_stage);
5939 /* configure PBF to work without PAUSE, MTU 9000 */
5940 REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 0);
5942 /* update threshold */
5943 REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, (9040/16));
5944 /* update init credit */
5945 REG_WR(bp, PBF_REG_P0_INIT_CRD + port*4, (9040/16) + 553 - 22);
5948 REG_WR(bp, PBF_REG_INIT_P0 + port*4, 1);
5950 REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0);
5953 /* tell the searcher where the T2 table is */
5954 REG_WR(bp, SRC_REG_COUNTFREE0 + func*4, 16*1024/64);
5956 wb_write[0] = U64_LO(bp->t2_mapping);
5957 wb_write[1] = U64_HI(bp->t2_mapping);
5958 REG_WR_DMAE(bp, SRC_REG_FIRSTFREE0 + func*4, wb_write, 2);
5959 wb_write[0] = U64_LO((u64)bp->t2_mapping + 16*1024 - 64);
5960 wb_write[1] = U64_HI((u64)bp->t2_mapping + 16*1024 - 64);
5961 REG_WR_DMAE(bp, SRC_REG_LASTFREE0 + func*4, wb_write, 2);
5963 REG_WR(bp, SRC_REG_NUMBER_HASH_BITS0 + func*4, 10);
5964 /* Port SRCH comes here */
5966 /* Port CDU comes here */
5967 bnx2x_init_block(bp, CDU_BLOCK, init_stage);
5968 /* Port CFC comes here */
5969 bnx2x_init_block(bp, CFC_BLOCK, init_stage);
5971 if (CHIP_IS_E1(bp)) {
5972 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
5973 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
5975 bnx2x_init_block(bp, HC_BLOCK, init_stage);
5977 bnx2x_init_block(bp, MISC_AEU_BLOCK, init_stage);
5978 /* init aeu_mask_attn_func_0/1:
5979 * - SF mode: bits 3-7 are masked. only bits 0-2 are in use
5980 * - MF mode: bit 3 is masked. bits 0-2 are in use as in SF
5981 * bits 4-7 are used for "per vn group attention" */
5982 REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4,
5983 (IS_E1HMF(bp) ? 0xF7 : 0x7));
5985 /* Port PXPCS comes here */
5986 bnx2x_init_block(bp, PXPCS_BLOCK, init_stage);
5987 /* Port EMAC0 comes here */
5988 bnx2x_init_block(bp, EMAC0_BLOCK, init_stage);
5989 /* Port EMAC1 comes here */
5990 bnx2x_init_block(bp, EMAC1_BLOCK, init_stage);
5991 /* Port DBU comes here */
5992 bnx2x_init_block(bp, DBU_BLOCK, init_stage);
5993 /* Port DBG comes here */
5994 bnx2x_init_block(bp, DBG_BLOCK, init_stage);
5996 bnx2x_init_block(bp, NIG_BLOCK, init_stage);
5998 REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1);
6000 if (CHIP_IS_E1H(bp)) {
6001 /* 0x2 disable e1hov, 0x1 enable */
6002 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK_MF + port*4,
6003 (IS_E1HMF(bp) ? 0x1 : 0x2));
6005 /* support pause requests from USDM, TSDM and BRB */
6006 REG_WR(bp, NIG_REG_LLFC_EGRESS_SRC_ENABLE_0 + port*4, 0x7);
6009 REG_WR(bp, NIG_REG_LLFC_ENABLE_0 + port*4, 0);
6010 REG_WR(bp, NIG_REG_LLFC_OUT_EN_0 + port*4, 0);
6011 REG_WR(bp, NIG_REG_PAUSE_ENABLE_0 + port*4, 1);
6015 /* Port MCP comes here */
6016 bnx2x_init_block(bp, MCP_BLOCK, init_stage);
6017 /* Port DMAE comes here */
6018 bnx2x_init_block(bp, DMAE_BLOCK, init_stage);
6020 switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
6021 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
6023 u32 swap_val, swap_override, aeu_gpio_mask, offset;
6025 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_3,
6026 MISC_REGISTERS_GPIO_INPUT_HI_Z, port);
6028 /* The GPIO should be swapped if the swap register is set and active */
6030 swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
6031 swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
6033 /* Select function upon port-swap configuration */
6035 offset = MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0;
6036 aeu_gpio_mask = (swap_val && swap_override) ?
6037 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1 :
6038 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0;
6040 offset = MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0;
6041 aeu_gpio_mask = (swap_val && swap_override) ?
6042 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 :
6043 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1;
6045 val = REG_RD(bp, offset);
6046 /* add GPIO3 to group */
6047 val |= aeu_gpio_mask;
6048 REG_WR(bp, offset, val);
6052 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
6053 /* add SPIO 5 to group 0 */
6054 val = REG_RD(bp, MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
6055 val |= AEU_INPUTS_ATTN_BITS_SPIO5;
6056 REG_WR(bp, MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0, val);
6063 bnx2x__link_reset(bp);
6068 #define ILT_PER_FUNC (768/2)
6069 #define FUNC_ILT_BASE(func) (func * ILT_PER_FUNC)
6070 /* The physical address is shifted right 12 bits and has a valid bit (1)
6071 added as the 53rd bit;
6072 then, since this is a wide register(TM),
6073 we split it into two 32-bit writes */
6075 #define ONCHIP_ADDR1(x) ((u32)(((u64)x >> 12) & 0xFFFFFFFF))
6076 #define ONCHIP_ADDR2(x) ((u32)((1 << 20) | ((u64)x >> 44)))
6077 #define PXP_ONE_ILT(x) (((x) << 10) | x)
6078 #define PXP_ILT_RANGE(f, l) (((l) << 10) | f)
6080 #define CNIC_ILT_LINES 0
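6081 /* write one ILT entry (the split 64-bit on-chip address) to the chip */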
6082 static void bnx2x_ilt_wr(struct bnx2x *bp, u32 index, dma_addr_t addr)
6086 if (CHIP_IS_E1H(bp))
6087 reg = PXP2_REG_RQ_ONCHIP_AT_B0 + index*8;
6089 reg = PXP2_REG_RQ_ONCHIP_AT + index*8;
6091 bnx2x_wb_wr(bp, reg, ONCHIP_ADDR1(addr), ONCHIP_ADDR2(addr));
6094 static int bnx2x_init_func(struct bnx2x *bp)
6096 int port = BP_PORT(bp);
6097 int func = BP_FUNC(bp);
6101 DP(BNX2X_MSG_MCP, "starting func init func %x\n", func);
6103 /* set MSI reconfigure capability */
6104 addr = (port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0);
6105 val = REG_RD(bp, addr);
6106 val |= HC_CONFIG_0_REG_MSI_ATTN_EN_0;
6107 REG_WR(bp, addr, val);
6109 i = FUNC_ILT_BASE(func);
6111 bnx2x_ilt_wr(bp, i, bnx2x_sp_mapping(bp, context));
6112 if (CHIP_IS_E1H(bp)) {
6113 REG_WR(bp, PXP2_REG_RQ_CDU_FIRST_ILT, i);
6114 REG_WR(bp, PXP2_REG_RQ_CDU_LAST_ILT, i + CNIC_ILT_LINES);
6116 REG_WR(bp, PXP2_REG_PSWRQ_CDU0_L2P + func*4,
6117 PXP_ILT_RANGE(i, i + CNIC_ILT_LINES));
6120 if (CHIP_IS_E1H(bp)) {
6121 for (i = 0; i < 9; i++)
6122 bnx2x_init_block(bp,
6123 cm_blocks[i], FUNC0_STAGE + func);
6125 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
6126 REG_WR(bp, NIG_REG_LLH0_FUNC_VLAN_ID + port*8, bp->e1hov);
6129 /* HC init per function */
6130 if (CHIP_IS_E1H(bp)) {
6131 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
6133 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
6134 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
6136 bnx2x_init_block(bp, HC_BLOCK, FUNC0_STAGE + func);
6138 /* Reset PCIE errors for debug */
6139 REG_WR(bp, 0x2114, 0xffffffff);
6140 REG_WR(bp, 0x2120, 0xffffffff);
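6141 /* Top-level HW init: run the common/port/function init stage selected by
6142 the MCP load_code, then latch the initial driver pulse sequence and the
6143 per-function statistics address from shared memory */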
6145 static int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
6149 DP(BNX2X_MSG_MCP, "function %d load_code %x\n",
6150 BP_FUNC(bp), load_code);
6153 mutex_init(&bp->dmae_mutex);
6154 bnx2x_gunzip_init(bp);
6156 switch (load_code) {
6157 case FW_MSG_CODE_DRV_LOAD_COMMON:
6158 rc = bnx2x_init_common(bp);
6163 case FW_MSG_CODE_DRV_LOAD_PORT:
6165 rc = bnx2x_init_port(bp);
6170 case FW_MSG_CODE_DRV_LOAD_FUNCTION:
6172 rc = bnx2x_init_func(bp);
6178 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
6182 if (!BP_NOMCP(bp)) {
6183 int func = BP_FUNC(bp);
6185 bp->fw_drv_pulse_wr_seq =
6186 (SHMEM_RD(bp, func_mb[func].drv_pulse_mb) &
6187 DRV_PULSE_SEQ_MASK);
6188 bp->func_stx = SHMEM_RD(bp, func_mb[func].fw_mb_param);
6189 DP(BNX2X_MSG_MCP, "drv_pulse 0x%x func_stx 0x%x\n",
6190 bp->fw_drv_pulse_wr_seq, bp->func_stx);
6194 /* this needs to be done before gunzip end */
6195 bnx2x_zero_def_sb(bp);
6196 for_each_queue(bp, i)
6197 bnx2x_zero_sb(bp, BP_L_ID(bp) + i);
6200 bnx2x_gunzip_end(bp);
6205 /* send the MCP a request, block until there is a reply */
6206 static u32 bnx2x_fw_command(struct bnx2x *bp, u32 command)
6208 int func = BP_FUNC(bp);
6209 u32 seq = ++bp->fw_seq;
6212 u8 delay = CHIP_REV_IS_SLOW(bp) ? 100 : 10;
6214 SHMEM_WR(bp, func_mb[func].drv_mb_header, (command | seq));
6215 DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB\n", (command | seq));
6218 /* let the FW do its magic ... */
6221 rc = SHMEM_RD(bp, func_mb[func].fw_mb_header);
6223 /* Give the FW up to 2 seconds (200*10ms) */
6224 } while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 200));
6226 DP(BNX2X_MSG_MCP, "[after %d ms] read (%x) seq is (%x) from FW MB\n",
6227 cnt*delay, rc, seq);
6229 /* is this a reply to our command? */
6230 if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK)) {
6231 rc &= FW_MSG_CODE_MASK;
6235 BNX2X_ERR("FW failed to respond!\n");
static void bnx2x_free_mem(struct bnx2x *bp)
{

#define BNX2X_PCI_FREE(x, y, size) \
	do { \
		if (x) { \
			pci_free_consistent(bp->pdev, size, x, y); \
			x = NULL; \
			y = 0; \
		} \
	} while (0)

#define BNX2X_FREE(x) \
	do { \
		if (x) { \
			vfree(x); \
			x = NULL; \
		} \
	} while (0)

	int i;

	/* fastpath */
	for_each_queue(bp, i) {

		/* status blocks */
		BNX2X_PCI_FREE(bnx2x_fp(bp, i, status_blk),
			       bnx2x_fp(bp, i, status_blk_mapping),
			       sizeof(struct host_status_block) +
			       sizeof(struct eth_tx_db_data));
	}

	for_each_rx_queue(bp, i) {

		/* fastpath rx rings: rx_buf rx_desc rx_comp */
		BNX2X_FREE(bnx2x_fp(bp, i, rx_buf_ring));
		BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_desc_ring),
			       bnx2x_fp(bp, i, rx_desc_mapping),
			       sizeof(struct eth_rx_bd) * NUM_RX_BD);

		BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_comp_ring),
			       bnx2x_fp(bp, i, rx_comp_mapping),
			       sizeof(struct eth_fast_path_rx_cqe) *
			       NUM_RCQ_BD);

		/* SGE ring */
		BNX2X_FREE(bnx2x_fp(bp, i, rx_page_ring));
		BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_sge_ring),
			       bnx2x_fp(bp, i, rx_sge_mapping),
			       BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
	}

	for_each_tx_queue(bp, i) {

		/* fastpath tx rings: tx_buf tx_desc */
		BNX2X_FREE(bnx2x_fp(bp, i, tx_buf_ring));
		BNX2X_PCI_FREE(bnx2x_fp(bp, i, tx_desc_ring),
			       bnx2x_fp(bp, i, tx_desc_mapping),
			       sizeof(struct eth_tx_bd) * NUM_TX_BD);
	}
	/* end of fastpath */

	BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping,
		       sizeof(struct host_def_status_block));

	BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping,
		       sizeof(struct bnx2x_slowpath));

	BNX2X_PCI_FREE(bp->t1, bp->t1_mapping, 64*1024);
	BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, 16*1024);
	BNX2X_PCI_FREE(bp->timers, bp->timers_mapping, 8*1024);
	BNX2X_PCI_FREE(bp->qm, bp->qm_mapping, 128*1024);

	BNX2X_PCI_FREE(bp->spq, bp->spq_mapping, BCM_PAGE_SIZE);

#undef BNX2X_PCI_FREE
#undef BNX2X_FREE
}
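/*
 * Memory comes in two flavours: rings the chip DMAs into are
 * pci_alloc_consistent() (coherent) allocations tracked by a dma
 * mapping, while software-only shadow rings (the sw_rx_bd/sw_tx_bd
 * arrays) come from vmalloc().  bnx2x_free_mem() above and
 * bnx2x_alloc_mem() below must be kept in strict correspondence.
 */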
static int bnx2x_alloc_mem(struct bnx2x *bp)
{

#define BNX2X_PCI_ALLOC(x, y, size) \
	do { \
		x = pci_alloc_consistent(bp->pdev, size, y); \
		if (x == NULL) \
			goto alloc_mem_err; \
		memset(x, 0, size); \
	} while (0)

#define BNX2X_ALLOC(x, size) \
	do { \
		x = vmalloc(size); \
		if (x == NULL) \
			goto alloc_mem_err; \
		memset(x, 0, size); \
	} while (0)

	int i;

	/* fastpath */
	for_each_queue(bp, i) {
		bnx2x_fp(bp, i, bp) = bp;

		/* status blocks */
		BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, status_blk),
				&bnx2x_fp(bp, i, status_blk_mapping),
				sizeof(struct host_status_block) +
				sizeof(struct eth_tx_db_data));
	}

	for_each_rx_queue(bp, i) {

		/* fastpath rx rings: rx_buf rx_desc rx_comp */
		BNX2X_ALLOC(bnx2x_fp(bp, i, rx_buf_ring),
			    sizeof(struct sw_rx_bd) * NUM_RX_BD);
		BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_desc_ring),
				&bnx2x_fp(bp, i, rx_desc_mapping),
				sizeof(struct eth_rx_bd) * NUM_RX_BD);

		BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_comp_ring),
				&bnx2x_fp(bp, i, rx_comp_mapping),
				sizeof(struct eth_fast_path_rx_cqe) *
				NUM_RCQ_BD);

		/* SGE ring */
		BNX2X_ALLOC(bnx2x_fp(bp, i, rx_page_ring),
			    sizeof(struct sw_rx_page) * NUM_RX_SGE);
		BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_sge_ring),
				&bnx2x_fp(bp, i, rx_sge_mapping),
				BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
	}

	for_each_tx_queue(bp, i) {

		bnx2x_fp(bp, i, hw_tx_prods) =
				(void *)(bnx2x_fp(bp, i, status_blk) + 1);

		bnx2x_fp(bp, i, tx_prods_mapping) =
				bnx2x_fp(bp, i, status_blk_mapping) +
				sizeof(struct host_status_block);

		/* fastpath tx rings: tx_buf tx_desc */
		BNX2X_ALLOC(bnx2x_fp(bp, i, tx_buf_ring),
			    sizeof(struct sw_tx_bd) * NUM_TX_BD);
		BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, tx_desc_ring),
				&bnx2x_fp(bp, i, tx_desc_mapping),
				sizeof(struct eth_tx_bd) * NUM_TX_BD);
	}
	/* end of fastpath */

	BNX2X_PCI_ALLOC(bp->def_status_blk, &bp->def_status_blk_mapping,
			sizeof(struct host_def_status_block));

	BNX2X_PCI_ALLOC(bp->slowpath, &bp->slowpath_mapping,
			sizeof(struct bnx2x_slowpath));

	BNX2X_PCI_ALLOC(bp->t1, &bp->t1_mapping, 64*1024);

	/* Initialize T1 */
	for (i = 0; i < 64*1024; i += 64) {
		*(u64 *)((char *)bp->t1 + i + 56) = 0x0UL;
		*(u64 *)((char *)bp->t1 + i + 3) = 0x0UL;
	}

	/* allocate searcher T2 table
	   we allocate 1/4 of alloc num for T2
	   (which is not entered into the ILT) */
	BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, 16*1024);

	/* Initialize T2 */
	for (i = 0; i < 16*1024; i += 64)
		*(u64 *)((char *)bp->t2 + i + 56) = bp->t2_mapping + i + 64;

	/* now fixup the last line in the block to point to the next block */
	*(u64 *)((char *)bp->t2 + 1024*16 - 8) = bp->t2_mapping;

	/* Timer block array (MAX_CONN*8) phys uncached for now 1024 conns */
	BNX2X_PCI_ALLOC(bp->timers, &bp->timers_mapping, 8*1024);

	/* QM queues (128*MAX_CONN) */
	BNX2X_PCI_ALLOC(bp->qm, &bp->qm_mapping, 128*1024);

	/* Slow path ring */
	BNX2X_PCI_ALLOC(bp->spq, &bp->spq_mapping, BCM_PAGE_SIZE);

	return 0;

alloc_mem_err:
	bnx2x_free_mem(bp);
	return -ENOMEM;

#undef BNX2X_PCI_ALLOC
#undef BNX2X_ALLOC
}
static void bnx2x_free_tx_skbs(struct bnx2x *bp)
{
	int i;

	for_each_tx_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		u16 bd_cons = fp->tx_bd_cons;
		u16 sw_prod = fp->tx_pkt_prod;
		u16 sw_cons = fp->tx_pkt_cons;

		while (sw_cons != sw_prod) {
			bd_cons = bnx2x_free_tx_pkt(bp, fp, TX_BD(sw_cons));
			sw_cons++;
		}
	}
}

static void bnx2x_free_rx_skbs(struct bnx2x *bp)
{
	int i, j;

	for_each_rx_queue(bp, j) {
		struct bnx2x_fastpath *fp = &bp->fp[j];

		for (i = 0; i < NUM_RX_BD; i++) {
			struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
			struct sk_buff *skb = rx_buf->skb;

			if (skb == NULL)
				continue;

			pci_unmap_single(bp->pdev,
					 pci_unmap_addr(rx_buf, mapping),
					 bp->rx_buf_size, PCI_DMA_FROMDEVICE);

			rx_buf->skb = NULL;
			dev_kfree_skb(skb);
		}
		if (!fp->disable_tpa)
			bnx2x_free_tpa_pool(bp, fp, CHIP_IS_E1(bp) ?
					    ETH_MAX_AGGREGATION_QUEUES_E1 :
					    ETH_MAX_AGGREGATION_QUEUES_E1H);
	}
}

static void bnx2x_free_skbs(struct bnx2x *bp)
{
	bnx2x_free_tx_skbs(bp);
	bnx2x_free_rx_skbs(bp);
}
static void bnx2x_free_msix_irqs(struct bnx2x *bp)
{
	int i, offset = 1;

	free_irq(bp->msix_table[0].vector, bp->dev);
	DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
	   bp->msix_table[0].vector);

	for_each_queue(bp, i) {
		DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq  "
		   "state %x\n", i, bp->msix_table[i + offset].vector,
		   bnx2x_fp(bp, i, state));

		free_irq(bp->msix_table[i + offset].vector, &bp->fp[i]);
	}
}

static void bnx2x_free_irq(struct bnx2x *bp)
{
	if (bp->flags & USING_MSIX_FLAG) {
		bnx2x_free_msix_irqs(bp);
		pci_disable_msix(bp->pdev);
		bp->flags &= ~USING_MSIX_FLAG;

	} else if (bp->flags & USING_MSI_FLAG) {
		free_irq(bp->pdev->irq, bp->dev);
		pci_disable_msi(bp->pdev);
		bp->flags &= ~USING_MSI_FLAG;

	} else
		free_irq(bp->pdev->irq, bp->dev);
}
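/*
 * Interrupt setup tries the modes in decreasing order of preference:
 * MSI-X (one vector for the slowpath plus one per fastpath queue),
 * then MSI, then legacy INTx.  The int_mode module parameter can force
 * the two lesser modes; bnx2x_free_irq() above undoes whichever mode
 * won the bringup.
 */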
static int bnx2x_enable_msix(struct bnx2x *bp)
{
	int i, rc, offset = 1;
	int igu_vec = 0;

	bp->msix_table[0].entry = igu_vec;
	DP(NETIF_MSG_IFUP, "msix_table[0].entry = %d (slowpath)\n", igu_vec);

	for_each_queue(bp, i) {
		igu_vec = BP_L_ID(bp) + offset + i;
		bp->msix_table[i + offset].entry = igu_vec;
		DP(NETIF_MSG_IFUP, "msix_table[%d].entry = %d "
		   "(fastpath #%u)\n", i + offset, igu_vec, i);
	}

	rc = pci_enable_msix(bp->pdev, &bp->msix_table[0],
			     BNX2X_NUM_QUEUES(bp) + offset);
	if (rc) {
		DP(NETIF_MSG_IFUP, "MSI-X is not attainable  rc %d\n", rc);
		return rc;
	}

	bp->flags |= USING_MSIX_FLAG;

	return 0;
}

static int bnx2x_req_msix_irqs(struct bnx2x *bp)
{
	int i, rc, offset = 1;

	rc = request_irq(bp->msix_table[0].vector, bnx2x_msix_sp_int, 0,
			 bp->dev->name, bp->dev);
	if (rc) {
		BNX2X_ERR("request sp irq failed\n");
		return -EBUSY;
	}

	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		sprintf(fp->name, "%s.fp%d", bp->dev->name, i);
		rc = request_irq(bp->msix_table[i + offset].vector,
				 bnx2x_msix_fp_int, 0, fp->name, fp);
		if (rc) {
			BNX2X_ERR("request fp #%d irq failed  rc %d\n", i, rc);
			bnx2x_free_msix_irqs(bp);
			return -EBUSY;
		}

		fp->state = BNX2X_FP_STATE_IRQ;
	}

	i = BNX2X_NUM_QUEUES(bp);
	if (i > 1)
		printk(KERN_INFO PFX
		       "%s: using MSI-X  IRQs: sp %d  fp %d - %d\n",
		       bp->dev->name, bp->msix_table[0].vector,
		       bp->msix_table[offset].vector,
		       bp->msix_table[offset + i - 1].vector);
	else
		printk(KERN_INFO PFX "%s: using MSI-X  IRQs: sp %d  fp %d\n",
		       bp->dev->name, bp->msix_table[0].vector,
		       bp->msix_table[offset + i - 1].vector);

	return 0;
}
static int bnx2x_enable_msi(struct bnx2x *bp)
{
	int rc;

	rc = pci_enable_msi(bp->pdev);
	if (rc) {
		DP(NETIF_MSG_IFUP, "MSI is not attainable\n");
		return -1;
	}
	bp->flags |= USING_MSI_FLAG;

	return 0;
}

static int bnx2x_req_irq(struct bnx2x *bp)
{
	unsigned long flags;
	int rc;

	if (bp->flags & USING_MSI_FLAG)
		flags = 0;
	else
		flags = IRQF_SHARED;

	rc = request_irq(bp->pdev->irq, bnx2x_interrupt, flags,
			 bp->dev->name, bp->dev);
	if (!rc)
		bnx2x_fp(bp, 0, state) = BNX2X_FP_STATE_IRQ;

	return rc;
}

static void bnx2x_napi_enable(struct bnx2x *bp)
{
	int i;

	for_each_rx_queue(bp, i)
		napi_enable(&bnx2x_fp(bp, i, napi));
}

static void bnx2x_napi_disable(struct bnx2x *bp)
{
	int i;

	for_each_rx_queue(bp, i)
		napi_disable(&bnx2x_fp(bp, i, napi));
}

static void bnx2x_netif_start(struct bnx2x *bp)
{
	int intr_sem;

	intr_sem = atomic_dec_and_test(&bp->intr_sem);
	smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */

	if (intr_sem) {
		if (netif_running(bp->dev)) {
			bnx2x_napi_enable(bp);
			bnx2x_int_enable(bp);
			if (bp->state == BNX2X_STATE_OPEN)
				netif_tx_wake_all_queues(bp->dev);
		}
	}
}

static void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
{
	bnx2x_int_disable_sync(bp, disable_hw);
	bnx2x_napi_disable(bp);
	netif_tx_disable(bp->dev);
	bp->dev->trans_start = jiffies; /* prevent tx timeout */
}
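/*
 * bnx2x_netif_stop()/bnx2x_netif_start() bracket HW-touching paths:
 * stop masks and synchronizes interrupts before NAPI and the tx queues
 * are quiesced, while start only re-enables them once bp->intr_sem
 * drops back to zero, so nested stop/start pairs cannot re-arm
 * interrupts early.
 */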
/*
 * Init service functions
 */

static void bnx2x_set_mac_addr_e1(struct bnx2x *bp, int set)
{
	struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
	int port = BP_PORT(bp);

	/* CAM allocation
	 * unicasts 0-31:port0 32-63:port1
	 * multicast 64-127:port0 128-191:port1
	 */
	config->hdr.length = 2;
	config->hdr.offset = port ? 32 : 0;
	config->hdr.client_id = bp->fp->cl_id;
	config->hdr.reserved1 = 0;

	/* primary MAC */
	config->config_table[0].cam_entry.msb_mac_addr =
					swab16(*(u16 *)&bp->dev->dev_addr[0]);
	config->config_table[0].cam_entry.middle_mac_addr =
					swab16(*(u16 *)&bp->dev->dev_addr[2]);
	config->config_table[0].cam_entry.lsb_mac_addr =
					swab16(*(u16 *)&bp->dev->dev_addr[4]);
	config->config_table[0].cam_entry.flags = cpu_to_le16(port);
	if (set)
		config->config_table[0].target_table_entry.flags = 0;
	else
		CAM_INVALIDATE(config->config_table[0]);
	config->config_table[0].target_table_entry.client_id = 0;
	config->config_table[0].target_table_entry.vlan_id = 0;

	DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x)\n",
	   (set ? "setting" : "clearing"),
	   config->config_table[0].cam_entry.msb_mac_addr,
	   config->config_table[0].cam_entry.middle_mac_addr,
	   config->config_table[0].cam_entry.lsb_mac_addr);

	/* broadcast */
	config->config_table[1].cam_entry.msb_mac_addr = cpu_to_le16(0xffff);
	config->config_table[1].cam_entry.middle_mac_addr = cpu_to_le16(0xffff);
	config->config_table[1].cam_entry.lsb_mac_addr = cpu_to_le16(0xffff);
	config->config_table[1].cam_entry.flags = cpu_to_le16(port);
	if (set)
		config->config_table[1].target_table_entry.flags =
				TSTORM_CAM_TARGET_TABLE_ENTRY_BROADCAST;
	else
		CAM_INVALIDATE(config->config_table[1]);
	config->config_table[1].target_table_entry.client_id = 0;
	config->config_table[1].target_table_entry.vlan_id = 0;

	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
		      U64_HI(bnx2x_sp_mapping(bp, mac_config)),
		      U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
}
static void bnx2x_set_mac_addr_e1h(struct bnx2x *bp, int set)
{
	struct mac_configuration_cmd_e1h *config =
		(struct mac_configuration_cmd_e1h *)bnx2x_sp(bp, mac_config);

	if (set && (bp->state != BNX2X_STATE_OPEN)) {
		DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
		return;
	}

	/* CAM allocation for E1H
	 * unicasts: by func number
	 * multicast: 20+FUNC*20, 20 each
	 */
	config->hdr.length = 1;
	config->hdr.offset = BP_FUNC(bp);
	config->hdr.client_id = bp->fp->cl_id;
	config->hdr.reserved1 = 0;

	/* primary MAC */
	config->config_table[0].msb_mac_addr =
					swab16(*(u16 *)&bp->dev->dev_addr[0]);
	config->config_table[0].middle_mac_addr =
					swab16(*(u16 *)&bp->dev->dev_addr[2]);
	config->config_table[0].lsb_mac_addr =
					swab16(*(u16 *)&bp->dev->dev_addr[4]);
	config->config_table[0].client_id = BP_L_ID(bp);
	config->config_table[0].vlan_id = 0;
	config->config_table[0].e1hov_id = cpu_to_le16(bp->e1hov);
	if (set)
		config->config_table[0].flags = BP_PORT(bp);
	else
		config->config_table[0].flags =
				MAC_CONFIGURATION_ENTRY_E1H_ACTION_TYPE;

	DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x)  E1HOV %d  CLID %d\n",
	   (set ? "setting" : "clearing"),
	   config->config_table[0].msb_mac_addr,
	   config->config_table[0].middle_mac_addr,
	   config->config_table[0].lsb_mac_addr, bp->e1hov, BP_L_ID(bp));

	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
		      U64_HI(bnx2x_sp_mapping(bp, mac_config)),
		      U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
}
static int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
			     int *state_p, int poll)
{
	/* can take a while if any port is running */
	int cnt = 5000;

	DP(NETIF_MSG_IFUP, "%s for state to become %x on IDX [%d]\n",
	   poll ? "polling" : "waiting", state, idx);

	might_sleep();
	while (cnt--) {
		if (poll) {
			bnx2x_rx_int(bp->fp, 10);
			/* if index is different from 0
			 * the reply for some commands will
			 * be on the non default queue
			 */
			if (idx)
				bnx2x_rx_int(&bp->fp[idx], 10);
		}

		mb(); /* state is changed by bnx2x_sp_event() */
		if (*state_p == state) {
#ifdef BNX2X_STOP_ON_ERROR
			DP(NETIF_MSG_IFUP, "exit  (cnt %d)\n", 5000 - cnt);
#endif
			return 0;
		}

		msleep(1);
	}

	/* timeout! */
	BNX2X_ERR("timeout %s for state %x on IDX [%d]\n",
		  poll ? "polling" : "waiting", state, idx);
#ifdef BNX2X_STOP_ON_ERROR
	bnx2x_panic();
#endif

	return -EBUSY;
}
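/*
 * Ramrods (slowpath commands posted through bnx2x_sp_post()) complete
 * asynchronously: the completion arrives on a status block and
 * bnx2x_sp_event() updates the state variable that bnx2x_wait_ramrod()
 * spins on.  With poll set, the caller pumps the rx ring itself, which
 * is needed while interrupts are not yet (or no longer) enabled.
 */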
static int bnx2x_setup_leading(struct bnx2x *bp)
{
	int rc;

	/* reset IGU state */
	bnx2x_ack_sb(bp, bp->fp[0].sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);

	/* SETUP ramrod */
	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_SETUP, 0, 0, 0, 0);

	/* Wait for completion */
	rc = bnx2x_wait_ramrod(bp, BNX2X_STATE_OPEN, 0, &(bp->state), 0);

	return rc;
}

static int bnx2x_setup_multi(struct bnx2x *bp, int index)
{
	struct bnx2x_fastpath *fp = &bp->fp[index];

	/* reset IGU state */
	bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);

	/* SETUP ramrod */
	fp->state = BNX2X_FP_STATE_OPENING;
	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CLIENT_SETUP, index, 0,
		      fp->cl_id, 0);

	/* Wait for completion */
	return bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_OPEN, index,
				 &(fp->state), 0);
}
static int bnx2x_poll(struct napi_struct *napi, int budget);

static void bnx2x_set_int_mode(struct bnx2x *bp)
{
	int num_queues;

	switch (int_mode) {
	case INT_MODE_INTx:
	case INT_MODE_MSI:
		num_queues = 1;
		bp->num_rx_queues = num_queues;
		bp->num_tx_queues = num_queues;
		DP(NETIF_MSG_IFUP,
		   "set number of queues to %d\n", num_queues);
		break;

	default:
		if (bp->multi_mode == ETH_RSS_MODE_REGULAR)
			num_queues = min_t(u32, num_online_cpus(),
					   BNX2X_MAX_QUEUES(bp));
		else
			num_queues = 1;
		bp->num_rx_queues = num_queues;
		bp->num_tx_queues = num_queues;
		DP(NETIF_MSG_IFUP, "set number of rx queues to %d"
		   "  number of tx queues to %d\n",
		   bp->num_rx_queues, bp->num_tx_queues);
		/* if we can't use MSI-X we only need one fp,
		 * so try to enable MSI-X with the requested number of fp's
		 * and fallback to MSI or legacy INTx with one fp
		 */
		if (bnx2x_enable_msix(bp)) {
			/* failed to enable MSI-X */
			num_queues = 1;
			bp->num_rx_queues = num_queues;
			bp->num_tx_queues = num_queues;
			if (bp->multi_mode)
				BNX2X_ERR("Multi requested but failed to "
					  "enable MSI-X  set number of "
					  "queues to %d\n", num_queues);
		}
		break;
	}
	bp->dev->real_num_tx_queues = bp->num_tx_queues;
}
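/*
 * Queue-count selection runs before any allocation: bnx2x_nic_load()
 * below sizes its fastpath structures from bp->num_rx/tx_queues, so an
 * MSI-X failure has already been folded back to a single queue here.
 */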
static void bnx2x_set_rx_mode(struct net_device *dev);

/* must be called with rtnl_lock */
static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
{
	u32 load_code;
	int i, rc;

#ifdef BNX2X_STOP_ON_ERROR
	DP(NETIF_MSG_IFUP, "enter  load_mode %d\n", load_mode);
	if (unlikely(bp->panic))
		return -EPERM;
#endif

	bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;

	bnx2x_set_int_mode(bp);

	if (bnx2x_alloc_mem(bp))
		return -ENOMEM;

	for_each_rx_queue(bp, i)
		bnx2x_fp(bp, i, disable_tpa) =
					((bp->flags & TPA_ENABLE_FLAG) == 0);

	for_each_rx_queue(bp, i)
		netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
			       bnx2x_poll, 128);

#ifdef BNX2X_STOP_ON_ERROR
	for_each_rx_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		fp->poll_no_work = 0;
		fp->poll_calls = 0;
		fp->poll_max_calls = 0;
		fp->poll_complete = 0;
		fp->poll_exit = 0;
	}
#endif
	bnx2x_napi_enable(bp);

	if (bp->flags & USING_MSIX_FLAG) {
		rc = bnx2x_req_msix_irqs(bp);
		if (rc) {
			pci_disable_msix(bp->pdev);
			goto load_error1;
		}
	} else {
		if ((rc != -ENOMEM) && (int_mode != INT_MODE_INTx))
			bnx2x_enable_msi(bp);
		rc = bnx2x_req_irq(bp);
		if (rc) {
			BNX2X_ERR("IRQ request failed  rc %d, aborting\n", rc);
			if (bp->flags & USING_MSI_FLAG)
				pci_disable_msi(bp->pdev);
			goto load_error1;
		}
		if (bp->flags & USING_MSI_FLAG) {
			bp->dev->irq = bp->pdev->irq;
			printk(KERN_INFO PFX "%s: using MSI  IRQ %d\n",
			       bp->dev->name, bp->pdev->irq);
		}
	}

	/* Send LOAD_REQUEST command to MCP
	   Returns the type of LOAD command:
	   if it is the first port to be initialized
	   common blocks should be initialized, otherwise - not
	*/
	if (!BP_NOMCP(bp)) {
		load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ);
		if (!load_code) {
			BNX2X_ERR("MCP response failure, aborting\n");
			rc = -EBUSY;
			goto load_error2;
		}
		if (load_code == FW_MSG_CODE_DRV_LOAD_REFUSED) {
			rc = -EBUSY; /* other port in diagnostic mode */
			goto load_error2;
		}

	} else {
		int port = BP_PORT(bp);

		DP(NETIF_MSG_IFUP, "NO MCP - load counts      %d, %d, %d\n",
		   load_count[0], load_count[1], load_count[2]);
		load_count[0]++;
		load_count[1 + port]++;
		DP(NETIF_MSG_IFUP, "NO MCP - new load counts  %d, %d, %d\n",
		   load_count[0], load_count[1], load_count[2]);
		if (load_count[0] == 1)
			load_code = FW_MSG_CODE_DRV_LOAD_COMMON;
		else if (load_count[1 + port] == 1)
			load_code = FW_MSG_CODE_DRV_LOAD_PORT;
		else
			load_code = FW_MSG_CODE_DRV_LOAD_FUNCTION;
	}

	if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
	    (load_code == FW_MSG_CODE_DRV_LOAD_PORT))
		bp->port.pmf = 1;
	else
		bp->port.pmf = 0;
	DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);

	/* Initialize HW */
	rc = bnx2x_init_hw(bp, load_code);
	if (rc) {
		BNX2X_ERR("HW init failed, aborting\n");
		goto load_error2;
	}

	/* Setup NIC internals and enable interrupts */
	bnx2x_nic_init(bp, load_code);

	/* Send LOAD_DONE command to MCP */
	if (!BP_NOMCP(bp)) {
		load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE);
		if (!load_code) {
			BNX2X_ERR("MCP response failure, aborting\n");
			rc = -EBUSY;
			goto load_error3;
		}
	}

	bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;

	rc = bnx2x_setup_leading(bp);
	if (rc) {
		BNX2X_ERR("Setup leading failed!\n");
		goto load_error3;
	}

	if (CHIP_IS_E1H(bp))
		if (bp->mf_config & FUNC_MF_CFG_FUNC_DISABLED) {
			DP(NETIF_MSG_IFUP, "mf_cfg function disabled\n");
			bp->state = BNX2X_STATE_DISABLED;
		}

	if (bp->state == BNX2X_STATE_OPEN)
		for_each_nondefault_queue(bp, i) {
			rc = bnx2x_setup_multi(bp, i);
			if (rc)
				goto load_error3;
		}

	if (CHIP_IS_E1(bp))
		bnx2x_set_mac_addr_e1(bp, 1);
	else
		bnx2x_set_mac_addr_e1h(bp, 1);

	if (bp->port.pmf)
		bnx2x_initial_phy_init(bp, load_mode);

	/* Start fast path */
	switch (load_mode) {
	case LOAD_NORMAL:
		/* Tx queue should be only re-enabled */
		netif_tx_wake_all_queues(bp->dev);
		/* Initialize the receive filter. */
		bnx2x_set_rx_mode(bp->dev);
		break;

	case LOAD_OPEN:
		netif_tx_start_all_queues(bp->dev);
		/* Initialize the receive filter. */
		bnx2x_set_rx_mode(bp->dev);
		break;

	case LOAD_DIAG:
		/* Initialize the receive filter. */
		bnx2x_set_rx_mode(bp->dev);
		bp->state = BNX2X_STATE_DIAG;
		break;

	default:
		break;
	}

	if (!bp->port.pmf)
		bnx2x__link_status_update(bp);

	/* start the timer */
	mod_timer(&bp->timer, jiffies + bp->current_interval);

	return 0;

load_error3:
	bnx2x_int_disable_sync(bp, 1);
	if (!BP_NOMCP(bp)) {
		bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP);
		bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
	}

	/* Free SKBs, SGEs, TPA pool and driver internals */
	bnx2x_free_skbs(bp);
	for_each_rx_queue(bp, i)
		bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
load_error2:
	/* Release IRQs */
	bnx2x_free_irq(bp);
load_error1:
	bnx2x_napi_disable(bp);
	for_each_rx_queue(bp, i)
		netif_napi_del(&bnx2x_fp(bp, i, napi));
	bnx2x_free_mem(bp);

	return rc;
}
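/*
 * Teardown mirrors bringup in reverse: every non-default queue is
 * halted and its CFC entry deleted before the leading connection and
 * the port itself go down, so no client can post to a dead port.
 */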
static int bnx2x_stop_multi(struct bnx2x *bp, int index)
{
	struct bnx2x_fastpath *fp = &bp->fp[index];
	int rc;

	/* halt the connection */
	fp->state = BNX2X_FP_STATE_HALTING;
	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, index, 0, fp->cl_id, 0);

	/* Wait for completion */
	rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, index,
			       &(fp->state), 1);
	if (rc) /* timeout */
		return rc;

	/* delete cfc entry */
	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CFC_DEL, index, 0, 0, 1);

	/* Wait for completion */
	rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_CLOSED, index,
			       &(fp->state), 1);
	return rc;
}

static int bnx2x_stop_leading(struct bnx2x *bp)
{
	__le16 dsb_sp_prod_idx;
	/* if the other port is handling traffic,
	   this can take a lot of time */
	int cnt = 500;
	int rc;

	might_sleep();

	/* Send HALT ramrod */
	bp->fp[0].state = BNX2X_FP_STATE_HALTING;
	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, 0, 0, bp->fp->cl_id, 0);

	/* Wait for completion */
	rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, 0,
			       &(bp->fp[0].state), 1);
	if (rc) /* timeout */
		return rc;

	dsb_sp_prod_idx = *bp->dsb_sp_prod;

	/* Send PORT_DELETE ramrod */
	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_DEL, 0, 0, 0, 1);

	/* Wait for completion to arrive on default status block
	   we are going to reset the chip anyway
	   so there is not much to do if this times out
	 */
	while (dsb_sp_prod_idx == *bp->dsb_sp_prod) {
		if (!cnt) {
			DP(NETIF_MSG_IFDOWN, "timeout waiting for port del "
			   "dsb_sp_prod 0x%x != dsb_sp_prod_idx 0x%x\n",
			   *bp->dsb_sp_prod, dsb_sp_prod_idx);
#ifdef BNX2X_STOP_ON_ERROR
			bnx2x_panic();
#endif
			rc = -EBUSY;
			break;
		}
		cnt--;
		msleep(1);
		rmb(); /* Refresh the dsb_sp_prod */
	}
	bp->state = BNX2X_STATE_CLOSING_WAIT4_UNLOAD;
	bp->fp[0].state = BNX2X_FP_STATE_CLOSED;

	return rc;
}
static void bnx2x_reset_func(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int func = BP_FUNC(bp);
	int base, i;

	/* Configure IGU */
	REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
	REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);

	/* Clear ILT */
	base = FUNC_ILT_BASE(func);
	for (i = base; i < base + ILT_PER_FUNC; i++)
		bnx2x_ilt_wr(bp, i, 0);
}

static void bnx2x_reset_port(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 val;

	REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);

	/* Do not rcv packets to BRB */
	REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK + port*4, 0x0);
	/* Do not direct rcv packets that are not for MCP to the BRB */
	REG_WR(bp, (port ? NIG_REG_LLH1_BRB1_NOT_MCP :
			   NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);

	/* Configure AEU */
	REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, 0);

	msleep(100);
	/* Check for BRB port occupancy */
	val = REG_RD(bp, BRB1_REG_PORT_NUM_OCC_BLOCKS_0 + port*4);
	if (val)
		DP(NETIF_MSG_IFDOWN,
		   "BRB1 is not empty  %d blocks are occupied\n", val);

	/* TODO: Close Doorbell port? */
}

static void bnx2x_reset_chip(struct bnx2x *bp, u32 reset_code)
{
	DP(BNX2X_MSG_MCP, "function %d  reset_code %x\n",
	   BP_FUNC(bp), reset_code);

	switch (reset_code) {
	case FW_MSG_CODE_DRV_UNLOAD_COMMON:
		bnx2x_reset_port(bp);
		bnx2x_reset_func(bp);
		bnx2x_reset_common(bp);
		break;

	case FW_MSG_CODE_DRV_UNLOAD_PORT:
		bnx2x_reset_port(bp);
		bnx2x_reset_func(bp);
		break;

	case FW_MSG_CODE_DRV_UNLOAD_FUNCTION:
		bnx2x_reset_func(bp);
		break;

	default:
		BNX2X_ERR("Unknown reset_code (0x%x) from MCP\n", reset_code);
		break;
	}
}
/* must be called with rtnl_lock */
static int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
{
	int port = BP_PORT(bp);
	u32 reset_code = 0;
	int i, cnt, rc;

	bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;

	bp->rx_mode = BNX2X_RX_MODE_NONE;
	bnx2x_set_storm_rx_mode(bp);

	bnx2x_netif_stop(bp, 1);

	del_timer_sync(&bp->timer);
	SHMEM_WR(bp, func_mb[BP_FUNC(bp)].drv_pulse_mb,
		 (DRV_PULSE_ALWAYS_ALIVE | bp->fw_drv_pulse_wr_seq));
	bnx2x_stats_handle(bp, STATS_EVENT_STOP);

	/* Release IRQs */
	bnx2x_free_irq(bp);

	/* Wait until tx fastpath tasks complete */
	for_each_tx_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		cnt = 1000;
		while (bnx2x_has_tx_work_unload(fp)) {

			bnx2x_tx_int(fp);
			if (!cnt) {
				BNX2X_ERR("timeout waiting for queue[%d]\n",
					  i);
#ifdef BNX2X_STOP_ON_ERROR
				bnx2x_panic();
				return -EBUSY;
#else
				break;
#endif
			}
			cnt--;
			msleep(1);
		}
	}
	/* Give HW time to discard old tx messages */
	msleep(1);

	if (CHIP_IS_E1(bp)) {
		struct mac_configuration_cmd *config =
						bnx2x_sp(bp, mcast_config);

		bnx2x_set_mac_addr_e1(bp, 0);

		for (i = 0; i < config->hdr.length; i++)
			CAM_INVALIDATE(config->config_table[i]);

		config->hdr.length = i;
		if (CHIP_REV_IS_SLOW(bp))
			config->hdr.offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
		else
			config->hdr.offset = BNX2X_MAX_MULTICAST*(1 + port);
		config->hdr.client_id = bp->fp->cl_id;
		config->hdr.reserved1 = 0;

		bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
			      U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
			      U64_LO(bnx2x_sp_mapping(bp, mcast_config)), 0);

	} else { /* E1H */
		REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);

		bnx2x_set_mac_addr_e1h(bp, 0);

		for (i = 0; i < MC_HASH_SIZE; i++)
			REG_WR(bp, MC_HASH_OFFSET(bp, i), 0);
	}

	if (unload_mode == UNLOAD_NORMAL)
		reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;

	else if (bp->flags & NO_WOL_FLAG) {
		reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP;
		if (CHIP_IS_E1H(bp))
			REG_WR(bp, MISC_REG_E1HMF_MODE, 0);

	} else if (bp->wol) {
		u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
		u8 *mac_addr = bp->dev->dev_addr;
		u32 val;
		/* The mac address is written to entries 1-4 to
		   preserve entry 0 which is used by the PMF */
		u8 entry = (BP_E1HVN(bp) + 1)*8;

		val = (mac_addr[0] << 8) | mac_addr[1];
		EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry, val);

		val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
		      (mac_addr[4] << 8) | mac_addr[5];
		EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry + 4, val);

		reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN;

	} else
		reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;

	/* Close multi and leading connections
	   Completions for ramrods are collected in a synchronous way */
	for_each_nondefault_queue(bp, i)
		if (bnx2x_stop_multi(bp, i))
			goto unload_error;

	rc = bnx2x_stop_leading(bp);
	if (rc) {
		BNX2X_ERR("Stop leading failed!\n");
#ifdef BNX2X_STOP_ON_ERROR
		return -EBUSY;
#else
		goto unload_error;
#endif
	}

unload_error:
	if (!BP_NOMCP(bp))
		reset_code = bnx2x_fw_command(bp, reset_code);
	else {
		DP(NETIF_MSG_IFDOWN, "NO MCP - load counts      %d, %d, %d\n",
		   load_count[0], load_count[1], load_count[2]);
		load_count[0]--;
		load_count[1 + port]--;
		DP(NETIF_MSG_IFDOWN, "NO MCP - new load counts  %d, %d, %d\n",
		   load_count[0], load_count[1], load_count[2]);
		if (load_count[0] == 0)
			reset_code = FW_MSG_CODE_DRV_UNLOAD_COMMON;
		else if (load_count[1 + port] == 0)
			reset_code = FW_MSG_CODE_DRV_UNLOAD_PORT;
		else
			reset_code = FW_MSG_CODE_DRV_UNLOAD_FUNCTION;
	}

	if ((reset_code == FW_MSG_CODE_DRV_UNLOAD_COMMON) ||
	    (reset_code == FW_MSG_CODE_DRV_UNLOAD_PORT))
		bnx2x__link_reset(bp);

	/* Reset the chip */
	bnx2x_reset_chip(bp, reset_code);

	/* Report UNLOAD_DONE to MCP */
	if (!BP_NOMCP(bp))
		bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);

	/* Free SKBs, SGEs, TPA pool and driver internals */
	bnx2x_free_skbs(bp);
	for_each_rx_queue(bp, i)
		bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
	for_each_rx_queue(bp, i)
		netif_napi_del(&bnx2x_fp(bp, i, napi));
	bnx2x_free_mem(bp);

	bp->state = BNX2X_STATE_CLOSED;

	netif_carrier_off(bp->dev);

	return 0;
}
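/*
 * The reset task is the heavy hammer behind tx timeouts: a full
 * unload/load cycle under rtnl_lock, reusing the regular bringup and
 * teardown paths rather than trying to patch up a wedged chip.
 */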
static void bnx2x_reset_task(struct work_struct *work)
{
	struct bnx2x *bp = container_of(work, struct bnx2x, reset_task);

#ifdef BNX2X_STOP_ON_ERROR
	BNX2X_ERR("reset task called but STOP_ON_ERROR defined"
		  " so reset not done to allow debug dump,\n"
	 KERN_ERR " you will need to reboot when done\n");
	return;
#endif

	rtnl_lock();

	if (!netif_running(bp->dev))
		goto reset_task_exit;

	bnx2x_nic_unload(bp, UNLOAD_NORMAL);
	bnx2x_nic_load(bp, LOAD_NORMAL);

reset_task_exit:
	rtnl_unlock();
}
/* end of nic load/unload */

/*
 * Init service functions
 */

static inline u32 bnx2x_get_pretend_reg(struct bnx2x *bp, int func)
{
	switch (func) {
	case 0: return PXP2_REG_PGL_PRETEND_FUNC_F0;
	case 1: return PXP2_REG_PGL_PRETEND_FUNC_F1;
	case 2: return PXP2_REG_PGL_PRETEND_FUNC_F2;
	case 3: return PXP2_REG_PGL_PRETEND_FUNC_F3;
	case 4: return PXP2_REG_PGL_PRETEND_FUNC_F4;
	case 5: return PXP2_REG_PGL_PRETEND_FUNC_F5;
	case 6: return PXP2_REG_PGL_PRETEND_FUNC_F6;
	case 7: return PXP2_REG_PGL_PRETEND_FUNC_F7;
	default:
		BNX2X_ERR("Unsupported function index: %d\n", func);
		return (u32)(-1);
	}
}

static void bnx2x_undi_int_disable_e1h(struct bnx2x *bp, int orig_func)
{
	u32 reg = bnx2x_get_pretend_reg(bp, orig_func), new_val;

	/* Flush all outstanding writes */
	mmiowb();

	/* Pretend to be function 0 */
	REG_WR(bp, reg, 0);
	/* Flush the GRC transaction (in the chip) */
	new_val = REG_RD(bp, reg);
	if (new_val != 0) {
		BNX2X_ERR("Hmmm... Pretend register wasn't updated: (0,%d)!\n",
			  new_val);
		BUG();
	}

	/* From now we are in the "like-E1" mode */
	bnx2x_int_disable(bp);

	/* Flush all outstanding writes */
	mmiowb();

	/* Restore the original function settings */
	REG_WR(bp, reg, orig_func);
	new_val = REG_RD(bp, reg);
	if (new_val != orig_func) {
		BNX2X_ERR("Hmmm... Pretend register wasn't updated: (%d,%d)!\n",
			  orig_func, new_val);
		BUG();
	}
}

static inline void bnx2x_undi_int_disable(struct bnx2x *bp, int func)
{
	if (CHIP_IS_E1H(bp))
		bnx2x_undi_int_disable_e1h(bp, func);
	else
		bnx2x_int_disable(bp);
}
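/*
 * UNDI (pre-boot) firmware may leave the chip half-initialized.  The
 * helpers above let an E1H function temporarily "pretend" to be
 * function 0 so that the like-E1 interrupt disable sequence reaches
 * the right per-function registers before the device is reset below.
 */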
static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
{
	u32 val;

	/* Check if there is any driver already loaded */
	val = REG_RD(bp, MISC_REG_UNPREPARED);
	if (val == 0x1) {
		/* Check if it is the UNDI driver
		 * UNDI driver initializes CID offset for normal bell to 0x7
		 */
		bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
		val = REG_RD(bp, DORQ_REG_NORM_CID_OFST);
		if (val == 0x7) {
			u32 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
			/* save our func */
			int func = BP_FUNC(bp);
			u32 swap_en;
			u32 swap_val;

			/* clear the UNDI indication */
			REG_WR(bp, DORQ_REG_NORM_CID_OFST, 0);

			BNX2X_DEV_INFO("UNDI is active! reset device\n");

			/* try unload UNDI on port 0 */
			bp->func = 0;
			bp->fw_seq =
			       (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
				DRV_MSG_SEQ_NUMBER_MASK);
			reset_code = bnx2x_fw_command(bp, reset_code);

			/* if UNDI is loaded on the other port */
			if (reset_code != FW_MSG_CODE_DRV_UNLOAD_COMMON) {

				/* send "DONE" for previous unload */
				bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);

				/* unload UNDI on port 1 */
				bp->func = 1;
				bp->fw_seq =
			       (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
					DRV_MSG_SEQ_NUMBER_MASK);
				reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;

				bnx2x_fw_command(bp, reset_code);
			}

			/* now it's safe to release the lock */
			bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);

			bnx2x_undi_int_disable(bp, func);

			/* close input traffic and wait for it */
			/* Do not rcv packets to BRB */
			REG_WR(bp,
			      (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_DRV_MASK :
					     NIG_REG_LLH0_BRB1_DRV_MASK), 0x0);
			/* Do not direct rcv packets that are not for MCP to
			 * the BRB */
			REG_WR(bp,
			      (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_NOT_MCP :
					     NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
			/* clear AEU */
			REG_WR(bp,
			      (BP_PORT(bp) ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
					     MISC_REG_AEU_MASK_ATTN_FUNC_0), 0);
			msleep(10);

			/* save NIG port swap info */
			swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
			swap_en = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
			/* reset the device */
			REG_WR(bp,
			       GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
			       0xd3ffffff);
			REG_WR(bp,
			       GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
			       0x1403);
			/* take the NIG out of reset and restore swap values */
			REG_WR(bp,
			       GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
			       MISC_REGISTERS_RESET_REG_1_RST_NIG);
			REG_WR(bp, NIG_REG_PORT_SWAP, swap_val);
			REG_WR(bp, NIG_REG_STRAP_OVERRIDE, swap_en);

			/* send unload done to the MCP */
			bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);

			/* restore our func and fw_seq */
			bp->func = func;
			bp->fw_seq =
			       (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
				DRV_MSG_SEQ_NUMBER_MASK);

		} else
			bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
	}
}
static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
{
	u32 val, val2, val3, val4, id;
	u16 pmc;

	/* Get the chip revision id and number. */
	/* chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3 */
	val = REG_RD(bp, MISC_REG_CHIP_NUM);
	id = ((val & 0xffff) << 16);
	val = REG_RD(bp, MISC_REG_CHIP_REV);
	id |= ((val & 0xf) << 12);
	val = REG_RD(bp, MISC_REG_CHIP_METAL);
	id |= ((val & 0xff) << 4);
	val = REG_RD(bp, MISC_REG_BOND_ID);
	id |= (val & 0xf);
	bp->common.chip_id = id;
	bp->link_params.chip_id = bp->common.chip_id;
	BNX2X_DEV_INFO("chip ID is 0x%x\n", id);

	val = (REG_RD(bp, 0x2874) & 0x55);
	if ((bp->common.chip_id & 0x1) ||
	    (CHIP_IS_E1(bp) && val) || (CHIP_IS_E1H(bp) && (val == 0x55))) {
		bp->flags |= ONE_PORT_FLAG;
		BNX2X_DEV_INFO("single port device\n");
	}

	val = REG_RD(bp, MCP_REG_MCPR_NVM_CFG4);
	bp->common.flash_size = (NVRAM_1MB_SIZE <<
				 (val & MCPR_NVM_CFG4_FLASH_SIZE));
	BNX2X_DEV_INFO("flash_size 0x%x (%d)\n",
		       bp->common.flash_size, bp->common.flash_size);

	bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
	bp->link_params.shmem_base = bp->common.shmem_base;
	BNX2X_DEV_INFO("shmem offset is 0x%x\n", bp->common.shmem_base);

	if (!bp->common.shmem_base ||
	    (bp->common.shmem_base < 0xA0000) ||
	    (bp->common.shmem_base >= 0xC0000)) {
		BNX2X_DEV_INFO("MCP not active\n");
		bp->flags |= NO_MCP_FLAG;
		return;
	}

	val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
	if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
		!= (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
		BNX2X_ERR("BAD MCP validity signature\n");

	bp->common.hw_config = SHMEM_RD(bp, dev_info.shared_hw_config.config);
	BNX2X_DEV_INFO("hw_config 0x%08x\n", bp->common.hw_config);

	bp->link_params.hw_led_mode = ((bp->common.hw_config &
					SHARED_HW_CFG_LED_MODE_MASK) >>
				       SHARED_HW_CFG_LED_MODE_SHIFT);

	bp->link_params.feature_config_flags = 0;
	val = SHMEM_RD(bp, dev_info.shared_feature_config.config);
	if (val & SHARED_FEAT_CFG_OVERRIDE_PREEMPHASIS_CFG_ENABLED)
		bp->link_params.feature_config_flags |=
				FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
	else
		bp->link_params.feature_config_flags &=
				~FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;

	val = SHMEM_RD(bp, dev_info.bc_rev) >> 8;
	bp->common.bc_ver = val;
	BNX2X_DEV_INFO("bc_ver %X\n", val);
	if (val < BNX2X_BC_VER) {
		/* for now only warn
		 * later we might need to enforce this */
		BNX2X_ERR("This driver needs bc_ver %X but found %X,"
			  " please upgrade BC\n", BNX2X_BC_VER, val);
	}

	if (BP_E1HVN(bp) == 0) {
		pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_PMC, &pmc);
		bp->flags |= (pmc & PCI_PM_CAP_PME_D3cold) ? 0 : NO_WOL_FLAG;
	} else {
		/* no WOL capability for E1HVN != 0 */
		bp->flags |= NO_WOL_FLAG;
	}
	BNX2X_DEV_INFO("%sWoL capable\n",
		       (bp->flags & NO_WOL_FLAG) ? "not " : "");

	val = SHMEM_RD(bp, dev_info.shared_hw_config.part_num);
	val2 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[4]);
	val3 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[8]);
	val4 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[12]);

	printk(KERN_INFO PFX "part number %X-%X-%X-%X\n",
	       val, val2, val3, val4);
}
static void __devinit bnx2x_link_settings_supported(struct bnx2x *bp,
						    u32 switch_cfg)
{
	int port = BP_PORT(bp);
	u32 ext_phy_type;

	switch (switch_cfg) {
	case SWITCH_CFG_1G:
		BNX2X_DEV_INFO("switch_cfg 0x%x (1G)\n", switch_cfg);

		ext_phy_type =
			SERDES_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
		switch (ext_phy_type) {
		case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10baseT_Half |
					       SUPPORTED_10baseT_Full |
					       SUPPORTED_100baseT_Half |
					       SUPPORTED_100baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_2500baseX_Full |
					       SUPPORTED_TP |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Autoneg |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_BCM5482:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (5482)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10baseT_Half |
					       SUPPORTED_10baseT_Full |
					       SUPPORTED_100baseT_Half |
					       SUPPORTED_100baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_TP |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Autoneg |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		default:
			BNX2X_ERR("NVRAM config error. "
				  "BAD SerDes ext_phy_config 0x%x\n",
				  bp->link_params.ext_phy_config);
			return;
		}

		bp->port.phy_addr = REG_RD(bp, NIG_REG_SERDES0_CTRL_PHY_ADDR +
					   port*0x10);
		BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
		break;

	case SWITCH_CFG_10G:
		BNX2X_DEV_INFO("switch_cfg 0x%x (10G)\n", switch_cfg);

		ext_phy_type =
			XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
		switch (ext_phy_type) {
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10baseT_Half |
					       SUPPORTED_10baseT_Full |
					       SUPPORTED_100baseT_Half |
					       SUPPORTED_100baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_2500baseX_Full |
					       SUPPORTED_10000baseT_Full |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Autoneg |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (8072)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10000baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Autoneg |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (8073)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10000baseT_Full |
					       SUPPORTED_2500baseX_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Autoneg |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (8705)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10000baseT_Full |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (8706)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10000baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (8726)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10000baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Autoneg |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (SFX7101)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10000baseT_Full |
					       SUPPORTED_TP |
					       SUPPORTED_Autoneg |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (BCM8481)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10baseT_Half |
					       SUPPORTED_10baseT_Full |
					       SUPPORTED_100baseT_Half |
					       SUPPORTED_100baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_10000baseT_Full |
					       SUPPORTED_TP |
					       SUPPORTED_Autoneg |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
			BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
				  bp->link_params.ext_phy_config);
			break;

		default:
			BNX2X_ERR("NVRAM config error. "
				  "BAD XGXS ext_phy_config 0x%x\n",
				  bp->link_params.ext_phy_config);
			return;
		}

		bp->port.phy_addr = REG_RD(bp, NIG_REG_XGXS0_CTRL_PHY_ADDR +
					   port*0x18);
		BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
		break;

	default:
		BNX2X_ERR("BAD switch_cfg link_config 0x%x\n",
			  bp->port.link_config);
		return;
	}
	bp->link_params.phy_addr = bp->port.phy_addr;

	/* mask what we support according to speed_cap_mask */
	if (!(bp->link_params.speed_cap_mask &
	      PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF))
		bp->port.supported &= ~SUPPORTED_10baseT_Half;

	if (!(bp->link_params.speed_cap_mask &
	      PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL))
		bp->port.supported &= ~SUPPORTED_10baseT_Full;

	if (!(bp->link_params.speed_cap_mask &
	      PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF))
		bp->port.supported &= ~SUPPORTED_100baseT_Half;

	if (!(bp->link_params.speed_cap_mask &
	      PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL))
		bp->port.supported &= ~SUPPORTED_100baseT_Full;

	if (!(bp->link_params.speed_cap_mask &
	      PORT_HW_CFG_SPEED_CAPABILITY_D0_1G))
		bp->port.supported &= ~(SUPPORTED_1000baseT_Half |
					SUPPORTED_1000baseT_Full);

	if (!(bp->link_params.speed_cap_mask &
	      PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G))
		bp->port.supported &= ~SUPPORTED_2500baseX_Full;

	if (!(bp->link_params.speed_cap_mask &
	      PORT_HW_CFG_SPEED_CAPABILITY_D0_10G))
		bp->port.supported &= ~SUPPORTED_10000baseT_Full;

	BNX2X_DEV_INFO("supported 0x%x\n", bp->port.supported);
}
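/*
 * Note the two-stage derivation above: the external PHY type first
 * seeds bp->port.supported with everything the PHY can do, then the
 * NVRAM speed_cap_mask strips whatever the board vendor disabled, so
 * the final mask is the intersection of PHY and board capabilities.
 */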
static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
{
	bp->link_params.req_duplex = DUPLEX_FULL;

	switch (bp->port.link_config & PORT_FEATURE_LINK_SPEED_MASK) {
	case PORT_FEATURE_LINK_SPEED_AUTO:
		if (bp->port.supported & SUPPORTED_Autoneg) {
			bp->link_params.req_line_speed = SPEED_AUTO_NEG;
			bp->port.advertising = bp->port.supported;
		} else {
			u32 ext_phy_type =
			    XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);

			if ((ext_phy_type ==
			     PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705) ||
			    (ext_phy_type ==
			     PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706)) {
				/* force 10G, no AN */
				bp->link_params.req_line_speed = SPEED_10000;
				bp->port.advertising =
						(ADVERTISED_10000baseT_Full |
						 ADVERTISED_FIBRE);
				break;
			}
			BNX2X_ERR("NVRAM config error. "
				  "Invalid link_config 0x%x"
				  "  Autoneg not supported\n",
				  bp->port.link_config);
			return;
		}
		break;

	case PORT_FEATURE_LINK_SPEED_10M_FULL:
		if (bp->port.supported & SUPPORTED_10baseT_Full) {
			bp->link_params.req_line_speed = SPEED_10;
			bp->port.advertising = (ADVERTISED_10baseT_Full |
						ADVERTISED_TP);
		} else {
			BNX2X_ERR("NVRAM config error. "
				  "Invalid link_config 0x%x"
				  "  speed_cap_mask 0x%x\n",
				  bp->port.link_config,
				  bp->link_params.speed_cap_mask);
			return;
		}
		break;

	case PORT_FEATURE_LINK_SPEED_10M_HALF:
		if (bp->port.supported & SUPPORTED_10baseT_Half) {
			bp->link_params.req_line_speed = SPEED_10;
			bp->link_params.req_duplex = DUPLEX_HALF;
			bp->port.advertising = (ADVERTISED_10baseT_Half |
						ADVERTISED_TP);
		} else {
			BNX2X_ERR("NVRAM config error. "
				  "Invalid link_config 0x%x"
				  "  speed_cap_mask 0x%x\n",
				  bp->port.link_config,
				  bp->link_params.speed_cap_mask);
			return;
		}
		break;

	case PORT_FEATURE_LINK_SPEED_100M_FULL:
		if (bp->port.supported & SUPPORTED_100baseT_Full) {
			bp->link_params.req_line_speed = SPEED_100;
			bp->port.advertising = (ADVERTISED_100baseT_Full |
						ADVERTISED_TP);
		} else {
			BNX2X_ERR("NVRAM config error. "
				  "Invalid link_config 0x%x"
				  "  speed_cap_mask 0x%x\n",
				  bp->port.link_config,
				  bp->link_params.speed_cap_mask);
			return;
		}
		break;

	case PORT_FEATURE_LINK_SPEED_100M_HALF:
		if (bp->port.supported & SUPPORTED_100baseT_Half) {
			bp->link_params.req_line_speed = SPEED_100;
			bp->link_params.req_duplex = DUPLEX_HALF;
			bp->port.advertising = (ADVERTISED_100baseT_Half |
						ADVERTISED_TP);
		} else {
			BNX2X_ERR("NVRAM config error. "
				  "Invalid link_config 0x%x"
				  "  speed_cap_mask 0x%x\n",
				  bp->port.link_config,
				  bp->link_params.speed_cap_mask);
			return;
		}
		break;

	case PORT_FEATURE_LINK_SPEED_1G:
		if (bp->port.supported & SUPPORTED_1000baseT_Full) {
			bp->link_params.req_line_speed = SPEED_1000;
			bp->port.advertising = (ADVERTISED_1000baseT_Full |
						ADVERTISED_TP);
		} else {
			BNX2X_ERR("NVRAM config error. "
				  "Invalid link_config 0x%x"
				  "  speed_cap_mask 0x%x\n",
				  bp->port.link_config,
				  bp->link_params.speed_cap_mask);
			return;
		}
		break;

	case PORT_FEATURE_LINK_SPEED_2_5G:
		if (bp->port.supported & SUPPORTED_2500baseX_Full) {
			bp->link_params.req_line_speed = SPEED_2500;
			bp->port.advertising = (ADVERTISED_2500baseX_Full |
						ADVERTISED_TP);
		} else {
			BNX2X_ERR("NVRAM config error. "
				  "Invalid link_config 0x%x"
				  "  speed_cap_mask 0x%x\n",
				  bp->port.link_config,
				  bp->link_params.speed_cap_mask);
			return;
		}
		break;

	case PORT_FEATURE_LINK_SPEED_10G_CX4:
	case PORT_FEATURE_LINK_SPEED_10G_KX4:
	case PORT_FEATURE_LINK_SPEED_10G_KR:
		if (bp->port.supported & SUPPORTED_10000baseT_Full) {
			bp->link_params.req_line_speed = SPEED_10000;
			bp->port.advertising = (ADVERTISED_10000baseT_Full |
						ADVERTISED_FIBRE);
		} else {
			BNX2X_ERR("NVRAM config error. "
				  "Invalid link_config 0x%x"
				  "  speed_cap_mask 0x%x\n",
				  bp->port.link_config,
				  bp->link_params.speed_cap_mask);
			return;
		}
		break;

	default:
		BNX2X_ERR("NVRAM config error. "
			  "BAD link speed link_config 0x%x\n",
			  bp->port.link_config);
		bp->link_params.req_line_speed = SPEED_AUTO_NEG;
		bp->port.advertising = bp->port.supported;
		break;
	}

	bp->link_params.req_flow_ctrl = (bp->port.link_config &
					 PORT_FEATURE_FLOW_CONTROL_MASK);
	if ((bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO) &&
	    !(bp->port.supported & SUPPORTED_Autoneg))
		bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_NONE;

	BNX2X_DEV_INFO("req_line_speed %d  req_duplex %d  req_flow_ctrl 0x%x"
		       "  advertising 0x%x\n",
		       bp->link_params.req_line_speed,
		       bp->link_params.req_duplex,
		       bp->link_params.req_flow_ctrl, bp->port.advertising);
}
static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 val, val2;
	u32 config;
	u16 i;

	bp->link_params.bp = bp;
	bp->link_params.port = port;

	bp->link_params.lane_config =
		SHMEM_RD(bp, dev_info.port_hw_config[port].lane_config);
	bp->link_params.ext_phy_config =
		SHMEM_RD(bp,
			 dev_info.port_hw_config[port].external_phy_config);
	bp->link_params.speed_cap_mask =
		SHMEM_RD(bp,
			 dev_info.port_hw_config[port].speed_capability_mask);

	bp->port.link_config =
		SHMEM_RD(bp, dev_info.port_feature_config[port].link_config);

	/* Get the 4 lanes xgxs config rx and tx */
	for (i = 0; i < 2; i++) {
		val = SHMEM_RD(bp,
			   dev_info.port_hw_config[port].xgxs_config_rx[i<<1]);
		bp->link_params.xgxs_config_rx[i << 1] = ((val>>16) & 0xffff);
		bp->link_params.xgxs_config_rx[(i << 1) + 1] = (val & 0xffff);

		val = SHMEM_RD(bp,
			   dev_info.port_hw_config[port].xgxs_config_tx[i<<1]);
		bp->link_params.xgxs_config_tx[i << 1] = ((val>>16) & 0xffff);
		bp->link_params.xgxs_config_tx[(i << 1) + 1] = (val & 0xffff);
	}

	config = SHMEM_RD(bp, dev_info.port_feature_config[port].config);
	if (config & PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_ENABLED)
		bp->link_params.feature_config_flags |=
				FEATURE_CONFIG_MODULE_ENFORCMENT_ENABLED;
	else
		bp->link_params.feature_config_flags &=
				~FEATURE_CONFIG_MODULE_ENFORCMENT_ENABLED;

	/* If the device is capable of WoL, set the default state according
	 * to the HW
	 */
	bp->wol = (!(bp->flags & NO_WOL_FLAG) &&
		   (config & PORT_FEATURE_WOL_ENABLED));

	BNX2X_DEV_INFO("lane_config 0x%08x  ext_phy_config 0x%08x"
		       "  speed_cap_mask 0x%08x  link_config 0x%08x\n",
		       bp->link_params.lane_config,
		       bp->link_params.ext_phy_config,
		       bp->link_params.speed_cap_mask, bp->port.link_config);

	bp->link_params.switch_cfg = (bp->port.link_config &
				      PORT_FEATURE_CONNECTED_SWITCH_MASK);
	bnx2x_link_settings_supported(bp, bp->link_params.switch_cfg);

	bnx2x_link_settings_requested(bp);

	val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper);
	val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower);
	bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
	bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
	bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
	bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
	bp->dev->dev_addr[4] = (u8)(val >> 8 & 0xff);
	bp->dev->dev_addr[5] = (u8)(val & 0xff);
	memcpy(bp->link_params.mac_addr, bp->dev->dev_addr, ETH_ALEN);
	memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
}
static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);
	u32 val, val2;
	int rc = 0;

	bnx2x_get_common_hwinfo(bp);

	bp->e1hov = 0;
	bp->e1hmf = 0;
	if (CHIP_IS_E1H(bp)) {
		bp->mf_config =
			SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);

		val = (SHMEM_RD(bp, mf_cfg.func_mf_config[func].e1hov_tag) &
		       FUNC_MF_CFG_E1HOV_TAG_MASK);
		if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
			bp->e1hov = val;
			bp->e1hmf = 1;
			BNX2X_DEV_INFO("MF mode  E1HOV for func %d is %d "
				       "(0x%04x)\n",
				       func, bp->e1hov, bp->e1hov);
		} else {
			BNX2X_DEV_INFO("single function mode\n");
			if (BP_E1HVN(bp)) {
				BNX2X_ERR("!!!  No valid E1HOV for func %d,"
					  "  aborting\n", func);
				rc = -EPERM;
			}
		}
	}

	if (!BP_NOMCP(bp)) {
		bnx2x_get_port_hwinfo(bp);

		bp->fw_seq = (SHMEM_RD(bp, func_mb[func].drv_mb_header) &
			      DRV_MSG_SEQ_NUMBER_MASK);
		BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
	}

	if (IS_E1HMF(bp)) {
		val2 = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_upper);
		val = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_lower);
		if ((val2 != FUNC_MF_CFG_UPPERMAC_DEFAULT) &&
		    (val != FUNC_MF_CFG_LOWERMAC_DEFAULT)) {
			bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
			bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
			bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
			bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
			bp->dev->dev_addr[4] = (u8)(val >> 8 & 0xff);
			bp->dev->dev_addr[5] = (u8)(val & 0xff);
			memcpy(bp->link_params.mac_addr, bp->dev->dev_addr,
			       ETH_ALEN);
			memcpy(bp->dev->perm_addr, bp->dev->dev_addr,
			       ETH_ALEN);
		}

		return rc;
	}

	if (BP_NOMCP(bp)) {
		/* only supposed to happen on emulation/FPGA */
		BNX2X_ERR("warning random MAC workaround active\n");
		random_ether_addr(bp->dev->dev_addr);
		memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
	}

	return rc;
}
static int __devinit bnx2x_init_bp(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);
	int timer_interval;
	int rc;

	/* Disable interrupt handling until HW is initialized */
	atomic_set(&bp->intr_sem, 1);
	smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */

	mutex_init(&bp->port.phy_mutex);

	INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task);
	INIT_WORK(&bp->reset_task, bnx2x_reset_task);

	rc = bnx2x_get_hwinfo(bp);

	/* need to reset chip if undi was active */
	if (!BP_NOMCP(bp))
		bnx2x_undi_unload(bp);

	if (CHIP_REV_IS_FPGA(bp))
		printk(KERN_ERR PFX "FPGA detected\n");

	if (BP_NOMCP(bp) && (func == 0))
		printk(KERN_ERR PFX
		       "MCP disabled, must load devices in order!\n");

	/* Set multi queue mode */
	if ((multi_mode != ETH_RSS_MODE_DISABLED) &&
	    ((int_mode == INT_MODE_INTx) || (int_mode == INT_MODE_MSI))) {
		printk(KERN_ERR PFX
		      "Multi disabled since int_mode requested is not MSI-X\n");
		multi_mode = ETH_RSS_MODE_DISABLED;
	}
	bp->multi_mode = multi_mode;

	/* Set TPA flags */
	if (disable_tpa) {
		bp->flags &= ~TPA_ENABLE_FLAG;
		bp->dev->features &= ~NETIF_F_LRO;
	} else {
		bp->flags |= TPA_ENABLE_FLAG;
		bp->dev->features |= NETIF_F_LRO;
	}

	bp->mrrs = mrrs;

	bp->tx_ring_size = MAX_TX_AVAIL;
	bp->rx_ring_size = MAX_RX_AVAIL;

	bp->rx_csum = 1;

	bp->tx_ticks = 50;
	bp->rx_ticks = 25;

	timer_interval = (CHIP_REV_IS_SLOW(bp) ? 5*HZ : HZ);
	bp->current_interval = (poll ? poll : timer_interval);

	init_timer(&bp->timer);
	bp->timer.expires = jiffies + bp->current_interval;
	bp->timer.data = (unsigned long) bp;
	bp->timer.function = bnx2x_timer;

	return rc;
}
/*
 * ethtool service functions
 */

/* All ethtool functions called with rtnl_lock */

static int bnx2x_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct bnx2x *bp = netdev_priv(dev);

	cmd->supported = bp->port.supported;
	cmd->advertising = bp->port.advertising;

	if (netif_carrier_ok(dev)) {
		cmd->speed = bp->link_vars.line_speed;
		cmd->duplex = bp->link_vars.duplex;
	} else {
		cmd->speed = bp->link_params.req_line_speed;
		cmd->duplex = bp->link_params.req_duplex;
	}
	if (IS_E1HMF(bp)) {
		u16 vn_max_rate;

		vn_max_rate = ((bp->mf_config & FUNC_MF_CFG_MAX_BW_MASK) >>
			       FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
		if (vn_max_rate < cmd->speed)
			cmd->speed = vn_max_rate;
	}

	if (bp->link_params.switch_cfg == SWITCH_CFG_10G) {
		u32 ext_phy_type =
			XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);

		switch (ext_phy_type) {
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
			cmd->port = PORT_FIBRE;
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481:
			cmd->port = PORT_TP;
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
			BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
				  bp->link_params.ext_phy_config);
			break;

		default:
			DP(NETIF_MSG_LINK, "BAD XGXS ext_phy_config 0x%x\n",
			   bp->link_params.ext_phy_config);
			break;
		}
	} else
		cmd->port = PORT_TP;

	cmd->phy_address = bp->port.phy_addr;
	cmd->transceiver = XCVR_INTERNAL;

	if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
		cmd->autoneg = AUTONEG_ENABLE;
	else
		cmd->autoneg = AUTONEG_DISABLE;

	cmd->maxtxpkt = 0;
	cmd->maxrxpkt = 0;

	DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
	   DP_LEVEL "  supported 0x%x  advertising 0x%x  speed %d\n"
	   DP_LEVEL "  duplex %d  port %d  phy_address %d  transceiver %d\n"
	   DP_LEVEL "  autoneg %d  maxtxpkt %d  maxrxpkt %d\n",
	   cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
	   cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
	   cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);

	return 0;
}
static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct bnx2x *bp = netdev_priv(dev);
	u32 advertising;

	if (IS_E1HMF(bp))
		return 0;

	DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
	   DP_LEVEL "  supported 0x%x  advertising 0x%x  speed %d\n"
	   DP_LEVEL "  duplex %d  port %d  phy_address %d  transceiver %d\n"
	   DP_LEVEL "  autoneg %d  maxtxpkt %d  maxrxpkt %d\n",
	   cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
	   cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
	   cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);

	if (cmd->autoneg == AUTONEG_ENABLE) {
		if (!(bp->port.supported & SUPPORTED_Autoneg)) {
			DP(NETIF_MSG_LINK, "Autoneg not supported\n");
			return -EINVAL;
		}

		/* advertise the requested speed and duplex if supported */
		cmd->advertising &= bp->port.supported;

		bp->link_params.req_line_speed = SPEED_AUTO_NEG;
		bp->link_params.req_duplex = DUPLEX_FULL;
		bp->port.advertising |= (ADVERTISED_Autoneg |
					 cmd->advertising);

	} else { /* forced speed */
		/* advertise the requested speed and duplex if supported */
		switch (cmd->speed) {
		case SPEED_10:
			if (cmd->duplex == DUPLEX_FULL) {
				if (!(bp->port.supported &
				      SUPPORTED_10baseT_Full)) {
					DP(NETIF_MSG_LINK,
					   "10M full not supported\n");
					return -EINVAL;
				}

				advertising = (ADVERTISED_10baseT_Full |
					       ADVERTISED_TP);
			} else {
				if (!(bp->port.supported &
				      SUPPORTED_10baseT_Half)) {
					DP(NETIF_MSG_LINK,
					   "10M half not supported\n");
					return -EINVAL;
				}

				advertising = (ADVERTISED_10baseT_Half |
					       ADVERTISED_TP);
			}
			break;

		case SPEED_100:
			if (cmd->duplex == DUPLEX_FULL) {
				if (!(bp->port.supported &
				      SUPPORTED_100baseT_Full)) {
					DP(NETIF_MSG_LINK,
					   "100M full not supported\n");
					return -EINVAL;
				}

				advertising = (ADVERTISED_100baseT_Full |
					       ADVERTISED_TP);
			} else {
				if (!(bp->port.supported &
				      SUPPORTED_100baseT_Half)) {
					DP(NETIF_MSG_LINK,
					   "100M half not supported\n");
					return -EINVAL;
				}

				advertising = (ADVERTISED_100baseT_Half |
					       ADVERTISED_TP);
			}
			break;

		case SPEED_1000:
			if (cmd->duplex != DUPLEX_FULL) {
				DP(NETIF_MSG_LINK, "1G half not supported\n");
				return -EINVAL;
			}

			if (!(bp->port.supported & SUPPORTED_1000baseT_Full)) {
				DP(NETIF_MSG_LINK, "1G full not supported\n");
				return -EINVAL;
			}

			advertising = (ADVERTISED_1000baseT_Full |
				       ADVERTISED_TP);
			break;

		case SPEED_2500:
			if (cmd->duplex != DUPLEX_FULL) {
				DP(NETIF_MSG_LINK,
				   "2.5G half not supported\n");
				return -EINVAL;
			}

			if (!(bp->port.supported & SUPPORTED_2500baseX_Full)) {
				DP(NETIF_MSG_LINK,
				   "2.5G full not supported\n");
				return -EINVAL;
			}

			advertising = (ADVERTISED_2500baseX_Full |
				       ADVERTISED_TP);
			break;

		case SPEED_10000:
			if (cmd->duplex != DUPLEX_FULL) {
				DP(NETIF_MSG_LINK, "10G half not supported\n");
				return -EINVAL;
			}

			if (!(bp->port.supported & SUPPORTED_10000baseT_Full)) {
				DP(NETIF_MSG_LINK, "10G full not supported\n");
				return -EINVAL;
			}

			advertising = (ADVERTISED_10000baseT_Full |
				       ADVERTISED_FIBRE);
			break;

		default:
			DP(NETIF_MSG_LINK, "Unsupported speed\n");
			return -EINVAL;
		}

		bp->link_params.req_line_speed = cmd->speed;
		bp->link_params.req_duplex = cmd->duplex;
		bp->port.advertising = advertising;
	}

	DP(NETIF_MSG_LINK, "req_line_speed %d\n"
	   DP_LEVEL "  req_duplex %d  advertising 0x%x\n",
	   bp->link_params.req_line_speed, bp->link_params.req_duplex,
	   bp->port.advertising);

	if (netif_running(dev)) {
		bnx2x_stats_handle(bp, STATS_EVENT_STOP);
		bnx2x_link_set(bp);
	}

	return 0;
}
#define PHY_FW_VER_LEN			10

static void bnx2x_get_drvinfo(struct net_device *dev,
			      struct ethtool_drvinfo *info)
{
	struct bnx2x *bp = netdev_priv(dev);
	u8 phy_fw_ver[PHY_FW_VER_LEN];

	strcpy(info->driver, DRV_MODULE_NAME);
	strcpy(info->version, DRV_MODULE_VERSION);

	phy_fw_ver[0] = '\0';
	if (bp->port.pmf) {
		bnx2x_acquire_phy_lock(bp);
		bnx2x_get_ext_phy_fw_version(&bp->link_params,
					     (bp->state != BNX2X_STATE_CLOSED),
					     phy_fw_ver, PHY_FW_VER_LEN);
		bnx2x_release_phy_lock(bp);
	}

	snprintf(info->fw_version, 32, "BC:%d.%d.%d%s%s",
		 (bp->common.bc_ver & 0xff0000) >> 16,
		 (bp->common.bc_ver & 0xff00) >> 8,
		 (bp->common.bc_ver & 0xff),
		 ((phy_fw_ver[0] != '\0') ? " PHY:" : ""), phy_fw_ver);
	strcpy(info->bus_info, pci_name(bp->pdev));
	info->n_stats = BNX2X_NUM_STATS;
	info->testinfo_len = BNX2X_NUM_TESTS;
	info->eedump_len = bp->common.flash_size;
	info->regdump_len = 0;
}
#define IS_E1_ONLINE(info)	(((info) & RI_E1_ONLINE) == RI_E1_ONLINE)
#define IS_E1H_ONLINE(info)	(((info) & RI_E1H_ONLINE) == RI_E1H_ONLINE)

static int bnx2x_get_regs_len(struct net_device *dev)
{
	static u32 regdump_len;
	struct bnx2x *bp = netdev_priv(dev);
	int i;

	if (regdump_len)
		return regdump_len;

	if (CHIP_IS_E1(bp)) {
		for (i = 0; i < REGS_COUNT; i++)
			if (IS_E1_ONLINE(reg_addrs[i].info))
				regdump_len += reg_addrs[i].size;

		for (i = 0; i < WREGS_COUNT_E1; i++)
			if (IS_E1_ONLINE(wreg_addrs_e1[i].info))
				regdump_len += wreg_addrs_e1[i].size *
					(1 + wreg_addrs_e1[i].read_regs_count);

	} else { /* E1H */
		for (i = 0; i < REGS_COUNT; i++)
			if (IS_E1H_ONLINE(reg_addrs[i].info))
				regdump_len += reg_addrs[i].size;

		for (i = 0; i < WREGS_COUNT_E1H; i++)
			if (IS_E1H_ONLINE(wreg_addrs_e1h[i].info))
				regdump_len += wreg_addrs_e1h[i].size *
					(1 + wreg_addrs_e1h[i].read_regs_count);
	}
	regdump_len *= 4;
	regdump_len += sizeof(struct dump_hdr);

	return regdump_len;
}
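/*
 * The register dump produced below is a dump_hdr followed by raw
 * 32-bit register values; bnx2x_get_regs_len() above and
 * bnx2x_get_regs() must agree on which registers are online for the
 * running chip (E1 vs E1H) or the buffer arithmetic breaks.
 */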
static void bnx2x_get_regs(struct net_device *dev,
			   struct ethtool_regs *regs, void *_p)
{
	u32 *p = _p, i, j;
	struct bnx2x *bp = netdev_priv(dev);
	struct dump_hdr dump_hdr = {0};

	regs->version = 0;
	memset(p, 0, regs->len);

	if (!netif_running(bp->dev))
		return;

	dump_hdr.hdr_size = (sizeof(struct dump_hdr) / 4) - 1;
	dump_hdr.dump_sign = dump_sign_all;
	dump_hdr.xstorm_waitp = REG_RD(bp, XSTORM_WAITP_ADDR);
	dump_hdr.tstorm_waitp = REG_RD(bp, TSTORM_WAITP_ADDR);
	dump_hdr.ustorm_waitp = REG_RD(bp, USTORM_WAITP_ADDR);
	dump_hdr.cstorm_waitp = REG_RD(bp, CSTORM_WAITP_ADDR);
	dump_hdr.info = CHIP_IS_E1(bp) ? RI_E1_ONLINE : RI_E1H_ONLINE;

	memcpy(p, &dump_hdr, sizeof(struct dump_hdr));
	p += dump_hdr.hdr_size + 1;

	if (CHIP_IS_E1(bp)) {
		for (i = 0; i < REGS_COUNT; i++)
			if (IS_E1_ONLINE(reg_addrs[i].info))
				for (j = 0; j < reg_addrs[i].size; j++)
					*p++ = REG_RD(bp,
						      reg_addrs[i].addr + j*4);

	} else { /* E1H */
		for (i = 0; i < REGS_COUNT; i++)
			if (IS_E1H_ONLINE(reg_addrs[i].info))
				for (j = 0; j < reg_addrs[i].size; j++)
					*p++ = REG_RD(bp,
						      reg_addrs[i].addr + j*4);
	}
}
static void bnx2x_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct bnx2x *bp = netdev_priv(dev);

	if (bp->flags & NO_WOL_FLAG) {
		wol->supported = 0;
		wol->wolopts = 0;
	} else {
		wol->supported = WAKE_MAGIC;
		if (bp->wol)
			wol->wolopts = WAKE_MAGIC;
		else
			wol->wolopts = 0;
	}
	memset(&wol->sopass, 0, sizeof(wol->sopass));
}

static int bnx2x_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct bnx2x *bp = netdev_priv(dev);

	if (wol->wolopts & ~WAKE_MAGIC)
		return -EINVAL;

	if (wol->wolopts & WAKE_MAGIC) {
		if (bp->flags & NO_WOL_FLAG)
			return -EINVAL;

		bp->wol = 1;
	} else
		bp->wol = 0;

	return 0;
}
8678 static u32 bnx2x_get_msglevel(struct net_device *dev)
8680 struct bnx2x *bp = netdev_priv(dev);
8682 return bp->msglevel;
8685 static void bnx2x_set_msglevel(struct net_device *dev, u32 level)
8687 struct bnx2x *bp = netdev_priv(dev);
8689 if (capable(CAP_NET_ADMIN))
8690 bp->msglevel = level;
8693 static int bnx2x_nway_reset(struct net_device *dev)
8695 struct bnx2x *bp = netdev_priv(dev);
8700 if (netif_running(dev)) {
8701 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
8709 bnx2x_get_link(struct net_device *dev)
8711 struct bnx2x *bp = netdev_priv(dev);
8713 return bp->link_vars.link_up;
8716 static int bnx2x_get_eeprom_len(struct net_device *dev)
8718 struct bnx2x *bp = netdev_priv(dev);
8720 return bp->common.flash_size;
8723 static int bnx2x_acquire_nvram_lock(struct bnx2x *bp)
8725 int port = BP_PORT(bp);
8729 /* adjust timeout for emulation/FPGA */
8730 count = NVRAM_TIMEOUT_COUNT;
8731 if (CHIP_REV_IS_SLOW(bp))
8734 /* request access to nvram interface */
8735 REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
8736 (MCPR_NVM_SW_ARB_ARB_REQ_SET1 << port));
8738 for (i = 0; i < count*10; i++) {
8739 val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
8740 if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))
8746 if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))) {
8747 DP(BNX2X_MSG_NVM, "cannot get access to nvram interface\n");
8754 static int bnx2x_release_nvram_lock(struct bnx2x *bp)
8756 int port = BP_PORT(bp);
8760 /* adjust timeout for emulation/FPGA */
8761 count = NVRAM_TIMEOUT_COUNT;
8762 if (CHIP_REV_IS_SLOW(bp))
8765 /* relinquish nvram interface */
8766 REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
8767 (MCPR_NVM_SW_ARB_ARB_REQ_CLR1 << port));
8769 for (i = 0; i < count*10; i++) {
8770 val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
8771 if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)))
8777 if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)) {
8778 DP(BNX2X_MSG_NVM, "cannot free access to nvram interface\n");
8785 static void bnx2x_enable_nvram_access(struct bnx2x *bp)
8789 val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
8791 /* enable both bits, even on read */
8792 REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
8793 (val | MCPR_NVM_ACCESS_ENABLE_EN |
8794 MCPR_NVM_ACCESS_ENABLE_WR_EN));
8797 static void bnx2x_disable_nvram_access(struct bnx2x *bp)
8801 val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
8803 /* disable both bits, even after read */
8804 REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
8805 (val & ~(MCPR_NVM_ACCESS_ENABLE_EN |
8806 MCPR_NVM_ACCESS_ENABLE_WR_EN)));
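/* A minimal sketch (illustrative only, hence the #if 0; the name
 * bnx2x_nvram_peek is made up) of the access sequence every NVRAM
 * helper below follows: arbitrate for the interface, enable it,
 * transfer dwords, then undo both steps in reverse order.
 */
#if 0
static int bnx2x_nvram_peek(struct bnx2x *bp, u32 offset, __be32 *val)
{
	int rc = bnx2x_acquire_nvram_lock(bp);

	if (rc)
		return rc;
	bnx2x_enable_nvram_access(bp);
	/* a single dword: the burst both starts and ends here */
	rc = bnx2x_nvram_read_dword(bp, offset, val,
				    MCPR_NVM_COMMAND_FIRST |
				    MCPR_NVM_COMMAND_LAST);
	bnx2x_disable_nvram_access(bp);
	bnx2x_release_nvram_lock(bp);
	return rc;
}
#endif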
8809 static int bnx2x_nvram_read_dword(struct bnx2x *bp, u32 offset, __be32 *ret_val,
8815 /* build the command word */
8816 cmd_flags |= MCPR_NVM_COMMAND_DOIT;
8818 /* need to clear DONE bit separately */
8819 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);
8821 /* address of the NVRAM to read from */
8822 REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
8823 (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));
8825 /* issue a read command */
8826 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);
8828 /* adjust timeout for emulation/FPGA */
8829 count = NVRAM_TIMEOUT_COUNT;
8830 if (CHIP_REV_IS_SLOW(bp))
8833 /* wait for completion */
8836 for (i = 0; i < count; i++) {
8838 val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
8840 if (val & MCPR_NVM_COMMAND_DONE) {
8841 val = REG_RD(bp, MCP_REG_MCPR_NVM_READ);
8842 /* we read NVRAM data in CPU order,
8843 * but ethtool sees it as an array of bytes;
8844 * converting to big-endian does the job */
8845 *ret_val = cpu_to_be32(val);
8854 static int bnx2x_nvram_read(struct bnx2x *bp, u32 offset, u8 *ret_buf,
8861 if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
8863 "Invalid parameter: offset 0x%x buf_size 0x%x\n",
8868 if (offset + buf_size > bp->common.flash_size) {
8869 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
8870 " buf_size (0x%x) > flash_size (0x%x)\n",
8871 offset, buf_size, bp->common.flash_size);
8875 /* request access to nvram interface */
8876 rc = bnx2x_acquire_nvram_lock(bp);
8880 /* enable access to nvram interface */
8881 bnx2x_enable_nvram_access(bp);
8883 /* read the first word(s) */
8884 cmd_flags = MCPR_NVM_COMMAND_FIRST;
8885 while ((buf_size > sizeof(u32)) && (rc == 0)) {
8886 rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
8887 memcpy(ret_buf, &val, 4);
8889 /* advance to the next dword */
8890 offset += sizeof(u32);
8891 ret_buf += sizeof(u32);
8892 buf_size -= sizeof(u32);
8897 cmd_flags |= MCPR_NVM_COMMAND_LAST;
8898 rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
8899 memcpy(ret_buf, &val, 4);
8902 /* disable access to nvram interface */
8903 bnx2x_disable_nvram_access(bp);
8904 bnx2x_release_nvram_lock(bp);
8909 static int bnx2x_get_eeprom(struct net_device *dev,
8910 struct ethtool_eeprom *eeprom, u8 *eebuf)
8912 struct bnx2x *bp = netdev_priv(dev);
8915 if (!netif_running(dev))
8918 DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
8919 DP_LEVEL " magic 0x%x offset 0x%x (%d) len 0x%x (%d)\n",
8920 eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
8921 eeprom->len, eeprom->len);
8923 /* parameters already validated in ethtool_get_eeprom */
8925 rc = bnx2x_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
8930 static int bnx2x_nvram_write_dword(struct bnx2x *bp, u32 offset, u32 val,
8935 /* build the command word */
8936 cmd_flags |= MCPR_NVM_COMMAND_DOIT | MCPR_NVM_COMMAND_WR;
8938 /* need to clear DONE bit separately */
8939 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);
8941 /* write the data */
8942 REG_WR(bp, MCP_REG_MCPR_NVM_WRITE, val);
8944 /* address of the NVRAM to write to */
8945 REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
8946 (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));
8948 /* issue the write command */
8949 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);
8951 /* adjust timeout for emulation/FPGA */
8952 count = NVRAM_TIMEOUT_COUNT;
8953 if (CHIP_REV_IS_SLOW(bp))
8956 /* wait for completion */
8958 for (i = 0; i < count; i++) {
8960 val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
8961 if (val & MCPR_NVM_COMMAND_DONE) {
8970 #define BYTE_OFFSET(offset) (8 * (offset & 0x03))
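/* Worked example: for offset 0x4e6, offset & 0x03 == 2, so
 * BYTE_OFFSET() yields 16; bnx2x_nvram_write1() below reads the
 * aligned dword at 0x4e4, splices the single byte into bits 23:16
 * of it, and writes the dword back.
 */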
8972 static int bnx2x_nvram_write1(struct bnx2x *bp, u32 offset, u8 *data_buf,
8980 if (offset + buf_size > bp->common.flash_size) {
8981 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
8982 " buf_size (0x%x) > flash_size (0x%x)\n",
8983 offset, buf_size, bp->common.flash_size);
8987 /* request access to nvram interface */
8988 rc = bnx2x_acquire_nvram_lock(bp);
8992 /* enable access to nvram interface */
8993 bnx2x_enable_nvram_access(bp);
8995 cmd_flags = (MCPR_NVM_COMMAND_FIRST | MCPR_NVM_COMMAND_LAST);
8996 align_offset = (offset & ~0x03);
8997 rc = bnx2x_nvram_read_dword(bp, align_offset, &val, cmd_flags);
9000 val &= ~(0xff << BYTE_OFFSET(offset));
9001 val |= (*data_buf << BYTE_OFFSET(offset));
9003 /* NVRAM data is returned as an array of bytes;
9004 * convert it back to CPU order */
9005 val = be32_to_cpu(val);
9007 rc = bnx2x_nvram_write_dword(bp, align_offset, val,
9011 /* disable access to nvram interface */
9012 bnx2x_disable_nvram_access(bp);
9013 bnx2x_release_nvram_lock(bp);
9018 static int bnx2x_nvram_write(struct bnx2x *bp, u32 offset, u8 *data_buf,
9026 if (buf_size == 1) /* ethtool */
9027 return bnx2x_nvram_write1(bp, offset, data_buf, buf_size);
9029 if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
9031 "Invalid parameter: offset 0x%x buf_size 0x%x\n",
9036 if (offset + buf_size > bp->common.flash_size) {
9037 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
9038 " buf_size (0x%x) > flash_size (0x%x)\n",
9039 offset, buf_size, bp->common.flash_size);
9043 /* request access to nvram interface */
9044 rc = bnx2x_acquire_nvram_lock(bp);
9048 /* enable access to nvram interface */
9049 bnx2x_enable_nvram_access(bp);
9052 cmd_flags = MCPR_NVM_COMMAND_FIRST;
9053 while ((written_so_far < buf_size) && (rc == 0)) {
9054 if (written_so_far == (buf_size - sizeof(u32)))
9055 cmd_flags |= MCPR_NVM_COMMAND_LAST;
9056 else if (((offset + 4) % NVRAM_PAGE_SIZE) == 0)
9057 cmd_flags |= MCPR_NVM_COMMAND_LAST;
9058 else if ((offset % NVRAM_PAGE_SIZE) == 0)
9059 cmd_flags |= MCPR_NVM_COMMAND_FIRST;
9061 memcpy(&val, data_buf, 4);
9063 rc = bnx2x_nvram_write_dword(bp, offset, val, cmd_flags);
9065 /* advance to the next dword */
9066 offset += sizeof(u32);
9067 data_buf += sizeof(u32);
9068 written_so_far += sizeof(u32);
9072 /* disable access to nvram interface */
9073 bnx2x_disable_nvram_access(bp);
9074 bnx2x_release_nvram_lock(bp);
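/* The FIRST/LAST flag handling above keeps every NVRAM burst inside
 * one page: a dword ending on a NVRAM_PAGE_SIZE boundary is flagged
 * LAST, the dword starting the next page is flagged FIRST again, and
 * the final dword of the whole buffer is unconditionally LAST.  A
 * large image is thus programmed as a series of page-sized bursts.
 */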
9079 static int bnx2x_set_eeprom(struct net_device *dev,
9080 struct ethtool_eeprom *eeprom, u8 *eebuf)
9082 struct bnx2x *bp = netdev_priv(dev);
9085 if (!netif_running(dev))
9088 DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
9089 DP_LEVEL " magic 0x%x offset 0x%x (%d) len 0x%x (%d)\n",
9090 eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
9091 eeprom->len, eeprom->len);
9093 /* parameters already validated in ethtool_set_eeprom */
9095 /* If the magic number is PHY (0x00504859) upgrade the PHY FW */
9096 if (eeprom->magic == 0x00504859)
9099 bnx2x_acquire_phy_lock(bp);
9100 rc = bnx2x_flash_download(bp, BP_PORT(bp),
9101 bp->link_params.ext_phy_config,
9102 (bp->state != BNX2X_STATE_CLOSED),
9103 eebuf, eeprom->len);
9104 if ((bp->state == BNX2X_STATE_OPEN) ||
9105 (bp->state == BNX2X_STATE_DISABLED)) {
9106 rc |= bnx2x_link_reset(&bp->link_params,
9108 rc |= bnx2x_phy_init(&bp->link_params,
9111 bnx2x_release_phy_lock(bp);
9113 } else /* Only the PMF can access the PHY */
9116 rc = bnx2x_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
9121 static int bnx2x_get_coalesce(struct net_device *dev,
9122 struct ethtool_coalesce *coal)
9124 struct bnx2x *bp = netdev_priv(dev);
9126 memset(coal, 0, sizeof(struct ethtool_coalesce));
9128 coal->rx_coalesce_usecs = bp->rx_ticks;
9129 coal->tx_coalesce_usecs = bp->tx_ticks;
9134 static int bnx2x_set_coalesce(struct net_device *dev,
9135 struct ethtool_coalesce *coal)
9137 struct bnx2x *bp = netdev_priv(dev);
9139 bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
9140 if (bp->rx_ticks > BNX2X_MAX_COALESCE_TOUT)
9141 bp->rx_ticks = BNX2X_MAX_COALESCE_TOUT;
9143 bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
9144 if (bp->tx_ticks > BNX2X_MAX_COALESCE_TOUT)
9145 bp->tx_ticks = BNX2X_MAX_COALESCE_TOUT;
9147 if (netif_running(dev))
9148 bnx2x_update_coalesce(bp);
9153 static void bnx2x_get_ringparam(struct net_device *dev,
9154 struct ethtool_ringparam *ering)
9156 struct bnx2x *bp = netdev_priv(dev);
9158 ering->rx_max_pending = MAX_RX_AVAIL;
9159 ering->rx_mini_max_pending = 0;
9160 ering->rx_jumbo_max_pending = 0;
9162 ering->rx_pending = bp->rx_ring_size;
9163 ering->rx_mini_pending = 0;
9164 ering->rx_jumbo_pending = 0;
9166 ering->tx_max_pending = MAX_TX_AVAIL;
9167 ering->tx_pending = bp->tx_ring_size;
9170 static int bnx2x_set_ringparam(struct net_device *dev,
9171 struct ethtool_ringparam *ering)
9173 struct bnx2x *bp = netdev_priv(dev);
9176 if ((ering->rx_pending > MAX_RX_AVAIL) ||
9177 (ering->tx_pending > MAX_TX_AVAIL) ||
9178 (ering->tx_pending <= MAX_SKB_FRAGS + 4))
9181 bp->rx_ring_size = ering->rx_pending;
9182 bp->tx_ring_size = ering->tx_pending;
9184 if (netif_running(dev)) {
9185 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
9186 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
9192 static void bnx2x_get_pauseparam(struct net_device *dev,
9193 struct ethtool_pauseparam *epause)
9195 struct bnx2x *bp = netdev_priv(dev);
9197 epause->autoneg = (bp->link_params.req_flow_ctrl ==
9198 BNX2X_FLOW_CTRL_AUTO) &&
9199 (bp->link_params.req_line_speed == SPEED_AUTO_NEG);
9201 epause->rx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) ==
9202 BNX2X_FLOW_CTRL_RX);
9203 epause->tx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX) ==
9204 BNX2X_FLOW_CTRL_TX);
9206 DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
9207 DP_LEVEL " autoneg %d rx_pause %d tx_pause %d\n",
9208 epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
9211 static int bnx2x_set_pauseparam(struct net_device *dev,
9212 struct ethtool_pauseparam *epause)
9214 struct bnx2x *bp = netdev_priv(dev);
9219 DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
9220 DP_LEVEL " autoneg %d rx_pause %d tx_pause %d\n",
9221 epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
9223 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO;
9225 if (epause->rx_pause)
9226 bp->link_params.req_flow_ctrl |= BNX2X_FLOW_CTRL_RX;
9228 if (epause->tx_pause)
9229 bp->link_params.req_flow_ctrl |= BNX2X_FLOW_CTRL_TX;
9231 if (bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO)
9232 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_NONE;
9234 if (epause->autoneg) {
9235 if (!(bp->port.supported & SUPPORTED_Autoneg)) {
9236 DP(NETIF_MSG_LINK, "autoneg not supported\n");
9240 if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
9241 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO;
9245 "req_flow_ctrl 0x%x\n", bp->link_params.req_flow_ctrl);
9247 if (netif_running(dev)) {
9248 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
9255 static int bnx2x_set_flags(struct net_device *dev, u32 data)
9257 struct bnx2x *bp = netdev_priv(dev);
9261 /* TPA requires Rx CSUM offloading */
9262 if ((data & ETH_FLAG_LRO) && bp->rx_csum) {
9263 if (!(dev->features & NETIF_F_LRO)) {
9264 dev->features |= NETIF_F_LRO;
9265 bp->flags |= TPA_ENABLE_FLAG;
9269 } else if (dev->features & NETIF_F_LRO) {
9270 dev->features &= ~NETIF_F_LRO;
9271 bp->flags &= ~TPA_ENABLE_FLAG;
9275 if (changed && netif_running(dev)) {
9276 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
9277 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
9283 static u32 bnx2x_get_rx_csum(struct net_device *dev)
9285 struct bnx2x *bp = netdev_priv(dev);
9290 static int bnx2x_set_rx_csum(struct net_device *dev, u32 data)
9292 struct bnx2x *bp = netdev_priv(dev);
9297 /* Disable TPA when Rx CSUM is disabled; otherwise all
9298 TPA'ed packets will be discarded due to a wrong TCP CSUM */
9300 u32 flags = ethtool_op_get_flags(dev);
9302 rc = bnx2x_set_flags(dev, (flags & ~ETH_FLAG_LRO));
9308 static int bnx2x_set_tso(struct net_device *dev, u32 data)
9311 dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
9312 dev->features |= NETIF_F_TSO6;
9314 dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO_ECN);
9315 dev->features &= ~NETIF_F_TSO6;
9321 static const struct {
9322 char string[ETH_GSTRING_LEN];
9323 } bnx2x_tests_str_arr[BNX2X_NUM_TESTS] = {
9324 { "register_test (offline)" },
9325 { "memory_test (offline)" },
9326 { "loopback_test (offline)" },
9327 { "nvram_test (online)" },
9328 { "interrupt_test (online)" },
9329 { "link_test (online)" },
9330 { "idle check (online)" }
9333 static int bnx2x_self_test_count(struct net_device *dev)
9335 return BNX2X_NUM_TESTS;
9338 static int bnx2x_test_registers(struct bnx2x *bp)
9340 int idx, i, rc = -ENODEV;
9342 int port = BP_PORT(bp);
9343 static const struct {
9348 /* 0 */ { BRB1_REG_PAUSE_LOW_THRESHOLD_0, 4, 0x000003ff },
9349 { DORQ_REG_DB_ADDR0, 4, 0xffffffff },
9350 { HC_REG_AGG_INT_0, 4, 0x000003ff },
9351 { PBF_REG_MAC_IF0_ENABLE, 4, 0x00000001 },
9352 { PBF_REG_P0_INIT_CRD, 4, 0x000007ff },
9353 { PRS_REG_CID_PORT_0, 4, 0x00ffffff },
9354 { PXP2_REG_PSWRQ_CDU0_L2P, 4, 0x000fffff },
9355 { PXP2_REG_RQ_CDU0_EFIRST_MEM_ADDR, 8, 0x0003ffff },
9356 { PXP2_REG_PSWRQ_TM0_L2P, 4, 0x000fffff },
9357 { PXP2_REG_RQ_USDM0_EFIRST_MEM_ADDR, 8, 0x0003ffff },
9358 /* 10 */ { PXP2_REG_PSWRQ_TSDM0_L2P, 4, 0x000fffff },
9359 { QM_REG_CONNNUM_0, 4, 0x000fffff },
9360 { TM_REG_LIN0_MAX_ACTIVE_CID, 4, 0x0003ffff },
9361 { SRC_REG_KEYRSS0_0, 40, 0xffffffff },
9362 { SRC_REG_KEYRSS0_7, 40, 0xffffffff },
9363 { XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD00, 4, 0x00000001 },
9364 { XCM_REG_WU_DA_CNT_CMD00, 4, 0x00000003 },
9365 { XCM_REG_GLB_DEL_ACK_MAX_CNT_0, 4, 0x000000ff },
9366 { NIG_REG_EGRESS_MNG0_FIFO, 20, 0xffffffff },
9367 { NIG_REG_LLH0_T_BIT, 4, 0x00000001 },
9368 /* 20 */ { NIG_REG_EMAC0_IN_EN, 4, 0x00000001 },
9369 { NIG_REG_BMAC0_IN_EN, 4, 0x00000001 },
9370 { NIG_REG_XCM0_OUT_EN, 4, 0x00000001 },
9371 { NIG_REG_BRB0_OUT_EN, 4, 0x00000001 },
9372 { NIG_REG_LLH0_XCM_MASK, 4, 0x00000007 },
9373 { NIG_REG_LLH0_ACPI_PAT_6_LEN, 68, 0x000000ff },
9374 { NIG_REG_LLH0_ACPI_PAT_0_CRC, 68, 0xffffffff },
9375 { NIG_REG_LLH0_DEST_MAC_0_0, 160, 0xffffffff },
9376 { NIG_REG_LLH0_DEST_IP_0_1, 160, 0xffffffff },
9377 { NIG_REG_LLH0_IPV4_IPV6_0, 160, 0x00000001 },
9378 /* 30 */ { NIG_REG_LLH0_DEST_UDP_0, 160, 0x0000ffff },
9379 { NIG_REG_LLH0_DEST_TCP_0, 160, 0x0000ffff },
9380 { NIG_REG_LLH0_VLAN_ID_0, 160, 0x00000fff },
9381 { NIG_REG_XGXS_SERDES0_MODE_SEL, 4, 0x00000001 },
9382 { NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0, 4, 0x00000001 },
9383 { NIG_REG_STATUS_INTERRUPT_PORT0, 4, 0x07ffffff },
9384 { NIG_REG_XGXS0_CTRL_EXTREMOTEMDIOST, 24, 0x00000001 },
9385 { NIG_REG_SERDES0_CTRL_PHY_ADDR, 16, 0x0000001f },
9387 { 0xffffffff, 0, 0x00000000 }
9390 if (!netif_running(bp->dev))
9393 /* Repeat the test twice:
9394 First by writing 0x00000000, second by writing 0xffffffff */
9395 for (idx = 0; idx < 2; idx++) {
9402 wr_val = 0xffffffff;
9406 for (i = 0; reg_tbl[i].offset0 != 0xffffffff; i++) {
9407 u32 offset, mask, save_val, val;
9409 offset = reg_tbl[i].offset0 + port*reg_tbl[i].offset1;
9410 mask = reg_tbl[i].mask;
9412 save_val = REG_RD(bp, offset);
9414 REG_WR(bp, offset, wr_val);
9415 val = REG_RD(bp, offset);
9417 /* Restore the original register's value */
9418 REG_WR(bp, offset, save_val);
9420 /* verify the value is as expected */
9421 if ((val & mask) != (wr_val & mask))
9432 static int bnx2x_test_memory(struct bnx2x *bp)
9434 int i, j, rc = -ENODEV;
9436 static const struct {
9440 { CCM_REG_XX_DESCR_TABLE, CCM_REG_XX_DESCR_TABLE_SIZE },
9441 { CFC_REG_ACTIVITY_COUNTER, CFC_REG_ACTIVITY_COUNTER_SIZE },
9442 { CFC_REG_LINK_LIST, CFC_REG_LINK_LIST_SIZE },
9443 { DMAE_REG_CMD_MEM, DMAE_REG_CMD_MEM_SIZE },
9444 { TCM_REG_XX_DESCR_TABLE, TCM_REG_XX_DESCR_TABLE_SIZE },
9445 { UCM_REG_XX_DESCR_TABLE, UCM_REG_XX_DESCR_TABLE_SIZE },
9446 { XCM_REG_XX_DESCR_TABLE, XCM_REG_XX_DESCR_TABLE_SIZE },
9450 static const struct {
9456 { "CCM_PRTY_STS", CCM_REG_CCM_PRTY_STS, 0x3ffc0, 0 },
9457 { "CFC_PRTY_STS", CFC_REG_CFC_PRTY_STS, 0x2, 0x2 },
9458 { "DMAE_PRTY_STS", DMAE_REG_DMAE_PRTY_STS, 0, 0 },
9459 { "TCM_PRTY_STS", TCM_REG_TCM_PRTY_STS, 0x3ffc0, 0 },
9460 { "UCM_PRTY_STS", UCM_REG_UCM_PRTY_STS, 0x3ffc0, 0 },
9461 { "XCM_PRTY_STS", XCM_REG_XCM_PRTY_STS, 0x3ffc1, 0 },
9463 { NULL, 0xffffffff, 0, 0 }
9466 if (!netif_running(bp->dev))
9469 /* Go through all the memories */
9470 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++)
9471 for (j = 0; j < mem_tbl[i].size; j++)
9472 REG_RD(bp, mem_tbl[i].offset + j*4);
9474 /* Check the parity status */
9475 for (i = 0; prty_tbl[i].offset != 0xffffffff; i++) {
9476 val = REG_RD(bp, prty_tbl[i].offset);
9477 if ((CHIP_IS_E1(bp) && (val & ~(prty_tbl[i].e1_mask))) ||
9478 (CHIP_IS_E1H(bp) && (val & ~(prty_tbl[i].e1h_mask)))) {
9480 "%s is 0x%x\n", prty_tbl[i].name, val);
9491 static void bnx2x_wait_for_link(struct bnx2x *bp, u8 link_up)
9496 while (bnx2x_link_test(bp) && cnt--)
9500 static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode, u8 link_up)
9502 unsigned int pkt_size, num_pkts, i;
9503 struct sk_buff *skb;
9504 unsigned char *packet;
9505 struct bnx2x_fastpath *fp = &bp->fp[0];
9506 u16 tx_start_idx, tx_idx;
9507 u16 rx_start_idx, rx_idx;
9509 struct sw_tx_bd *tx_buf;
9510 struct eth_tx_bd *tx_bd;
9512 union eth_rx_cqe *cqe;
9514 struct sw_rx_bd *rx_buf;
9518 /* check the loopback mode */
9519 switch (loopback_mode) {
9520 case BNX2X_PHY_LOOPBACK:
9521 if (bp->link_params.loopback_mode != LOOPBACK_XGXS_10)
9524 case BNX2X_MAC_LOOPBACK:
9525 bp->link_params.loopback_mode = LOOPBACK_BMAC;
9526 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
9532 /* prepare the loopback packet */
9533 pkt_size = (((bp->dev->mtu < ETH_MAX_PACKET_SIZE) ?
9534 bp->dev->mtu : ETH_MAX_PACKET_SIZE) + ETH_HLEN);
9535 skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
9538 goto test_loopback_exit;
9540 packet = skb_put(skb, pkt_size);
9541 memcpy(packet, bp->dev->dev_addr, ETH_ALEN);
9542 memset(packet + ETH_ALEN, 0, (ETH_HLEN - ETH_ALEN));
9543 for (i = ETH_HLEN; i < pkt_size; i++)
9544 packet[i] = (unsigned char) (i & 0xff);
9546 /* send the loopback packet */
9548 tx_start_idx = le16_to_cpu(*fp->tx_cons_sb);
9549 rx_start_idx = le16_to_cpu(*fp->rx_cons_sb);
9551 pkt_prod = fp->tx_pkt_prod++;
9552 tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)];
9553 tx_buf->first_bd = fp->tx_bd_prod;
9556 tx_bd = &fp->tx_desc_ring[TX_BD(fp->tx_bd_prod)];
9557 mapping = pci_map_single(bp->pdev, skb->data,
9558 skb_headlen(skb), PCI_DMA_TODEVICE);
9559 tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
9560 tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
9561 tx_bd->nbd = cpu_to_le16(1);
9562 tx_bd->nbytes = cpu_to_le16(skb_headlen(skb));
9563 tx_bd->vlan = cpu_to_le16(pkt_prod);
9564 tx_bd->bd_flags.as_bitfield = (ETH_TX_BD_FLAGS_START_BD |
9565 ETH_TX_BD_FLAGS_END_BD);
9566 tx_bd->general_data = ((UNICAST_ADDRESS <<
9567 ETH_TX_BD_ETH_ADDR_TYPE_SHIFT) | 1);
9571 le16_add_cpu(&fp->hw_tx_prods->bds_prod, 1);
9572 mb(); /* FW restriction: must not reorder writing nbd and packets */
9573 le32_add_cpu(&fp->hw_tx_prods->packets_prod, 1);
9574 DOORBELL(bp, fp->index, 0);
9580 bp->dev->trans_start = jiffies;
9584 tx_idx = le16_to_cpu(*fp->tx_cons_sb);
9585 if (tx_idx != tx_start_idx + num_pkts)
9586 goto test_loopback_exit;
9588 rx_idx = le16_to_cpu(*fp->rx_cons_sb);
9589 if (rx_idx != rx_start_idx + num_pkts)
9590 goto test_loopback_exit;
9592 cqe = &fp->rx_comp_ring[RCQ_BD(fp->rx_comp_cons)];
9593 cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
9594 if (CQE_TYPE(cqe_fp_flags) || (cqe_fp_flags & ETH_RX_ERROR_FALGS))
9595 goto test_loopback_rx_exit;
9597 len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
9598 if (len != pkt_size)
9599 goto test_loopback_rx_exit;
9601 rx_buf = &fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)];
9603 skb_reserve(skb, cqe->fast_path_cqe.placement_offset);
9604 for (i = ETH_HLEN; i < pkt_size; i++)
9605 if (*(skb->data + i) != (unsigned char) (i & 0xff))
9606 goto test_loopback_rx_exit;
9610 test_loopback_rx_exit:
9612 fp->rx_bd_cons = NEXT_RX_IDX(fp->rx_bd_cons);
9613 fp->rx_bd_prod = NEXT_RX_IDX(fp->rx_bd_prod);
9614 fp->rx_comp_cons = NEXT_RCQ_IDX(fp->rx_comp_cons);
9615 fp->rx_comp_prod = NEXT_RCQ_IDX(fp->rx_comp_prod);
9617 /* Update producers */
9618 bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
9622 bp->link_params.loopback_mode = LOOPBACK_NONE;
9627 static int bnx2x_test_loopback(struct bnx2x *bp, u8 link_up)
9631 if (!netif_running(bp->dev))
9632 return BNX2X_LOOPBACK_FAILED;
9634 bnx2x_netif_stop(bp, 1);
9635 bnx2x_acquire_phy_lock(bp);
9637 res = bnx2x_run_loopback(bp, BNX2X_PHY_LOOPBACK, link_up);
9639 DP(NETIF_MSG_PROBE, " PHY loopback failed (res %d)\n", res);
9640 rc |= BNX2X_PHY_LOOPBACK_FAILED;
9643 res = bnx2x_run_loopback(bp, BNX2X_MAC_LOOPBACK, link_up);
9645 DP(NETIF_MSG_PROBE, " MAC loopback failed (res %d)\n", res);
9646 rc |= BNX2X_MAC_LOOPBACK_FAILED;
9649 bnx2x_release_phy_lock(bp);
9650 bnx2x_netif_start(bp);
9655 #define CRC32_RESIDUAL 0xdebb20e3
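/* 0xdebb20e3 is the well-known CRC-32 residue: a CRC pass over a block
 * that has its own little-endian CRC-32 appended always ends in this
 * constant.  That lets bnx2x_test_nvram() below validate each region
 * with one ether_crc_le() call, without knowing where inside the
 * region the checksum field sits.
 */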
9657 static int bnx2x_test_nvram(struct bnx2x *bp)
9659 static const struct {
9663 { 0, 0x14 }, /* bootstrap */
9664 { 0x14, 0xec }, /* dir */
9665 { 0x100, 0x350 }, /* manuf_info */
9666 { 0x450, 0xf0 }, /* feature_info */
9667 { 0x640, 0x64 }, /* upgrade_key_info */
9669 { 0x708, 0x70 }, /* manuf_key_info */
9673 __be32 buf[0x350 / 4];
9674 u8 *data = (u8 *)buf;
9678 rc = bnx2x_nvram_read(bp, 0, data, 4);
9680 DP(NETIF_MSG_PROBE, "magic value read (rc %d)\n", rc);
9681 goto test_nvram_exit;
9684 magic = be32_to_cpu(buf[0]);
9685 if (magic != 0x669955aa) {
9686 DP(NETIF_MSG_PROBE, "magic value (0x%08x)\n", magic);
9688 goto test_nvram_exit;
9691 for (i = 0; nvram_tbl[i].size; i++) {
9693 rc = bnx2x_nvram_read(bp, nvram_tbl[i].offset, data,
9697 "nvram_tbl[%d] read data (rc %d)\n", i, rc);
9698 goto test_nvram_exit;
9701 csum = ether_crc_le(nvram_tbl[i].size, data);
9702 if (csum != CRC32_RESIDUAL) {
9704 "nvram_tbl[%d] csum value (0x%08x)\n", i, csum);
9706 goto test_nvram_exit;
9714 static int bnx2x_test_intr(struct bnx2x *bp)
9716 struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
9719 if (!netif_running(bp->dev))
9722 config->hdr.length = 0;
9724 config->hdr.offset = (BP_PORT(bp) ? 32 : 0);
9726 config->hdr.offset = BP_FUNC(bp);
9727 config->hdr.client_id = bp->fp->cl_id;
9728 config->hdr.reserved1 = 0;
9730 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
9731 U64_HI(bnx2x_sp_mapping(bp, mac_config)),
9732 U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
9734 bp->set_mac_pending++;
9735 for (i = 0; i < 10; i++) {
9736 if (!bp->set_mac_pending)
9738 msleep_interruptible(10);
9747 static void bnx2x_self_test(struct net_device *dev,
9748 struct ethtool_test *etest, u64 *buf)
9750 struct bnx2x *bp = netdev_priv(dev);
9752 memset(buf, 0, sizeof(u64) * BNX2X_NUM_TESTS);
9754 if (!netif_running(dev))
9757 /* offline tests are not supported in MF mode */
9759 etest->flags &= ~ETH_TEST_FL_OFFLINE;
9761 if (etest->flags & ETH_TEST_FL_OFFLINE) {
9762 int port = BP_PORT(bp);
9766 /* save current value of input enable for TX port IF */
9767 val = REG_RD(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4);
9768 /* disable input for TX port IF */
9769 REG_WR(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4, 0);
9771 link_up = bp->link_vars.link_up;
9772 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
9773 bnx2x_nic_load(bp, LOAD_DIAG);
9774 /* wait until link state is restored */
9775 bnx2x_wait_for_link(bp, link_up);
9777 if (bnx2x_test_registers(bp) != 0) {
9779 etest->flags |= ETH_TEST_FL_FAILED;
9781 if (bnx2x_test_memory(bp) != 0) {
9783 etest->flags |= ETH_TEST_FL_FAILED;
9785 buf[2] = bnx2x_test_loopback(bp, link_up);
9787 etest->flags |= ETH_TEST_FL_FAILED;
9789 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
9791 /* restore input for TX port IF */
9792 REG_WR(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4, val);
9794 bnx2x_nic_load(bp, LOAD_NORMAL);
9795 /* wait until link state is restored */
9796 bnx2x_wait_for_link(bp, link_up);
9798 if (bnx2x_test_nvram(bp) != 0) {
9800 etest->flags |= ETH_TEST_FL_FAILED;
9802 if (bnx2x_test_intr(bp) != 0) {
9804 etest->flags |= ETH_TEST_FL_FAILED;
9807 if (bnx2x_link_test(bp) != 0) {
9809 etest->flags |= ETH_TEST_FL_FAILED;
9812 #ifdef BNX2X_EXTRA_DEBUG
9813 bnx2x_panic_dump(bp);
9817 static const struct {
9820 u8 string[ETH_GSTRING_LEN];
9821 } bnx2x_q_stats_arr[BNX2X_NUM_Q_STATS] = {
9822 /* 1 */ { Q_STATS_OFFSET32(total_bytes_received_hi), 8, "[%d]: rx_bytes" },
9823 { Q_STATS_OFFSET32(error_bytes_received_hi),
9824 8, "[%d]: rx_error_bytes" },
9825 { Q_STATS_OFFSET32(total_unicast_packets_received_hi),
9826 8, "[%d]: rx_ucast_packets" },
9827 { Q_STATS_OFFSET32(total_multicast_packets_received_hi),
9828 8, "[%d]: rx_mcast_packets" },
9829 { Q_STATS_OFFSET32(total_broadcast_packets_received_hi),
9830 8, "[%d]: rx_bcast_packets" },
9831 { Q_STATS_OFFSET32(no_buff_discard_hi), 8, "[%d]: rx_discards" },
9832 { Q_STATS_OFFSET32(rx_err_discard_pkt),
9833 4, "[%d]: rx_phy_ip_err_discards"},
9834 { Q_STATS_OFFSET32(rx_skb_alloc_failed),
9835 4, "[%d]: rx_skb_alloc_discard" },
9836 { Q_STATS_OFFSET32(hw_csum_err), 4, "[%d]: rx_csum_offload_errors" },
9838 /* 10 */{ Q_STATS_OFFSET32(total_bytes_transmitted_hi), 8, "[%d]: tx_bytes" },
9839 { Q_STATS_OFFSET32(total_unicast_packets_transmitted_hi),
9840 8, "[%d]: tx_packets" }
9843 static const struct {
9847 #define STATS_FLAGS_PORT 1
9848 #define STATS_FLAGS_FUNC 2
9849 #define STATS_FLAGS_BOTH (STATS_FLAGS_FUNC | STATS_FLAGS_PORT)
9850 u8 string[ETH_GSTRING_LEN];
9851 } bnx2x_stats_arr[BNX2X_NUM_STATS] = {
9852 /* 1 */ { STATS_OFFSET32(total_bytes_received_hi),
9853 8, STATS_FLAGS_BOTH, "rx_bytes" },
9854 { STATS_OFFSET32(error_bytes_received_hi),
9855 8, STATS_FLAGS_BOTH, "rx_error_bytes" },
9856 { STATS_OFFSET32(total_unicast_packets_received_hi),
9857 8, STATS_FLAGS_BOTH, "rx_ucast_packets" },
9858 { STATS_OFFSET32(total_multicast_packets_received_hi),
9859 8, STATS_FLAGS_BOTH, "rx_mcast_packets" },
9860 { STATS_OFFSET32(total_broadcast_packets_received_hi),
9861 8, STATS_FLAGS_BOTH, "rx_bcast_packets" },
9862 { STATS_OFFSET32(rx_stat_dot3statsfcserrors_hi),
9863 8, STATS_FLAGS_PORT, "rx_crc_errors" },
9864 { STATS_OFFSET32(rx_stat_dot3statsalignmenterrors_hi),
9865 8, STATS_FLAGS_PORT, "rx_align_errors" },
9866 { STATS_OFFSET32(rx_stat_etherstatsundersizepkts_hi),
9867 8, STATS_FLAGS_PORT, "rx_undersize_packets" },
9868 { STATS_OFFSET32(etherstatsoverrsizepkts_hi),
9869 8, STATS_FLAGS_PORT, "rx_oversize_packets" },
9870 /* 10 */{ STATS_OFFSET32(rx_stat_etherstatsfragments_hi),
9871 8, STATS_FLAGS_PORT, "rx_fragments" },
9872 { STATS_OFFSET32(rx_stat_etherstatsjabbers_hi),
9873 8, STATS_FLAGS_PORT, "rx_jabbers" },
9874 { STATS_OFFSET32(no_buff_discard_hi),
9875 8, STATS_FLAGS_BOTH, "rx_discards" },
9876 { STATS_OFFSET32(mac_filter_discard),
9877 4, STATS_FLAGS_PORT, "rx_filtered_packets" },
9878 { STATS_OFFSET32(xxoverflow_discard),
9879 4, STATS_FLAGS_PORT, "rx_fw_discards" },
9880 { STATS_OFFSET32(brb_drop_hi),
9881 8, STATS_FLAGS_PORT, "rx_brb_discard" },
9882 { STATS_OFFSET32(brb_truncate_hi),
9883 8, STATS_FLAGS_PORT, "rx_brb_truncate" },
9884 { STATS_OFFSET32(pause_frames_received_hi),
9885 8, STATS_FLAGS_PORT, "rx_pause_frames" },
9886 { STATS_OFFSET32(rx_stat_maccontrolframesreceived_hi),
9887 8, STATS_FLAGS_PORT, "rx_mac_ctrl_frames" },
9888 { STATS_OFFSET32(nig_timer_max),
9889 4, STATS_FLAGS_PORT, "rx_constant_pause_events" },
9890 /* 20 */{ STATS_OFFSET32(rx_err_discard_pkt),
9891 4, STATS_FLAGS_BOTH, "rx_phy_ip_err_discards"},
9892 { STATS_OFFSET32(rx_skb_alloc_failed),
9893 4, STATS_FLAGS_BOTH, "rx_skb_alloc_discard" },
9894 { STATS_OFFSET32(hw_csum_err),
9895 4, STATS_FLAGS_BOTH, "rx_csum_offload_errors" },
9897 { STATS_OFFSET32(total_bytes_transmitted_hi),
9898 8, STATS_FLAGS_BOTH, "tx_bytes" },
9899 { STATS_OFFSET32(tx_stat_ifhcoutbadoctets_hi),
9900 8, STATS_FLAGS_PORT, "tx_error_bytes" },
9901 { STATS_OFFSET32(total_unicast_packets_transmitted_hi),
9902 8, STATS_FLAGS_BOTH, "tx_packets" },
9903 { STATS_OFFSET32(tx_stat_dot3statsinternalmactransmiterrors_hi),
9904 8, STATS_FLAGS_PORT, "tx_mac_errors" },
9905 { STATS_OFFSET32(rx_stat_dot3statscarriersenseerrors_hi),
9906 8, STATS_FLAGS_PORT, "tx_carrier_errors" },
9907 { STATS_OFFSET32(tx_stat_dot3statssinglecollisionframes_hi),
9908 8, STATS_FLAGS_PORT, "tx_single_collisions" },
9909 { STATS_OFFSET32(tx_stat_dot3statsmultiplecollisionframes_hi),
9910 8, STATS_FLAGS_PORT, "tx_multi_collisions" },
9911 /* 30 */{ STATS_OFFSET32(tx_stat_dot3statsdeferredtransmissions_hi),
9912 8, STATS_FLAGS_PORT, "tx_deferred" },
9913 { STATS_OFFSET32(tx_stat_dot3statsexcessivecollisions_hi),
9914 8, STATS_FLAGS_PORT, "tx_excess_collisions" },
9915 { STATS_OFFSET32(tx_stat_dot3statslatecollisions_hi),
9916 8, STATS_FLAGS_PORT, "tx_late_collisions" },
9917 { STATS_OFFSET32(tx_stat_etherstatscollisions_hi),
9918 8, STATS_FLAGS_PORT, "tx_total_collisions" },
9919 { STATS_OFFSET32(tx_stat_etherstatspkts64octets_hi),
9920 8, STATS_FLAGS_PORT, "tx_64_byte_packets" },
9921 { STATS_OFFSET32(tx_stat_etherstatspkts65octetsto127octets_hi),
9922 8, STATS_FLAGS_PORT, "tx_65_to_127_byte_packets" },
9923 { STATS_OFFSET32(tx_stat_etherstatspkts128octetsto255octets_hi),
9924 8, STATS_FLAGS_PORT, "tx_128_to_255_byte_packets" },
9925 { STATS_OFFSET32(tx_stat_etherstatspkts256octetsto511octets_hi),
9926 8, STATS_FLAGS_PORT, "tx_256_to_511_byte_packets" },
9927 { STATS_OFFSET32(tx_stat_etherstatspkts512octetsto1023octets_hi),
9928 8, STATS_FLAGS_PORT, "tx_512_to_1023_byte_packets" },
9929 { STATS_OFFSET32(etherstatspkts1024octetsto1522octets_hi),
9930 8, STATS_FLAGS_PORT, "tx_1024_to_1522_byte_packets" },
9931 /* 40 */{ STATS_OFFSET32(etherstatspktsover1522octets_hi),
9932 8, STATS_FLAGS_PORT, "tx_1523_to_9022_byte_packets" },
9933 { STATS_OFFSET32(pause_frames_sent_hi),
9934 8, STATS_FLAGS_PORT, "tx_pause_frames" }
9937 #define IS_PORT_STAT(i) \
9938 ((bnx2x_stats_arr[i].flags & STATS_FLAGS_BOTH) == STATS_FLAGS_PORT)
9939 #define IS_FUNC_STAT(i) (bnx2x_stats_arr[i].flags & STATS_FLAGS_FUNC)
9940 #define IS_E1HMF_MODE_STAT(bp) \
9941 (IS_E1HMF(bp) && !(bp->msglevel & BNX2X_MSG_STATS))
9943 static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
9945 struct bnx2x *bp = netdev_priv(dev);
9948 switch (stringset) {
9952 for_each_queue(bp, i) {
9953 for (j = 0; j < BNX2X_NUM_Q_STATS; j++)
9954 sprintf(buf + (k + j)*ETH_GSTRING_LEN,
9955 bnx2x_q_stats_arr[j].string, i);
9956 k += BNX2X_NUM_Q_STATS;
9958 if (IS_E1HMF_MODE_STAT(bp))
9960 for (j = 0; j < BNX2X_NUM_STATS; j++)
9961 strcpy(buf + (k + j)*ETH_GSTRING_LEN,
9962 bnx2x_stats_arr[j].string);
9964 for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
9965 if (IS_E1HMF_MODE_STAT(bp) && IS_PORT_STAT(i))
9967 strcpy(buf + j*ETH_GSTRING_LEN,
9968 bnx2x_stats_arr[i].string);
9975 memcpy(buf, bnx2x_tests_str_arr, sizeof(bnx2x_tests_str_arr));
9980 static int bnx2x_get_stats_count(struct net_device *dev)
9982 struct bnx2x *bp = netdev_priv(dev);
9986 num_stats = BNX2X_NUM_Q_STATS * BNX2X_NUM_QUEUES(bp);
9987 if (!IS_E1HMF_MODE_STAT(bp))
9988 num_stats += BNX2X_NUM_STATS;
9990 if (IS_E1HMF_MODE_STAT(bp)) {
9992 for (i = 0; i < BNX2X_NUM_STATS; i++)
9993 if (IS_FUNC_STAT(i))
9996 num_stats = BNX2X_NUM_STATS;
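/* Layout note for the stat walkers below: a "size 8" counter is stored
 * as two consecutive u32s, most significant word first, and is rebuilt
 * with HILO_U64(*offset, *(offset + 1)), effectively
 * ((u64)hi << 32) + lo; "size 4" counters are widened directly, and
 * "size 0" entries are placeholders that get skipped.
 */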
10002 static void bnx2x_get_ethtool_stats(struct net_device *dev,
10003 struct ethtool_stats *stats, u64 *buf)
10005 struct bnx2x *bp = netdev_priv(dev);
10006 u32 *hw_stats, *offset;
10009 if (is_multi(bp)) {
10011 for_each_queue(bp, i) {
10012 hw_stats = (u32 *)&bp->fp[i].eth_q_stats;
10013 for (j = 0; j < BNX2X_NUM_Q_STATS; j++) {
10014 if (bnx2x_q_stats_arr[j].size == 0) {
10015 /* skip this counter */
10019 offset = (hw_stats +
10020 bnx2x_q_stats_arr[j].offset);
10021 if (bnx2x_q_stats_arr[j].size == 4) {
10022 /* 4-byte counter */
10023 buf[k + j] = (u64) *offset;
10026 /* 8-byte counter */
10027 buf[k + j] = HILO_U64(*offset, *(offset + 1));
10029 k += BNX2X_NUM_Q_STATS;
10031 if (IS_E1HMF_MODE_STAT(bp))
10033 hw_stats = (u32 *)&bp->eth_stats;
10034 for (j = 0; j < BNX2X_NUM_STATS; j++) {
10035 if (bnx2x_stats_arr[j].size == 0) {
10036 /* skip this counter */
10040 offset = (hw_stats + bnx2x_stats_arr[j].offset);
10041 if (bnx2x_stats_arr[j].size == 4) {
10042 /* 4-byte counter */
10043 buf[k + j] = (u64) *offset;
10046 /* 8-byte counter */
10047 buf[k + j] = HILO_U64(*offset, *(offset + 1));
10050 hw_stats = (u32 *)&bp->eth_stats;
10051 for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
10052 if (IS_E1HMF_MODE_STAT(bp) && IS_PORT_STAT(i))
10054 if (bnx2x_stats_arr[i].size == 0) {
10055 /* skip this counter */
10060 offset = (hw_stats + bnx2x_stats_arr[i].offset);
10061 if (bnx2x_stats_arr[i].size == 4) {
10062 /* 4-byte counter */
10063 buf[j] = (u64) *offset;
10067 /* 8-byte counter */
10068 buf[j] = HILO_U64(*offset, *(offset + 1));
10074 static int bnx2x_phys_id(struct net_device *dev, u32 data)
10076 struct bnx2x *bp = netdev_priv(dev);
10077 int port = BP_PORT(bp);
10080 if (!netif_running(dev))
10089 for (i = 0; i < (data * 2); i++) {
10091 bnx2x_set_led(bp, port, LED_MODE_OPER, SPEED_1000,
10092 bp->link_params.hw_led_mode,
10093 bp->link_params.chip_id);
10095 bnx2x_set_led(bp, port, LED_MODE_OFF, 0,
10096 bp->link_params.hw_led_mode,
10097 bp->link_params.chip_id);
10099 msleep_interruptible(500);
10100 if (signal_pending(current))
10104 if (bp->link_vars.link_up)
10105 bnx2x_set_led(bp, port, LED_MODE_OPER,
10106 bp->link_vars.line_speed,
10107 bp->link_params.hw_led_mode,
10108 bp->link_params.chip_id);
10113 static struct ethtool_ops bnx2x_ethtool_ops = {
10114 .get_settings = bnx2x_get_settings,
10115 .set_settings = bnx2x_set_settings,
10116 .get_drvinfo = bnx2x_get_drvinfo,
10117 .get_regs_len = bnx2x_get_regs_len,
10118 .get_regs = bnx2x_get_regs,
10119 .get_wol = bnx2x_get_wol,
10120 .set_wol = bnx2x_set_wol,
10121 .get_msglevel = bnx2x_get_msglevel,
10122 .set_msglevel = bnx2x_set_msglevel,
10123 .nway_reset = bnx2x_nway_reset,
10124 .get_link = bnx2x_get_link,
10125 .get_eeprom_len = bnx2x_get_eeprom_len,
10126 .get_eeprom = bnx2x_get_eeprom,
10127 .set_eeprom = bnx2x_set_eeprom,
10128 .get_coalesce = bnx2x_get_coalesce,
10129 .set_coalesce = bnx2x_set_coalesce,
10130 .get_ringparam = bnx2x_get_ringparam,
10131 .set_ringparam = bnx2x_set_ringparam,
10132 .get_pauseparam = bnx2x_get_pauseparam,
10133 .set_pauseparam = bnx2x_set_pauseparam,
10134 .get_rx_csum = bnx2x_get_rx_csum,
10135 .set_rx_csum = bnx2x_set_rx_csum,
10136 .get_tx_csum = ethtool_op_get_tx_csum,
10137 .set_tx_csum = ethtool_op_set_tx_hw_csum,
10138 .set_flags = bnx2x_set_flags,
10139 .get_flags = ethtool_op_get_flags,
10140 .get_sg = ethtool_op_get_sg,
10141 .set_sg = ethtool_op_set_sg,
10142 .get_tso = ethtool_op_get_tso,
10143 .set_tso = bnx2x_set_tso,
10144 .self_test_count = bnx2x_self_test_count,
10145 .self_test = bnx2x_self_test,
10146 .get_strings = bnx2x_get_strings,
10147 .phys_id = bnx2x_phys_id,
10148 .get_stats_count = bnx2x_get_stats_count,
10149 .get_ethtool_stats = bnx2x_get_ethtool_stats,
10152 /* end of ethtool_ops */
10154 /****************************************************************************
10155 * General service functions
10156 ****************************************************************************/
10158 static int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
10162 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);
10166 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
10167 ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
10168 PCI_PM_CTRL_PME_STATUS));
10170 if (pmcsr & PCI_PM_CTRL_STATE_MASK)
10171 /* delay required during transition out of D3hot */
10176 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
10180 pmcsr |= PCI_PM_CTRL_PME_ENABLE;
10182 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
10185 /* No more memory access after this point until the
10186 * device is brought back to D0.
10196 static inline int bnx2x_has_rx_work(struct bnx2x_fastpath *fp)
10200 /* Tell compiler that status block fields can change */
10202 rx_cons_sb = le16_to_cpu(*fp->rx_cons_sb);
10203 if ((rx_cons_sb & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
10205 return (fp->rx_comp_cons != rx_cons_sb);
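/* Note: the status-block consumer counts the RCQ "next page" link
 * element too, so when its low bits sit on MAX_RCQ_DESC_CNT the index
 * must be bumped past the link element before it can be compared with
 * the driver's rx_comp_cons.
 */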
10209 * net_device service functions
10212 static int bnx2x_poll(struct napi_struct *napi, int budget)
10214 struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
10216 struct bnx2x *bp = fp->bp;
10219 #ifdef BNX2X_STOP_ON_ERROR
10220 if (unlikely(bp->panic))
10224 prefetch(fp->tx_buf_ring[TX_BD(fp->tx_pkt_cons)].skb);
10225 prefetch(fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)].skb);
10226 prefetch((char *)(fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)].skb) + 256);
10228 bnx2x_update_fpsb_idx(fp);
10230 if (bnx2x_has_tx_work(fp))
10233 if (bnx2x_has_rx_work(fp)) {
10234 work_done = bnx2x_rx_int(fp, budget);
10236 /* must not complete if we consumed full budget */
10237 if (work_done >= budget)
10241 /* BNX2X_HAS_WORK() reads the status block, thus we need to
10242 * ensure that status block indices have been actually read
10243 * (bnx2x_update_fpsb_idx) prior to this check (BNX2X_HAS_WORK)
10244 * so that we won't write the "newer" value of the status block to IGU
10245 * (if there was a DMA right after BNX2X_HAS_WORK and
10246 * if there is no rmb, the memory reading (bnx2x_update_fpsb_idx)
10247 * may be postponed to right before bnx2x_ack_sb). In this case
10248 * there will never be another interrupt until there is another update
10249 * of the status block, while there is still unhandled work.
10253 if (!BNX2X_HAS_WORK(fp)) {
10254 #ifdef BNX2X_STOP_ON_ERROR
10257 napi_complete(napi);
10259 bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID,
10260 le16_to_cpu(fp->fp_u_idx), IGU_INT_NOP, 1);
10261 bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID,
10262 le16_to_cpu(fp->fp_c_idx), IGU_INT_ENABLE, 1);
10270 /* we split the first BD into header and data BDs
10271 * to ease the pain of our fellow microcode engineers;
10272 * we use one mapping for both BDs.
10273 * So far this has only been observed to happen
10274 * in Other Operating Systems(TM)
10276 static noinline u16 bnx2x_tx_split(struct bnx2x *bp,
10277 struct bnx2x_fastpath *fp,
10278 struct eth_tx_bd **tx_bd, u16 hlen,
10279 u16 bd_prod, int nbd)
10281 struct eth_tx_bd *h_tx_bd = *tx_bd;
10282 struct eth_tx_bd *d_tx_bd;
10283 dma_addr_t mapping;
10284 int old_len = le16_to_cpu(h_tx_bd->nbytes);
10286 /* first fix first BD */
10287 h_tx_bd->nbd = cpu_to_le16(nbd);
10288 h_tx_bd->nbytes = cpu_to_le16(hlen);
10290 DP(NETIF_MSG_TX_QUEUED, "TSO split header size is %d "
10291 "(%x:%x) nbd %d\n", h_tx_bd->nbytes, h_tx_bd->addr_hi,
10292 h_tx_bd->addr_lo, h_tx_bd->nbd);
10294 /* now get a new data BD
10295 * (after the pbd) and fill it */
10296 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
10297 d_tx_bd = &fp->tx_desc_ring[bd_prod];
10299 mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi),
10300 le32_to_cpu(h_tx_bd->addr_lo)) + hlen;
10302 d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
10303 d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
10304 d_tx_bd->nbytes = cpu_to_le16(old_len - hlen);
10306 /* this marks the BD as one that has no individual mapping;
10307 * the FW ignores this flag in a BD not marked start
10309 d_tx_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_SW_LSO;
10310 DP(NETIF_MSG_TX_QUEUED,
10311 "TSO split data size is %d (%x:%x)\n",
10312 d_tx_bd->nbytes, d_tx_bd->addr_hi, d_tx_bd->addr_lo);
10314 /* update tx_bd for marking the last BD flag */
10320 static inline u16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
10323 csum = (u16) ~csum_fold(csum_sub(csum,
10324 csum_partial(t_header - fix, fix, 0)));
10327 csum = (u16) ~csum_fold(csum_add(csum,
10328 csum_partial(t_header, -fix, 0)));
10330 return swab16(csum);
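/* Sketch of the fixup above: the stack's partial checksum may start
 * "fix" bytes before (fix > 0) or after (fix < 0) the transport header
 * the chip will checksum from, so the helper folds those bytes out of,
 * or into, the running sum, e.g. for fix == 2:
 *
 *	csum = ~csum_fold(csum_sub(csum,
 *				   csum_partial(t_header - 2, 2, 0)));
 *
 * and finally byte-swaps the result for the parsing BD.
 */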
10333 static inline u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
10337 if (skb->ip_summed != CHECKSUM_PARTIAL)
10341 if (skb->protocol == htons(ETH_P_IPV6)) {
10343 if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
10344 rc |= XMIT_CSUM_TCP;
10348 if (ip_hdr(skb)->protocol == IPPROTO_TCP)
10349 rc |= XMIT_CSUM_TCP;
10353 if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
10356 else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
10362 #if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
10363 /* check if packet requires linearization (packet is too fragmented)
10364 no need to check fragmentation if page size > 8K (there will be no
10365 violation of FW restrictions) */
10366 static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
10371 int first_bd_sz = 0;
10373 /* 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */
10374 if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - 3)) {
10376 if (xmit_type & XMIT_GSO) {
10377 unsigned short lso_mss = skb_shinfo(skb)->gso_size;
10378 /* Check if LSO packet needs to be copied:
10379 3 = 1 (for headers BD) + 2 (for PBD and last BD) */
10380 int wnd_size = MAX_FETCH_BD - 3;
10381 /* Number of windows to check */
10382 int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size;
10387 /* Headers length */
10388 hlen = (int)(skb_transport_header(skb) - skb->data) +
10391 /* Amount of data (w/o headers) on the linear part of the SKB */
10392 first_bd_sz = skb_headlen(skb) - hlen;
10394 wnd_sum = first_bd_sz;
10396 /* Calculate the first sum - it's special */
10397 for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++)
10399 skb_shinfo(skb)->frags[frag_idx].size;
10401 /* If there was data in the linear part of the skb, check it */
10402 if (first_bd_sz > 0) {
10403 if (unlikely(wnd_sum < lso_mss)) {
10408 wnd_sum -= first_bd_sz;
10411 /* Others are easier: run through the frag list and
10412 check all windows */
10413 for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) {
10415 skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1].size;
10417 if (unlikely(wnd_sum < lso_mss)) {
10422 skb_shinfo(skb)->frags[wnd_idx].size;
10425 /* in non-LSO too fragmented packet should always
10432 if (unlikely(to_copy))
10433 DP(NETIF_MSG_TX_QUEUED,
10434 "Linearization IS REQUIRED for %s packet. "
10435 "num_frags %d hlen %d first_bd_sz %d\n",
10436 (xmit_type & XMIT_GSO) ? "LSO" : "non-LSO",
10437 skb_shinfo(skb)->nr_frags, hlen, first_bd_sz);
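/* Worked example, assuming MAX_FETCH_BD is 13 (so wnd_size == 10):
 * the FW fetches BDs in windows of wnd_size descriptors and every
 * window must supply at least one full MSS of payload.  The loop above
 * therefore seeds the running sum with the linear part (first_bd_sz),
 * slides a wnd_size-frag window across the frag list, and requests
 * linearization the moment any window sums to less than lso_mss.
 */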
10443 /* called with netif_tx_lock
10444 * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
10445 * netif_wake_queue()
10447 static int bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
10449 struct bnx2x *bp = netdev_priv(dev);
10450 struct bnx2x_fastpath *fp;
10451 struct netdev_queue *txq;
10452 struct sw_tx_bd *tx_buf;
10453 struct eth_tx_bd *tx_bd;
10454 struct eth_tx_parse_bd *pbd = NULL;
10455 u16 pkt_prod, bd_prod;
10457 dma_addr_t mapping;
10458 u32 xmit_type = bnx2x_xmit_type(bp, skb);
10459 int vlan_off = (bp->e1hov ? 4 : 0);
10463 #ifdef BNX2X_STOP_ON_ERROR
10464 if (unlikely(bp->panic))
10465 return NETDEV_TX_BUSY;
10468 fp_index = skb_get_queue_mapping(skb);
10469 txq = netdev_get_tx_queue(dev, fp_index);
10471 fp = &bp->fp[fp_index];
10473 if (unlikely(bnx2x_tx_avail(fp) < (skb_shinfo(skb)->nr_frags + 3))) {
10474 fp->eth_q_stats.driver_xoff++;
10475 netif_tx_stop_queue(txq);
10476 BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
10477 return NETDEV_TX_BUSY;
10480 DP(NETIF_MSG_TX_QUEUED, "SKB: summed %x protocol %x protocol(%x,%x)"
10481 " gso type %x xmit_type %x\n",
10482 skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
10483 ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type);
10485 #if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
10486 /* First, check if we need to linearize the skb (due to FW
10487 restrictions). No need to check fragmentation if page size > 8K
10488 (there will be no violation of FW restrictions) */
10489 if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) {
10490 /* Statistics of linearization */
10492 if (skb_linearize(skb) != 0) {
10493 DP(NETIF_MSG_TX_QUEUED, "SKB linearization failed - "
10494 "silently dropping this SKB\n");
10495 dev_kfree_skb_any(skb);
10496 return NETDEV_TX_OK;
10502 Please read carefully. First we use one BD which we mark as start,
10503 then for TSO or xsum we have a parsing info BD,
10504 and only then we have the rest of the TSO BDs.
10505 (don't forget to mark the last one as last,
10506 and to unmap only AFTER you write to the BD ...)
10507 And above all, all PBD sizes are in words - NOT DWORDS!
10510 pkt_prod = fp->tx_pkt_prod++;
10511 bd_prod = TX_BD(fp->tx_bd_prod);
10513 /* get a tx_buf and first BD */
10514 tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)];
10515 tx_bd = &fp->tx_desc_ring[bd_prod];
10517 tx_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
10518 tx_bd->general_data = (UNICAST_ADDRESS <<
10519 ETH_TX_BD_ETH_ADDR_TYPE_SHIFT);
10521 tx_bd->general_data |= (1 << ETH_TX_BD_HDR_NBDS_SHIFT);
10523 /* remember the first BD of the packet */
10524 tx_buf->first_bd = fp->tx_bd_prod;
10527 DP(NETIF_MSG_TX_QUEUED,
10528 "sending pkt %u @%p next_idx %u bd %u @%p\n",
10529 pkt_prod, tx_buf, fp->tx_pkt_prod, bd_prod, tx_bd);
10532 if ((bp->vlgrp != NULL) && vlan_tx_tag_present(skb) &&
10533 (bp->flags & HW_VLAN_TX_FLAG)) {
10534 tx_bd->vlan = cpu_to_le16(vlan_tx_tag_get(skb));
10535 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_VLAN_TAG;
10539 tx_bd->vlan = cpu_to_le16(pkt_prod);
10542 /* turn on parsing and get a BD */
10543 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
10544 pbd = (void *)&fp->tx_desc_ring[bd_prod];
10546 memset(pbd, 0, sizeof(struct eth_tx_parse_bd));
10549 if (xmit_type & XMIT_CSUM) {
10550 hlen = (skb_network_header(skb) - skb->data + vlan_off) / 2;
10552 /* for now NS flag is not used in Linux */
10554 (hlen | ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
10555 ETH_TX_PARSE_BD_LLC_SNAP_EN_SHIFT));
10557 pbd->ip_hlen = (skb_transport_header(skb) -
10558 skb_network_header(skb)) / 2;
10560 hlen += pbd->ip_hlen + tcp_hdrlen(skb) / 2;
10562 pbd->total_hlen = cpu_to_le16(hlen);
10563 hlen = hlen*2 - vlan_off;
10565 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_TCP_CSUM;
10567 if (xmit_type & XMIT_CSUM_V4)
10568 tx_bd->bd_flags.as_bitfield |=
10569 ETH_TX_BD_FLAGS_IP_CSUM;
10571 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IPV6;
10573 if (xmit_type & XMIT_CSUM_TCP) {
10574 pbd->tcp_pseudo_csum = swab16(tcp_hdr(skb)->check);
10577 s8 fix = SKB_CS_OFF(skb); /* signed! */
10579 pbd->global_data |= ETH_TX_PARSE_BD_CS_ANY_FLG;
10580 pbd->cs_offset = fix / 2;
10582 DP(NETIF_MSG_TX_QUEUED,
10583 "hlen %d offset %d fix %d csum before fix %x\n",
10584 le16_to_cpu(pbd->total_hlen), pbd->cs_offset, fix,
10587 /* HW bug: fixup the CSUM */
10588 pbd->tcp_pseudo_csum =
10589 bnx2x_csum_fix(skb_transport_header(skb),
10592 DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n",
10593 pbd->tcp_pseudo_csum);
10597 mapping = pci_map_single(bp->pdev, skb->data,
10598 skb_headlen(skb), PCI_DMA_TODEVICE);
10600 tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
10601 tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
10602 nbd = skb_shinfo(skb)->nr_frags + ((pbd == NULL) ? 1 : 2);
10603 tx_bd->nbd = cpu_to_le16(nbd);
10604 tx_bd->nbytes = cpu_to_le16(skb_headlen(skb));
10606 DP(NETIF_MSG_TX_QUEUED, "first bd @%p addr (%x:%x) nbd %d"
10607 " nbytes %d flags %x vlan %x\n",
10608 tx_bd, tx_bd->addr_hi, tx_bd->addr_lo, le16_to_cpu(tx_bd->nbd),
10609 le16_to_cpu(tx_bd->nbytes), tx_bd->bd_flags.as_bitfield,
10610 le16_to_cpu(tx_bd->vlan));
10612 if (xmit_type & XMIT_GSO) {
10614 DP(NETIF_MSG_TX_QUEUED,
10615 "TSO packet len %d hlen %d total len %d tso size %d\n",
10616 skb->len, hlen, skb_headlen(skb),
10617 skb_shinfo(skb)->gso_size);
10619 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;
10621 if (unlikely(skb_headlen(skb) > hlen))
10622 bd_prod = bnx2x_tx_split(bp, fp, &tx_bd, hlen,
10625 pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
10626 pbd->tcp_send_seq = swab32(tcp_hdr(skb)->seq);
10627 pbd->tcp_flags = pbd_tcp_flags(skb);
10629 if (xmit_type & XMIT_GSO_V4) {
10630 pbd->ip_id = swab16(ip_hdr(skb)->id);
10631 pbd->tcp_pseudo_csum =
10632 swab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
10633 ip_hdr(skb)->daddr,
10634 0, IPPROTO_TCP, 0));
10637 pbd->tcp_pseudo_csum =
10638 swab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
10639 &ipv6_hdr(skb)->daddr,
10640 0, IPPROTO_TCP, 0));
10642 pbd->global_data |= ETH_TX_PARSE_BD_PSEUDO_CS_WITHOUT_LEN;
10645 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
10646 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
10648 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
10649 tx_bd = &fp->tx_desc_ring[bd_prod];
10651 mapping = pci_map_page(bp->pdev, frag->page, frag->page_offset,
10652 frag->size, PCI_DMA_TODEVICE);
10654 tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
10655 tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
10656 tx_bd->nbytes = cpu_to_le16(frag->size);
10657 tx_bd->vlan = cpu_to_le16(pkt_prod);
10658 tx_bd->bd_flags.as_bitfield = 0;
10660 DP(NETIF_MSG_TX_QUEUED,
10661 "frag %d bd @%p addr (%x:%x) nbytes %d flags %x\n",
10662 i, tx_bd, tx_bd->addr_hi, tx_bd->addr_lo,
10663 le16_to_cpu(tx_bd->nbytes), tx_bd->bd_flags.as_bitfield);
10666 /* now at last mark the BD as the last BD */
10667 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_END_BD;
10669 DP(NETIF_MSG_TX_QUEUED, "last bd @%p flags %x\n",
10670 tx_bd, tx_bd->bd_flags.as_bitfield);
10672 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
10674 /* now send a tx doorbell, counting the next BD
10675 * if the packet contains or ends with it
10677 if (TX_BD_POFF(bd_prod) < nbd)
10681 DP(NETIF_MSG_TX_QUEUED,
10682 "PBD @%p ip_data %x ip_hlen %u ip_id %u lso_mss %u"
10683 " tcp_flags %x xsum %x seq %u hlen %u\n",
10684 pbd, pbd->global_data, pbd->ip_hlen, pbd->ip_id,
10685 pbd->lso_mss, pbd->tcp_flags, pbd->tcp_pseudo_csum,
10686 pbd->tcp_send_seq, le16_to_cpu(pbd->total_hlen));
10688 DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d bd %u\n", nbd, bd_prod);
10691 * Make sure that the BD data is updated before updating the producer
10692 * since FW might read the BD right after the producer is updated.
10693 * This is only applicable for weak-ordered memory model archs such
10694 * as IA-64. The following barrier is also mandatory since the FW
10695 * assumes packets must have BDs.
10699 le16_add_cpu(&fp->hw_tx_prods->bds_prod, nbd);
10700 mb(); /* FW restriction: must not reorder writing nbd and packets */
10701 le32_add_cpu(&fp->hw_tx_prods->packets_prod, 1);
10702 DOORBELL(bp, fp->index, 0);
10706 fp->tx_bd_prod += nbd;
10708 if (unlikely(bnx2x_tx_avail(fp) < MAX_SKB_FRAGS + 3)) {
10709 /* We want bnx2x_tx_int to "see" the updated tx_bd_prod
10710 if we put Tx into XOFF state. */
10712 netif_tx_stop_queue(txq);
10713 fp->eth_q_stats.driver_xoff++;
10714 if (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3)
10715 netif_tx_wake_queue(txq);
10719 return NETDEV_TX_OK;
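/* Resulting BD chain for a TSO packet with N frags (sketch):
 *
 *	start BD      (headers, ETH_TX_BD_FLAGS_START_BD)
 *	parsing BD    (hlen, lso_mss, pseudo checksum)
 *	split data BD (only when headlen > hlen)
 *	N frag BDs, the final one flagged ETH_TX_BD_FLAGS_END_BD
 *
 * nbd is the frag count plus one (no parsing BD) or two, and the
 * doorbell is rung only after the write barrier so the FW never sees
 * the producer ahead of the BD contents.
 */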
10722 /* called with rtnl_lock */
10723 static int bnx2x_open(struct net_device *dev)
10725 struct bnx2x *bp = netdev_priv(dev);
10727 netif_carrier_off(dev);
10729 bnx2x_set_power_state(bp, PCI_D0);
10731 return bnx2x_nic_load(bp, LOAD_OPEN);
10734 /* called with rtnl_lock */
10735 static int bnx2x_close(struct net_device *dev)
10737 struct bnx2x *bp = netdev_priv(dev);
10739 /* Unload the driver, release IRQs */
10740 bnx2x_nic_unload(bp, UNLOAD_CLOSE);
10741 if (atomic_read(&bp->pdev->enable_cnt) == 1)
10742 if (!CHIP_REV_IS_SLOW(bp))
10743 bnx2x_set_power_state(bp, PCI_D3hot);
10748 /* called with netif_tx_lock from dev_mcast.c */
10749 static void bnx2x_set_rx_mode(struct net_device *dev)
10751 struct bnx2x *bp = netdev_priv(dev);
10752 u32 rx_mode = BNX2X_RX_MODE_NORMAL;
10753 int port = BP_PORT(bp);
10755 if (bp->state != BNX2X_STATE_OPEN) {
10756 DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
10760 DP(NETIF_MSG_IFUP, "dev->flags = %x\n", dev->flags);
10762 if (dev->flags & IFF_PROMISC)
10763 rx_mode = BNX2X_RX_MODE_PROMISC;
10765 else if ((dev->flags & IFF_ALLMULTI) ||
10766 ((dev->mc_count > BNX2X_MAX_MULTICAST) && CHIP_IS_E1(bp)))
10767 rx_mode = BNX2X_RX_MODE_ALLMULTI;
10769 else { /* some multicasts */
10770 if (CHIP_IS_E1(bp)) {
10771 int i, old, offset;
10772 struct dev_mc_list *mclist;
10773 struct mac_configuration_cmd *config =
10774 bnx2x_sp(bp, mcast_config);
10776 for (i = 0, mclist = dev->mc_list;
10777 mclist && (i < dev->mc_count);
10778 i++, mclist = mclist->next) {
10780 config->config_table[i].
10781 cam_entry.msb_mac_addr =
10782 swab16(*(u16 *)&mclist->dmi_addr[0]);
10783 config->config_table[i].
10784 cam_entry.middle_mac_addr =
10785 swab16(*(u16 *)&mclist->dmi_addr[2]);
10786 config->config_table[i].
10787 cam_entry.lsb_mac_addr =
10788 swab16(*(u16 *)&mclist->dmi_addr[4]);
10789 config->config_table[i].cam_entry.flags =
10791 config->config_table[i].
10792 target_table_entry.flags = 0;
10793 config->config_table[i].
10794 target_table_entry.client_id = 0;
10795 config->config_table[i].
10796 target_table_entry.vlan_id = 0;
10799 "setting MCAST[%d] (%04x:%04x:%04x)\n", i,
10800 config->config_table[i].
10801 cam_entry.msb_mac_addr,
10802 config->config_table[i].
10803 cam_entry.middle_mac_addr,
10804 config->config_table[i].
10805 cam_entry.lsb_mac_addr);
10807 old = config->hdr.length;
10809 for (; i < old; i++) {
10810 if (CAM_IS_INVALID(config->
10811 config_table[i])) {
10812 /* already invalidated */
10816 CAM_INVALIDATE(config->
10821 if (CHIP_REV_IS_SLOW(bp))
10822 offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
10824 offset = BNX2X_MAX_MULTICAST*(1 + port);
10826 config->hdr.length = i;
10827 config->hdr.offset = offset;
10828 config->hdr.client_id = bp->fp->cl_id;
10829 config->hdr.reserved1 = 0;
10831 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
10832 U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
10833 U64_LO(bnx2x_sp_mapping(bp, mcast_config)),
10836 /* Accept one or more multicasts */
10837 struct dev_mc_list *mclist;
10838 u32 mc_filter[MC_HASH_SIZE];
10839 u32 crc, bit, regidx;
10842 memset(mc_filter, 0, 4 * MC_HASH_SIZE);
10844 for (i = 0, mclist = dev->mc_list;
10845 mclist && (i < dev->mc_count);
10846 i++, mclist = mclist->next) {
10848 DP(NETIF_MSG_IFUP, "Adding mcast MAC: %pM\n",
10851 crc = crc32c_le(0, mclist->dmi_addr, ETH_ALEN);
10852 bit = (crc >> 24) & 0xff;
10855 mc_filter[regidx] |= (1 << bit);
10858 for (i = 0; i < MC_HASH_SIZE; i++)
10859 REG_WR(bp, MC_HASH_OFFSET(bp, i),
10864 bp->rx_mode = rx_mode;
10865 bnx2x_set_storm_rx_mode(bp);
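/* Worked example of the non-E1 multicast filter above, assuming the
 * usual regidx = bit >> 5, bit &= 0x1f split: the top byte of
 * crc32c(addr) picks one of 256 filter bits, so crc 0x9a...... sets
 * bit 26 (0x9a & 0x1f) of mc_filter[4] (0x9a >> 5), and the filter
 * words are then programmed via MC_HASH_OFFSET().
 */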
10868 /* called with rtnl_lock */
10869 static int bnx2x_change_mac_addr(struct net_device *dev, void *p)
10871 struct sockaddr *addr = p;
10872 struct bnx2x *bp = netdev_priv(dev);
10874 if (!is_valid_ether_addr((u8 *)(addr->sa_data)))
10877 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
10878 if (netif_running(dev)) {
10879 if (CHIP_IS_E1(bp))
10880 bnx2x_set_mac_addr_e1(bp, 1);
10882 bnx2x_set_mac_addr_e1h(bp, 1);
/* called with rtnl_lock */
static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct mii_ioctl_data *data = if_mii(ifr);
	struct bnx2x *bp = netdev_priv(dev);
	int port = BP_PORT(bp);
	int err;

	switch (cmd) {
	case SIOCGMIIPHY:
		data->phy_id = bp->port.phy_addr;
		/* fallthrough */
	case SIOCGMIIREG: {
		u16 mii_regval;

		if (!netif_running(dev))
			return -EAGAIN;

		mutex_lock(&bp->port.phy_mutex);
		err = bnx2x_cl45_read(bp, port, 0, bp->port.phy_addr,
				      DEFAULT_PHY_DEV_ADDR,
				      (data->reg_num & 0x1f), &mii_regval);
		data->val_out = mii_regval;
		mutex_unlock(&bp->port.phy_mutex);
		return err;
	}

	case SIOCSMIIREG:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		if (!netif_running(dev))
			return -EAGAIN;

		mutex_lock(&bp->port.phy_mutex);
		err = bnx2x_cl45_write(bp, port, 0, bp->port.phy_addr,
				       DEFAULT_PHY_DEV_ADDR,
				       (data->reg_num & 0x1f), data->val_in);
		mutex_unlock(&bp->port.phy_mutex);
		return err;

	default:
		/* do nothing */
		break;
	}

	return -EOPNOTSUPP;
}
/* called with rtnl_lock */
static int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
{
	struct bnx2x *bp = netdev_priv(dev);
	int rc = 0;

	if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
	    ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE))
		return -EINVAL;

	/* This does not race with packet allocation
	 * because the actual alloc size is
	 * only updated as part of load
	 */
	dev->mtu = new_mtu;

	if (netif_running(dev)) {
		bnx2x_nic_unload(bp, UNLOAD_NORMAL);
		rc = bnx2x_nic_load(bp, LOAD_NORMAL);
	}

	return rc;
}
static void bnx2x_tx_timeout(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

#ifdef BNX2X_STOP_ON_ERROR
	if (!bp->panic)
		bnx2x_panic();
#endif
	/* This allows the netif to be shutdown gracefully before resetting */
	schedule_work(&bp->reset_task);
}
#ifdef BCM_VLAN
/* called with rtnl_lock */
static void bnx2x_vlan_rx_register(struct net_device *dev,
				   struct vlan_group *vlgrp)
{
	struct bnx2x *bp = netdev_priv(dev);

	bp->vlgrp = vlgrp;

	/* Set flags according to the required capabilities */
	bp->flags &= ~(HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG);

	if (dev->features & NETIF_F_HW_VLAN_TX)
		bp->flags |= HW_VLAN_TX_FLAG;

	if (dev->features & NETIF_F_HW_VLAN_RX)
		bp->flags |= HW_VLAN_RX_FLAG;

	if (netif_running(dev))
		bnx2x_set_client_config(bp);
}
#endif
#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
static void poll_bnx2x(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	disable_irq(bp->pdev->irq);
	bnx2x_interrupt(bp->pdev->irq, dev);
	enable_irq(bp->pdev->irq);
}
#endif
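/* The poll controller is used by netpoll clients such as netconsole, which
 * may run with interrupts disabled; the handler is therefore invoked
 * directly, bracketed by disable_irq()/enable_irq() to avoid racing the
 * real interrupt.
 */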
static const struct net_device_ops bnx2x_netdev_ops = {
	.ndo_open		= bnx2x_open,
	.ndo_stop		= bnx2x_close,
	.ndo_start_xmit		= bnx2x_start_xmit,
	.ndo_set_multicast_list	= bnx2x_set_rx_mode,
	.ndo_set_mac_address	= bnx2x_change_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_do_ioctl		= bnx2x_ioctl,
	.ndo_change_mtu		= bnx2x_change_mtu,
	.ndo_tx_timeout		= bnx2x_tx_timeout,
#ifdef BCM_VLAN
	.ndo_vlan_rx_register	= bnx2x_vlan_rx_register,
#endif
#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
	.ndo_poll_controller	= poll_bnx2x,
#endif
};
static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
				    struct net_device *dev)
{
	struct bnx2x *bp;
	int rc;

	SET_NETDEV_DEV(dev, &pdev->dev);
	bp = netdev_priv(dev);

	bp->dev = dev;
	bp->pdev = pdev;
	bp->flags = 0;
	bp->func = PCI_FUNC(pdev->devfn);

	rc = pci_enable_device(pdev);
	if (rc) {
		printk(KERN_ERR PFX "Cannot enable PCI device, aborting\n");
		goto err_out;
	}

	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
		printk(KERN_ERR PFX "Cannot find PCI device base address,"
		       " aborting\n");
		rc = -ENODEV;
		goto err_out_disable;
	}

	if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
		printk(KERN_ERR PFX "Cannot find second PCI device"
		       " base address, aborting\n");
		rc = -ENODEV;
		goto err_out_disable;
	}

	if (atomic_read(&pdev->enable_cnt) == 1) {
		rc = pci_request_regions(pdev, DRV_MODULE_NAME);
		if (rc) {
			printk(KERN_ERR PFX "Cannot obtain PCI resources,"
			       " aborting\n");
			goto err_out_disable;
		}

		pci_set_master(pdev);
		pci_save_state(pdev);
	}

	bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
	if (bp->pm_cap == 0) {
		printk(KERN_ERR PFX "Cannot find power management"
		       " capability, aborting\n");
		rc = -EIO;
		goto err_out_release;
	}

	bp->pcie_cap = pci_find_capability(pdev, PCI_CAP_ID_EXP);
	if (bp->pcie_cap == 0) {
		printk(KERN_ERR PFX "Cannot find PCI Express capability,"
		       " aborting\n");
		rc = -EIO;
		goto err_out_release;
	}

	if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) == 0) {
		bp->flags |= USING_DAC_FLAG;
		if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)) != 0) {
			printk(KERN_ERR PFX "pci_set_consistent_dma_mask"
			       " failed, aborting\n");
			rc = -EIO;
			goto err_out_release;
		}

	} else if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) {
		printk(KERN_ERR PFX "System does not support DMA,"
		       " aborting\n");
		rc = -EIO;
		goto err_out_release;
	}

	dev->mem_start = pci_resource_start(pdev, 0);
	dev->base_addr = dev->mem_start;
	dev->mem_end = pci_resource_end(pdev, 0);

	dev->irq = pdev->irq;

	bp->regview = pci_ioremap_bar(pdev, 0);
	if (!bp->regview) {
		printk(KERN_ERR PFX "Cannot map register space, aborting\n");
		rc = -ENOMEM;
		goto err_out_release;
	}

	bp->doorbells = ioremap_nocache(pci_resource_start(pdev, 2),
					min_t(u64, BNX2X_DB_SIZE,
					      pci_resource_len(pdev, 2)));
	if (!bp->doorbells) {
		printk(KERN_ERR PFX "Cannot map doorbell space, aborting\n");
		rc = -ENOMEM;
		goto err_out_unmap;
	}
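	/* BAR 0 is the device register window (bp->regview) and BAR 2 is the
	 * doorbell aperture (bp->doorbells); both were validated as
	 * IORESOURCE_MEM at the top of this function.
	 */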
	bnx2x_set_power_state(bp, PCI_D0);

	/* clean indirect addresses */
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);
	REG_WR(bp, PXP2_REG_PGL_ADDR_88_F0 + BP_PORT(bp)*16, 0);
	REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F0 + BP_PORT(bp)*16, 0);
	REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0 + BP_PORT(bp)*16, 0);
	REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0 + BP_PORT(bp)*16, 0);

	dev->watchdog_timeo = TX_TIMEOUT;

	dev->netdev_ops = &bnx2x_netdev_ops;
	dev->ethtool_ops = &bnx2x_ethtool_ops;
	dev->features |= NETIF_F_SG;
	dev->features |= NETIF_F_HW_CSUM;
	if (bp->flags & USING_DAC_FLAG)
		dev->features |= NETIF_F_HIGHDMA;
#ifdef BCM_VLAN
	dev->features |= (NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX);
	bp->flags |= (HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG);
#endif
	dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
	dev->features |= NETIF_F_TSO6;

	return 0;

err_out_unmap:
	if (bp->regview) {
		iounmap(bp->regview);
		bp->regview = NULL;
	}
	if (bp->doorbells) {
		iounmap(bp->doorbells);
		bp->doorbells = NULL;
	}

err_out_release:
	if (atomic_read(&pdev->enable_cnt) == 1)
		pci_release_regions(pdev);

err_out_disable:
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);

err_out:
	return rc;
}
static int __devinit bnx2x_get_pcie_width(struct bnx2x *bp)
{
	u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);

	val = (val & PCICFG_LINK_WIDTH) >> PCICFG_LINK_WIDTH_SHIFT;
	return val;
}

/* return value of 1=2.5GHz 2=5GHz */
static int __devinit bnx2x_get_pcie_speed(struct bnx2x *bp)
{
	u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);

	val = (val & PCICFG_LINK_SPEED) >> PCICFG_LINK_SPEED_SHIFT;
	return val;
}
static int __devinit bnx2x_check_firmware(struct bnx2x *bp)
{
	struct bnx2x_fw_file_hdr *fw_hdr;
	struct bnx2x_fw_file_section *sections;
	u16 *ops_offsets;
	u32 offset, len, num_ops;
	int i;
	const struct firmware *firmware = bp->firmware;
	const u8 *fw_ver;

	if (firmware->size < sizeof(struct bnx2x_fw_file_hdr))
		return -EINVAL;

	fw_hdr = (struct bnx2x_fw_file_hdr *)firmware->data;
	sections = (struct bnx2x_fw_file_section *)fw_hdr;

	/* Make sure none of the offsets and sizes make us read beyond
	 * the end of the firmware data */
	for (i = 0; i < sizeof(*fw_hdr) / sizeof(*sections); i++) {
		offset = be32_to_cpu(sections[i].offset);
		len = be32_to_cpu(sections[i].len);
		if (offset + len > firmware->size) {
			printk(KERN_ERR PFX "Section %d length is out of bounds\n", i);
			return -EINVAL;
		}
	}

	/* Likewise for the init_ops offsets */
	offset = be32_to_cpu(fw_hdr->init_ops_offsets.offset);
	ops_offsets = (u16 *)(firmware->data + offset);
	num_ops = be32_to_cpu(fw_hdr->init_ops.len) / sizeof(struct raw_op);

	for (i = 0; i < be32_to_cpu(fw_hdr->init_ops_offsets.len) / 2; i++) {
		if (be16_to_cpu(ops_offsets[i]) > num_ops) {
			printk(KERN_ERR PFX "Section offset %d is out of bounds\n", i);
			return -EINVAL;
		}
	}

	/* Check FW version */
	offset = be32_to_cpu(fw_hdr->fw_version.offset);
	fw_ver = firmware->data + offset;
	if ((fw_ver[0] != BCM_5710_FW_MAJOR_VERSION) ||
	    (fw_ver[1] != BCM_5710_FW_MINOR_VERSION) ||
	    (fw_ver[2] != BCM_5710_FW_REVISION_VERSION) ||
	    (fw_ver[3] != BCM_5710_FW_ENGINEERING_VERSION)) {
		printk(KERN_ERR PFX "Bad FW version:%d.%d.%d.%d."
		       " Should be %d.%d.%d.%d\n",
		       fw_ver[0], fw_ver[1], fw_ver[2],
		       fw_ver[3], BCM_5710_FW_MAJOR_VERSION,
		       BCM_5710_FW_MINOR_VERSION,
		       BCM_5710_FW_REVISION_VERSION,
		       BCM_5710_FW_ENGINEERING_VERSION);
		return -EINVAL;
	}

	return 0;
}
static inline void be32_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
{
	u32 i;
	const __be32 *source = (const __be32 *)_source;
	u32 *target = (u32 *)_target;

	for (i = 0; i < n/4; i++)
		target[i] = be32_to_cpu(source[i]);
}
/*
 * Ops array is stored in the following format:
 * {op(8bit), offset(24bit, big endian), data(32bit, big endian)}
 */
static inline void bnx2x_prep_ops(const u8 *_source, u8 *_target, u32 n)
{
	u32 i, j, tmp;
	const __be32 *source = (const __be32 *)_source;
	struct raw_op *target = (struct raw_op *)_target;

	for (i = 0, j = 0; i < n/8; i++, j += 2) {
		tmp = be32_to_cpu(source[j]);
		target[i].op = (tmp >> 24) & 0xff;
		target[i].offset = tmp & 0xffffff;
		target[i].raw_data = be32_to_cpu(source[j+1]);
	}
}
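/* Worked example (illustrative bytes): the big-endian record
 *   02 01 02 03 de ad be ef
 * decodes to op = 0x02, offset = 0x010203, raw_data = 0xdeadbeef.
 */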
static inline void be16_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
{
	u32 i;
	u16 *target = (u16 *)_target;
	const __be16 *source = (const __be16 *)_source;

	for (i = 0; i < n/2; i++)
		target[i] = be16_to_cpu(source[i]);
}
#define BNX2X_ALLOC_AND_SET(arr, lbl, func) \
	do { \
		u32 len = be32_to_cpu(fw_hdr->arr.len); \
		bp->arr = kmalloc(len, GFP_KERNEL); \
		if (!bp->arr) { \
			printk(KERN_ERR PFX "Failed to allocate %d bytes for "#arr"\n", len); \
			goto lbl; \
		} \
		func(bp->firmware->data + \
		     be32_to_cpu(fw_hdr->arr.offset), \
		     (u8 *)bp->arr, len); \
	} while (0)
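/* For illustration, BNX2X_ALLOC_AND_SET(init_data, request_firmware_exit,
 * be32_to_cpu_n) expands (roughly, printk omitted) to:
 *
 *	u32 len = be32_to_cpu(fw_hdr->init_data.len);
 *	bp->init_data = kmalloc(len, GFP_KERNEL);
 *	if (!bp->init_data)
 *		goto request_firmware_exit;
 *	be32_to_cpu_n(bp->firmware->data +
 *		      be32_to_cpu(fw_hdr->init_data.offset),
 *		      (u8 *)bp->init_data, len);
 */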
static int __devinit bnx2x_init_firmware(struct bnx2x *bp, struct device *dev)
{
	char fw_file_name[40] = {0};
	int rc, offset;
	struct bnx2x_fw_file_hdr *fw_hdr;

	/* Create a FW file name */
	if (CHIP_IS_E1(bp))
		offset = sprintf(fw_file_name, FW_FILE_PREFIX_E1);
	else
		offset = sprintf(fw_file_name, FW_FILE_PREFIX_E1H);

	sprintf(fw_file_name + offset, "%d.%d.%d.%d.fw",
		BCM_5710_FW_MAJOR_VERSION,
		BCM_5710_FW_MINOR_VERSION,
		BCM_5710_FW_REVISION_VERSION,
		BCM_5710_FW_ENGINEERING_VERSION);
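	/* The assembled name has the form "bnx2x-e1-M.m.r.e.fw" or
	 * "bnx2x-e1h-M.m.r.e.fw", the four numbers coming from the
	 * BCM_5710_FW_* constants; a hypothetical 4.8.53.0 build would
	 * request "bnx2x-e1h-4.8.53.0.fw".
	 */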
	printk(KERN_INFO PFX "Loading %s\n", fw_file_name);

	rc = request_firmware(&bp->firmware, fw_file_name, dev);
	if (rc) {
		printk(KERN_ERR PFX "Can't load firmware file %s\n", fw_file_name);
		goto request_firmware_exit;
	}

	rc = bnx2x_check_firmware(bp);
	if (rc) {
		printk(KERN_ERR PFX "Corrupt firmware file %s\n", fw_file_name);
		goto request_firmware_exit;
	}

	fw_hdr = (struct bnx2x_fw_file_hdr *)bp->firmware->data;

	/* Initialize the pointers to the init arrays */
	/* Blob */
	BNX2X_ALLOC_AND_SET(init_data, request_firmware_exit, be32_to_cpu_n);
	/* Opcodes */
	BNX2X_ALLOC_AND_SET(init_ops, init_ops_alloc_err, bnx2x_prep_ops);
	/* Offsets */
	BNX2X_ALLOC_AND_SET(init_ops_offsets, init_offsets_alloc_err,
			    be16_to_cpu_n);
	/* STORMs firmware */
	bp->tsem_int_table_data = bp->firmware->data +
		be32_to_cpu(fw_hdr->tsem_int_table_data.offset);
	bp->tsem_pram_data = bp->firmware->data +
		be32_to_cpu(fw_hdr->tsem_pram_data.offset);
	bp->usem_int_table_data = bp->firmware->data +
		be32_to_cpu(fw_hdr->usem_int_table_data.offset);
	bp->usem_pram_data = bp->firmware->data +
		be32_to_cpu(fw_hdr->usem_pram_data.offset);
	bp->xsem_int_table_data = bp->firmware->data +
		be32_to_cpu(fw_hdr->xsem_int_table_data.offset);
	bp->xsem_pram_data = bp->firmware->data +
		be32_to_cpu(fw_hdr->xsem_pram_data.offset);
	bp->csem_int_table_data = bp->firmware->data +
		be32_to_cpu(fw_hdr->csem_int_table_data.offset);
	bp->csem_pram_data = bp->firmware->data +
		be32_to_cpu(fw_hdr->csem_pram_data.offset);

	return 0;

init_offsets_alloc_err:
	kfree(bp->init_ops);
init_ops_alloc_err:
	kfree(bp->init_data);
request_firmware_exit:
	release_firmware(bp->firmware);

	return rc;
}
static int __devinit bnx2x_init_one(struct pci_dev *pdev,
				    const struct pci_device_id *ent)
{
	static int version_printed;
	struct net_device *dev = NULL;
	struct bnx2x *bp;
	int rc;

	if (version_printed++ == 0)
		printk(KERN_INFO "%s", version);

	/* dev zeroed in init_etherdev */
	dev = alloc_etherdev_mq(sizeof(*bp), MAX_CONTEXT);
	if (!dev) {
		printk(KERN_ERR PFX "Cannot allocate net device\n");
		return -ENOMEM;
	}

	bp = netdev_priv(dev);
	bp->msglevel = debug;

	rc = bnx2x_init_dev(pdev, dev);
	if (rc < 0) {
		free_netdev(dev);
		return rc;
	}

	pci_set_drvdata(pdev, dev);

	rc = bnx2x_init_bp(bp);
	if (rc)
		goto init_one_exit;

	/* Set init arrays */
	rc = bnx2x_init_firmware(bp, &pdev->dev);
	if (rc) {
		printk(KERN_ERR PFX "Error loading firmware\n");
		goto init_one_exit;
	}

	rc = register_netdev(dev);
	if (rc) {
		dev_err(&pdev->dev, "Cannot register net device\n");
		goto init_one_exit;
	}
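	/* For illustration only, the banner printed below renders as
	 * something like:
	 *   eth0: Broadcom NetXtreme II BCM57710 XGb (A0) PCI-E x8 2.5GHz
	 *   found at mem d0000000, IRQ 16, node addr 00:10:18:aa:bb:cc
	 * (hypothetical values; the board name comes from board_info[]).
	 */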
	printk(KERN_INFO "%s: %s (%c%d) PCI-E x%d %s found at mem %lx,"
	       " IRQ %d, ", dev->name, board_info[ent->driver_data].name,
	       (CHIP_REV(bp) >> 12) + 'A', (CHIP_METAL(bp) >> 4),
	       bnx2x_get_pcie_width(bp),
	       (bnx2x_get_pcie_speed(bp) == 2) ? "5GHz (Gen2)" : "2.5GHz",
	       dev->base_addr, bp->pdev->irq);
	printk(KERN_CONT "node addr %pM\n", dev->dev_addr);

	return 0;

init_one_exit:
	if (bp->regview)
		iounmap(bp->regview);
	if (bp->doorbells)
		iounmap(bp->doorbells);

	free_netdev(dev);

	if (atomic_read(&pdev->enable_cnt) == 1)
		pci_release_regions(pdev);

	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);

	return rc;
}
static void __devexit bnx2x_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp;

	if (!dev) {
		printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
		return;
	}
	bp = netdev_priv(dev);

	unregister_netdev(dev);

	kfree(bp->init_ops_offsets);
	kfree(bp->init_ops);
	kfree(bp->init_data);
	release_firmware(bp->firmware);

	if (bp->regview)
		iounmap(bp->regview);
	if (bp->doorbells)
		iounmap(bp->doorbells);

	free_netdev(dev);

	if (atomic_read(&pdev->enable_cnt) == 1)
		pci_release_regions(pdev);

	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
}
static int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp;

	if (!dev) {
		printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
		return -ENODEV;
	}
	bp = netdev_priv(dev);

	rtnl_lock();
	pci_save_state(pdev);

	if (!netif_running(dev)) {
		rtnl_unlock();
		return 0;
	}

	netif_device_detach(dev);
	bnx2x_nic_unload(bp, UNLOAD_CLOSE);
	bnx2x_set_power_state(bp, pci_choose_state(pdev, state));

	rtnl_unlock();
	return 0;
}
static int bnx2x_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp;
	int rc;

	if (!dev) {
		printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
		return -ENODEV;
	}
	bp = netdev_priv(dev);

	rtnl_lock();
	pci_restore_state(pdev);

	if (!netif_running(dev)) {
		rtnl_unlock();
		return 0;
	}

	bnx2x_set_power_state(bp, PCI_D0);
	netif_device_attach(dev);
	rc = bnx2x_nic_load(bp, LOAD_OPEN);

	rtnl_unlock();
	return rc;
}
static int bnx2x_eeh_nic_unload(struct bnx2x *bp)
{
	int i;

	bp->state = BNX2X_STATE_ERROR;

	bp->rx_mode = BNX2X_RX_MODE_NONE;

	bnx2x_netif_stop(bp, 0);

	del_timer_sync(&bp->timer);
	bp->stats_state = STATS_STATE_DISABLED;
	DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");

	/* Release IRQs */
	bnx2x_free_irq(bp);

	if (CHIP_IS_E1(bp)) {
		struct mac_configuration_cmd *config =
						bnx2x_sp(bp, mcast_config);

		for (i = 0; i < config->hdr.length; i++)
			CAM_INVALIDATE(config->config_table[i]);
	}

	/* Free SKBs, SGEs, TPA pool and driver internals */
	bnx2x_free_skbs(bp);
	for_each_rx_queue(bp, i)
		bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
	for_each_rx_queue(bp, i)
		netif_napi_del(&bnx2x_fp(bp, i, napi));
	bnx2x_free_mem(bp);

	bp->state = BNX2X_STATE_CLOSED;

	netif_carrier_off(bp->dev);

	return 0;
}
static void bnx2x_eeh_recover(struct bnx2x *bp)
{
	u32 val;

	mutex_init(&bp->port.phy_mutex);

	bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
	bp->link_params.shmem_base = bp->common.shmem_base;
	BNX2X_DEV_INFO("shmem offset is 0x%x\n", bp->common.shmem_base);

	if (!bp->common.shmem_base ||
	    (bp->common.shmem_base < 0xA0000) ||
	    (bp->common.shmem_base >= 0xC0000)) {
		BNX2X_DEV_INFO("MCP not active\n");
		bp->flags |= NO_MCP_FLAG;
		return;
	}

	val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
	if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
		!= (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
		BNX2X_ERR("BAD MCP validity signature\n");

	if (!BP_NOMCP(bp)) {
		bp->fw_seq = (SHMEM_RD(bp, func_mb[BP_FUNC(bp)].drv_mb_header)
			      & DRV_MSG_SEQ_NUMBER_MASK);
		BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
	}
}
/**
 * bnx2x_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t bnx2x_io_error_detected(struct pci_dev *pdev,
						pci_channel_state_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp = netdev_priv(dev);

	rtnl_lock();

	netif_device_detach(dev);

	if (netif_running(dev))
		bnx2x_eeh_nic_unload(bp);

	pci_disable_device(pdev);

	rtnl_unlock();

	/* Request a slot reset */
	return PCI_ERS_RESULT_NEED_RESET;
}
/**
 * bnx2x_io_slot_reset - called after the PCI bus has been reset
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot.
 */
static pci_ers_result_t bnx2x_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp = netdev_priv(dev);

	rtnl_lock();

	if (pci_enable_device(pdev)) {
		dev_err(&pdev->dev,
			"Cannot re-enable PCI device after reset\n");
		rtnl_unlock();
		return PCI_ERS_RESULT_DISCONNECT;
	}

	pci_set_master(pdev);
	pci_restore_state(pdev);

	if (netif_running(dev))
		bnx2x_set_power_state(bp, PCI_D0);

	rtnl_unlock();

	return PCI_ERS_RESULT_RECOVERED;
}
/**
 * bnx2x_io_resume - called when traffic can start flowing again
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells us that
 * it's OK to resume normal operation.
 */
static void bnx2x_io_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp = netdev_priv(dev);

	rtnl_lock();

	bnx2x_eeh_recover(bp);

	if (netif_running(dev))
		bnx2x_nic_load(bp, LOAD_NORMAL);

	netif_device_attach(dev);

	rtnl_unlock();
}
static struct pci_error_handlers bnx2x_err_handler = {
	.error_detected = bnx2x_io_error_detected,
	.slot_reset     = bnx2x_io_slot_reset,
	.resume         = bnx2x_io_resume,
};
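/* PCI error recovery invokes these callbacks in order: error_detected()
 * quiesces the device, slot_reset() re-initializes it after the bus/slot
 * reset, and resume() reloads the NIC and restarts traffic.
 */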
static struct pci_driver bnx2x_pci_driver = {
	.name        = DRV_MODULE_NAME,
	.id_table    = bnx2x_pci_tbl,
	.probe       = bnx2x_init_one,
	.remove      = __devexit_p(bnx2x_remove_one),
	.suspend     = bnx2x_suspend,
	.resume      = bnx2x_resume,
	.err_handler = &bnx2x_err_handler,
};
static int __init bnx2x_init(void)
{
	int ret;

	bnx2x_wq = create_singlethread_workqueue("bnx2x");
	if (bnx2x_wq == NULL) {
		printk(KERN_ERR PFX "Cannot create workqueue\n");
		return -ENOMEM;
	}

	ret = pci_register_driver(&bnx2x_pci_driver);
	if (ret) {
		printk(KERN_ERR PFX "Cannot register driver\n");
		destroy_workqueue(bnx2x_wq);
	}
	return ret;
}
static void __exit bnx2x_cleanup(void)
{
	pci_unregister_driver(&bnx2x_pci_driver);

	destroy_workqueue(bnx2x_wq);
}
module_init(bnx2x_init);
module_exit(bnx2x_cleanup);