/* bnx2x_main.c: Broadcom Everest network driver.
 *
 * Copyright (c) 2007-2009 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
 * Written by: Eliezer Tamir
 * Based on code from Michael Chan's bnx2 driver
 * UDP CSUM errata workaround by Arik Gendelman
 * Slowpath rework by Vladislav Zolotarov
 * Statistics and Link management by Yitchak Gertner
 *
 */
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/device.h>  /* for dev_info() */
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/dma-mapping.h>
#include <linux/bitops.h>
#include <linux/irq.h>
#include <linux/delay.h>
#include <asm/byteorder.h>
#include <linux/time.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/if_vlan.h>
#include <net/ip.h>
#include <net/tcp.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <linux/workqueue.h>
#include <linux/crc32.h>
#include <linux/crc32c.h>
#include <linux/prefetch.h>
#include <linux/zlib.h>

#include "bnx2x.h"
#include "bnx2x_init.h"
#define DRV_MODULE_VERSION	"1.45.26"
#define DRV_MODULE_RELDATE	"2009/01/26"
#define BNX2X_BC_VER		0x040200

/* Time in jiffies before concluding the transmitter is hung */
#define TX_TIMEOUT		(5*HZ)
static char version[] __devinitdata =
	"Broadcom NetXtreme II 5771x 10Gigabit Ethernet Driver "
	DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
MODULE_AUTHOR("Eliezer Tamir");
MODULE_DESCRIPTION("Broadcom NetXtreme II BCM57710/57711/57711E Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
static int multi_mode = 1;
module_param(multi_mode, int, 0);

static int disable_tpa;
static int poll;
static int debug;
static int load_count[3]; /* 0-common, 1-port0, 2-port1 */

module_param(disable_tpa, int, 0);

static int int_mode;
module_param(int_mode, int, 0);
MODULE_PARM_DESC(int_mode, " Force interrupt mode (1 INT#x; 2 MSI)");

module_param(poll, int, 0);

static int mrrs = -1;
module_param(mrrs, int, 0);
MODULE_PARM_DESC(mrrs, " Force Max Read Req Size (0..3) (for debug)");

module_param(debug, int, 0);
MODULE_PARM_DESC(disable_tpa, "disable the TPA (LRO) feature");
MODULE_PARM_DESC(poll, "use polling (for debug)");
MODULE_PARM_DESC(debug, "default debug msglevel");
static struct workqueue_struct *bnx2x_wq;
enum bnx2x_board_type {
	BCM57710 = 0,
	BCM57711 = 1,
	BCM57711E = 2,
};

/* indexed by board_type, above */
static struct {
	char *name;
} board_info[] __devinitdata = {
	{ "Broadcom NetXtreme II BCM57710 XGb" },
	{ "Broadcom NetXtreme II BCM57711 XGb" },
	{ "Broadcom NetXtreme II BCM57711E XGb" }
};
static const struct pci_device_id bnx2x_pci_tbl[] = {
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57710,
		PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57710 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57711,
		PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57711 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57711E,
		PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57711E },
	{ 0 }
};

MODULE_DEVICE_TABLE(pci, bnx2x_pci_tbl);
/****************************************************************************
* General service functions
****************************************************************************/
/* used only at init
 * locking is done by mcp
 */
static void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val)
{
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_DATA, val);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);
}
static u32 bnx2x_reg_rd_ind(struct bnx2x *bp, u32 addr)
{
	u32 val;

	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
	pci_read_config_dword(bp->pdev, PCICFG_GRC_DATA, &val);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);

	return val;
}
static const u32 dmae_reg_go_c[] = {
	DMAE_REG_GO_C0, DMAE_REG_GO_C1, DMAE_REG_GO_C2, DMAE_REG_GO_C3,
	DMAE_REG_GO_C4, DMAE_REG_GO_C5, DMAE_REG_GO_C6, DMAE_REG_GO_C7,
	DMAE_REG_GO_C8, DMAE_REG_GO_C9, DMAE_REG_GO_C10, DMAE_REG_GO_C11,
	DMAE_REG_GO_C12, DMAE_REG_GO_C13, DMAE_REG_GO_C14, DMAE_REG_GO_C15
};
/* copy command into DMAE command memory and set DMAE command go */
static void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae,
			    int idx)
{
	u32 cmd_offset;
	int i;

	cmd_offset = (DMAE_REG_CMD_MEM + sizeof(struct dmae_command) * idx);
	for (i = 0; i < (sizeof(struct dmae_command)/4); i++) {
		REG_WR(bp, cmd_offset + i*4, *(((u32 *)dmae) + i));

		DP(BNX2X_MSG_OFF, "DMAE cmd[%d].%d (0x%08x) : 0x%08x\n",
		   idx, i, cmd_offset + i*4, *(((u32 *)dmae) + i));
	}
	REG_WR(bp, dmae_reg_go_c[idx], 1);
}
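/* Note: bnx2x_write_dmae() copies a host buffer into GRC space through the
 * DMAE engine: it builds a dmae_command describing the PCI source, the GRC
 * destination and a completion word, posts it via bnx2x_post_dmae() and then
 * polls wb_comp until the engine writes DMAE_COMP_VAL back.  Before the DMAE
 * block is usable (!bp->dmae_ready) it falls back to indirect register writes.
 */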
void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
		      u32 len32)
{
	struct dmae_command *dmae = &bp->init_dmae;
	u32 *wb_comp = bnx2x_sp(bp, wb_comp);
	int cnt = 200;

	if (!bp->dmae_ready) {
		u32 *data = bnx2x_sp(bp, wb_data[0]);

		DP(BNX2X_MSG_OFF, "DMAE is not ready (dst_addr %08x len32 %d)"
		   " using indirect\n", dst_addr, len32);
		bnx2x_init_ind_wr(bp, dst_addr, data, len32);
		return;
	}

	mutex_lock(&bp->dmae_mutex);

	memset(dmae, 0, sizeof(struct dmae_command));

	dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
			DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
			DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
			DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
			DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
			(BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
			(BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
	dmae->src_addr_lo = U64_LO(dma_addr);
	dmae->src_addr_hi = U64_HI(dma_addr);
	dmae->dst_addr_lo = dst_addr >> 2;
	dmae->dst_addr_hi = 0;
	dmae->len = len32;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	DP(BNX2X_MSG_OFF, "dmae: opcode 0x%08x\n"
	   DP_LEVEL "src_addr  [%x:%08x]  len [%d *4]  "
		    "dst_addr [%x:%08x (%08x)]\n"
	   DP_LEVEL "comp_addr [%x:%08x]  comp_val 0x%08x\n",
	   dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
	   dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo, dst_addr,
	   dmae->comp_addr_hi, dmae->comp_addr_lo, dmae->comp_val);
	DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
	   bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
	   bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);

	*wb_comp = 0;

	bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));

	udelay(5);

	while (*wb_comp != DMAE_COMP_VAL) {
		DP(BNX2X_MSG_OFF, "wb_comp 0x%08x\n", *wb_comp);

		if (!cnt) {
			BNX2X_ERR("dmae timeout!\n");
			break;
		}
		cnt--;
		/* adjust delay for emulation/FPGA */
		if (CHIP_REV_IS_SLOW(bp))
			msleep(100);
		else
			udelay(5);
	}

	mutex_unlock(&bp->dmae_mutex);
}
void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
{
	struct dmae_command *dmae = &bp->init_dmae;
	u32 *wb_comp = bnx2x_sp(bp, wb_comp);
	int cnt = 200;

	if (!bp->dmae_ready) {
		u32 *data = bnx2x_sp(bp, wb_data[0]);
		int i;

		DP(BNX2X_MSG_OFF, "DMAE is not ready (src_addr %08x len32 %d)"
		   " using indirect\n", src_addr, len32);
		for (i = 0; i < len32; i++)
			data[i] = bnx2x_reg_rd_ind(bp, src_addr + i*4);
		return;
	}

	mutex_lock(&bp->dmae_mutex);

	memset(bnx2x_sp(bp, wb_data[0]), 0, sizeof(u32) * 4);
	memset(dmae, 0, sizeof(struct dmae_command));

	dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
			DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
			DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
			DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
			DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
			(BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
			(BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
	dmae->src_addr_lo = src_addr >> 2;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_data));
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_data));
	dmae->len = len32;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	DP(BNX2X_MSG_OFF, "dmae: opcode 0x%08x\n"
	   DP_LEVEL "src_addr  [%x:%08x]  len [%d *4]  "
		    "dst_addr [%x:%08x (%08x)]\n"
	   DP_LEVEL "comp_addr [%x:%08x]  comp_val 0x%08x\n",
	   dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
	   dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo, src_addr,
	   dmae->comp_addr_hi, dmae->comp_addr_lo, dmae->comp_val);

	*wb_comp = 0;

	bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));

	udelay(5);

	while (*wb_comp != DMAE_COMP_VAL) {

		if (!cnt) {
			BNX2X_ERR("dmae timeout!\n");
			break;
		}
		cnt--;
		/* adjust delay for emulation/FPGA */
		if (CHIP_REV_IS_SLOW(bp))
			msleep(100);
		else
			udelay(5);
	}
	DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
	   bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
	   bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);

	mutex_unlock(&bp->dmae_mutex);
}
/* used only for slowpath so not inlined */
static void bnx2x_wb_wr(struct bnx2x *bp, int reg, u32 val_hi, u32 val_lo)
{
	u32 wb_write[2];

	wb_write[0] = val_hi;
	wb_write[1] = val_lo;
	REG_WR_DMAE(bp, reg, wb_write, 2);
}
static u64 bnx2x_wb_rd(struct bnx2x *bp, int reg)
{
	u32 wb_data[2];

	REG_RD_DMAE(bp, reg, wb_data, 2);

	return HILO_U64(wb_data[0], wb_data[1]);
}
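/* Each storm processor (X/T/C/U) keeps a list of firmware asserts in its
 * internal memory.  bnx2x_mc_assert() walks all four lists and prints every
 * valid entry (one whose first word is not the "invalid assert" opcode),
 * returning the number of asserts found.
 */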
static int bnx2x_mc_assert(struct bnx2x *bp)
{
	char last_idx;
	int i, rc = 0;
	u32 row0, row1, row2, row3;

	/* XSTORM */
	last_idx = REG_RD8(bp, BAR_XSTRORM_INTMEM +
			   XSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("XSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("XSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* TSTORM */
	last_idx = REG_RD8(bp, BAR_TSTRORM_INTMEM +
			   TSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("TSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("TSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* CSTORM */
	last_idx = REG_RD8(bp, BAR_CSTRORM_INTMEM +
			   CSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("CSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("CSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* USTORM */
	last_idx = REG_RD8(bp, BAR_USTRORM_INTMEM +
			   USTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("USTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("USTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	return rc;
}
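/* The MCP logs firmware trace data into a cyclic buffer in its scratchpad.
 * bnx2x_fw_dump() reads the current mark and prints the buffer in two chunks,
 * from the mark to the end and then from the start back up to the mark, so
 * the output comes out in chronological order.
 */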
static void bnx2x_fw_dump(struct bnx2x *bp)
{
	u32 mark, offset;
	__be32 data[9];
	int word;

	mark = REG_RD(bp, MCP_REG_MCPR_SCRATCH + 0xf104);
	mark = ((mark + 0x3) & ~0x3);
	printk(KERN_ERR PFX "begin fw dump (mark 0x%x)\n" KERN_ERR, mark);

	for (offset = mark - 0x08000000; offset <= 0xF900; offset += 0x8*4) {
		for (word = 0; word < 8; word++)
			data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
						  offset + 4*word));
		data[8] = 0x0;
		printk(KERN_CONT "%s", (char *)data);
	}
	for (offset = 0xF108; offset <= mark - 0x08000000; offset += 0x8*4) {
		for (word = 0; word < 8; word++)
			data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
						  offset + 4*word));
		data[8] = 0x0;
		printk(KERN_CONT "%s", (char *)data);
	}
	printk("\n" KERN_ERR PFX "end of fw dump\n");
}
static void bnx2x_panic_dump(struct bnx2x *bp)
{
	int i;
	u16 j, start, end;

	bp->stats_state = STATS_STATE_DISABLED;
	DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");

	BNX2X_ERR("begin crash dump -----------------\n");

	/* Indices */
	/* Common */
	BNX2X_ERR("def_c_idx(%u)  def_u_idx(%u)  def_x_idx(%u)"
		  "  def_t_idx(%u)  def_att_idx(%u)  attn_state(%u)"
		  "  spq_prod_idx(%u)\n",
		  bp->def_c_idx, bp->def_u_idx, bp->def_x_idx, bp->def_t_idx,
		  bp->def_att_idx, bp->attn_state, bp->spq_prod_idx);

	/* Rx */
	for_each_rx_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		BNX2X_ERR("queue[%d]: rx_bd_prod(%x)  rx_bd_cons(%x)"
			  "  *rx_bd_cons_sb(%x)  rx_comp_prod(%x)"
			  "  rx_comp_cons(%x)  *rx_cons_sb(%x)\n",
			  i, fp->rx_bd_prod, fp->rx_bd_cons,
			  le16_to_cpu(*fp->rx_bd_cons_sb), fp->rx_comp_prod,
			  fp->rx_comp_cons, le16_to_cpu(*fp->rx_cons_sb));
		BNX2X_ERR("          rx_sge_prod(%x)  last_max_sge(%x)"
			  "  fp_u_idx(%x)  *sb_u_idx(%x)\n",
			  fp->rx_sge_prod, fp->last_max_sge,
			  le16_to_cpu(fp->fp_u_idx),
			  fp->status_blk->u_status_block.status_block_index);
	}

	/* Tx */
	for_each_tx_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];
		struct eth_tx_db_data *hw_prods = fp->hw_tx_prods;

		BNX2X_ERR("queue[%d]: tx_pkt_prod(%x)  tx_pkt_cons(%x)"
			  "  tx_bd_prod(%x)  tx_bd_cons(%x)  *tx_cons_sb(%x)\n",
			  i, fp->tx_pkt_prod, fp->tx_pkt_cons, fp->tx_bd_prod,
			  fp->tx_bd_cons, le16_to_cpu(*fp->tx_cons_sb));
		BNX2X_ERR("          fp_c_idx(%x)  *sb_c_idx(%x)"
			  "  bd data(%x,%x)\n", le16_to_cpu(fp->fp_c_idx),
			  fp->status_blk->c_status_block.status_block_index,
			  hw_prods->packets_prod, hw_prods->bds_prod);
	}

	/* Rings */
	/* Rx */
	for_each_rx_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10);
		end = RX_BD(le16_to_cpu(*fp->rx_cons_sb) + 503);
		for (j = start; j != end; j = RX_BD(j + 1)) {
			u32 *rx_bd = (u32 *)&fp->rx_desc_ring[j];
			struct sw_rx_bd *sw_bd = &fp->rx_buf_ring[j];

			BNX2X_ERR("rx_bd[%x]=[%x:%x]  sw_bd=[%p]\n",
				  j, rx_bd[1], rx_bd[0], sw_bd->skb);
		}

		start = RX_SGE(fp->rx_sge_prod);
		end = RX_SGE(fp->last_max_sge);
		for (j = start; j != end; j = RX_SGE(j + 1)) {
			u32 *rx_sge = (u32 *)&fp->rx_sge_ring[j];
			struct sw_rx_page *sw_page = &fp->rx_page_ring[j];

			BNX2X_ERR("rx_sge[%x]=[%x:%x]  sw_page=[%p]\n",
				  j, rx_sge[1], rx_sge[0], sw_page->page);
		}

		start = RCQ_BD(fp->rx_comp_cons - 10);
		end = RCQ_BD(fp->rx_comp_cons + 503);
		for (j = start; j != end; j = RCQ_BD(j + 1)) {
			u32 *cqe = (u32 *)&fp->rx_comp_ring[j];

			BNX2X_ERR("cqe[%x]=[%x:%x:%x:%x]\n",
				  j, cqe[0], cqe[1], cqe[2], cqe[3]);
		}
	}

	/* Tx */
	for_each_tx_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		start = TX_BD(le16_to_cpu(*fp->tx_cons_sb) - 10);
		end = TX_BD(le16_to_cpu(*fp->tx_cons_sb) + 245);
		for (j = start; j != end; j = TX_BD(j + 1)) {
			struct sw_tx_bd *sw_bd = &fp->tx_buf_ring[j];

			BNX2X_ERR("packet[%x]=[%p,%x]\n", j,
				  sw_bd->skb, sw_bd->first_bd);
		}

		start = TX_BD(fp->tx_bd_cons - 10);
		end = TX_BD(fp->tx_bd_cons + 254);
		for (j = start; j != end; j = TX_BD(j + 1)) {
			u32 *tx_bd = (u32 *)&fp->tx_desc_ring[j];

			BNX2X_ERR("tx_bd[%x]=[%x:%x:%x:%x]\n",
				  j, tx_bd[0], tx_bd[1], tx_bd[2], tx_bd[3]);
		}
	}

	bnx2x_fw_dump(bp);
	bnx2x_mc_assert(bp);
	BNX2X_ERR("end crash dump -----------------\n");
}
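/* The HC (host coalescing) block is programmed per interrupt mode: MSI-X
 * clears single-ISR mode and the INT line, MSI keeps single-ISR mode, and
 * legacy INTx keeps the INT line enabled as well.  The attention bit is
 * enabled in every mode.
 */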
static void bnx2x_int_enable(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
	u32 val = REG_RD(bp, addr);
	int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
	int msi = (bp->flags & USING_MSI_FLAG) ? 1 : 0;

	if (msix) {
		val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			 HC_CONFIG_0_REG_INT_LINE_EN_0);
		val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);
	} else if (msi) {
		val &= ~HC_CONFIG_0_REG_INT_LINE_EN_0;
		val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);
	} else {
		val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_INT_LINE_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);

		DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
		   val, port, addr);

		REG_WR(bp, addr, val);

		val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0;
	}

	DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x) mode %s\n",
	   val, port, addr, (msix ? "MSI-X" : (msi ? "MSI" : "INTx")));

	REG_WR(bp, addr, val);

	if (CHIP_IS_E1H(bp)) {
		/* init leading/trailing edge */
		if (IS_E1HMF(bp)) {
			val = (0xee0f | (1 << (BP_E1HVN(bp) + 4)));
			if (bp->port.pmf)
				/* enable nig and gpio3 attention */
				val |= 0x1100;
		} else
			val = 0xffff;

		REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
		REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
	}
}
static void bnx2x_int_disable(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
	u32 val = REG_RD(bp, addr);

	val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
		 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
		 HC_CONFIG_0_REG_INT_LINE_EN_0 |
		 HC_CONFIG_0_REG_ATTN_BIT_EN_0);

	DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
	   val, port, addr);

	/* flush all outstanding writes */
	mmiowb();

	REG_WR(bp, addr, val);
	if (REG_RD(bp, addr) != val)
		BNX2X_ERR("BUG! proper val not read from IGU!\n");
}
static void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw)
{
	int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
	int i, offset;

	/* disable interrupt handling */
	atomic_inc(&bp->intr_sem);
	if (disable_hw)
		/* prevent the HW from sending interrupts */
		bnx2x_int_disable(bp);

	/* make sure all ISRs are done */
	if (msix) {
		synchronize_irq(bp->msix_table[0].vector);
		offset = 1;
		for_each_queue(bp, i)
			synchronize_irq(bp->msix_table[i + offset].vector);
	} else
		synchronize_irq(bp->pdev->irq);

	/* make sure sp_task is not running */
	cancel_delayed_work(&bp->sp_task);
	flush_workqueue(bnx2x_wq);
}
/* fast path */

/*
 * General service functions
 */
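/* bnx2x_ack_sb() acknowledges a status block to the IGU: the new consumer
 * index, the storm ID and the requested interrupt mode (enable/disable) are
 * written in a single 32-bit access to the HC command register.
 */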
static inline void bnx2x_ack_sb(struct bnx2x *bp, u8 sb_id,
				u8 storm, u16 index, u8 op, u8 update)
{
	u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
		       COMMAND_REG_INT_ACK);
	struct igu_ack_register igu_ack;

	igu_ack.status_block_index = index;
	igu_ack.sb_id_and_flags =
			((sb_id << IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT) |
			 (storm << IGU_ACK_REGISTER_STORM_ID_SHIFT) |
			 (update << IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT) |
			 (op << IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT));

	DP(BNX2X_MSG_OFF, "write 0x%08x to HC addr 0x%x\n",
	   (*(u32 *)&igu_ack), hc_addr);
	REG_WR(bp, hc_addr, (*(u32 *)&igu_ack));
}
static inline u16 bnx2x_update_fpsb_idx(struct bnx2x_fastpath *fp)
{
	struct host_status_block *fpsb = fp->status_blk;
	u16 rc = 0;

	barrier(); /* status block is written to by the chip */
	if (fp->fp_c_idx != fpsb->c_status_block.status_block_index) {
		fp->fp_c_idx = fpsb->c_status_block.status_block_index;
		rc |= 1;
	}
	if (fp->fp_u_idx != fpsb->u_status_block.status_block_index) {
		fp->fp_u_idx = fpsb->u_status_block.status_block_index;
		rc |= 2;
	}
	return rc;
}
static u16 bnx2x_ack_int(struct bnx2x *bp)
{
	u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
		       COMMAND_REG_SIMD_MASK);
	u32 result = REG_RD(bp, hc_addr);

	DP(BNX2X_MSG_OFF, "read 0x%08x from HC addr 0x%x\n",
	   result, hc_addr);

	return result;
}
/*
 * fast path service functions
 */
static inline int bnx2x_has_tx_work(struct bnx2x_fastpath *fp)
{
	u16 tx_cons_sb;

	/* Tell compiler that status block fields can change */
	barrier();
	tx_cons_sb = le16_to_cpu(*fp->tx_cons_sb);
	return (fp->tx_pkt_cons != tx_cons_sb);
}
static inline int bnx2x_has_tx_work_unload(struct bnx2x_fastpath *fp)
{
	/* Tell compiler that consumer and producer can change */
	barrier();
	return (fp->tx_pkt_prod != fp->tx_pkt_cons);
}
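/* A transmitted packet occupies a chain of tx BDs: the first (mapped) BD,
 * an optional parse BD and an optional TSO split header BD (neither of
 * which carries a DMA mapping), followed by one mapped BD per fragment.
 */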
/* free skb in the packet ring at pos idx
 * return idx of last bd freed
 */
static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			     u16 idx)
{
	struct sw_tx_bd *tx_buf = &fp->tx_buf_ring[idx];
	struct eth_tx_bd *tx_bd;
	struct sk_buff *skb = tx_buf->skb;
	u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
	int nbd;

	DP(BNX2X_MSG_OFF, "pkt_idx %d  buff @(%p)->skb %p\n",
	   idx, tx_buf, skb);

	/* unmap first bd */
	DP(BNX2X_MSG_OFF, "free bd_idx %d\n", bd_idx);
	tx_bd = &fp->tx_desc_ring[bd_idx];
	pci_unmap_single(bp->pdev, BD_UNMAP_ADDR(tx_bd),
			 BD_UNMAP_LEN(tx_bd), PCI_DMA_TODEVICE);

	nbd = le16_to_cpu(tx_bd->nbd) - 1;
	new_cons = nbd + tx_buf->first_bd;
#ifdef BNX2X_STOP_ON_ERROR
	if (nbd > (MAX_SKB_FRAGS + 2)) {
		BNX2X_ERR("BAD nbd!\n");
		bnx2x_panic();
	}
#endif

	/* Skip a parse bd and the TSO split header bd
	   since they have no mapping */
	if (nbd)
		bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));

	if (tx_bd->bd_flags.as_bitfield & (ETH_TX_BD_FLAGS_IP_CSUM |
					   ETH_TX_BD_FLAGS_TCP_CSUM |
					   ETH_TX_BD_FLAGS_SW_LSO)) {
		if (--nbd)
			bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
		tx_bd = &fp->tx_desc_ring[bd_idx];
		/* is this a TSO split header bd? */
		if (tx_bd->bd_flags.as_bitfield & ETH_TX_BD_FLAGS_SW_LSO) {
			if (--nbd)
				bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
		}
	}

	/* now free frags */
	while (nbd > 0) {

		DP(BNX2X_MSG_OFF, "free frag bd_idx %d\n", bd_idx);
		tx_bd = &fp->tx_desc_ring[bd_idx];
		pci_unmap_page(bp->pdev, BD_UNMAP_ADDR(tx_bd),
			       BD_UNMAP_LEN(tx_bd), PCI_DMA_TODEVICE);
		if (--nbd)
			bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
	}

	/* release skb */
	WARN_ON(!skb);
	dev_kfree_skb(skb);
	tx_buf->first_bd = 0;
	tx_buf->skb = NULL;

	return new_cons;
}
static inline u16 bnx2x_tx_avail(struct bnx2x_fastpath *fp)
{
	s16 used;
	u16 prod;
	u16 cons;

	barrier(); /* Tell compiler that prod and cons can change */
	prod = fp->tx_bd_prod;
	cons = fp->tx_bd_cons;

	/* NUM_TX_RINGS = number of "next-page" entries
	   It will be used as a threshold */
	used = SUB_S16(prod, cons) + (s16)NUM_TX_RINGS;

#ifdef BNX2X_STOP_ON_ERROR
	WARN_ON(used < 0);
	WARN_ON(used > fp->bp->tx_ring_size);
	WARN_ON((fp->bp->tx_ring_size - used) > MAX_TX_AVAIL);
#endif

	return (s16)(fp->bp->tx_ring_size) - used;
}
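/* TX completion: walk the packet consumer from the last processed index up
 * to the index reported in the status block, free each packet's BD chain,
 * and wake the netdev queue if it was stopped and enough BDs are available.
 */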
static void bnx2x_tx_int(struct bnx2x_fastpath *fp, int work)
{
	struct bnx2x *bp = fp->bp;
	struct netdev_queue *txq;
	u16 hw_cons, sw_cons, bd_cons = fp->tx_bd_cons;
	int done = 0;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return;
#endif

	txq = netdev_get_tx_queue(bp->dev, fp->index);
	hw_cons = le16_to_cpu(*fp->tx_cons_sb);
	sw_cons = fp->tx_pkt_cons;

	while (sw_cons != hw_cons) {
		u16 pkt_cons;

		pkt_cons = TX_BD(sw_cons);

		/* prefetch(bp->tx_buf_ring[pkt_cons].skb); */

		DP(NETIF_MSG_TX_DONE, "hw_cons %u  sw_cons %u  pkt_cons %u\n",
		   hw_cons, sw_cons, pkt_cons);

/*		if (NEXT_TX_IDX(sw_cons) != hw_cons) {
			rmb();
			prefetch(fp->tx_buf_ring[NEXT_TX_IDX(sw_cons)].skb);
		}
*/
		bd_cons = bnx2x_free_tx_pkt(bp, fp, pkt_cons);
		sw_cons++;
		done++;

		if (done == work)
			break;
	}

	fp->tx_pkt_cons = sw_cons;
	fp->tx_bd_cons = bd_cons;

	/* Need to make the tx_bd_cons update visible to start_xmit()
	 * before checking for netif_tx_queue_stopped().  Without the
	 * memory barrier, there is a small possibility that start_xmit()
	 * will miss it and cause the queue to be stopped forever.
	 */
	smp_mb();

	/* TBD need a thresh? */
	if (unlikely(netif_tx_queue_stopped(txq))) {

		__netif_tx_lock(txq, smp_processor_id());

		if ((netif_tx_queue_stopped(txq)) &&
		    (bp->state == BNX2X_STATE_OPEN) &&
		    (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3))
			netif_tx_wake_queue(txq);

		__netif_tx_unlock(txq);
	}
}
static void bnx2x_sp_event(struct bnx2x_fastpath *fp,
			   union eth_rx_cqe *rr_cqe)
{
	struct bnx2x *bp = fp->bp;
	int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data);
	int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data);

	DP(BNX2X_MSG_SP,
	   "fp %d  cid %d  got ramrod #%d  state is %x  type is %d\n",
	   fp->index, cid, command, bp->state,
	   rr_cqe->ramrod_cqe.ramrod_type);

	bp->spq_left++;

	if (fp->index) {
		switch (command | fp->state) {
		case (RAMROD_CMD_ID_ETH_CLIENT_SETUP |
						BNX2X_FP_STATE_OPENING):
			DP(NETIF_MSG_IFUP, "got MULTI[%d] setup ramrod\n",
			   cid);
			fp->state = BNX2X_FP_STATE_OPEN;
			break;

		case (RAMROD_CMD_ID_ETH_HALT | BNX2X_FP_STATE_HALTING):
			DP(NETIF_MSG_IFDOWN, "got MULTI[%d] halt ramrod\n",
			   cid);
			fp->state = BNX2X_FP_STATE_HALTED;
			break;

		default:
			BNX2X_ERR("unexpected MC reply (%d)  "
				  "fp->state is %x\n", command, fp->state);
			break;
		}
		mb(); /* force bnx2x_wait_ramrod() to see the change */
		return;
	}

	switch (command | bp->state) {
	case (RAMROD_CMD_ID_ETH_PORT_SETUP | BNX2X_STATE_OPENING_WAIT4_PORT):
		DP(NETIF_MSG_IFUP, "got setup ramrod\n");
		bp->state = BNX2X_STATE_OPEN;
		break;

	case (RAMROD_CMD_ID_ETH_HALT | BNX2X_STATE_CLOSING_WAIT4_HALT):
		DP(NETIF_MSG_IFDOWN, "got halt ramrod\n");
		bp->state = BNX2X_STATE_CLOSING_WAIT4_DELETE;
		fp->state = BNX2X_FP_STATE_HALTED;
		break;

	case (RAMROD_CMD_ID_ETH_CFC_DEL | BNX2X_STATE_CLOSING_WAIT4_HALT):
		DP(NETIF_MSG_IFDOWN, "got delete ramrod for MULTI[%d]\n", cid);
		bnx2x_fp(bp, cid, state) = BNX2X_FP_STATE_CLOSED;
		break;

	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_OPEN):
	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_DIAG):
		DP(NETIF_MSG_IFUP, "got set mac ramrod\n");
		bp->set_mac_pending = 0;
		break;

	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_CLOSING_WAIT4_HALT):
		DP(NETIF_MSG_IFDOWN, "got (un)set mac ramrod\n");
		break;

	default:
		BNX2X_ERR("unexpected MC reply (%d)  bp->state is %x\n",
			  command, bp->state);
		break;
	}
	mb(); /* force bnx2x_wait_ramrod() to see the change */
}
static inline void bnx2x_free_rx_sge(struct bnx2x *bp,
				     struct bnx2x_fastpath *fp, u16 index)
{
	struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
	struct page *page = sw_buf->page;
	struct eth_rx_sge *sge = &fp->rx_sge_ring[index];

	/* Skip "next page" elements */
	if (!page)
		return;

	pci_unmap_page(bp->pdev, pci_unmap_addr(sw_buf, mapping),
		       SGE_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);
	__free_pages(page, PAGES_PER_SGE_SHIFT);

	sw_buf->page = NULL;
	sge->addr_hi = 0;
	sge->addr_lo = 0;
}
static inline void bnx2x_free_rx_sge_range(struct bnx2x *bp,
					   struct bnx2x_fastpath *fp, int last)
{
	int i;

	for (i = 0; i < last; i++)
		bnx2x_free_rx_sge(bp, fp, i);
}
static inline int bnx2x_alloc_rx_sge(struct bnx2x *bp,
				     struct bnx2x_fastpath *fp, u16 index)
{
	struct page *page = alloc_pages(GFP_ATOMIC, PAGES_PER_SGE_SHIFT);
	struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
	struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
	dma_addr_t mapping;

	if (unlikely(page == NULL))
		return -ENOMEM;

	mapping = pci_map_page(bp->pdev, page, 0, SGE_PAGE_SIZE*PAGES_PER_SGE,
			       PCI_DMA_FROMDEVICE);
	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
		__free_pages(page, PAGES_PER_SGE_SHIFT);
		return -ENOMEM;
	}

	sw_buf->page = page;
	pci_unmap_addr_set(sw_buf, mapping, mapping);

	sge->addr_hi = cpu_to_le32(U64_HI(mapping));
	sge->addr_lo = cpu_to_le32(U64_LO(mapping));

	return 0;
}
static inline int bnx2x_alloc_rx_skb(struct bnx2x *bp,
				     struct bnx2x_fastpath *fp, u16 index)
{
	struct sk_buff *skb;
	struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
	struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
	dma_addr_t mapping;

	skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
	if (unlikely(skb == NULL))
		return -ENOMEM;

	mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_size,
				 PCI_DMA_FROMDEVICE);
	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
		dev_kfree_skb(skb);
		return -ENOMEM;
	}

	rx_buf->skb = skb;
	pci_unmap_addr_set(rx_buf, mapping, mapping);

	rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

	return 0;
}
/* note that we are not allocating a new skb,
 * we are just moving one from cons to prod
 * we are not creating a new mapping,
 * so there is no need to check for dma_mapping_error().
 */
static void bnx2x_reuse_rx_skb(struct bnx2x_fastpath *fp,
			       struct sk_buff *skb, u16 cons, u16 prod)
{
	struct bnx2x *bp = fp->bp;
	struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
	struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
	struct eth_rx_bd *cons_bd = &fp->rx_desc_ring[cons];
	struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];

	pci_dma_sync_single_for_device(bp->pdev,
				       pci_unmap_addr(cons_rx_buf, mapping),
				       RX_COPY_THRESH, PCI_DMA_FROMDEVICE);

	prod_rx_buf->skb = cons_rx_buf->skb;
	pci_unmap_addr_set(prod_rx_buf, mapping,
			   pci_unmap_addr(cons_rx_buf, mapping));
	*prod_bd = *cons_bd;
}
static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
					     u16 idx)
{
	u16 last_max = fp->last_max_sge;

	if (SUB_S16(idx, last_max) > 0)
		fp->last_max_sge = idx;
}
static void bnx2x_clear_sge_mask_next_elems(struct bnx2x_fastpath *fp)
{
	int i, j;

	for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
		int idx = RX_SGE_CNT * i - 1;

		for (j = 0; j < 2; j++) {
			SGE_MASK_CLEAR_BIT(fp, idx);
			idx--;
		}
	}
}
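/* The SGE mask tracks which SGE ring entries are still owned by the hardware.
 * On each TPA completion the consumed entries are cleared in the mask, and
 * the SGE producer is then advanced over every fully-consumed mask element
 * (one 64-bit word of the mask at a time).
 */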
static void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
				  struct eth_fast_path_rx_cqe *fp_cqe)
{
	struct bnx2x *bp = fp->bp;
	u16 sge_len = SGE_PAGE_ALIGN(le16_to_cpu(fp_cqe->pkt_len) -
				     le16_to_cpu(fp_cqe->len_on_bd)) >>
		      SGE_PAGE_SHIFT;
	u16 last_max, last_elem, first_elem;
	u16 delta = 0;
	u16 i;

	if (!sge_len)
		return;

	/* First mark all used pages */
	for (i = 0; i < sge_len; i++)
		SGE_MASK_CLEAR_BIT(fp, RX_SGE(le16_to_cpu(fp_cqe->sgl[i])));

	DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
	   sge_len - 1, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));

	/* Here we assume that the last SGE index is the biggest */
	prefetch((void *)(fp->sge_mask));
	bnx2x_update_last_max_sge(fp, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));

	last_max = RX_SGE(fp->last_max_sge);
	last_elem = last_max >> RX_SGE_MASK_ELEM_SHIFT;
	first_elem = RX_SGE(fp->rx_sge_prod) >> RX_SGE_MASK_ELEM_SHIFT;

	/* If ring is not full */
	if (last_elem + 1 != first_elem)
		last_elem++;

	/* Now update the prod */
	for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
		if (likely(fp->sge_mask[i]))
			break;

		fp->sge_mask[i] = RX_SGE_MASK_ELEM_ONE_MASK;
		delta += RX_SGE_MASK_ELEM_SZ;
	}

	if (delta > 0) {
		fp->rx_sge_prod += delta;
		/* clear page-end entries */
		bnx2x_clear_sge_mask_next_elems(fp);
	}

	DP(NETIF_MSG_RX_STATUS,
	   "fp->last_max_sge = %d  fp->rx_sge_prod = %d\n",
	   fp->last_max_sge, fp->rx_sge_prod);
}
static inline void bnx2x_init_sge_ring_bit_mask(struct bnx2x_fastpath *fp)
{
	/* Set the mask to all 1-s: it's faster to compare to 0 than to 0xf-s */
	memset(fp->sge_mask, 0xff,
	       (NUM_RX_SGE >> RX_SGE_MASK_ELEM_SHIFT)*sizeof(u64));

	/* Clear the two last indices in the page to 1:
	   these are the indices that correspond to the "next" element,
	   hence will never be indicated and should be removed from
	   the calculations. */
	bnx2x_clear_sge_mask_next_elems(fp);
}
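/* TPA start: the firmware has opened a new aggregation "bin".  The driver
 * parks the partially-filled skb in the per-queue tpa_pool (still mapped)
 * and puts the pool's spare skb on the BD ring so the slot stays usable.
 */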
static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
			    struct sk_buff *skb, u16 cons, u16 prod)
{
	struct bnx2x *bp = fp->bp;
	struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
	struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
	struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
	dma_addr_t mapping;

	/* move empty skb from pool to prod and map it */
	prod_rx_buf->skb = fp->tpa_pool[queue].skb;
	mapping = pci_map_single(bp->pdev, fp->tpa_pool[queue].skb->data,
				 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
	pci_unmap_addr_set(prod_rx_buf, mapping, mapping);

	/* move partial skb from cons to pool (don't unmap yet) */
	fp->tpa_pool[queue] = *cons_rx_buf;

	/* mark bin state as start - print error if current state != stop */
	if (fp->tpa_state[queue] != BNX2X_TPA_STOP)
		BNX2X_ERR("start of bin not in stop [%d]\n", queue);

	fp->tpa_state[queue] = BNX2X_TPA_START;

	/* point prod_bd to new skb */
	prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

#ifdef BNX2X_STOP_ON_ERROR
	fp->tpa_queue_used |= (1 << queue);
#ifdef __powerpc64__
	DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n",
#else
	DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
#endif
	   fp->tpa_queue_used);
#endif
}
static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			       struct sk_buff *skb,
			       struct eth_fast_path_rx_cqe *fp_cqe,
			       u16 cqe_idx)
{
	struct sw_rx_page *rx_pg, old_rx_pg;
	u16 len_on_bd = le16_to_cpu(fp_cqe->len_on_bd);
	u32 i, frag_len, frag_size, pages;
	int err;
	int j;

	frag_size = le16_to_cpu(fp_cqe->pkt_len) - len_on_bd;
	pages = SGE_PAGE_ALIGN(frag_size) >> SGE_PAGE_SHIFT;

	/* This is needed in order to enable forwarding support */
	if (frag_size)
		skb_shinfo(skb)->gso_size = min((u32)SGE_PAGE_SIZE,
					       max(frag_size, (u32)len_on_bd));

#ifdef BNX2X_STOP_ON_ERROR
	if (pages >
	    min((u32)8, (u32)MAX_SKB_FRAGS) * SGE_PAGE_SIZE * PAGES_PER_SGE) {
		BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
			  pages, cqe_idx);
		BNX2X_ERR("fp_cqe->pkt_len = %d  fp_cqe->len_on_bd = %d\n",
			  fp_cqe->pkt_len, len_on_bd);
		bnx2x_panic();
		return -EINVAL;
	}
#endif

	/* Run through the SGL and compose the fragmented skb */
	for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
		u16 sge_idx = RX_SGE(le16_to_cpu(fp_cqe->sgl[j]));

		/* FW gives the indices of the SGE as if the ring is an array
		   (meaning that "next" element will consume 2 indices) */
		frag_len = min(frag_size, (u32)(SGE_PAGE_SIZE*PAGES_PER_SGE));
		rx_pg = &fp->rx_page_ring[sge_idx];
		old_rx_pg = *rx_pg;

		/* If we fail to allocate a substitute page, we simply stop
		   where we are and drop the whole packet */
		err = bnx2x_alloc_rx_sge(bp, fp, sge_idx);
		if (unlikely(err)) {
			fp->eth_q_stats.rx_skb_alloc_failed++;
			return err;
		}

		/* Unmap the page as we are going to pass it to the stack */
		pci_unmap_page(bp->pdev, pci_unmap_addr(&old_rx_pg, mapping),
			      SGE_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);

		/* Add one frag and update the appropriate fields in the skb */
		skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);

		skb->data_len += frag_len;
		skb->truesize += frag_len;
		skb->len += frag_len;

		frag_size -= frag_len;
	}

	return 0;
}
static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			   u16 queue, int pad, int len, union eth_rx_cqe *cqe,
			   u16 cqe_idx)
{
	struct sw_rx_bd *rx_buf = &fp->tpa_pool[queue];
	struct sk_buff *skb = rx_buf->skb;
	/* alloc new skb */
	struct sk_buff *new_skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);

	/* Unmap skb in the pool anyway, as we are going to change
	   pool entry status to BNX2X_TPA_STOP even if new skb allocation
	   fails. */
	pci_unmap_single(bp->pdev, pci_unmap_addr(rx_buf, mapping),
			 bp->rx_buf_size, PCI_DMA_FROMDEVICE);

	if (likely(new_skb)) {
		/* fix ip xsum and give it to the stack */
		/* (no need to map the new skb) */
#ifdef BCM_VLAN
		int is_vlan_cqe =
			(le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
			 PARSING_FLAGS_VLAN);
		int is_not_hwaccel_vlan_cqe =
			(is_vlan_cqe && (!(bp->flags & HW_VLAN_RX_FLAG)));
#endif

		prefetch(skb);
		prefetch(((char *)(skb)) + 128);

#ifdef BNX2X_STOP_ON_ERROR
		if (pad + len > bp->rx_buf_size) {
			BNX2X_ERR("skb_put is about to fail...  "
				  "pad %d  len %d  rx_buf_size %d\n",
				  pad, len, bp->rx_buf_size);
			bnx2x_panic();
			return;
		}
#endif

		skb_reserve(skb, pad);
		skb_put(skb, len);

		skb->protocol = eth_type_trans(skb, bp->dev);
		skb->ip_summed = CHECKSUM_UNNECESSARY;

		{
			struct iphdr *iph;

			iph = (struct iphdr *)skb->data;
#ifdef BCM_VLAN
			/* If there is no Rx VLAN offloading -
			   take VLAN tag into an account */
			if (unlikely(is_not_hwaccel_vlan_cqe))
				iph = (struct iphdr *)((u8 *)iph + VLAN_HLEN);
#endif
			iph->check = 0;
			iph->check = ip_fast_csum((u8 *)iph, iph->ihl);
		}

		if (!bnx2x_fill_frag_skb(bp, fp, skb,
					 &cqe->fast_path_cqe, cqe_idx)) {
#ifdef BCM_VLAN
			if ((bp->vlgrp != NULL) && is_vlan_cqe &&
			    (!is_not_hwaccel_vlan_cqe))
				vlan_hwaccel_receive_skb(skb, bp->vlgrp,
						le16_to_cpu(cqe->fast_path_cqe.
							    vlan_tag));
			else
#endif
				netif_receive_skb(skb);
		} else {
			DP(NETIF_MSG_RX_STATUS, "Failed to allocate new pages"
			   " - dropping packet!\n");
			dev_kfree_skb(skb);
		}

		/* put new skb in bin */
		fp->tpa_pool[queue].skb = new_skb;

	} else {
		/* else drop the packet and keep the buffer in the bin */
		DP(NETIF_MSG_RX_STATUS,
		   "Failed to allocate new skb - dropping packet!\n");
		fp->eth_q_stats.rx_skb_alloc_failed++;
	}

	fp->tpa_state[queue] = BNX2X_TPA_STOP;
}
static inline void bnx2x_update_rx_prod(struct bnx2x *bp,
					struct bnx2x_fastpath *fp,
					u16 bd_prod, u16 rx_comp_prod,
					u16 rx_sge_prod)
{
	struct ustorm_eth_rx_producers rx_prods = {0};
	int i;

	/* Update producers */
	rx_prods.bd_prod = bd_prod;
	rx_prods.cqe_prod = rx_comp_prod;
	rx_prods.sge_prod = rx_sge_prod;

	/*
	 * Make sure that the BD and SGE data is updated before updating the
	 * producers since FW might read the BD/SGE right after the producer
	 * is updated.
	 * This is only applicable for weak-ordered memory model archs such
	 * as IA-64. The following barrier is also mandatory since FW assumes
	 * BDs must have buffers.
	 */
	wmb();

	for (i = 0; i < sizeof(struct ustorm_eth_rx_producers)/4; i++)
		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_RX_PRODS_OFFSET(BP_PORT(bp), fp->cl_id) + i*4,
		       ((u32 *)&rx_prods)[i]);

	mmiowb(); /* keep prod updates ordered */

	DP(NETIF_MSG_RX_STATUS,
	   "queue[%d]:  wrote  bd_prod %u  cqe_prod %u  sge_prod %u\n",
	   fp->index, bd_prod, rx_comp_prod, rx_sge_prod);
}
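/* RX path: process completions from the RCQ up to the index reported in the
 * status block.  Slowpath CQEs are handed to bnx2x_sp_event(); TPA CQEs open
 * or close an aggregation; regular CQEs either have their data copied into a
 * fresh skb (small packets on jumbo MTU), get a replacement buffer allocated,
 * or are recycled on allocation failure.
 */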
static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
{
	struct bnx2x *bp = fp->bp;
	u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
	u16 hw_comp_cons, sw_comp_cons, sw_comp_prod;
	int rx_pkt = 0;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return 0;
#endif

	/* CQ "next element" is of the size of the regular element,
	   that's why it's ok here */
	hw_comp_cons = le16_to_cpu(*fp->rx_cons_sb);
	if ((hw_comp_cons & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
		hw_comp_cons++;

	bd_cons = fp->rx_bd_cons;
	bd_prod = fp->rx_bd_prod;
	bd_prod_fw = bd_prod;
	sw_comp_cons = fp->rx_comp_cons;
	sw_comp_prod = fp->rx_comp_prod;

	/* Memory barrier necessary as speculative reads of the rx
	 * buffer can be ahead of the index in the status block
	 */
	rmb();

	DP(NETIF_MSG_RX_STATUS,
	   "queue[%d]:  hw_comp_cons %u  sw_comp_cons %u\n",
	   fp->index, hw_comp_cons, sw_comp_cons);

	while (sw_comp_cons != hw_comp_cons) {
		struct sw_rx_bd *rx_buf = NULL;
		struct sk_buff *skb;
		union eth_rx_cqe *cqe;
		u8 cqe_fp_flags;
		u16 len, pad;

		comp_ring_cons = RCQ_BD(sw_comp_cons);
		bd_prod = RX_BD(bd_prod);
		bd_cons = RX_BD(bd_cons);

		cqe = &fp->rx_comp_ring[comp_ring_cons];
		cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;

		DP(NETIF_MSG_RX_STATUS, "CQE type %x  err %x  status %x"
		   "  queue %x  vlan %x  len %u\n", CQE_TYPE(cqe_fp_flags),
		   cqe_fp_flags, cqe->fast_path_cqe.status_flags,
		   le32_to_cpu(cqe->fast_path_cqe.rss_hash_result),
		   le16_to_cpu(cqe->fast_path_cqe.vlan_tag),
		   le16_to_cpu(cqe->fast_path_cqe.pkt_len));

		/* is this a slowpath msg? */
		if (unlikely(CQE_TYPE(cqe_fp_flags))) {
			bnx2x_sp_event(fp, cqe);
			goto next_cqe;

		/* this is an rx packet */
		} else {
			rx_buf = &fp->rx_buf_ring[bd_cons];
			skb = rx_buf->skb;
			len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
			pad = cqe->fast_path_cqe.placement_offset;

			/* If CQE is marked both TPA_START and TPA_END
			   it is a non-TPA CQE */
			if ((!fp->disable_tpa) &&
			    (TPA_TYPE(cqe_fp_flags) !=
					(TPA_TYPE_START | TPA_TYPE_END))) {
				u16 queue = cqe->fast_path_cqe.queue_index;

				if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_START) {
					DP(NETIF_MSG_RX_STATUS,
					   "calling tpa_start on queue %d\n",
					   queue);

					bnx2x_tpa_start(fp, queue, skb,
							bd_cons, bd_prod);
					goto next_rx;
				}

				if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_END) {
					DP(NETIF_MSG_RX_STATUS,
					   "calling tpa_stop on queue %d\n",
					   queue);

					if (!BNX2X_RX_SUM_FIX(cqe))
						BNX2X_ERR("STOP on non-TCP "
							  "data\n");

					/* This is a size of the linear data
					   on this skb */
					len = le16_to_cpu(cqe->fast_path_cqe.
								len_on_bd);
					bnx2x_tpa_stop(bp, fp, queue, pad,
						    len, cqe, comp_ring_cons);
#ifdef BNX2X_STOP_ON_ERROR
					if (bp->panic)
						return 0;
#endif

					bnx2x_update_sge_prod(fp,
							&cqe->fast_path_cqe);
					goto next_cqe;
				}
			}

			pci_dma_sync_single_for_device(bp->pdev,
					pci_unmap_addr(rx_buf, mapping),
						       pad + RX_COPY_THRESH,
						       PCI_DMA_FROMDEVICE);
			prefetch(skb);
			prefetch(((char *)(skb)) + 128);

			/* is this an error packet? */
			if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
				DP(NETIF_MSG_RX_ERR,
				   "ERROR  flags %x  rx packet %u\n",
				   cqe_fp_flags, sw_comp_cons);
				fp->eth_q_stats.rx_err_discard_pkt++;
				goto reuse_rx;
			}

			/* Since we don't have a jumbo ring
			 * copy small packets if mtu > 1500
			 */
			if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
			    (len <= RX_COPY_THRESH)) {
				struct sk_buff *new_skb;

				new_skb = netdev_alloc_skb(bp->dev,
							   len + pad);
				if (new_skb == NULL) {
					DP(NETIF_MSG_RX_ERR,
					   "ERROR  packet dropped "
					   "because of alloc failure\n");
					fp->eth_q_stats.rx_skb_alloc_failed++;
					goto reuse_rx;
				}

				/* aligned copy */
				skb_copy_from_linear_data_offset(skb, pad,
						    new_skb->data + pad, len);
				skb_reserve(new_skb, pad);
				skb_put(new_skb, len);

				bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);

				skb = new_skb;

			} else if (bnx2x_alloc_rx_skb(bp, fp, bd_prod) == 0) {
				pci_unmap_single(bp->pdev,
					pci_unmap_addr(rx_buf, mapping),
						 bp->rx_buf_size,
						 PCI_DMA_FROMDEVICE);
				skb_reserve(skb, pad);
				skb_put(skb, len);

			} else {
				DP(NETIF_MSG_RX_ERR,
				   "ERROR  packet dropped because "
				   "of alloc failure\n");
				fp->eth_q_stats.rx_skb_alloc_failed++;
reuse_rx:
				bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
				goto next_rx;
			}

			skb->protocol = eth_type_trans(skb, bp->dev);

			skb->ip_summed = CHECKSUM_NONE;
			if (bp->rx_csum) {
				if (likely(BNX2X_RX_CSUM_OK(cqe)))
					skb->ip_summed = CHECKSUM_UNNECESSARY;
				else
					fp->eth_q_stats.hw_csum_err++;
			}
		}

		skb_record_rx_queue(skb, fp->index);
#ifdef BCM_VLAN
		if ((bp->vlgrp != NULL) && (bp->flags & HW_VLAN_RX_FLAG) &&
		    (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
		     PARSING_FLAGS_VLAN))
			vlan_hwaccel_receive_skb(skb, bp->vlgrp,
				le16_to_cpu(cqe->fast_path_cqe.vlan_tag));
		else
#endif
			netif_receive_skb(skb);

next_rx:
		rx_buf->skb = NULL;

		bd_cons = NEXT_RX_IDX(bd_cons);
		bd_prod = NEXT_RX_IDX(bd_prod);
		bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
		rx_pkt++;
next_cqe:
		sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
		sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);

		if (rx_pkt == budget)
			break;
	} /* while */

	fp->rx_bd_cons = bd_cons;
	fp->rx_bd_prod = bd_prod_fw;
	fp->rx_comp_cons = sw_comp_cons;
	fp->rx_comp_prod = sw_comp_prod;

	/* Update producers */
	bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
			     fp->rx_sge_prod);

	fp->rx_pkt += rx_pkt;
	fp->rx_calls++;

	return rx_pkt;
}
static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
{
	struct bnx2x_fastpath *fp = fp_cookie;
	struct bnx2x *bp = fp->bp;
	int index = fp->index;

	/* Return here if interrupt is disabled */
	if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
		DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
		return IRQ_HANDLED;
	}

	DP(BNX2X_MSG_FP, "got an MSI-X interrupt on IDX:SB [%d:%d]\n",
	   index, fp->sb_id);
	bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return IRQ_HANDLED;
#endif

	prefetch(fp->rx_cons_sb);
	prefetch(fp->tx_cons_sb);
	prefetch(&fp->status_blk->c_status_block.status_block_index);
	prefetch(&fp->status_blk->u_status_block.status_block_index);

	napi_schedule(&bnx2x_fp(bp, index, napi));

	return IRQ_HANDLED;
}
static irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
{
	struct bnx2x *bp = netdev_priv(dev_instance);
	u16 status = bnx2x_ack_int(bp);
	u16 mask;

	/* Return here if interrupt is shared and it's not for us */
	if (unlikely(status == 0)) {
		DP(NETIF_MSG_INTR, "not our interrupt!\n");
		return IRQ_NONE;
	}
	DP(NETIF_MSG_INTR, "got an interrupt  status %u\n", status);

	/* Return here if interrupt is disabled */
	if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
		DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
		return IRQ_HANDLED;
	}

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return IRQ_HANDLED;
#endif

	mask = 0x2 << bp->fp[0].sb_id;
	if (status & mask) {
		struct bnx2x_fastpath *fp = &bp->fp[0];

		prefetch(fp->rx_cons_sb);
		prefetch(fp->tx_cons_sb);
		prefetch(&fp->status_blk->c_status_block.status_block_index);
		prefetch(&fp->status_blk->u_status_block.status_block_index);

		napi_schedule(&bnx2x_fp(bp, 0, napi));

		status &= ~mask;
	}

	if (unlikely(status & 0x1)) {
		queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);

		status &= ~0x1;
		if (!status)
			return IRQ_HANDLED;
	}

	if (status)
		DP(NETIF_MSG_INTR, "got an unknown interrupt! (status %u)\n",
		   status);

	return IRQ_HANDLED;
}
/* end of fast path */

static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event);

/* Link */

/*
 * General service functions
 */
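/* The HW lock registers implement a per-resource lock shared by the driver
 * instances on both ports: writing the resource bit to the "set" register
 * (base + 4) attempts the lock, reading the base register back confirms
 * ownership, and writing the bit to the base register releases it.
 */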
static int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource)
{
	u32 lock_status;
	u32 resource_bit = (1 << resource);
	int func = BP_FUNC(bp);
	u32 hw_lock_control_reg;
	int cnt;

	/* Validating that the resource is within range */
	if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
		DP(NETIF_MSG_HW,
		   "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
		   resource, HW_LOCK_MAX_RESOURCE_VALUE);
		return -EINVAL;
	}

	if (func <= 5) {
		hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
	} else {
		hw_lock_control_reg =
				(MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
	}

	/* Validating that the resource is not already taken */
	lock_status = REG_RD(bp, hw_lock_control_reg);
	if (lock_status & resource_bit) {
		DP(NETIF_MSG_HW, "lock_status 0x%x  resource_bit 0x%x\n",
		   lock_status, resource_bit);
		return -EEXIST;
	}

	/* Try for 5 second every 5ms */
	for (cnt = 0; cnt < 1000; cnt++) {
		/* Try to acquire the lock */
		REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
		lock_status = REG_RD(bp, hw_lock_control_reg);
		if (lock_status & resource_bit)
			return 0;

		msleep(5);
	}
	DP(NETIF_MSG_HW, "Timeout\n");
	return -EAGAIN;
}
static int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource)
{
	u32 lock_status;
	u32 resource_bit = (1 << resource);
	int func = BP_FUNC(bp);
	u32 hw_lock_control_reg;

	/* Validating that the resource is within range */
	if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
		DP(NETIF_MSG_HW,
		   "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
		   resource, HW_LOCK_MAX_RESOURCE_VALUE);
		return -EINVAL;
	}

	if (func <= 5) {
		hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
	} else {
		hw_lock_control_reg =
				(MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
	}

	/* Validating that the resource is currently taken */
	lock_status = REG_RD(bp, hw_lock_control_reg);
	if (!(lock_status & resource_bit)) {
		DP(NETIF_MSG_HW, "lock_status 0x%x  resource_bit 0x%x\n",
		   lock_status, resource_bit);
		return -EFAULT;
	}

	REG_WR(bp, hw_lock_control_reg, resource_bit);
	return 0;
}
/* HW Lock for shared dual port PHYs */
static void bnx2x_acquire_phy_lock(struct bnx2x *bp)
{
	mutex_lock(&bp->port.phy_mutex);

	if (bp->port.need_hw_lock)
		bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
}

static void bnx2x_release_phy_lock(struct bnx2x *bp)
{
	if (bp->port.need_hw_lock)
		bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);

	mutex_unlock(&bp->port.phy_mutex);
}
int bnx2x_get_gpio(struct bnx2x *bp, int gpio_num, u8 port)
{
	/* The GPIO should be swapped if swap register is set and active */
	int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
			 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
	int gpio_shift = gpio_num +
			(gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
	u32 gpio_mask = (1 << gpio_shift);
	u32 gpio_reg;
	int value;

	if (gpio_num > MISC_REGISTERS_GPIO_3) {
		BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
		return -EINVAL;
	}

	/* read GPIO value */
	gpio_reg = REG_RD(bp, MISC_REG_GPIO);

	/* get the requested pin value */
	if ((gpio_reg & gpio_mask) == gpio_mask)
		value = 1;
	else
		value = 0;

	DP(NETIF_MSG_LINK, "pin %d  value 0x%x\n", gpio_num, value);

	return value;
}
int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
{
	/* The GPIO should be swapped if swap register is set and active */
	int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
			 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
	int gpio_shift = gpio_num +
			(gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
	u32 gpio_mask = (1 << gpio_shift);
	u32 gpio_reg;

	if (gpio_num > MISC_REGISTERS_GPIO_3) {
		BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
		return -EINVAL;
	}

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
	/* read GPIO and mask except the float bits */
	gpio_reg = (REG_RD(bp, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT);

	switch (mode) {
	case MISC_REGISTERS_GPIO_OUTPUT_LOW:
		DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output low\n",
		   gpio_num, gpio_shift);
		/* clear FLOAT and set CLR */
		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
		gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS);
		break;

	case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
		DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output high\n",
		   gpio_num, gpio_shift);
		/* clear FLOAT and set SET */
		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
		gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_SET_POS);
		break;

	case MISC_REGISTERS_GPIO_INPUT_HI_Z:
		DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> input\n",
		   gpio_num, gpio_shift);
		/* set FLOAT */
		gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
		break;

	default:
		break;
	}

	REG_WR(bp, MISC_REG_GPIO, gpio_reg);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);

	return 0;
}
int bnx2x_set_gpio_int(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
{
	/* The GPIO should be swapped if swap register is set and active */
	int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
			 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
	int gpio_shift = gpio_num +
			(gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
	u32 gpio_mask = (1 << gpio_shift);
	u32 gpio_reg;

	if (gpio_num > MISC_REGISTERS_GPIO_3) {
		BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
		return -EINVAL;
	}

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
	/* read GPIO int */
	gpio_reg = REG_RD(bp, MISC_REG_GPIO_INT);

	switch (mode) {
	case MISC_REGISTERS_GPIO_INT_OUTPUT_CLR:
		DP(NETIF_MSG_LINK, "Clear GPIO INT %d (shift %d) -> "
				   "output low\n", gpio_num, gpio_shift);
		/* clear SET and set CLR */
		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
		gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
		break;

	case MISC_REGISTERS_GPIO_INT_OUTPUT_SET:
		DP(NETIF_MSG_LINK, "Set GPIO INT %d (shift %d) -> "
				   "output high\n", gpio_num, gpio_shift);
		/* clear CLR and set SET */
		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
		gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
		break;

	default:
		break;
	}

	REG_WR(bp, MISC_REG_GPIO_INT, gpio_reg);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);

	return 0;
}
static int bnx2x_set_spio(struct bnx2x *bp, int spio_num, u32 mode)
{
	u32 spio_mask = (1 << spio_num);
	u32 spio_reg;

	if ((spio_num < MISC_REGISTERS_SPIO_4) ||
	    (spio_num > MISC_REGISTERS_SPIO_7)) {
		BNX2X_ERR("Invalid SPIO %d\n", spio_num);
		return -EINVAL;
	}

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
	/* read SPIO and mask except the float bits */
	spio_reg = (REG_RD(bp, MISC_REG_SPIO) & MISC_REGISTERS_SPIO_FLOAT);

	switch (mode) {
	case MISC_REGISTERS_SPIO_OUTPUT_LOW:
		DP(NETIF_MSG_LINK, "Set SPIO %d -> output low\n", spio_num);
		/* clear FLOAT and set CLR */
		spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
		spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_CLR_POS);
		break;

	case MISC_REGISTERS_SPIO_OUTPUT_HIGH:
		DP(NETIF_MSG_LINK, "Set SPIO %d -> output high\n", spio_num);
		/* clear FLOAT and set SET */
		spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
		spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_SET_POS);
		break;

	case MISC_REGISTERS_SPIO_INPUT_HI_Z:
		DP(NETIF_MSG_LINK, "Set SPIO %d -> input\n", spio_num);
		/* set FLOAT */
		spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
		break;

	default:
		break;
	}

	REG_WR(bp, MISC_REG_SPIO, spio_reg);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);

	return 0;
}
static void bnx2x_calc_fc_adv(struct bnx2x *bp)
{
	switch (bp->link_vars.ieee_fc &
		MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK) {
	case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE:
		bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
					  ADVERTISED_Pause);
		break;

	case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH:
		bp->port.advertising |= (ADVERTISED_Asym_Pause |
					 ADVERTISED_Pause);
		break;

	case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC:
		bp->port.advertising |= ADVERTISED_Asym_Pause;
		break;

	default:
		bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
					  ADVERTISED_Pause);
		break;
	}
}
static void bnx2x_link_report(struct bnx2x *bp)
{
	if (bp->link_vars.link_up) {
		if (bp->state == BNX2X_STATE_OPEN)
			netif_carrier_on(bp->dev);
		printk(KERN_INFO PFX "%s NIC Link is Up, ", bp->dev->name);

		printk("%d Mbps ", bp->link_vars.line_speed);

		if (bp->link_vars.duplex == DUPLEX_FULL)
			printk("full duplex");
		else
			printk("half duplex");

		if (bp->link_vars.flow_ctrl != BNX2X_FLOW_CTRL_NONE) {
			if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) {
				printk(", receive ");
				if (bp->link_vars.flow_ctrl &
				    BNX2X_FLOW_CTRL_TX)
					printk("& transmit ");
			} else {
				printk(", transmit ");
			}
			printk("flow control ON");
		}
		printk("\n");

	} else { /* link_down */
		netif_carrier_off(bp->dev);
		printk(KERN_ERR PFX "%s NIC Link is Down\n", bp->dev->name);
	}
}
static u8 bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode)
{
	if (!BP_NOMCP(bp)) {
		u8 rc;

		/* Initialize link parameters structure variables */
		/* It is recommended to turn off RX FC for jumbo frames
		   for better performance */
		if (IS_E1HMF(bp))
			bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;
		else if (bp->dev->mtu > 5000)
			bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_TX;
		else
			bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;

		bnx2x_acquire_phy_lock(bp);

		if (load_mode == LOAD_DIAG)
			bp->link_params.loopback_mode = LOOPBACK_XGXS_10;

		rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars);

		bnx2x_release_phy_lock(bp);

		bnx2x_calc_fc_adv(bp);

		if (CHIP_REV_IS_SLOW(bp) && bp->link_vars.link_up) {
			bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
			bnx2x_link_report(bp);
		}

		return rc;
	}
	BNX2X_ERR("Bootcode is missing - not initializing link\n");
	return -EINVAL;
}
static void bnx2x_link_set(struct bnx2x *bp)
{
	if (!BP_NOMCP(bp)) {
		bnx2x_acquire_phy_lock(bp);
		bnx2x_phy_init(&bp->link_params, &bp->link_vars);
		bnx2x_release_phy_lock(bp);

		bnx2x_calc_fc_adv(bp);
	} else
		BNX2X_ERR("Bootcode is missing - not setting link\n");
}
static void bnx2x__link_reset(struct bnx2x *bp)
{
	if (!BP_NOMCP(bp)) {
		bnx2x_acquire_phy_lock(bp);
		bnx2x_link_reset(&bp->link_params, &bp->link_vars, 1);
		bnx2x_release_phy_lock(bp);
	} else
		BNX2X_ERR("Bootcode is missing - not resetting link\n");
}
static u8 bnx2x_link_test(struct bnx2x *bp)
{
	u8 rc;

	bnx2x_acquire_phy_lock(bp);
	rc = bnx2x_test_link(&bp->link_params, &bp->link_vars);
	bnx2x_release_phy_lock(bp);

	return rc;
}
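/* Rate shaping / fairness setup for E1H multi-function mode: the port-wide
 * parameters are derived from the current line speed, and each virtual NIC
 * (vn) then gets its min/max rates from the shared-memory MF configuration.
 */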
static void bnx2x_init_port_minmax(struct bnx2x *bp)
{
	u32 r_param = bp->link_vars.line_speed / 8;
	u32 fair_periodic_timeout_usec;
	u32 t_fair;

	memset(&(bp->cmng.rs_vars), 0,
	       sizeof(struct rate_shaping_vars_per_port));
	memset(&(bp->cmng.fair_vars), 0, sizeof(struct fairness_vars_per_port));

	/* 100 usec in SDM ticks = 25 since each tick is 4 usec */
	bp->cmng.rs_vars.rs_periodic_timeout = RS_PERIODIC_TIMEOUT_USEC / 4;

	/* this is the threshold below which no timer arming will occur
	   1.25 coefficient is for the threshold to be a little bigger
	   than the real time, to compensate for timer in-accuracy */
	bp->cmng.rs_vars.rs_threshold =
				(RS_PERIODIC_TIMEOUT_USEC * r_param * 5) / 4;

	/* resolution of fairness timer */
	fair_periodic_timeout_usec = QM_ARB_BYTES / r_param;
	/* for 10G it is 1000usec. for 1G it is 10000usec. */
	t_fair = T_FAIR_COEF / bp->link_vars.line_speed;

	/* this is the threshold below which we won't arm the timer anymore */
	bp->cmng.fair_vars.fair_threshold = QM_ARB_BYTES;

	/* we multiply by 1e3/8 to get bytes/msec.
	   We don't want the credits to pass a credit
	   of the t_fair*FAIR_MEM (algorithm resolution) */
	bp->cmng.fair_vars.upper_bound = r_param * t_fair * FAIR_MEM;
	/* since each tick is 4 usec */
	bp->cmng.fair_vars.fairness_timeout = fair_periodic_timeout_usec / 4;
}
2190 static void bnx2x_init_vn_minmax(struct bnx2x *bp, int func)
2192 struct rate_shaping_vars_per_vn m_rs_vn;
2193 struct fairness_vars_per_vn m_fair_vn;
2194 u32 vn_cfg = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
2195 u16 vn_min_rate, vn_max_rate;
2198 /* If function is hidden - set min and max to zeroes */
2199 if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) {
2204 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
2205 FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
2206 /* If fairness is enabled (not all min rates are zeroes) and
2207 if current min rate is zero - set it to 1.
2208 This is a requirement of the algorithm. */
2209 if (bp->vn_weight_sum && (vn_min_rate == 0))
2210 vn_min_rate = DEF_MIN_RATE;
2211 vn_max_rate = ((vn_cfg & FUNC_MF_CFG_MAX_BW_MASK) >>
2212 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
2216 "func %d: vn_min_rate=%d vn_max_rate=%d vn_weight_sum=%d\n",
2217 func, vn_min_rate, vn_max_rate, bp->vn_weight_sum);
2219 memset(&m_rs_vn, 0, sizeof(struct rate_shaping_vars_per_vn));
2220 memset(&m_fair_vn, 0, sizeof(struct fairness_vars_per_vn));
2222 /* global vn counter - maximal Mbps for this vn */
2223 m_rs_vn.vn_counter.rate = vn_max_rate;
2225 /* quota - number of bytes transmitted in this period */
2226 m_rs_vn.vn_counter.quota =
2227 (vn_max_rate * RS_PERIODIC_TIMEOUT_USEC) / 8;
2229 if (bp->vn_weight_sum) {
2230 /* credit for each period of the fairness algorithm:
2231 number of bytes in T_FAIR (the vns share the port rate).
2232 vn_weight_sum should not be larger than 10000, thus
2233 T_FAIR_COEF / (8 * vn_weight_sum) will always be greater
2234 than zero */
2235 m_fair_vn.vn_credit_delta =
2236 max((u32)(vn_min_rate * (T_FAIR_COEF /
2237 (8 * bp->vn_weight_sum))),
2238 (u32)(bp->cmng.fair_vars.fair_threshold * 2));
2239 DP(NETIF_MSG_IFUP, "m_fair_vn.vn_credit_delta=%d\n",
2240 m_fair_vn.vn_credit_delta);
2243 /* Store it to internal memory */
2244 for (i = 0; i < sizeof(struct rate_shaping_vars_per_vn)/4; i++)
2245 REG_WR(bp, BAR_XSTRORM_INTMEM +
2246 XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func) + i * 4,
2247 ((u32 *)(&m_rs_vn))[i]);
2249 for (i = 0; i < sizeof(struct fairness_vars_per_vn)/4; i++)
2250 REG_WR(bp, BAR_XSTRORM_INTMEM +
2251 XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func) + i * 4,
2252 ((u32 *)(&m_fair_vn))[i]);
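/*
 * Worked example (hypothetical config values): if the shmem word for
 * this vn carries min BW = 20 and max BW = 80 (units of 100 Mbps),
 * then vn_min_rate = 2000 Mbps and vn_max_rate = 8000 Mbps, and the
 * per-period rate-shaping quota becomes
 *	quota = (8000 * RS_PERIODIC_TIMEOUT_USEC) / 8
 *	      = (8000 * 100) / 8 = 100000 bytes per 100 usec
 * which is exactly 8 Gbps expressed in bytes per period.
 */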
2256 /* This function is called upon link interrupt */
2257 static void bnx2x_link_attn(struct bnx2x *bp)
2259 /* Make sure that we are synced with the current statistics */
2260 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2262 bnx2x_link_update(&bp->link_params, &bp->link_vars);
2264 if (bp->link_vars.link_up) {
2266 /* dropless flow control */
2267 if (CHIP_IS_E1H(bp)) {
2268 int port = BP_PORT(bp);
2269 u32 pause_enabled = 0;
2271 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
2272 pause_enabled = 1;
2274 REG_WR(bp, BAR_USTRORM_INTMEM +
2275 USTORM_PAUSE_ENABLED_OFFSET(port),
2276 pause_enabled);
2279 if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
2280 struct host_port_stats *pstats;
2282 pstats = bnx2x_sp(bp, port_stats);
2283 /* reset old bmac stats */
2284 memset(&(pstats->mac_stx[0]), 0,
2285 sizeof(struct mac_stx));
2287 if ((bp->state == BNX2X_STATE_OPEN) ||
2288 (bp->state == BNX2X_STATE_DISABLED))
2289 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2292 /* indicate link status */
2293 bnx2x_link_report(bp);
2296 int port = BP_PORT(bp);
2300 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
2301 if (vn == BP_E1HVN(bp))
2302 continue;
2304 func = ((vn << 1) | port);
2306 /* Set the attention towards other drivers
2307 on the same port */
2308 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
2309 (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
2312 if (bp->link_vars.link_up) {
2315 /* Init rate shaping and fairness contexts */
2316 bnx2x_init_port_minmax(bp);
2318 for (vn = VN_0; vn < E1HVN_MAX; vn++)
2319 bnx2x_init_vn_minmax(bp, 2*vn + port);
2321 /* Store it to internal memory */
2322 for (i = 0;
2323 i < sizeof(struct cmng_struct_per_port) / 4; i++)
2324 REG_WR(bp, BAR_XSTRORM_INTMEM +
2325 XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i*4,
2326 ((u32 *)(&bp->cmng))[i]);
2331 static void bnx2x__link_status_update(struct bnx2x *bp)
2333 if (bp->state != BNX2X_STATE_OPEN)
2334 return;
2336 bnx2x_link_status_update(&bp->link_params, &bp->link_vars);
2338 if (bp->link_vars.link_up)
2339 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2341 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2343 /* indicate link status */
2344 bnx2x_link_report(bp);
2347 static void bnx2x_pmf_update(struct bnx2x *bp)
2349 int port = BP_PORT(bp);
2353 DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
2355 /* enable nig attention */
2356 val = (0xff0f | (1 << (BP_E1HVN(bp) + 4)));
2357 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
2358 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
2360 bnx2x_stats_handle(bp, STATS_EVENT_PMF);
2368 * General service functions
2371 /* the slow path queue is odd since completions arrive on the fastpath ring */
2372 static int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
2373 u32 data_hi, u32 data_lo, int common)
2375 int func = BP_FUNC(bp);
2377 DP(BNX2X_MSG_SP/*NETIF_MSG_TIMER*/,
2378 "SPQE (%x:%x) command %d hw_cid %x data (%x:%x) left %x\n",
2379 (u32)U64_HI(bp->spq_mapping), (u32)(U64_LO(bp->spq_mapping) +
2380 (void *)bp->spq_prod_bd - (void *)bp->spq), command,
2381 HW_CID(bp, cid), data_hi, data_lo, bp->spq_left);
2383 #ifdef BNX2X_STOP_ON_ERROR
2384 if (unlikely(bp->panic))
2385 return -EIO;
2388 spin_lock_bh(&bp->spq_lock);
2390 if (!bp->spq_left) {
2391 BNX2X_ERR("BUG! SPQ ring full!\n");
2392 spin_unlock_bh(&bp->spq_lock);
2393 bnx2x_panic();
2394 return -EBUSY;
2395 }
2397 /* CID needs port number to be encoded in it */
2398 bp->spq_prod_bd->hdr.conn_and_cmd_data =
2399 cpu_to_le32(((command << SPE_HDR_CMD_ID_SHIFT) |
2400 HW_CID(bp, cid)));
2401 bp->spq_prod_bd->hdr.type = cpu_to_le16(ETH_CONNECTION_TYPE);
2402 if (common)
2403 bp->spq_prod_bd->hdr.type |=
2404 cpu_to_le16((1 << SPE_HDR_COMMON_RAMROD_SHIFT));
2406 bp->spq_prod_bd->data.mac_config_addr.hi = cpu_to_le32(data_hi);
2407 bp->spq_prod_bd->data.mac_config_addr.lo = cpu_to_le32(data_lo);
2409 bp->spq_left--;
2411 if (bp->spq_prod_bd == bp->spq_last_bd) {
2412 bp->spq_prod_bd = bp->spq;
2413 bp->spq_prod_idx = 0;
2414 DP(NETIF_MSG_TIMER, "end of spq\n");
2416 } else {
2417 bp->spq_prod_bd++;
2418 bp->spq_prod_idx++;
2419 }
2421 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func),
2424 spin_unlock_bh(&bp->spq_lock);
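/*
 * Usage sketch (mirrors the statistics ramrod posted in
 * bnx2x_storm_stats_post() below): callers pack 64 bits of ramrod
 * data into data_hi/data_lo, and set 'common' only for ramrods that
 * need the COMMON_RAMROD bit in the SPE header, e.g.
 *
 *	rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_STAT_QUERY, 0,
 *			   ((u32 *)&ramrod_data)[1],
 *			   ((u32 *)&ramrod_data)[0], 0);
 */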
2428 /* acquire split MCP access lock register */
2429 static int bnx2x_acquire_alr(struct bnx2x *bp)
2436 for (j = 0; j < i*10; j++) {
2437 val = (1UL << 31);
2438 REG_WR(bp, GRCBASE_MCP + 0x9c, val);
2439 val = REG_RD(bp, GRCBASE_MCP + 0x9c);
2440 if (val & (1L << 31))
2445 if (!(val & (1L << 31))) {
2446 BNX2X_ERR("Cannot acquire MCP access lock register\n");
2453 /* release split MCP access lock register */
2454 static void bnx2x_release_alr(struct bnx2x *bp)
2456 u32 val = 0;
2458 REG_WR(bp, GRCBASE_MCP + 0x9c, val);
2461 static inline u16 bnx2x_update_dsb_idx(struct bnx2x *bp)
2463 struct host_def_status_block *def_sb = bp->def_status_blk;
2466 barrier(); /* status block is written to by the chip */
2467 if (bp->def_att_idx != def_sb->atten_status_block.attn_bits_index) {
2468 bp->def_att_idx = def_sb->atten_status_block.attn_bits_index;
2471 if (bp->def_c_idx != def_sb->c_def_status_block.status_block_index) {
2472 bp->def_c_idx = def_sb->c_def_status_block.status_block_index;
2475 if (bp->def_u_idx != def_sb->u_def_status_block.status_block_index) {
2476 bp->def_u_idx = def_sb->u_def_status_block.status_block_index;
2479 if (bp->def_x_idx != def_sb->x_def_status_block.status_block_index) {
2480 bp->def_x_idx = def_sb->x_def_status_block.status_block_index;
2483 if (bp->def_t_idx != def_sb->t_def_status_block.status_block_index) {
2484 bp->def_t_idx = def_sb->t_def_status_block.status_block_index;
2491 * slow path service functions
2494 static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
2496 int port = BP_PORT(bp);
2497 u32 hc_addr = (HC_REG_COMMAND_REG + port*32 +
2498 COMMAND_REG_ATTN_BITS_SET);
2499 u32 aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
2500 MISC_REG_AEU_MASK_ATTN_FUNC_0;
2501 u32 nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 :
2502 NIG_REG_MASK_INTERRUPT_PORT0;
2506 if (bp->attn_state & asserted)
2507 BNX2X_ERR("IGU ERROR\n");
2509 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2510 aeu_mask = REG_RD(bp, aeu_addr);
2512 DP(NETIF_MSG_HW, "aeu_mask %x newly asserted %x\n",
2513 aeu_mask, asserted);
2514 aeu_mask &= ~(asserted & 0xff);
2515 DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
2517 REG_WR(bp, aeu_addr, aeu_mask);
2518 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2520 DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
2521 bp->attn_state |= asserted;
2522 DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
2524 if (asserted & ATTN_HARD_WIRED_MASK) {
2525 if (asserted & ATTN_NIG_FOR_FUNC) {
2527 bnx2x_acquire_phy_lock(bp);
2529 /* save nig interrupt mask */
2530 nig_mask = REG_RD(bp, nig_int_mask_addr);
2531 REG_WR(bp, nig_int_mask_addr, 0);
2533 bnx2x_link_attn(bp);
2535 /* handle unicore attn? */
2537 if (asserted & ATTN_SW_TIMER_4_FUNC)
2538 DP(NETIF_MSG_HW, "ATTN_SW_TIMER_4_FUNC!\n");
2540 if (asserted & GPIO_2_FUNC)
2541 DP(NETIF_MSG_HW, "GPIO_2_FUNC!\n");
2543 if (asserted & GPIO_3_FUNC)
2544 DP(NETIF_MSG_HW, "GPIO_3_FUNC!\n");
2546 if (asserted & GPIO_4_FUNC)
2547 DP(NETIF_MSG_HW, "GPIO_4_FUNC!\n");
2550 if (asserted & ATTN_GENERAL_ATTN_1) {
2551 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_1!\n");
2552 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_1, 0x0);
2554 if (asserted & ATTN_GENERAL_ATTN_2) {
2555 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_2!\n");
2556 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_2, 0x0);
2558 if (asserted & ATTN_GENERAL_ATTN_3) {
2559 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_3!\n");
2560 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_3, 0x0);
2563 if (asserted & ATTN_GENERAL_ATTN_4) {
2564 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_4!\n");
2565 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_4, 0x0);
2567 if (asserted & ATTN_GENERAL_ATTN_5) {
2568 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_5!\n");
2569 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_5, 0x0);
2571 if (asserted & ATTN_GENERAL_ATTN_6) {
2572 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_6!\n");
2573 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_6, 0x0);
2577 } /* if hardwired */
2579 DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
2580 asserted, hc_addr);
2581 REG_WR(bp, hc_addr, asserted);
2583 /* now set back the mask */
2584 if (asserted & ATTN_NIG_FOR_FUNC) {
2585 REG_WR(bp, nig_int_mask_addr, nig_mask);
2586 bnx2x_release_phy_lock(bp);
2590 static inline void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
2592 int port = BP_PORT(bp);
2596 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
2597 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
2599 if (attn & AEU_INPUTS_ATTN_BITS_SPIO5) {
2601 val = REG_RD(bp, reg_offset);
2602 val &= ~AEU_INPUTS_ATTN_BITS_SPIO5;
2603 REG_WR(bp, reg_offset, val);
2605 BNX2X_ERR("SPIO5 hw attention\n");
2607 switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
2608 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
2609 /* Fan failure attention */
2611 /* The PHY reset is controlled by GPIO 1 */
2612 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
2613 MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
2614 /* Low power mode is controlled by GPIO 2 */
2615 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
2616 MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
2617 /* mark the failure */
2618 bp->link_params.ext_phy_config &=
2619 ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
2620 bp->link_params.ext_phy_config |=
2621 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE;
2623 dev_info.port_hw_config[port].
2624 external_phy_config,
2625 bp->link_params.ext_phy_config);
2626 /* log the failure */
2627 printk(KERN_ERR PFX "Fan Failure on Network"
2628 " Controller %s has caused the driver to"
2629 " shut down the card to prevent permanent"
2630 " damage. Please contact Dell Support for"
2631 " assistance\n", bp->dev->name);
2639 if (attn & (AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 |
2640 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1)) {
2641 bnx2x_acquire_phy_lock(bp);
2642 bnx2x_handle_module_detect_int(&bp->link_params);
2643 bnx2x_release_phy_lock(bp);
2646 if (attn & HW_INTERRUT_ASSERT_SET_0) {
2648 val = REG_RD(bp, reg_offset);
2649 val &= ~(attn & HW_INTERRUT_ASSERT_SET_0);
2650 REG_WR(bp, reg_offset, val);
2652 BNX2X_ERR("FATAL HW block attention set0 0x%x\n",
2653 (attn & HW_INTERRUT_ASSERT_SET_0));
2658 static inline void bnx2x_attn_int_deasserted1(struct bnx2x *bp, u32 attn)
2662 if (attn & AEU_INPUTS_ATTN_BITS_DOORBELLQ_HW_INTERRUPT) {
2664 val = REG_RD(bp, DORQ_REG_DORQ_INT_STS_CLR);
2665 BNX2X_ERR("DB hw attention 0x%x\n", val);
2666 /* DORQ discard attention */
2668 BNX2X_ERR("FATAL error from DORQ\n");
2671 if (attn & HW_INTERRUT_ASSERT_SET_1) {
2673 int port = BP_PORT(bp);
2676 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 :
2677 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1);
2679 val = REG_RD(bp, reg_offset);
2680 val &= ~(attn & HW_INTERRUT_ASSERT_SET_1);
2681 REG_WR(bp, reg_offset, val);
2683 BNX2X_ERR("FATAL HW block attention set1 0x%x\n",
2684 (attn & HW_INTERRUT_ASSERT_SET_1));
2689 static inline void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn)
2693 if (attn & AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) {
2695 val = REG_RD(bp, CFC_REG_CFC_INT_STS_CLR);
2696 BNX2X_ERR("CFC hw attention 0x%x\n", val);
2697 /* CFC error attention */
2699 BNX2X_ERR("FATAL error from CFC\n");
2702 if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) {
2704 val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_0);
2705 BNX2X_ERR("PXP hw attention 0x%x\n", val);
2706 /* RQ_USDMDP_FIFO_OVERFLOW */
2708 BNX2X_ERR("FATAL error from PXP\n");
2711 if (attn & HW_INTERRUT_ASSERT_SET_2) {
2713 int port = BP_PORT(bp);
2716 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 :
2717 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2);
2719 val = REG_RD(bp, reg_offset);
2720 val &= ~(attn & HW_INTERRUT_ASSERT_SET_2);
2721 REG_WR(bp, reg_offset, val);
2723 BNX2X_ERR("FATAL HW block attention set2 0x%x\n",
2724 (attn & HW_INTERRUT_ASSERT_SET_2));
2729 static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
2733 if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) {
2735 if (attn & BNX2X_PMF_LINK_ASSERT) {
2736 int func = BP_FUNC(bp);
2738 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
2739 bnx2x__link_status_update(bp);
2740 if (SHMEM_RD(bp, func_mb[func].drv_status) &
2742 bnx2x_pmf_update(bp);
2744 } else if (attn & BNX2X_MC_ASSERT_BITS) {
2746 BNX2X_ERR("MC assert!\n");
2747 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_10, 0);
2748 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_9, 0);
2749 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_8, 0);
2750 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_7, 0);
2753 } else if (attn & BNX2X_MCP_ASSERT) {
2755 BNX2X_ERR("MCP assert!\n");
2756 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_11, 0);
2760 BNX2X_ERR("Unknown HW assert! (attn 0x%x)\n", attn);
2763 if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) {
2764 BNX2X_ERR("LATCHED attention 0x%08x (masked)\n", attn);
2765 if (attn & BNX2X_GRC_TIMEOUT) {
2766 val = CHIP_IS_E1H(bp) ?
2767 REG_RD(bp, MISC_REG_GRC_TIMEOUT_ATTN) : 0;
2768 BNX2X_ERR("GRC time-out 0x%08x\n", val);
2770 if (attn & BNX2X_GRC_RSV) {
2771 val = CHIP_IS_E1H(bp) ?
2772 REG_RD(bp, MISC_REG_GRC_RSV_ATTN) : 0;
2773 BNX2X_ERR("GRC reserved 0x%08x\n", val);
2775 REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff);
2779 static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
2781 struct attn_route attn;
2782 struct attn_route group_mask;
2783 int port = BP_PORT(bp);
2789 /* need to take HW lock because MCP or other port might also
2790 try to handle this event */
2791 bnx2x_acquire_alr(bp);
2793 attn.sig[0] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
2794 attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
2795 attn.sig[2] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
2796 attn.sig[3] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
2797 DP(NETIF_MSG_HW, "attn: %08x %08x %08x %08x\n",
2798 attn.sig[0], attn.sig[1], attn.sig[2], attn.sig[3]);
2800 for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
2801 if (deasserted & (1 << index)) {
2802 group_mask = bp->attn_group[index];
2804 DP(NETIF_MSG_HW, "group[%d]: %08x %08x %08x %08x\n",
2805 index, group_mask.sig[0], group_mask.sig[1],
2806 group_mask.sig[2], group_mask.sig[3]);
2808 bnx2x_attn_int_deasserted3(bp,
2809 attn.sig[3] & group_mask.sig[3]);
2810 bnx2x_attn_int_deasserted1(bp,
2811 attn.sig[1] & group_mask.sig[1]);
2812 bnx2x_attn_int_deasserted2(bp,
2813 attn.sig[2] & group_mask.sig[2]);
2814 bnx2x_attn_int_deasserted0(bp,
2815 attn.sig[0] & group_mask.sig[0]);
2817 if ((attn.sig[0] & group_mask.sig[0] &
2818 HW_PRTY_ASSERT_SET_0) ||
2819 (attn.sig[1] & group_mask.sig[1] &
2820 HW_PRTY_ASSERT_SET_1) ||
2821 (attn.sig[2] & group_mask.sig[2] &
2822 HW_PRTY_ASSERT_SET_2))
2823 BNX2X_ERR("FATAL HW block parity attention\n");
2827 bnx2x_release_alr(bp);
2829 reg_addr = (HC_REG_COMMAND_REG + port*32 + COMMAND_REG_ATTN_BITS_CLR);
2831 val = ~deasserted;
2832 DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
2833 val, reg_addr);
2834 REG_WR(bp, reg_addr, val);
2836 if (~bp->attn_state & deasserted)
2837 BNX2X_ERR("IGU ERROR\n");
2839 reg_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
2840 MISC_REG_AEU_MASK_ATTN_FUNC_0;
2842 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2843 aeu_mask = REG_RD(bp, reg_addr);
2845 DP(NETIF_MSG_HW, "aeu_mask %x newly deasserted %x\n",
2846 aeu_mask, deasserted);
2847 aeu_mask |= (deasserted & 0xff);
2848 DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
2850 REG_WR(bp, reg_addr, aeu_mask);
2851 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2853 DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
2854 bp->attn_state &= ~deasserted;
2855 DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
2858 static void bnx2x_attn_int(struct bnx2x *bp)
2860 /* read local copy of bits */
2861 u32 attn_bits = le32_to_cpu(bp->def_status_blk->atten_status_block.
2862 attn_bits);
2863 u32 attn_ack = le32_to_cpu(bp->def_status_blk->atten_status_block.
2864 attn_bits_ack);
2865 u32 attn_state = bp->attn_state;
2867 /* look for changed bits */
2868 u32 asserted = attn_bits & ~attn_ack & ~attn_state;
2869 u32 deasserted = ~attn_bits & attn_ack & attn_state;
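/*
 * Per-bit truth table for the two masks above: a bit is newly
 * "asserted" when the chip raises it before it was acked or recorded
 * (attn_bits=1, attn_ack=0, attn_state=0), and newly "deasserted"
 * when the chip has dropped it while it is still acked and recorded
 * (attn_bits=0, attn_ack=1, attn_state=1).
 */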
2872 "attn_bits %x attn_ack %x asserted %x deasserted %x\n",
2873 attn_bits, attn_ack, asserted, deasserted);
2875 if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state))
2876 BNX2X_ERR("BAD attention state\n");
2878 /* handle bits that were raised */
2880 bnx2x_attn_int_asserted(bp, asserted);
2883 bnx2x_attn_int_deasserted(bp, deasserted);
2886 static void bnx2x_sp_task(struct work_struct *work)
2888 struct bnx2x *bp = container_of(work, struct bnx2x, sp_task.work);
2892 /* Return here if interrupt is disabled */
2893 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
2894 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
2895 return;
2898 status = bnx2x_update_dsb_idx(bp);
2899 /* if (status == 0) */
2900 /* BNX2X_ERR("spurious slowpath interrupt!\n"); */
2902 DP(NETIF_MSG_INTR, "got a slowpath interrupt (updated %x)\n", status);
2908 bnx2x_ack_sb(bp, DEF_SB_ID, ATTENTION_ID, le16_to_cpu(bp->def_att_idx),
2909 IGU_INT_NOP, 1);
2910 bnx2x_ack_sb(bp, DEF_SB_ID, USTORM_ID, le16_to_cpu(bp->def_u_idx),
2911 IGU_INT_NOP, 1);
2912 bnx2x_ack_sb(bp, DEF_SB_ID, CSTORM_ID, le16_to_cpu(bp->def_c_idx),
2913 IGU_INT_NOP, 1);
2914 bnx2x_ack_sb(bp, DEF_SB_ID, XSTORM_ID, le16_to_cpu(bp->def_x_idx),
2915 IGU_INT_NOP, 1);
2916 bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, le16_to_cpu(bp->def_t_idx),
2917 IGU_INT_ENABLE, 1);
2921 static irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
2923 struct net_device *dev = dev_instance;
2924 struct bnx2x *bp = netdev_priv(dev);
2926 /* Return here if interrupt is disabled */
2927 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
2928 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
2929 return IRQ_HANDLED;
2932 bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, 0, IGU_INT_DISABLE, 0);
2934 #ifdef BNX2X_STOP_ON_ERROR
2935 if (unlikely(bp->panic))
2936 return IRQ_HANDLED;
2939 queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
2944 /* end of slow path */
2948 /****************************************************************************
2950 ****************************************************************************/
2952 /* sum[hi:lo] += add[hi:lo] */
2953 #define ADD_64(s_hi, a_hi, s_lo, a_lo) \
2955 s_lo += a_lo; \
2956 s_hi += a_hi + ((s_lo < a_lo) ? 1 : 0); \
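/*
 * Example of the carry logic: adding 0x20 to the pair {0, 0xfffffff8}
 * wraps the low word, and the (s_lo < a_lo) test supplies the carry:
 *
 *	u32 s_hi = 0, s_lo = 0xfffffff8;
 *	ADD_64(s_hi, 0, s_lo, 0x20);	-- s_hi == 1, s_lo == 0x18
 */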
2959 /* difference = minuend - subtrahend */
2960 #define DIFF_64(d_hi, m_hi, s_hi, d_lo, m_lo, s_lo) \
2962 if (m_lo < s_lo) { \
2964 d_hi = m_hi - s_hi; \
2966 /* we can 'loan' 1 */ \
2968 d_lo = m_lo + (UINT_MAX - s_lo) + 1; \
2970 /* m_hi <= s_hi */ \
2975 /* m_lo >= s_lo */ \
2976 if (m_hi < s_hi) { \
2980 /* m_hi >= s_hi */ \
2981 d_hi = m_hi - s_hi; \
2982 d_lo = m_lo - s_lo; \
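/*
 * Example of the borrow logic: subtracting {0, 0x10} from {1, 0x8}
 * underflows the low word, so the macro "loans" 1 from the high word:
 *
 *	DIFF_64(d_hi, 1, 0, d_lo, 0x8, 0x10);
 *		-- d_hi == 0, d_lo == 0x8 + (UINT_MAX - 0x10) + 1,
 *		   i.e. the pair {0, 0xfffffff8}
 *
 * When the subtrahend exceeds the minuend, the difference clamps to
 * {0, 0} rather than going negative.
 */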
2987 #define UPDATE_STAT64(s, t) \
2989 DIFF_64(diff.hi, new->s##_hi, pstats->mac_stx[0].t##_hi, \
2990 diff.lo, new->s##_lo, pstats->mac_stx[0].t##_lo); \
2991 pstats->mac_stx[0].t##_hi = new->s##_hi; \
2992 pstats->mac_stx[0].t##_lo = new->s##_lo; \
2993 ADD_64(pstats->mac_stx[1].t##_hi, diff.hi, \
2994 pstats->mac_stx[1].t##_lo, diff.lo); \
2997 #define UPDATE_STAT64_NIG(s, t) \
2999 DIFF_64(diff.hi, new->s##_hi, old->s##_hi, \
3000 diff.lo, new->s##_lo, old->s##_lo); \
3001 ADD_64(estats->t##_hi, diff.hi, \
3002 estats->t##_lo, diff.lo); \
3005 /* sum[hi:lo] += add */
3006 #define ADD_EXTEND_64(s_hi, s_lo, a) \
3008 s_lo += a; \
3009 s_hi += (s_lo < a) ? 1 : 0; \
3012 #define UPDATE_EXTEND_STAT(s) \
3014 ADD_EXTEND_64(pstats->mac_stx[1].s##_hi, \
3015 pstats->mac_stx[1].s##_lo, \
3016 new->s); \
3019 #define UPDATE_EXTEND_TSTAT(s, t) \
3021 diff = le32_to_cpu(tclient->s) - le32_to_cpu(old_tclient->s); \
3022 old_tclient->s = tclient->s; \
3023 ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3026 #define UPDATE_EXTEND_USTAT(s, t) \
3028 diff = le32_to_cpu(uclient->s) - le32_to_cpu(old_uclient->s); \
3029 old_uclient->s = uclient->s; \
3030 ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3033 #define UPDATE_EXTEND_XSTAT(s, t) \
3035 diff = le32_to_cpu(xclient->s) - le32_to_cpu(old_xclient->s); \
3036 old_xclient->s = xclient->s; \
3037 ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3040 /* minuend -= subtrahend */
3041 #define SUB_64(m_hi, s_hi, m_lo, s_lo) \
3043 DIFF_64(m_hi, m_hi, s_hi, m_lo, m_lo, s_lo); \
3046 /* minuend[hi:lo] -= subtrahend */
3047 #define SUB_EXTEND_64(m_hi, m_lo, s) \
3049 SUB_64(m_hi, 0, m_lo, s); \
3052 #define SUB_EXTEND_USTAT(s, t) \
3054 diff = le32_to_cpu(uclient->s) - le32_to_cpu(old_uclient->s); \
3055 SUB_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
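/*
 * Note on the UPDATE_EXTEND_* macros above: they rely on unsigned u32
 * wraparound.  If a storm counter rolls over from 0xfffffffe to
 * 0x00000001, the subtraction
 *	diff = 0x00000001 - 0xfffffffe = 3	(mod 2^32)
 * still yields the true delta of 3 increments, which is then folded
 * into the 64-bit accumulator by ADD_EXTEND_64().
 */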
3059 * General service functions
3062 static inline long bnx2x_hilo(u32 *hiref)
3063 {
3064 u32 lo = *(hiref + 1);
3065 #if (BITS_PER_LONG == 64)
3066 u32 hi = *hiref;
3068 return HILO_U64(hi, lo);
3069 #else
3070 return lo;
3071 #endif
3072 }
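/*
 * The _hi/_lo statistics pairs are laid out high word first, so
 * callers pass the address of the _hi member and bnx2x_hilo() reads
 * the adjacent _lo, e.g. in bnx2x_net_stats_update() below:
 *	nstats->rx_bytes = bnx2x_hilo(&estats->total_bytes_received_hi);
 * On 32-bit kernels only the low 32 bits are returned.
 */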
3075 * Init service functions
3078 static void bnx2x_storm_stats_post(struct bnx2x *bp)
3080 if (!bp->stats_pending) {
3081 struct eth_query_ramrod_data ramrod_data = {0};
3084 ramrod_data.drv_counter = bp->stats_counter++;
3085 ramrod_data.collect_port = bp->port.pmf ? 1 : 0;
3086 for_each_queue(bp, i)
3087 ramrod_data.ctr_id_vector |= (1 << bp->fp[i].cl_id);
3089 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_STAT_QUERY, 0,
3090 ((u32 *)&ramrod_data)[1],
3091 ((u32 *)&ramrod_data)[0], 0);
3093 /* stats ramrod has its own slot on the spq */
3095 bp->stats_pending = 1;
3100 static void bnx2x_stats_init(struct bnx2x *bp)
3102 int port = BP_PORT(bp);
3105 bp->stats_pending = 0;
3106 bp->executer_idx = 0;
3107 bp->stats_counter = 0;
3111 bp->port.port_stx = SHMEM_RD(bp, port_mb[port].port_stx);
3113 bp->port.port_stx = 0;
3114 DP(BNX2X_MSG_STATS, "port_stx 0x%x\n", bp->port.port_stx);
3116 memset(&(bp->port.old_nig_stats), 0, sizeof(struct nig_stats));
3117 bp->port.old_nig_stats.brb_discard =
3118 REG_RD(bp, NIG_REG_STAT0_BRB_DISCARD + port*0x38);
3119 bp->port.old_nig_stats.brb_truncate =
3120 REG_RD(bp, NIG_REG_STAT0_BRB_TRUNCATE + port*0x38);
3121 REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT0 + port*0x50,
3122 &(bp->port.old_nig_stats.egress_mac_pkt0_lo), 2);
3123 REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT1 + port*0x50,
3124 &(bp->port.old_nig_stats.egress_mac_pkt1_lo), 2);
3126 /* function stats */
3127 for_each_queue(bp, i) {
3128 struct bnx2x_fastpath *fp = &bp->fp[i];
3130 memset(&fp->old_tclient, 0,
3131 sizeof(struct tstorm_per_client_stats));
3132 memset(&fp->old_uclient, 0,
3133 sizeof(struct ustorm_per_client_stats));
3134 memset(&fp->old_xclient, 0,
3135 sizeof(struct xstorm_per_client_stats));
3136 memset(&fp->eth_q_stats, 0, sizeof(struct bnx2x_eth_q_stats));
3139 memset(&bp->dev->stats, 0, sizeof(struct net_device_stats));
3140 memset(&bp->eth_stats, 0, sizeof(struct bnx2x_eth_stats));
3142 bp->stats_state = STATS_STATE_DISABLED;
3143 if (IS_E1HMF(bp) && bp->port.pmf && bp->port.port_stx)
3144 bnx2x_stats_handle(bp, STATS_EVENT_PMF);
3147 static void bnx2x_hw_stats_post(struct bnx2x *bp)
3149 struct dmae_command *dmae = &bp->stats_dmae;
3150 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3152 *stats_comp = DMAE_COMP_VAL;
3153 if (CHIP_REV_IS_SLOW(bp))
3157 if (bp->executer_idx) {
3158 int loader_idx = PMF_DMAE_C(bp);
3160 memset(dmae, 0, sizeof(struct dmae_command));
3162 dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3163 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3164 DMAE_CMD_DST_RESET |
3165 #ifdef __BIG_ENDIAN
3166 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3167 #else
3168 DMAE_CMD_ENDIANITY_DW_SWAP |
3169 #endif
3170 (BP_PORT(bp) ? DMAE_CMD_PORT_1 :
3172 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3173 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, dmae[0]));
3174 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, dmae[0]));
3175 dmae->dst_addr_lo = (DMAE_REG_CMD_MEM +
3176 sizeof(struct dmae_command) *
3177 (loader_idx + 1)) >> 2;
3178 dmae->dst_addr_hi = 0;
3179 dmae->len = sizeof(struct dmae_command) >> 2;
3182 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx + 1] >> 2;
3183 dmae->comp_addr_hi = 0;
3187 bnx2x_post_dmae(bp, dmae, loader_idx);
3189 } else if (bp->func_stx) {
3191 bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
3195 static int bnx2x_stats_comp(struct bnx2x *bp)
3197 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3201 while (*stats_comp != DMAE_COMP_VAL) {
3203 BNX2X_ERR("timeout waiting for stats finished\n");
3213 * Statistics service functions
3216 static void bnx2x_stats_pmf_update(struct bnx2x *bp)
3218 struct dmae_command *dmae;
3220 int loader_idx = PMF_DMAE_C(bp);
3221 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3224 if (!IS_E1HMF(bp) || !bp->port.pmf || !bp->port.port_stx) {
3225 BNX2X_ERR("BUG!\n");
3229 bp->executer_idx = 0;
3231 opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3233 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3234 #ifdef __BIG_ENDIAN
3235 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3236 #else
3237 DMAE_CMD_ENDIANITY_DW_SWAP |
3238 #endif
3239 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3240 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3242 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3243 dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
3244 dmae->src_addr_lo = bp->port.port_stx >> 2;
3245 dmae->src_addr_hi = 0;
3246 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3247 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3248 dmae->len = DMAE_LEN32_RD_MAX;
3249 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3250 dmae->comp_addr_hi = 0;
3253 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3254 dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
3255 dmae->src_addr_lo = (bp->port.port_stx >> 2) + DMAE_LEN32_RD_MAX;
3256 dmae->src_addr_hi = 0;
3257 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats) +
3258 DMAE_LEN32_RD_MAX * 4);
3259 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats) +
3260 DMAE_LEN32_RD_MAX * 4);
3261 dmae->len = (sizeof(struct host_port_stats) >> 2) - DMAE_LEN32_RD_MAX;
3262 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3263 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3264 dmae->comp_val = DMAE_COMP_VAL;
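/*
 * Sketch of the split above: a single DMAE read is capped at
 * DMAE_LEN32_RD_MAX dwords, so the port stats block is fetched with
 * two chained commands.  The first completes into a DMAE GO register,
 * kicking the next command, and only the last one writes
 * DMAE_COMP_VAL to the stats_comp word that bnx2x_stats_comp() polls.
 */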
3267 bnx2x_hw_stats_post(bp);
3268 bnx2x_stats_comp(bp);
3271 static void bnx2x_port_stats_init(struct bnx2x *bp)
3273 struct dmae_command *dmae;
3274 int port = BP_PORT(bp);
3275 int vn = BP_E1HVN(bp);
3277 int loader_idx = PMF_DMAE_C(bp);
3279 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3282 if (!bp->link_vars.link_up || !bp->port.pmf) {
3283 BNX2X_ERR("BUG!\n");
3287 bp->executer_idx = 0;
3290 opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3291 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3292 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3293 #ifdef __BIG_ENDIAN
3294 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3295 #else
3296 DMAE_CMD_ENDIANITY_DW_SWAP |
3297 #endif
3298 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3299 (vn << DMAE_CMD_E1HVN_SHIFT));
3301 if (bp->port.port_stx) {
3303 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3304 dmae->opcode = opcode;
3305 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3306 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3307 dmae->dst_addr_lo = bp->port.port_stx >> 2;
3308 dmae->dst_addr_hi = 0;
3309 dmae->len = sizeof(struct host_port_stats) >> 2;
3310 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3311 dmae->comp_addr_hi = 0;
3317 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3318 dmae->opcode = opcode;
3319 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
3320 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
3321 dmae->dst_addr_lo = bp->func_stx >> 2;
3322 dmae->dst_addr_hi = 0;
3323 dmae->len = sizeof(struct host_func_stats) >> 2;
3324 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3325 dmae->comp_addr_hi = 0;
3330 opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3331 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3332 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3333 #ifdef __BIG_ENDIAN
3334 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3335 #else
3336 DMAE_CMD_ENDIANITY_DW_SWAP |
3337 #endif
3338 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3339 (vn << DMAE_CMD_E1HVN_SHIFT));
3341 if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
3343 mac_addr = (port ? NIG_REG_INGRESS_BMAC1_MEM :
3344 NIG_REG_INGRESS_BMAC0_MEM);
3346 /* BIGMAC_REGISTER_TX_STAT_GTPKT ..
3347 BIGMAC_REGISTER_TX_STAT_GTBYT */
3348 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3349 dmae->opcode = opcode;
3350 dmae->src_addr_lo = (mac_addr +
3351 BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
3352 dmae->src_addr_hi = 0;
3353 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
3354 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
3355 dmae->len = (8 + BIGMAC_REGISTER_TX_STAT_GTBYT -
3356 BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
3357 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3358 dmae->comp_addr_hi = 0;
3361 /* BIGMAC_REGISTER_RX_STAT_GR64 ..
3362 BIGMAC_REGISTER_RX_STAT_GRIPJ */
3363 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3364 dmae->opcode = opcode;
3365 dmae->src_addr_lo = (mac_addr +
3366 BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
3367 dmae->src_addr_hi = 0;
3368 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
3369 offsetof(struct bmac_stats, rx_stat_gr64_lo));
3370 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
3371 offsetof(struct bmac_stats, rx_stat_gr64_lo));
3372 dmae->len = (8 + BIGMAC_REGISTER_RX_STAT_GRIPJ -
3373 BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
3374 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3375 dmae->comp_addr_hi = 0;
3378 } else if (bp->link_vars.mac_type == MAC_TYPE_EMAC) {
3380 mac_addr = (port ? GRCBASE_EMAC1 : GRCBASE_EMAC0);
3382 /* EMAC_REG_EMAC_RX_STAT_AC (EMAC_REG_EMAC_RX_STAT_AC_COUNT)*/
3383 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3384 dmae->opcode = opcode;
3385 dmae->src_addr_lo = (mac_addr +
3386 EMAC_REG_EMAC_RX_STAT_AC) >> 2;
3387 dmae->src_addr_hi = 0;
3388 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
3389 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
3390 dmae->len = EMAC_REG_EMAC_RX_STAT_AC_COUNT;
3391 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3392 dmae->comp_addr_hi = 0;
3395 /* EMAC_REG_EMAC_RX_STAT_AC_28 */
3396 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3397 dmae->opcode = opcode;
3398 dmae->src_addr_lo = (mac_addr +
3399 EMAC_REG_EMAC_RX_STAT_AC_28) >> 2;
3400 dmae->src_addr_hi = 0;
3401 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
3402 offsetof(struct emac_stats, rx_stat_falsecarriererrors));
3403 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
3404 offsetof(struct emac_stats, rx_stat_falsecarriererrors));
3406 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3407 dmae->comp_addr_hi = 0;
3410 /* EMAC_REG_EMAC_TX_STAT_AC (EMAC_REG_EMAC_TX_STAT_AC_COUNT)*/
3411 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3412 dmae->opcode = opcode;
3413 dmae->src_addr_lo = (mac_addr +
3414 EMAC_REG_EMAC_TX_STAT_AC) >> 2;
3415 dmae->src_addr_hi = 0;
3416 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
3417 offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
3418 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
3419 offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
3420 dmae->len = EMAC_REG_EMAC_TX_STAT_AC_COUNT;
3421 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3422 dmae->comp_addr_hi = 0;
3427 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3428 dmae->opcode = opcode;
3429 dmae->src_addr_lo = (port ? NIG_REG_STAT1_BRB_DISCARD :
3430 NIG_REG_STAT0_BRB_DISCARD) >> 2;
3431 dmae->src_addr_hi = 0;
3432 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats));
3433 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats));
3434 dmae->len = (sizeof(struct nig_stats) - 4*sizeof(u32)) >> 2;
3435 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3436 dmae->comp_addr_hi = 0;
3439 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3440 dmae->opcode = opcode;
3441 dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT0 :
3442 NIG_REG_STAT0_EGRESS_MAC_PKT0) >> 2;
3443 dmae->src_addr_hi = 0;
3444 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
3445 offsetof(struct nig_stats, egress_mac_pkt0_lo));
3446 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
3447 offsetof(struct nig_stats, egress_mac_pkt0_lo));
3448 dmae->len = (2*sizeof(u32)) >> 2;
3449 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3450 dmae->comp_addr_hi = 0;
3453 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3454 dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3455 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
3456 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3457 #ifdef __BIG_ENDIAN
3458 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3459 #else
3460 DMAE_CMD_ENDIANITY_DW_SWAP |
3461 #endif
3462 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3463 (vn << DMAE_CMD_E1HVN_SHIFT));
3464 dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT1 :
3465 NIG_REG_STAT0_EGRESS_MAC_PKT1) >> 2;
3466 dmae->src_addr_hi = 0;
3467 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
3468 offsetof(struct nig_stats, egress_mac_pkt1_lo));
3469 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
3470 offsetof(struct nig_stats, egress_mac_pkt1_lo));
3471 dmae->len = (2*sizeof(u32)) >> 2;
3472 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3473 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3474 dmae->comp_val = DMAE_COMP_VAL;
3479 static void bnx2x_func_stats_init(struct bnx2x *bp)
3481 struct dmae_command *dmae = &bp->stats_dmae;
3482 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3485 if (!bp->func_stx) {
3486 BNX2X_ERR("BUG!\n");
3490 bp->executer_idx = 0;
3491 memset(dmae, 0, sizeof(struct dmae_command));
3493 dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3494 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
3495 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3496 #ifdef __BIG_ENDIAN
3497 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3498 #else
3499 DMAE_CMD_ENDIANITY_DW_SWAP |
3500 #endif
3501 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3502 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3503 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
3504 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
3505 dmae->dst_addr_lo = bp->func_stx >> 2;
3506 dmae->dst_addr_hi = 0;
3507 dmae->len = sizeof(struct host_func_stats) >> 2;
3508 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3509 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3510 dmae->comp_val = DMAE_COMP_VAL;
3515 static void bnx2x_stats_start(struct bnx2x *bp)
3518 bnx2x_port_stats_init(bp);
3520 else if (bp->func_stx)
3521 bnx2x_func_stats_init(bp);
3523 bnx2x_hw_stats_post(bp);
3524 bnx2x_storm_stats_post(bp);
3527 static void bnx2x_stats_pmf_start(struct bnx2x *bp)
3529 bnx2x_stats_comp(bp);
3530 bnx2x_stats_pmf_update(bp);
3531 bnx2x_stats_start(bp);
3534 static void bnx2x_stats_restart(struct bnx2x *bp)
3536 bnx2x_stats_comp(bp);
3537 bnx2x_stats_start(bp);
3540 static void bnx2x_bmac_stats_update(struct bnx2x *bp)
3542 struct bmac_stats *new = bnx2x_sp(bp, mac_stats.bmac_stats);
3543 struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3544 struct bnx2x_eth_stats *estats = &bp->eth_stats;
3550 UPDATE_STAT64(rx_stat_grerb, rx_stat_ifhcinbadoctets);
3551 UPDATE_STAT64(rx_stat_grfcs, rx_stat_dot3statsfcserrors);
3552 UPDATE_STAT64(rx_stat_grund, rx_stat_etherstatsundersizepkts);
3553 UPDATE_STAT64(rx_stat_grovr, rx_stat_dot3statsframestoolong);
3554 UPDATE_STAT64(rx_stat_grfrg, rx_stat_etherstatsfragments);
3555 UPDATE_STAT64(rx_stat_grjbr, rx_stat_etherstatsjabbers);
3556 UPDATE_STAT64(rx_stat_grxcf, rx_stat_maccontrolframesreceived);
3557 UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffstateentered);
3558 UPDATE_STAT64(rx_stat_grxpf, rx_stat_bmac_xpf);
3559 UPDATE_STAT64(tx_stat_gtxpf, tx_stat_outxoffsent);
3560 UPDATE_STAT64(tx_stat_gtxpf, tx_stat_flowcontroldone);
3561 UPDATE_STAT64(tx_stat_gt64, tx_stat_etherstatspkts64octets);
3562 UPDATE_STAT64(tx_stat_gt127,
3563 tx_stat_etherstatspkts65octetsto127octets);
3564 UPDATE_STAT64(tx_stat_gt255,
3565 tx_stat_etherstatspkts128octetsto255octets);
3566 UPDATE_STAT64(tx_stat_gt511,
3567 tx_stat_etherstatspkts256octetsto511octets);
3568 UPDATE_STAT64(tx_stat_gt1023,
3569 tx_stat_etherstatspkts512octetsto1023octets);
3570 UPDATE_STAT64(tx_stat_gt1518,
3571 tx_stat_etherstatspkts1024octetsto1522octets);
3572 UPDATE_STAT64(tx_stat_gt2047, tx_stat_bmac_2047);
3573 UPDATE_STAT64(tx_stat_gt4095, tx_stat_bmac_4095);
3574 UPDATE_STAT64(tx_stat_gt9216, tx_stat_bmac_9216);
3575 UPDATE_STAT64(tx_stat_gt16383, tx_stat_bmac_16383);
3576 UPDATE_STAT64(tx_stat_gterr,
3577 tx_stat_dot3statsinternalmactransmiterrors);
3578 UPDATE_STAT64(tx_stat_gtufl, tx_stat_bmac_ufl);
3580 estats->pause_frames_received_hi =
3581 pstats->mac_stx[1].rx_stat_bmac_xpf_hi;
3582 estats->pause_frames_received_lo =
3583 pstats->mac_stx[1].rx_stat_bmac_xpf_lo;
3585 estats->pause_frames_sent_hi =
3586 pstats->mac_stx[1].tx_stat_outxoffsent_hi;
3587 estats->pause_frames_sent_lo =
3588 pstats->mac_stx[1].tx_stat_outxoffsent_lo;
3591 static void bnx2x_emac_stats_update(struct bnx2x *bp)
3593 struct emac_stats *new = bnx2x_sp(bp, mac_stats.emac_stats);
3594 struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3595 struct bnx2x_eth_stats *estats = &bp->eth_stats;
3597 UPDATE_EXTEND_STAT(rx_stat_ifhcinbadoctets);
3598 UPDATE_EXTEND_STAT(tx_stat_ifhcoutbadoctets);
3599 UPDATE_EXTEND_STAT(rx_stat_dot3statsfcserrors);
3600 UPDATE_EXTEND_STAT(rx_stat_dot3statsalignmenterrors);
3601 UPDATE_EXTEND_STAT(rx_stat_dot3statscarriersenseerrors);
3602 UPDATE_EXTEND_STAT(rx_stat_falsecarriererrors);
3603 UPDATE_EXTEND_STAT(rx_stat_etherstatsundersizepkts);
3604 UPDATE_EXTEND_STAT(rx_stat_dot3statsframestoolong);
3605 UPDATE_EXTEND_STAT(rx_stat_etherstatsfragments);
3606 UPDATE_EXTEND_STAT(rx_stat_etherstatsjabbers);
3607 UPDATE_EXTEND_STAT(rx_stat_maccontrolframesreceived);
3608 UPDATE_EXTEND_STAT(rx_stat_xoffstateentered);
3609 UPDATE_EXTEND_STAT(rx_stat_xonpauseframesreceived);
3610 UPDATE_EXTEND_STAT(rx_stat_xoffpauseframesreceived);
3611 UPDATE_EXTEND_STAT(tx_stat_outxonsent);
3612 UPDATE_EXTEND_STAT(tx_stat_outxoffsent);
3613 UPDATE_EXTEND_STAT(tx_stat_flowcontroldone);
3614 UPDATE_EXTEND_STAT(tx_stat_etherstatscollisions);
3615 UPDATE_EXTEND_STAT(tx_stat_dot3statssinglecollisionframes);
3616 UPDATE_EXTEND_STAT(tx_stat_dot3statsmultiplecollisionframes);
3617 UPDATE_EXTEND_STAT(tx_stat_dot3statsdeferredtransmissions);
3618 UPDATE_EXTEND_STAT(tx_stat_dot3statsexcessivecollisions);
3619 UPDATE_EXTEND_STAT(tx_stat_dot3statslatecollisions);
3620 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts64octets);
3621 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts65octetsto127octets);
3622 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts128octetsto255octets);
3623 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts256octetsto511octets);
3624 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts512octetsto1023octets);
3625 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts1024octetsto1522octets);
3626 UPDATE_EXTEND_STAT(tx_stat_etherstatspktsover1522octets);
3627 UPDATE_EXTEND_STAT(tx_stat_dot3statsinternalmactransmiterrors);
3629 estats->pause_frames_received_hi =
3630 pstats->mac_stx[1].rx_stat_xonpauseframesreceived_hi;
3631 estats->pause_frames_received_lo =
3632 pstats->mac_stx[1].rx_stat_xonpauseframesreceived_lo;
3633 ADD_64(estats->pause_frames_received_hi,
3634 pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_hi,
3635 estats->pause_frames_received_lo,
3636 pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_lo);
3638 estats->pause_frames_sent_hi =
3639 pstats->mac_stx[1].tx_stat_outxonsent_hi;
3640 estats->pause_frames_sent_lo =
3641 pstats->mac_stx[1].tx_stat_outxonsent_lo;
3642 ADD_64(estats->pause_frames_sent_hi,
3643 pstats->mac_stx[1].tx_stat_outxoffsent_hi,
3644 estats->pause_frames_sent_lo,
3645 pstats->mac_stx[1].tx_stat_outxoffsent_lo);
3648 static int bnx2x_hw_stats_update(struct bnx2x *bp)
3650 struct nig_stats *new = bnx2x_sp(bp, nig_stats);
3651 struct nig_stats *old = &(bp->port.old_nig_stats);
3652 struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3653 struct bnx2x_eth_stats *estats = &bp->eth_stats;
3660 if (bp->link_vars.mac_type == MAC_TYPE_BMAC)
3661 bnx2x_bmac_stats_update(bp);
3663 else if (bp->link_vars.mac_type == MAC_TYPE_EMAC)
3664 bnx2x_emac_stats_update(bp);
3666 else { /* unreached */
3667 BNX2X_ERR("stats updated by dmae but no MAC active\n");
3671 ADD_EXTEND_64(pstats->brb_drop_hi, pstats->brb_drop_lo,
3672 new->brb_discard - old->brb_discard);
3673 ADD_EXTEND_64(estats->brb_truncate_hi, estats->brb_truncate_lo,
3674 new->brb_truncate - old->brb_truncate);
3676 UPDATE_STAT64_NIG(egress_mac_pkt0,
3677 etherstatspkts1024octetsto1522octets);
3678 UPDATE_STAT64_NIG(egress_mac_pkt1, etherstatspktsover1522octets);
3680 memcpy(old, new, sizeof(struct nig_stats));
3682 memcpy(&(estats->rx_stat_ifhcinbadoctets_hi), &(pstats->mac_stx[1]),
3683 sizeof(struct mac_stx));
3684 estats->brb_drop_hi = pstats->brb_drop_hi;
3685 estats->brb_drop_lo = pstats->brb_drop_lo;
3687 pstats->host_port_stats_start = ++pstats->host_port_stats_end;
3689 nig_timer_max = SHMEM_RD(bp, port_mb[BP_PORT(bp)].stat_nig_timer);
3690 if (nig_timer_max != estats->nig_timer_max) {
3691 estats->nig_timer_max = nig_timer_max;
3692 BNX2X_ERR("NIG timer max (%u)\n", estats->nig_timer_max);
3698 static int bnx2x_storm_stats_update(struct bnx2x *bp)
3700 struct eth_stats_query *stats = bnx2x_sp(bp, fw_stats);
3701 struct tstorm_per_port_stats *tport =
3702 &stats->tstorm_common.port_statistics;
3703 struct host_func_stats *fstats = bnx2x_sp(bp, func_stats);
3704 struct bnx2x_eth_stats *estats = &bp->eth_stats;
3707 memset(&(fstats->total_bytes_received_hi), 0,
3708 sizeof(struct host_func_stats) - 2*sizeof(u32));
3709 estats->error_bytes_received_hi = 0;
3710 estats->error_bytes_received_lo = 0;
3711 estats->etherstatsoverrsizepkts_hi = 0;
3712 estats->etherstatsoverrsizepkts_lo = 0;
3713 estats->no_buff_discard_hi = 0;
3714 estats->no_buff_discard_lo = 0;
3716 for_each_queue(bp, i) {
3717 struct bnx2x_fastpath *fp = &bp->fp[i];
3718 int cl_id = fp->cl_id;
3719 struct tstorm_per_client_stats *tclient =
3720 &stats->tstorm_common.client_statistics[cl_id];
3721 struct tstorm_per_client_stats *old_tclient = &fp->old_tclient;
3722 struct ustorm_per_client_stats *uclient =
3723 &stats->ustorm_common.client_statistics[cl_id];
3724 struct ustorm_per_client_stats *old_uclient = &fp->old_uclient;
3725 struct xstorm_per_client_stats *xclient =
3726 &stats->xstorm_common.client_statistics[cl_id];
3727 struct xstorm_per_client_stats *old_xclient = &fp->old_xclient;
3728 struct bnx2x_eth_q_stats *qstats = &fp->eth_q_stats;
3731 /* are storm stats valid? */
3732 if ((u16)(le16_to_cpu(xclient->stats_counter) + 1) !=
3733 bp->stats_counter) {
3734 DP(BNX2X_MSG_STATS, "[%d] stats not updated by xstorm"
3735 " xstorm counter (%d) != stats_counter (%d)\n",
3736 i, xclient->stats_counter, bp->stats_counter);
3737 return -1;
3739 if ((u16)(le16_to_cpu(tclient->stats_counter) + 1) !=
3740 bp->stats_counter) {
3741 DP(BNX2X_MSG_STATS, "[%d] stats not updated by tstorm"
3742 " tstorm counter (%d) != stats_counter (%d)\n",
3743 i, tclient->stats_counter, bp->stats_counter);
3744 return -1;
3746 if ((u16)(le16_to_cpu(uclient->stats_counter) + 1) !=
3747 bp->stats_counter) {
3748 DP(BNX2X_MSG_STATS, "[%d] stats not updated by ustorm"
3749 " ustorm counter (%d) != stats_counter (%d)\n",
3750 i, uclient->stats_counter, bp->stats_counter);
3751 return -1;
3754 qstats->total_bytes_received_hi =
3755 qstats->valid_bytes_received_hi =
3756 le32_to_cpu(tclient->total_rcv_bytes.hi);
3757 qstats->total_bytes_received_lo =
3758 qstats->valid_bytes_received_lo =
3759 le32_to_cpu(tclient->total_rcv_bytes.lo);
3761 qstats->error_bytes_received_hi =
3762 le32_to_cpu(tclient->rcv_error_bytes.hi);
3763 qstats->error_bytes_received_lo =
3764 le32_to_cpu(tclient->rcv_error_bytes.lo);
3766 ADD_64(qstats->total_bytes_received_hi,
3767 qstats->error_bytes_received_hi,
3768 qstats->total_bytes_received_lo,
3769 qstats->error_bytes_received_lo);
3771 UPDATE_EXTEND_TSTAT(rcv_unicast_pkts,
3772 total_unicast_packets_received);
3773 UPDATE_EXTEND_TSTAT(rcv_multicast_pkts,
3774 total_multicast_packets_received);
3775 UPDATE_EXTEND_TSTAT(rcv_broadcast_pkts,
3776 total_broadcast_packets_received);
3777 UPDATE_EXTEND_TSTAT(packets_too_big_discard,
3778 etherstatsoverrsizepkts);
3779 UPDATE_EXTEND_TSTAT(no_buff_discard, no_buff_discard);
3781 SUB_EXTEND_USTAT(ucast_no_buff_pkts,
3782 total_unicast_packets_received);
3783 SUB_EXTEND_USTAT(mcast_no_buff_pkts,
3784 total_multicast_packets_received);
3785 SUB_EXTEND_USTAT(bcast_no_buff_pkts,
3786 total_broadcast_packets_received);
3787 UPDATE_EXTEND_USTAT(ucast_no_buff_pkts, no_buff_discard);
3788 UPDATE_EXTEND_USTAT(mcast_no_buff_pkts, no_buff_discard);
3789 UPDATE_EXTEND_USTAT(bcast_no_buff_pkts, no_buff_discard);
3791 qstats->total_bytes_transmitted_hi =
3792 le32_to_cpu(xclient->total_sent_bytes.hi);
3793 qstats->total_bytes_transmitted_lo =
3794 le32_to_cpu(xclient->total_sent_bytes.lo);
3796 UPDATE_EXTEND_XSTAT(unicast_pkts_sent,
3797 total_unicast_packets_transmitted);
3798 UPDATE_EXTEND_XSTAT(multicast_pkts_sent,
3799 total_multicast_packets_transmitted);
3800 UPDATE_EXTEND_XSTAT(broadcast_pkts_sent,
3801 total_broadcast_packets_transmitted);
3803 old_tclient->checksum_discard = tclient->checksum_discard;
3804 old_tclient->ttl0_discard = tclient->ttl0_discard;
3806 ADD_64(fstats->total_bytes_received_hi,
3807 qstats->total_bytes_received_hi,
3808 fstats->total_bytes_received_lo,
3809 qstats->total_bytes_received_lo);
3810 ADD_64(fstats->total_bytes_transmitted_hi,
3811 qstats->total_bytes_transmitted_hi,
3812 fstats->total_bytes_transmitted_lo,
3813 qstats->total_bytes_transmitted_lo);
3814 ADD_64(fstats->total_unicast_packets_received_hi,
3815 qstats->total_unicast_packets_received_hi,
3816 fstats->total_unicast_packets_received_lo,
3817 qstats->total_unicast_packets_received_lo);
3818 ADD_64(fstats->total_multicast_packets_received_hi,
3819 qstats->total_multicast_packets_received_hi,
3820 fstats->total_multicast_packets_received_lo,
3821 qstats->total_multicast_packets_received_lo);
3822 ADD_64(fstats->total_broadcast_packets_received_hi,
3823 qstats->total_broadcast_packets_received_hi,
3824 fstats->total_broadcast_packets_received_lo,
3825 qstats->total_broadcast_packets_received_lo);
3826 ADD_64(fstats->total_unicast_packets_transmitted_hi,
3827 qstats->total_unicast_packets_transmitted_hi,
3828 fstats->total_unicast_packets_transmitted_lo,
3829 qstats->total_unicast_packets_transmitted_lo);
3830 ADD_64(fstats->total_multicast_packets_transmitted_hi,
3831 qstats->total_multicast_packets_transmitted_hi,
3832 fstats->total_multicast_packets_transmitted_lo,
3833 qstats->total_multicast_packets_transmitted_lo);
3834 ADD_64(fstats->total_broadcast_packets_transmitted_hi,
3835 qstats->total_broadcast_packets_transmitted_hi,
3836 fstats->total_broadcast_packets_transmitted_lo,
3837 qstats->total_broadcast_packets_transmitted_lo);
3838 ADD_64(fstats->valid_bytes_received_hi,
3839 qstats->valid_bytes_received_hi,
3840 fstats->valid_bytes_received_lo,
3841 qstats->valid_bytes_received_lo);
3843 ADD_64(estats->error_bytes_received_hi,
3844 qstats->error_bytes_received_hi,
3845 estats->error_bytes_received_lo,
3846 qstats->error_bytes_received_lo);
3847 ADD_64(estats->etherstatsoverrsizepkts_hi,
3848 qstats->etherstatsoverrsizepkts_hi,
3849 estats->etherstatsoverrsizepkts_lo,
3850 qstats->etherstatsoverrsizepkts_lo);
3851 ADD_64(estats->no_buff_discard_hi, qstats->no_buff_discard_hi,
3852 estats->no_buff_discard_lo, qstats->no_buff_discard_lo);
3855 ADD_64(fstats->total_bytes_received_hi,
3856 estats->rx_stat_ifhcinbadoctets_hi,
3857 fstats->total_bytes_received_lo,
3858 estats->rx_stat_ifhcinbadoctets_lo);
3860 memcpy(estats, &(fstats->total_bytes_received_hi),
3861 sizeof(struct host_func_stats) - 2*sizeof(u32));
3863 ADD_64(estats->etherstatsoverrsizepkts_hi,
3864 estats->rx_stat_dot3statsframestoolong_hi,
3865 estats->etherstatsoverrsizepkts_lo,
3866 estats->rx_stat_dot3statsframestoolong_lo);
3867 ADD_64(estats->error_bytes_received_hi,
3868 estats->rx_stat_ifhcinbadoctets_hi,
3869 estats->error_bytes_received_lo,
3870 estats->rx_stat_ifhcinbadoctets_lo);
3873 estats->mac_filter_discard =
3874 le32_to_cpu(tport->mac_filter_discard);
3875 estats->xxoverflow_discard =
3876 le32_to_cpu(tport->xxoverflow_discard);
3877 estats->brb_truncate_discard =
3878 le32_to_cpu(tport->brb_truncate_discard);
3879 estats->mac_discard = le32_to_cpu(tport->mac_discard);
3882 fstats->host_func_stats_start = ++fstats->host_func_stats_end;
3884 bp->stats_pending = 0;
3889 static void bnx2x_net_stats_update(struct bnx2x *bp)
3891 struct bnx2x_eth_stats *estats = &bp->eth_stats;
3892 struct net_device_stats *nstats = &bp->dev->stats;
3895 nstats->rx_packets =
3896 bnx2x_hilo(&estats->total_unicast_packets_received_hi) +
3897 bnx2x_hilo(&estats->total_multicast_packets_received_hi) +
3898 bnx2x_hilo(&estats->total_broadcast_packets_received_hi);
3900 nstats->tx_packets =
3901 bnx2x_hilo(&estats->total_unicast_packets_transmitted_hi) +
3902 bnx2x_hilo(&estats->total_multicast_packets_transmitted_hi) +
3903 bnx2x_hilo(&estats->total_broadcast_packets_transmitted_hi);
3905 nstats->rx_bytes = bnx2x_hilo(&estats->total_bytes_received_hi);
3907 nstats->tx_bytes = bnx2x_hilo(&estats->total_bytes_transmitted_hi);
3909 nstats->rx_dropped = estats->mac_discard;
3910 for_each_queue(bp, i)
3911 nstats->rx_dropped +=
3912 le32_to_cpu(bp->fp[i].old_tclient.checksum_discard);
3914 nstats->tx_dropped = 0;
3916 nstats->multicast =
3917 bnx2x_hilo(&estats->total_multicast_packets_received_hi);
3919 nstats->collisions =
3920 bnx2x_hilo(&estats->tx_stat_etherstatscollisions_hi);
3922 nstats->rx_length_errors =
3923 bnx2x_hilo(&estats->rx_stat_etherstatsundersizepkts_hi) +
3924 bnx2x_hilo(&estats->etherstatsoverrsizepkts_hi);
3925 nstats->rx_over_errors = bnx2x_hilo(&estats->brb_drop_hi) +
3926 bnx2x_hilo(&estats->brb_truncate_hi);
3927 nstats->rx_crc_errors =
3928 bnx2x_hilo(&estats->rx_stat_dot3statsfcserrors_hi);
3929 nstats->rx_frame_errors =
3930 bnx2x_hilo(&estats->rx_stat_dot3statsalignmenterrors_hi);
3931 nstats->rx_fifo_errors = bnx2x_hilo(&estats->no_buff_discard_hi);
3932 nstats->rx_missed_errors = estats->xxoverflow_discard;
3934 nstats->rx_errors = nstats->rx_length_errors +
3935 nstats->rx_over_errors +
3936 nstats->rx_crc_errors +
3937 nstats->rx_frame_errors +
3938 nstats->rx_fifo_errors +
3939 nstats->rx_missed_errors;
3941 nstats->tx_aborted_errors =
3942 bnx2x_hilo(&estats->tx_stat_dot3statslatecollisions_hi) +
3943 bnx2x_hilo(&estats->tx_stat_dot3statsexcessivecollisions_hi);
3944 nstats->tx_carrier_errors =
3945 bnx2x_hilo(&estats->rx_stat_dot3statscarriersenseerrors_hi);
3946 nstats->tx_fifo_errors = 0;
3947 nstats->tx_heartbeat_errors = 0;
3948 nstats->tx_window_errors = 0;
3950 nstats->tx_errors = nstats->tx_aborted_errors +
3951 nstats->tx_carrier_errors +
3952 bnx2x_hilo(&estats->tx_stat_dot3statsinternalmactransmiterrors_hi);
3955 static void bnx2x_drv_stats_update(struct bnx2x *bp)
3957 struct bnx2x_eth_stats *estats = &bp->eth_stats;
3960 estats->driver_xoff = 0;
3961 estats->rx_err_discard_pkt = 0;
3962 estats->rx_skb_alloc_failed = 0;
3963 estats->hw_csum_err = 0;
3964 for_each_queue(bp, i) {
3965 struct bnx2x_eth_q_stats *qstats = &bp->fp[i].eth_q_stats;
3967 estats->driver_xoff += qstats->driver_xoff;
3968 estats->rx_err_discard_pkt += qstats->rx_err_discard_pkt;
3969 estats->rx_skb_alloc_failed += qstats->rx_skb_alloc_failed;
3970 estats->hw_csum_err += qstats->hw_csum_err;
3974 static void bnx2x_stats_update(struct bnx2x *bp)
3976 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3978 if (*stats_comp != DMAE_COMP_VAL)
3979 return;
3982 bnx2x_hw_stats_update(bp);
3984 if (bnx2x_storm_stats_update(bp) && (bp->stats_pending++ == 3)) {
3985 BNX2X_ERR("storm stats were not updated for 3 times\n");
3990 bnx2x_net_stats_update(bp);
3991 bnx2x_drv_stats_update(bp);
3993 if (bp->msglevel & NETIF_MSG_TIMER) {
3994 struct tstorm_per_client_stats *old_tclient =
3995 &bp->fp->old_tclient;
3996 struct bnx2x_eth_q_stats *qstats = &bp->fp->eth_q_stats;
3997 struct bnx2x_eth_stats *estats = &bp->eth_stats;
3998 struct net_device_stats *nstats = &bp->dev->stats;
4001 printk(KERN_DEBUG "%s:\n", bp->dev->name);
4002 printk(KERN_DEBUG " tx avail (%4x) tx hc idx (%x)"
4004 bnx2x_tx_avail(bp->fp),
4005 le16_to_cpu(*bp->fp->tx_cons_sb), nstats->tx_packets);
4006 printk(KERN_DEBUG " rx usage (%4x) rx hc idx (%x)"
4008 (u16)(le16_to_cpu(*bp->fp->rx_cons_sb) -
4009 bp->fp->rx_comp_cons),
4010 le16_to_cpu(*bp->fp->rx_cons_sb), nstats->rx_packets);
4011 printk(KERN_DEBUG " %s (Xoff events %u) brb drops %u "
4012 "brb truncate %u\n",
4013 (netif_queue_stopped(bp->dev) ? "Xoff" : "Xon"),
4014 qstats->driver_xoff,
4015 estats->brb_drop_lo, estats->brb_truncate_lo);
4016 printk(KERN_DEBUG "tstats: checksum_discard %u "
4017 "packets_too_big_discard %lu no_buff_discard %lu "
4018 "mac_discard %u mac_filter_discard %u "
4019 "xxovrflow_discard %u brb_truncate_discard %u "
4020 "ttl0_discard %u\n",
4021 le32_to_cpu(old_tclient->checksum_discard),
4022 bnx2x_hilo(&qstats->etherstatsoverrsizepkts_hi),
4023 bnx2x_hilo(&qstats->no_buff_discard_hi),
4024 estats->mac_discard, estats->mac_filter_discard,
4025 estats->xxoverflow_discard, estats->brb_truncate_discard,
4026 le32_to_cpu(old_tclient->ttl0_discard));
4028 for_each_queue(bp, i) {
4029 printk(KERN_DEBUG "[%d]: %lu\t%lu\t%lu\n", i,
4030 bnx2x_fp(bp, i, tx_pkt),
4031 bnx2x_fp(bp, i, rx_pkt),
4032 bnx2x_fp(bp, i, rx_calls));
4036 bnx2x_hw_stats_post(bp);
4037 bnx2x_storm_stats_post(bp);
4040 static void bnx2x_port_stats_stop(struct bnx2x *bp)
4042 struct dmae_command *dmae;
4044 int loader_idx = PMF_DMAE_C(bp);
4045 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
4047 bp->executer_idx = 0;
4049 opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
4051 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
4052 #ifdef __BIG_ENDIAN
4053 DMAE_CMD_ENDIANITY_B_DW_SWAP |
4054 #else
4055 DMAE_CMD_ENDIANITY_DW_SWAP |
4056 #endif
4057 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
4058 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
4060 if (bp->port.port_stx) {
4062 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4063 if (bp->func_stx)
4064 dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
4065 else
4066 dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
4067 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
4068 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
4069 dmae->dst_addr_lo = bp->port.port_stx >> 2;
4070 dmae->dst_addr_hi = 0;
4071 dmae->len = sizeof(struct host_port_stats) >> 2;
4072 if (bp->func_stx) {
4073 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
4074 dmae->comp_addr_hi = 0;
4075 dmae->comp_val = 1;
4076 } else {
4077 dmae->comp_addr_lo =
4078 U64_LO(bnx2x_sp_mapping(bp, stats_comp));
4079 dmae->comp_addr_hi =
4080 U64_HI(bnx2x_sp_mapping(bp, stats_comp));
4081 dmae->comp_val = DMAE_COMP_VAL;
4089 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4090 dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
4091 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
4092 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
4093 dmae->dst_addr_lo = bp->func_stx >> 2;
4094 dmae->dst_addr_hi = 0;
4095 dmae->len = sizeof(struct host_func_stats) >> 2;
4096 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
4097 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
4098 dmae->comp_val = DMAE_COMP_VAL;
4104 static void bnx2x_stats_stop(struct bnx2x *bp)
4108 bnx2x_stats_comp(bp);
4111 update = (bnx2x_hw_stats_update(bp) == 0);
4113 update |= (bnx2x_storm_stats_update(bp) == 0);
4116 bnx2x_net_stats_update(bp);
4119 bnx2x_port_stats_stop(bp);
4121 bnx2x_hw_stats_post(bp);
4122 bnx2x_stats_comp(bp);
4126 static void bnx2x_stats_do_nothing(struct bnx2x *bp)
4130 static const struct {
4131 void (*action)(struct bnx2x *bp);
4132 enum bnx2x_stats_state next_state;
4133 } bnx2x_stats_stm[STATS_STATE_MAX][STATS_EVENT_MAX] = {
4136 /* DISABLED PMF */ {bnx2x_stats_pmf_update, STATS_STATE_DISABLED},
4137 /* LINK_UP */ {bnx2x_stats_start, STATS_STATE_ENABLED},
4138 /* UPDATE */ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED},
4139 /* STOP */ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED}
4142 /* ENABLED PMF */ {bnx2x_stats_pmf_start, STATS_STATE_ENABLED},
4143 /* LINK_UP */ {bnx2x_stats_restart, STATS_STATE_ENABLED},
4144 /* UPDATE */ {bnx2x_stats_update, STATS_STATE_ENABLED},
4145 /* STOP */ {bnx2x_stats_stop, STATS_STATE_DISABLED}
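/*
 * Example walk through the table: STATS_EVENT_LINK_UP while in
 * STATS_STATE_DISABLED runs bnx2x_stats_start() and moves the machine
 * to STATS_STATE_ENABLED; a later STATS_EVENT_STOP runs
 * bnx2x_stats_stop() and returns it to STATS_STATE_DISABLED.
 */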
4149 static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event)
4151 enum bnx2x_stats_state state = bp->stats_state;
4153 bnx2x_stats_stm[state][event].action(bp);
4154 bp->stats_state = bnx2x_stats_stm[state][event].next_state;
4156 if ((event != STATS_EVENT_UPDATE) || (bp->msglevel & NETIF_MSG_TIMER))
4157 DP(BNX2X_MSG_STATS, "state %d -> event %d -> state %d\n",
4158 state, event, bp->stats_state);
static void bnx2x_timer(unsigned long data)
{
	struct bnx2x *bp = (struct bnx2x *) data;

	if (!netif_running(bp->dev))
		return;

	if (atomic_read(&bp->intr_sem) != 0)
		goto timer_restart;

	if (poll) {
		struct bnx2x_fastpath *fp = &bp->fp[0];
		int rc;

		bnx2x_tx_int(fp, 1000);
		rc = bnx2x_rx_int(fp, 1000);
	}

	if (!BP_NOMCP(bp)) {
		int func = BP_FUNC(bp);
		u32 drv_pulse;
		u32 mcp_pulse;

		++bp->fw_drv_pulse_wr_seq;
		bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK;
		/* TBD - add SYSTEM_TIME */
		drv_pulse = bp->fw_drv_pulse_wr_seq;
		SHMEM_WR(bp, func_mb[func].drv_pulse_mb, drv_pulse);

		mcp_pulse = (SHMEM_RD(bp, func_mb[func].mcp_pulse_mb) &
			     MCP_PULSE_SEQ_MASK);
		/* The delta between driver pulse and mcp response
		 * should be 1 (before mcp response) or 0 (after mcp response)
		 */
		if ((drv_pulse != mcp_pulse) &&
		    (drv_pulse != ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK))) {
			/* someone lost a heartbeat... */
			BNX2X_ERR("drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
				  drv_pulse, mcp_pulse);
		}
	}

	if ((bp->state == BNX2X_STATE_OPEN) ||
	    (bp->state == BNX2X_STATE_DISABLED))
		bnx2x_stats_handle(bp, STATS_EVENT_UPDATE);

timer_restart:
	mod_timer(&bp->timer, jiffies + bp->current_interval);
}
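/*
 * Pulse bookkeeping sketch (example values, assuming DRV_PULSE_SEQ_MASK
 * and MCP_PULSE_SEQ_MASK cover the same low bits): if the driver writes
 * drv_pulse = 0x000e and the MCP has already echoed 0x000e, the delta is
 * 0 and the check passes; if the MCP still shows 0x000d, drv_pulse equals
 * (mcp_pulse + 1) under the mask and the check also passes.  Any larger
 * gap means a missed heartbeat and is logged above.
 */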
/* end of Statistics */

/*
 * nic init service functions
 */
static void bnx2x_zero_sb(struct bnx2x *bp, int sb_id)
{
	int port = BP_PORT(bp);

	bnx2x_init_fill(bp, BAR_USTRORM_INTMEM +
			USTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), 0,
			sizeof(struct ustorm_status_block)/4);
	bnx2x_init_fill(bp, BAR_CSTRORM_INTMEM +
			CSTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), 0,
			sizeof(struct cstorm_status_block)/4);
}
static void bnx2x_init_sb(struct bnx2x *bp, struct host_status_block *sb,
			  dma_addr_t mapping, int sb_id)
{
	int port = BP_PORT(bp);
	int func = BP_FUNC(bp);
	int index;
	u64 section;

	/* USTORM */
	section = ((u64)mapping) + offsetof(struct host_status_block,
					    u_status_block);
	sb->u_status_block.status_block_id = sb_id;

	REG_WR(bp, BAR_USTRORM_INTMEM +
	       USTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id), U64_LO(section));
	REG_WR(bp, BAR_USTRORM_INTMEM +
	       ((USTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id)) + 4),
	       U64_HI(section));
	REG_WR8(bp, BAR_USTRORM_INTMEM + FP_USB_FUNC_OFF +
		USTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), func);

	for (index = 0; index < HC_USTORM_SB_NUM_INDICES; index++)
		REG_WR16(bp, BAR_USTRORM_INTMEM +
			 USTORM_SB_HC_DISABLE_OFFSET(port, sb_id, index), 1);

	/* CSTORM */
	section = ((u64)mapping) + offsetof(struct host_status_block,
					    c_status_block);
	sb->c_status_block.status_block_id = sb_id;

	REG_WR(bp, BAR_CSTRORM_INTMEM +
	       CSTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id), U64_LO(section));
	REG_WR(bp, BAR_CSTRORM_INTMEM +
	       ((CSTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id)) + 4),
	       U64_HI(section));
	REG_WR8(bp, BAR_CSTRORM_INTMEM + FP_CSB_FUNC_OFF +
		CSTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), func);

	for (index = 0; index < HC_CSTORM_SB_NUM_INDICES; index++)
		REG_WR16(bp, BAR_CSTRORM_INTMEM +
			 CSTORM_SB_HC_DISABLE_OFFSET(port, sb_id, index), 1);

	bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
}
static void bnx2x_zero_def_sb(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);

	bnx2x_init_fill(bp, BAR_USTRORM_INTMEM +
			USTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
			sizeof(struct ustorm_def_status_block)/4);
	bnx2x_init_fill(bp, BAR_CSTRORM_INTMEM +
			CSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
			sizeof(struct cstorm_def_status_block)/4);
	bnx2x_init_fill(bp, BAR_XSTRORM_INTMEM +
			XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
			sizeof(struct xstorm_def_status_block)/4);
	bnx2x_init_fill(bp, BAR_TSTRORM_INTMEM +
			TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
			sizeof(struct tstorm_def_status_block)/4);
}
static void bnx2x_init_def_sb(struct bnx2x *bp,
			      struct host_def_status_block *def_sb,
			      dma_addr_t mapping, int sb_id)
{
	int port = BP_PORT(bp);
	int func = BP_FUNC(bp);
	int index, val, reg_offset;
	u64 section;

	/* ATTN */
	section = ((u64)mapping) + offsetof(struct host_def_status_block,
					    atten_status_block);
	def_sb->atten_status_block.status_block_id = sb_id;

	bp->attn_state = 0;

	reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
			     MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);

	for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
		bp->attn_group[index].sig[0] = REG_RD(bp,
						     reg_offset + 0x10*index);
		bp->attn_group[index].sig[1] = REG_RD(bp,
					       reg_offset + 0x4 + 0x10*index);
		bp->attn_group[index].sig[2] = REG_RD(bp,
					       reg_offset + 0x8 + 0x10*index);
		bp->attn_group[index].sig[3] = REG_RD(bp,
					       reg_offset + 0xc + 0x10*index);
	}

	reg_offset = (port ? HC_REG_ATTN_MSG1_ADDR_L :
			     HC_REG_ATTN_MSG0_ADDR_L);

	REG_WR(bp, reg_offset, U64_LO(section));
	REG_WR(bp, reg_offset + 4, U64_HI(section));

	reg_offset = (port ? HC_REG_ATTN_NUM_P1 : HC_REG_ATTN_NUM_P0);

	val = REG_RD(bp, reg_offset);
	val |= sb_id;
	REG_WR(bp, reg_offset, val);

	/* USTORM */
	section = ((u64)mapping) + offsetof(struct host_def_status_block,
					    u_def_status_block);
	def_sb->u_def_status_block.status_block_id = sb_id;

	REG_WR(bp, BAR_USTRORM_INTMEM +
	       USTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
	REG_WR(bp, BAR_USTRORM_INTMEM +
	       ((USTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
	       U64_HI(section));
	REG_WR8(bp, BAR_USTRORM_INTMEM + DEF_USB_FUNC_OFF +
		USTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);

	for (index = 0; index < HC_USTORM_DEF_SB_NUM_INDICES; index++)
		REG_WR16(bp, BAR_USTRORM_INTMEM +
			 USTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);

	/* CSTORM */
	section = ((u64)mapping) + offsetof(struct host_def_status_block,
					    c_def_status_block);
	def_sb->c_def_status_block.status_block_id = sb_id;

	REG_WR(bp, BAR_CSTRORM_INTMEM +
	       CSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
	REG_WR(bp, BAR_CSTRORM_INTMEM +
	       ((CSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
	       U64_HI(section));
	REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_CSB_FUNC_OFF +
		CSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);

	for (index = 0; index < HC_CSTORM_DEF_SB_NUM_INDICES; index++)
		REG_WR16(bp, BAR_CSTRORM_INTMEM +
			 CSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);

	/* TSTORM */
	section = ((u64)mapping) + offsetof(struct host_def_status_block,
					    t_def_status_block);
	def_sb->t_def_status_block.status_block_id = sb_id;

	REG_WR(bp, BAR_TSTRORM_INTMEM +
	       TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
	REG_WR(bp, BAR_TSTRORM_INTMEM +
	       ((TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
	       U64_HI(section));
	REG_WR8(bp, BAR_TSTRORM_INTMEM + DEF_TSB_FUNC_OFF +
		TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);

	for (index = 0; index < HC_TSTORM_DEF_SB_NUM_INDICES; index++)
		REG_WR16(bp, BAR_TSTRORM_INTMEM +
			 TSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);

	/* XSTORM */
	section = ((u64)mapping) + offsetof(struct host_def_status_block,
					    x_def_status_block);
	def_sb->x_def_status_block.status_block_id = sb_id;

	REG_WR(bp, BAR_XSTRORM_INTMEM +
	       XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
	REG_WR(bp, BAR_XSTRORM_INTMEM +
	       ((XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
	       U64_HI(section));
	REG_WR8(bp, BAR_XSTRORM_INTMEM + DEF_XSB_FUNC_OFF +
		XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);

	for (index = 0; index < HC_XSTORM_DEF_SB_NUM_INDICES; index++)
		REG_WR16(bp, BAR_XSTRORM_INTMEM +
			 XSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);

	bp->stats_pending = 0;
	bp->set_mac_pending = 0;

	bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
}
static void bnx2x_update_coalesce(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int i;

	for_each_queue(bp, i) {
		int sb_id = bp->fp[i].sb_id;

		/* HC_INDEX_U_ETH_RX_CQ_CONS */
		REG_WR8(bp, BAR_USTRORM_INTMEM +
			USTORM_SB_HC_TIMEOUT_OFFSET(port, sb_id,
						    U_SB_ETH_RX_CQ_INDEX),
			bp->rx_ticks/12);
		REG_WR16(bp, BAR_USTRORM_INTMEM +
			 USTORM_SB_HC_DISABLE_OFFSET(port, sb_id,
						     U_SB_ETH_RX_CQ_INDEX),
			 bp->rx_ticks ? 0 : 1);

		/* HC_INDEX_C_ETH_TX_CQ_CONS */
		REG_WR8(bp, BAR_CSTRORM_INTMEM +
			CSTORM_SB_HC_TIMEOUT_OFFSET(port, sb_id,
						    C_SB_ETH_TX_CQ_INDEX),
			bp->tx_ticks/12);
		REG_WR16(bp, BAR_CSTRORM_INTMEM +
			 CSTORM_SB_HC_DISABLE_OFFSET(port, sb_id,
						     C_SB_ETH_TX_CQ_INDEX),
			 bp->tx_ticks ? 0 : 1);
	}
}
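/*
 * The status-block timeout fields written above are in 12us hardware
 * units, hence the rx_ticks/12 and tx_ticks/12 scaling: e.g. a tick
 * value of 48us programs a timeout of 4.  A tick value of 0 also writes
 * 1 to the matching HC_DISABLE offset (the "ticks ? 0 : 1" stores),
 * which turns interrupt coalescing off for that index entirely.
 */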
static inline void bnx2x_free_tpa_pool(struct bnx2x *bp,
				       struct bnx2x_fastpath *fp, int last)
{
	int i;

	for (i = 0; i < last; i++) {
		struct sw_rx_bd *rx_buf = &(fp->tpa_pool[i]);
		struct sk_buff *skb = rx_buf->skb;

		if (skb == NULL) {
			DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i);
			continue;
		}

		if (fp->tpa_state[i] == BNX2X_TPA_START)
			pci_unmap_single(bp->pdev,
					 pci_unmap_addr(rx_buf, mapping),
					 bp->rx_buf_size,
					 PCI_DMA_FROMDEVICE);

		dev_kfree_skb(skb);
		rx_buf->skb = NULL;
	}
}
static void bnx2x_init_rx_rings(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);
	int max_agg_queues = CHIP_IS_E1(bp) ? ETH_MAX_AGGREGATION_QUEUES_E1 :
					      ETH_MAX_AGGREGATION_QUEUES_E1H;
	u16 ring_prod, cqe_ring_prod;
	int i, j;

	bp->rx_buf_size = bp->dev->mtu + ETH_OVREHEAD + BNX2X_RX_ALIGN;
	DP(NETIF_MSG_IFUP,
	   "mtu %d  rx_buf_size %d\n", bp->dev->mtu, bp->rx_buf_size);

	if (bp->flags & TPA_ENABLE_FLAG) {

		for_each_rx_queue(bp, j) {
			struct bnx2x_fastpath *fp = &bp->fp[j];

			for (i = 0; i < max_agg_queues; i++) {
				fp->tpa_pool[i].skb =
				   netdev_alloc_skb(bp->dev, bp->rx_buf_size);
				if (!fp->tpa_pool[i].skb) {
					BNX2X_ERR("Failed to allocate TPA "
						  "skb pool for queue[%d] - "
						  "disabling TPA on this "
						  "queue!\n", j);
					bnx2x_free_tpa_pool(bp, fp, i);
					fp->disable_tpa = 1;
					break;
				}
				pci_unmap_addr_set((struct sw_rx_bd *)
							&bp->fp->tpa_pool[i],
						   mapping, 0);
				fp->tpa_state[i] = BNX2X_TPA_STOP;
			}
		}
	}

	for_each_rx_queue(bp, j) {
		struct bnx2x_fastpath *fp = &bp->fp[j];

		fp->rx_bd_cons = 0;
		fp->rx_cons_sb = BNX2X_RX_SB_INDEX;
		fp->rx_bd_cons_sb = BNX2X_RX_SB_BD_INDEX;

		/* "next page" elements initialization */
		/* SGE ring */
		for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
			struct eth_rx_sge *sge;

			sge = &fp->rx_sge_ring[RX_SGE_CNT * i - 2];
			sge->addr_hi =
				cpu_to_le32(U64_HI(fp->rx_sge_mapping +
					BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
			sge->addr_lo =
				cpu_to_le32(U64_LO(fp->rx_sge_mapping +
					BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
		}

		bnx2x_init_sge_ring_bit_mask(fp);

		/* RX BD ring */
		for (i = 1; i <= NUM_RX_RINGS; i++) {
			struct eth_rx_bd *rx_bd;

			rx_bd = &fp->rx_desc_ring[RX_DESC_CNT * i - 2];
			rx_bd->addr_hi =
				cpu_to_le32(U64_HI(fp->rx_desc_mapping +
					    BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
			rx_bd->addr_lo =
				cpu_to_le32(U64_LO(fp->rx_desc_mapping +
					    BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
		}

		/* CQ ring */
		for (i = 1; i <= NUM_RCQ_RINGS; i++) {
			struct eth_rx_cqe_next_page *nextpg;

			nextpg = (struct eth_rx_cqe_next_page *)
				&fp->rx_comp_ring[RCQ_DESC_CNT * i - 1];
			nextpg->addr_hi =
				cpu_to_le32(U64_HI(fp->rx_comp_mapping +
					   BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
			nextpg->addr_lo =
				cpu_to_le32(U64_LO(fp->rx_comp_mapping +
					   BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
		}

		/* Allocate SGEs and initialize the ring elements */
		for (i = 0, ring_prod = 0;
		     i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {

			if (bnx2x_alloc_rx_sge(bp, fp, ring_prod) < 0) {
				BNX2X_ERR("was only able to allocate "
					  "%d rx sges\n", i);
				BNX2X_ERR("disabling TPA for queue[%d]\n", j);
				/* Cleanup already allocated elements */
				bnx2x_free_rx_sge_range(bp, fp, ring_prod);
				bnx2x_free_tpa_pool(bp, fp, max_agg_queues);
				fp->disable_tpa = 1;
				ring_prod = 0;
				break;
			}
			ring_prod = NEXT_SGE_IDX(ring_prod);
		}
		fp->rx_sge_prod = ring_prod;

		/* Allocate BDs and initialize BD ring */
		fp->rx_comp_cons = 0;
		cqe_ring_prod = ring_prod = 0;
		for (i = 0; i < bp->rx_ring_size; i++) {
			if (bnx2x_alloc_rx_skb(bp, fp, ring_prod) < 0) {
				BNX2X_ERR("was only able to allocate "
					  "%d rx skbs on queue[%d]\n", i, j);
				fp->eth_q_stats.rx_skb_alloc_failed++;
				break;
			}
			ring_prod = NEXT_RX_IDX(ring_prod);
			cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
			WARN_ON(ring_prod <= i);
		}

		fp->rx_bd_prod = ring_prod;
		/* must not have more available CQEs than BDs */
		fp->rx_comp_prod = min((u16)(NUM_RCQ_RINGS*RCQ_DESC_CNT),
				       cqe_ring_prod);
		fp->rx_pkt = fp->rx_calls = 0;

		/* Warning!
		 * this will generate an interrupt (to the TSTORM)
		 * must only be done after chip is initialized
		 */
		bnx2x_update_rx_prod(bp, fp, ring_prod, fp->rx_comp_prod,
				     fp->rx_sge_prod);

		if (j != 0)
			continue;

		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
		       U64_LO(fp->rx_comp_mapping));
		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
		       U64_HI(fp->rx_comp_mapping));
	}
}
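/*
 * Ring layout note: each BD/SGE/CQE ring is a chain of BCM_PAGE_SIZE
 * pages whose last element(s) are reserved as "next page" pointers,
 * which is why the loops above fill index RX_DESC_CNT*i - 2 (or
 * RCQ_DESC_CNT*i - 1 for the CQ) and why producer advancing goes
 * through NEXT_RX_IDX()/NEXT_RCQ_IDX()/NEXT_SGE_IDX() rather than a
 * plain increment: those macros skip the reserved next-page slots.
 * The i % NUM_*_RINGS arithmetic makes the last page link back to the
 * first, closing the ring.
 */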
static void bnx2x_init_tx_ring(struct bnx2x *bp)
{
	int i, j;

	for_each_tx_queue(bp, j) {
		struct bnx2x_fastpath *fp = &bp->fp[j];

		for (i = 1; i <= NUM_TX_RINGS; i++) {
			struct eth_tx_bd *tx_bd =
				&fp->tx_desc_ring[TX_DESC_CNT * i - 1];

			tx_bd->addr_hi =
				cpu_to_le32(U64_HI(fp->tx_desc_mapping +
					    BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
			tx_bd->addr_lo =
				cpu_to_le32(U64_LO(fp->tx_desc_mapping +
					    BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
		}

		fp->tx_pkt_prod = 0;
		fp->tx_pkt_cons = 0;
		fp->tx_bd_prod = 0;
		fp->tx_bd_cons = 0;
		fp->tx_cons_sb = BNX2X_TX_SB_INDEX;
		fp->tx_pkt = 0;
	}
}
static void bnx2x_init_sp_ring(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);

	spin_lock_init(&bp->spq_lock);

	bp->spq_left = MAX_SPQ_PENDING;
	bp->spq_prod_idx = 0;
	bp->dsb_sp_prod = BNX2X_SP_DSB_INDEX;
	bp->spq_prod_bd = bp->spq;
	bp->spq_last_bd = bp->spq_prod_bd + MAX_SP_DESC_CNT;

	REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func),
	       U64_LO(bp->spq_mapping));
	REG_WR(bp,
	       XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func) + 4,
	       U64_HI(bp->spq_mapping));

	REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PROD_OFFSET(func),
	       bp->spq_prod_idx);
}
static void bnx2x_init_context(struct bnx2x *bp)
{
	int i;

	for_each_queue(bp, i) {
		struct eth_context *context = bnx2x_sp(bp, context[i].eth);
		struct bnx2x_fastpath *fp = &bp->fp[i];
		u8 cl_id = fp->cl_id;
		u8 sb_id = fp->sb_id;

		context->ustorm_st_context.common.sb_index_numbers =
						BNX2X_RX_SB_INDEX_NUM;
		context->ustorm_st_context.common.clientId = cl_id;
		context->ustorm_st_context.common.status_block_id = sb_id;
		context->ustorm_st_context.common.flags =
			(USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_MC_ALIGNMENT |
			 USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_STATISTICS);
		context->ustorm_st_context.common.statistics_counter_id =
						cl_id;
		context->ustorm_st_context.common.mc_alignment_log_size =
						BNX2X_RX_ALIGN_SHIFT;
		context->ustorm_st_context.common.bd_buff_size =
						bp->rx_buf_size;
		context->ustorm_st_context.common.bd_page_base_hi =
						U64_HI(fp->rx_desc_mapping);
		context->ustorm_st_context.common.bd_page_base_lo =
						U64_LO(fp->rx_desc_mapping);
		if (!fp->disable_tpa) {
			context->ustorm_st_context.common.flags |=
				(USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_TPA |
				 USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_SGE_RING);
			context->ustorm_st_context.common.sge_buff_size =
				(u16)min((u32)SGE_PAGE_SIZE*PAGES_PER_SGE,
					 (u32)0xffff);
			context->ustorm_st_context.common.sge_page_base_hi =
						U64_HI(fp->rx_sge_mapping);
			context->ustorm_st_context.common.sge_page_base_lo =
						U64_LO(fp->rx_sge_mapping);
		}

		context->ustorm_ag_context.cdu_usage =
			CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
					       CDU_REGION_NUMBER_UCM_AG,
					       ETH_CONNECTION_TYPE);

		context->xstorm_st_context.tx_bd_page_base_hi =
						U64_HI(fp->tx_desc_mapping);
		context->xstorm_st_context.tx_bd_page_base_lo =
						U64_LO(fp->tx_desc_mapping);
		context->xstorm_st_context.db_data_addr_hi =
						U64_HI(fp->tx_prods_mapping);
		context->xstorm_st_context.db_data_addr_lo =
						U64_LO(fp->tx_prods_mapping);
		context->xstorm_st_context.statistics_data = (cl_id |
				XSTORM_ETH_ST_CONTEXT_STATISTICS_ENABLE);
		context->cstorm_st_context.sb_index_number =
						C_SB_ETH_TX_CQ_INDEX;
		context->cstorm_st_context.status_block_id = sb_id;

		context->xstorm_ag_context.cdu_reserved =
			CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
					       CDU_REGION_NUMBER_XCM_AG,
					       ETH_CONNECTION_TYPE);
	}
}
static void bnx2x_init_ind_table(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);
	int i;

	if (bp->multi_mode == ETH_RSS_MODE_DISABLED)
		return;

	DP(NETIF_MSG_IFUP,
	   "Initializing indirection table  multi_mode %d\n", bp->multi_mode);
	for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++)
		REG_WR8(bp, BAR_TSTRORM_INTMEM +
			TSTORM_INDIRECTION_TABLE_OFFSET(func) + i,
			bp->fp->cl_id + (i % bp->num_rx_queues));
}
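/*
 * Worked example: with num_rx_queues = 4 and a leading client id of 0,
 * the loop above fills the TSTORM indirection table with the repeating
 * pattern 0,1,2,3,0,1,... so RSS hash results spread across the four rx
 * clients; with ETH_RSS_MODE_DISABLED the table is left untouched
 * (single-queue operation).
 */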
static void bnx2x_set_client_config(struct bnx2x *bp)
{
	struct tstorm_eth_client_config tstorm_client = {0};
	int port = BP_PORT(bp);
	int i;

	tstorm_client.mtu = bp->dev->mtu;
	tstorm_client.config_flags =
				(TSTORM_ETH_CLIENT_CONFIG_STATSITICS_ENABLE |
				 TSTORM_ETH_CLIENT_CONFIG_E1HOV_REM_ENABLE);
#ifdef BCM_VLAN
	if (bp->rx_mode && bp->vlgrp && (bp->flags & HW_VLAN_RX_FLAG)) {
		tstorm_client.config_flags |=
				TSTORM_ETH_CLIENT_CONFIG_VLAN_REM_ENABLE;
		DP(NETIF_MSG_IFUP, "vlan removal enabled\n");
	}
#endif

	if (bp->flags & TPA_ENABLE_FLAG) {
		tstorm_client.max_sges_for_packet =
			SGE_PAGE_ALIGN(tstorm_client.mtu) >> SGE_PAGE_SHIFT;
		tstorm_client.max_sges_for_packet =
			((tstorm_client.max_sges_for_packet +
			  PAGES_PER_SGE - 1) & (~(PAGES_PER_SGE - 1))) >>
			PAGES_PER_SGE_SHIFT;

		tstorm_client.config_flags |=
				TSTORM_ETH_CLIENT_CONFIG_ENABLE_SGE_RING;
	}

	for_each_queue(bp, i) {
		tstorm_client.statistics_counter_id = bp->fp[i].cl_id;

		REG_WR(bp, BAR_TSTRORM_INTMEM +
		       TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id),
		       ((u32 *)&tstorm_client)[0]);
		REG_WR(bp, BAR_TSTRORM_INTMEM +
		       TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id) + 4,
		       ((u32 *)&tstorm_client)[1]);
	}

	DP(BNX2X_MSG_OFF, "tstorm_client: 0x%08x 0x%08x\n",
	   ((u32 *)&tstorm_client)[0], ((u32 *)&tstorm_client)[1]);
}
static void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
{
	struct tstorm_eth_mac_filter_config tstorm_mac_filter = {0};
	int mode = bp->rx_mode;
	int mask = (1 << BP_L_ID(bp));
	int func = BP_FUNC(bp);
	int i;

	DP(NETIF_MSG_IFUP, "rx mode %d  mask 0x%x\n", mode, mask);

	switch (mode) {
	case BNX2X_RX_MODE_NONE: /* no Rx */
		tstorm_mac_filter.ucast_drop_all = mask;
		tstorm_mac_filter.mcast_drop_all = mask;
		tstorm_mac_filter.bcast_drop_all = mask;
		break;

	case BNX2X_RX_MODE_NORMAL:
		tstorm_mac_filter.bcast_accept_all = mask;
		break;

	case BNX2X_RX_MODE_ALLMULTI:
		tstorm_mac_filter.mcast_accept_all = mask;
		tstorm_mac_filter.bcast_accept_all = mask;
		break;

	case BNX2X_RX_MODE_PROMISC:
		tstorm_mac_filter.ucast_accept_all = mask;
		tstorm_mac_filter.mcast_accept_all = mask;
		tstorm_mac_filter.bcast_accept_all = mask;
		break;

	default:
		BNX2X_ERR("BAD rx mode (%d)\n", mode);
		break;
	}

	for (i = 0; i < sizeof(struct tstorm_eth_mac_filter_config)/4; i++) {
		REG_WR(bp, BAR_TSTRORM_INTMEM +
		       TSTORM_MAC_FILTER_CONFIG_OFFSET(func) + i * 4,
		       ((u32 *)&tstorm_mac_filter)[i]);

/*		DP(NETIF_MSG_IFUP, "tstorm_mac_filter[%d]: 0x%08x\n", i,
		   ((u32 *)&tstorm_mac_filter)[i]); */
	}

	if (mode != BNX2X_RX_MODE_NONE)
		bnx2x_set_client_config(bp);
}
static void bnx2x_init_internal_common(struct bnx2x *bp)
{
	int i;

	if (bp->flags & TPA_ENABLE_FLAG) {
		struct tstorm_eth_tpa_exist tpa = {0};

		tpa.tpa_exist = 1;

		REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_TPA_EXIST_OFFSET,
		       ((u32 *)&tpa)[0]);
		REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_TPA_EXIST_OFFSET + 4,
		       ((u32 *)&tpa)[1]);
	}

	/* Zero this manually as its initialization is
	   currently missing in the initTool */
	for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++)
		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_AGG_DATA_OFFSET + i * 4, 0);
}
static void bnx2x_init_internal_port(struct bnx2x *bp)
{
	int port = BP_PORT(bp);

	REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
	REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
	REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
	REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
}
/* Calculates the sum of vn_min_rates.
   It's needed for further normalizing of the min_rates.
   Returns:
     sum of vn_min_rates.
       or
     0 - if all the min_rates are 0.
     In the latter case the fairness algorithm should be deactivated.
     If not all min_rates are zero then those that are zeroes will be set to 1.
 */
static void bnx2x_calc_vn_weight_sum(struct bnx2x *bp)
{
	int all_zero = 1;
	int port = BP_PORT(bp);
	int vn;

	bp->vn_weight_sum = 0;
	for (vn = VN_0; vn < E1HVN_MAX; vn++) {
		int func = 2*vn + port;
		u32 vn_cfg =
			SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
		u32 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
				   FUNC_MF_CFG_MIN_BW_SHIFT) * 100;

		/* Skip hidden vns */
		if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE)
			continue;

		/* If min rate is zero - set it to 1 */
		if (!vn_min_rate)
			vn_min_rate = DEF_MIN_RATE;
		else
			all_zero = 0;

		bp->vn_weight_sum += vn_min_rate;
	}

	/* ... only if all min rates are zeros - disable fairness */
	if (all_zero)
		bp->vn_weight_sum = 0;
}
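/*
 * Worked example: two visible VNs configured for 25% and 75% min
 * bandwidth yield vn_min_rate values of 2500 and 7500 (the shmem field
 * is scaled by 100 above), so vn_weight_sum = 10000.  If every VN is
 * configured with 0, each is bumped to DEF_MIN_RATE for the loop but
 * all_zero stays set, and vn_weight_sum is forced back to 0 so the
 * fairness algorithm is disabled.
 */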
static void bnx2x_init_internal_func(struct bnx2x *bp)
{
	struct tstorm_eth_function_common_config tstorm_config = {0};
	struct stats_indication_flags stats_flags = {0};
	int port = BP_PORT(bp);
	int func = BP_FUNC(bp);
	int i, j;
	u32 offset;
	u16 max_agg_size;

	if (is_multi(bp)) {
		tstorm_config.config_flags = MULTI_FLAGS(bp);
		tstorm_config.rss_result_mask = MULTI_MASK;
	}
	if (IS_E1HMF(bp))
		tstorm_config.config_flags |=
				TSTORM_ETH_FUNCTION_COMMON_CONFIG_E1HOV_IN_CAM;

	tstorm_config.leading_client_id = BP_L_ID(bp);

	REG_WR(bp, BAR_TSTRORM_INTMEM +
	       TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(func),
	       (*(u32 *)&tstorm_config));

	bp->rx_mode = BNX2X_RX_MODE_NONE; /* no rx until link is up */
	bnx2x_set_storm_rx_mode(bp);

	for_each_queue(bp, i) {
		u8 cl_id = bp->fp[i].cl_id;

		/* reset xstorm per client statistics */
		offset = BAR_XSTRORM_INTMEM +
			 XSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
		for (j = 0;
		     j < sizeof(struct xstorm_per_client_stats) / 4; j++)
			REG_WR(bp, offset + j*4, 0);

		/* reset tstorm per client statistics */
		offset = BAR_TSTRORM_INTMEM +
			 TSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
		for (j = 0;
		     j < sizeof(struct tstorm_per_client_stats) / 4; j++)
			REG_WR(bp, offset + j*4, 0);

		/* reset ustorm per client statistics */
		offset = BAR_USTRORM_INTMEM +
			 USTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
		for (j = 0;
		     j < sizeof(struct ustorm_per_client_stats) / 4; j++)
			REG_WR(bp, offset + j*4, 0);
	}

	/* Init statistics related context */
	stats_flags.collect_eth = 1;

	REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func),
	       ((u32 *)&stats_flags)[0]);
	REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func) + 4,
	       ((u32 *)&stats_flags)[1]);

	REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func),
	       ((u32 *)&stats_flags)[0]);
	REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func) + 4,
	       ((u32 *)&stats_flags)[1]);

	REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(func),
	       ((u32 *)&stats_flags)[0]);
	REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(func) + 4,
	       ((u32 *)&stats_flags)[1]);

	REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func),
	       ((u32 *)&stats_flags)[0]);
	REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func) + 4,
	       ((u32 *)&stats_flags)[1]);

	REG_WR(bp, BAR_XSTRORM_INTMEM +
	       XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
	       U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
	REG_WR(bp, BAR_XSTRORM_INTMEM +
	       XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
	       U64_HI(bnx2x_sp_mapping(bp, fw_stats)));

	REG_WR(bp, BAR_TSTRORM_INTMEM +
	       TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
	       U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
	REG_WR(bp, BAR_TSTRORM_INTMEM +
	       TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
	       U64_HI(bnx2x_sp_mapping(bp, fw_stats)));

	REG_WR(bp, BAR_USTRORM_INTMEM +
	       USTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
	       U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
	REG_WR(bp, BAR_USTRORM_INTMEM +
	       USTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
	       U64_HI(bnx2x_sp_mapping(bp, fw_stats)));

	if (CHIP_IS_E1H(bp)) {
		REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNCTION_MODE_OFFSET,
			IS_E1HMF(bp));
		REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNCTION_MODE_OFFSET,
			IS_E1HMF(bp));
		REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNCTION_MODE_OFFSET,
			IS_E1HMF(bp));
		REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNCTION_MODE_OFFSET,
			IS_E1HMF(bp));

		REG_WR16(bp, BAR_XSTRORM_INTMEM + XSTORM_E1HOV_OFFSET(func),
			 bp->e1hov);
	}

	/* Init CQ ring mapping and aggregation size, the FW limit is 8 frags */
	max_agg_size =
		min((u32)(min((u32)8, (u32)MAX_SKB_FRAGS) *
			  SGE_PAGE_SIZE * PAGES_PER_SGE),
		    (u32)0xffff);
	for_each_rx_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_CQE_PAGE_BASE_OFFSET(port, fp->cl_id),
		       U64_LO(fp->rx_comp_mapping));
		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_CQE_PAGE_BASE_OFFSET(port, fp->cl_id) + 4,
		       U64_HI(fp->rx_comp_mapping));

		REG_WR16(bp, BAR_USTRORM_INTMEM +
			 USTORM_MAX_AGG_SIZE_OFFSET(port, fp->cl_id),
			 max_agg_size);
	}

	/* dropless flow control */
	if (CHIP_IS_E1H(bp)) {
		struct ustorm_eth_rx_pause_data_e1h rx_pause = {0};

		rx_pause.bd_thr_low = 250;
		rx_pause.cqe_thr_low = 250;
		rx_pause.sge_thr_low = 0;
		rx_pause.bd_thr_high = 350;
		rx_pause.cqe_thr_high = 350;
		rx_pause.sge_thr_high = 0;

		for_each_rx_queue(bp, i) {
			struct bnx2x_fastpath *fp = &bp->fp[i];

			if (!fp->disable_tpa) {
				rx_pause.sge_thr_low = 150;
				rx_pause.sge_thr_high = 250;
			}

			offset = BAR_USTRORM_INTMEM +
				 USTORM_ETH_RING_PAUSE_DATA_OFFSET(port,
								   fp->cl_id);
			for (j = 0;
			     j < sizeof(struct ustorm_eth_rx_pause_data_e1h)/4;
			     j++)
				REG_WR(bp, offset + j*4,
				       ((u32 *)&rx_pause)[j]);
		}
	}

	memset(&(bp->cmng), 0, sizeof(struct cmng_struct_per_port));

	/* Init rate shaping and fairness contexts */
	if (IS_E1HMF(bp)) {
		int vn;

		/* During init there is no active link
		   Until link is up, set link rate to 10Gbps */
		bp->link_vars.line_speed = SPEED_10000;
		bnx2x_init_port_minmax(bp);

		bnx2x_calc_vn_weight_sum(bp);

		for (vn = VN_0; vn < E1HVN_MAX; vn++)
			bnx2x_init_vn_minmax(bp, 2*vn + port);

		/* Enable rate shaping and fairness */
		bp->cmng.flags.cmng_enables =
					CMNG_FLAGS_PER_PORT_RATE_SHAPING_VN;
		if (bp->vn_weight_sum)
			bp->cmng.flags.cmng_enables |=
					CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
		else
			DP(NETIF_MSG_IFUP, "All MIN values are zeroes"
			   "  fairness will be disabled\n");
	} else {
		/* rate shaping and fairness are disabled */
		DP(NETIF_MSG_IFUP,
		   "single function mode  minmax will be disabled\n");
	}

	/* Store it to internal memory */
	if (bp->port.pmf)
		for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++)
			REG_WR(bp, BAR_XSTRORM_INTMEM +
			       XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i * 4,
			       ((u32 *)(&bp->cmng))[i]);
}
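/*
 * Aggregation sizing sketch (illustrative values, assuming 4K SGE pages
 * and PAGES_PER_SGE = 2): the 8-fragment FW limit above gives
 * min(8 * 4096 * 2, 0xffff) = 0xffff, i.e. max_agg_size is clipped to
 * the 16-bit field written into USTORM_MAX_AGG_SIZE_OFFSET.
 */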
static void bnx2x_init_internal(struct bnx2x *bp, u32 load_code)
{
	switch (load_code) {
	case FW_MSG_CODE_DRV_LOAD_COMMON:
		bnx2x_init_internal_common(bp);
		/* no break */

	case FW_MSG_CODE_DRV_LOAD_PORT:
		bnx2x_init_internal_port(bp);
		/* no break */

	case FW_MSG_CODE_DRV_LOAD_FUNCTION:
		bnx2x_init_internal_func(bp);
		break;

	default:
		BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
		break;
	}
}
static void bnx2x_nic_init(struct bnx2x *bp, u32 load_code)
{
	int i;

	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		fp->bp = bp;
		fp->state = BNX2X_FP_STATE_CLOSED;
		fp->index = i;
		fp->cl_id = BP_L_ID(bp) + i;
		fp->sb_id = fp->cl_id;
		DP(NETIF_MSG_IFUP,
		   "bnx2x_init_sb(%p,%p) index %d  cl_id %d  sb %d\n",
		   bp, fp->status_blk, i, fp->cl_id, fp->sb_id);
		bnx2x_init_sb(bp, fp->status_blk, fp->status_blk_mapping,
			      fp->sb_id);
		bnx2x_update_fpsb_idx(fp);
	}

	bnx2x_init_def_sb(bp, bp->def_status_blk, bp->def_status_blk_mapping,
			  DEF_SB_ID);
	bnx2x_update_dsb_idx(bp);
	bnx2x_update_coalesce(bp);
	bnx2x_init_rx_rings(bp);
	bnx2x_init_tx_ring(bp);
	bnx2x_init_sp_ring(bp);
	bnx2x_init_context(bp);
	bnx2x_init_internal(bp, load_code);
	bnx2x_init_ind_table(bp);
	bnx2x_stats_init(bp);

	/* At this point, we are ready for interrupts */
	atomic_set(&bp->intr_sem, 0);

	/* flush all before enabling interrupts */
	mb();
	mmiowb();

	bnx2x_int_enable(bp);
}
/* end of nic init */

/*
 * gzip service functions
 */
static int bnx2x_gunzip_init(struct bnx2x *bp)
{
	bp->gunzip_buf = pci_alloc_consistent(bp->pdev, FW_BUF_SIZE,
					      &bp->gunzip_mapping);
	if (bp->gunzip_buf == NULL)
		goto gunzip_nomem1;

	bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL);
	if (bp->strm == NULL)
		goto gunzip_nomem2;

	bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(),
				      GFP_KERNEL);
	if (bp->strm->workspace == NULL)
		goto gunzip_nomem3;

	return 0;

gunzip_nomem3:
	kfree(bp->strm);
	bp->strm = NULL;

gunzip_nomem2:
	pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
			    bp->gunzip_mapping);
	bp->gunzip_buf = NULL;

gunzip_nomem1:
	printk(KERN_ERR PFX "%s: Cannot allocate firmware buffer for"
	       " un-compression\n", bp->dev->name);
	return -ENOMEM;
}
static void bnx2x_gunzip_end(struct bnx2x *bp)
{
	kfree(bp->strm->workspace);

	kfree(bp->strm);
	bp->strm = NULL;

	if (bp->gunzip_buf) {
		pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
				    bp->gunzip_mapping);
		bp->gunzip_buf = NULL;
	}
}
static int bnx2x_gunzip(struct bnx2x *bp, u8 *zbuf, int len)
{
	int n, rc;

	/* check gzip header */
	if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED))
		return -EINVAL;

	n = 10;

#define FNAME				0x8

	if (zbuf[3] & FNAME)
		while ((zbuf[n++] != 0) && (n < len));

	bp->strm->next_in = zbuf + n;
	bp->strm->avail_in = len - n;
	bp->strm->next_out = bp->gunzip_buf;
	bp->strm->avail_out = FW_BUF_SIZE;

	rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
	if (rc != Z_OK)
		return rc;

	rc = zlib_inflate(bp->strm, Z_FINISH);
	if ((rc != Z_OK) && (rc != Z_STREAM_END))
		printk(KERN_ERR PFX "%s: Firmware decompression error: %s\n",
		       bp->dev->name, bp->strm->msg);

	bp->gunzip_outlen = (FW_BUF_SIZE - bp->strm->avail_out);
	if (bp->gunzip_outlen & 0x3)
		printk(KERN_ERR PFX "%s: Firmware decompression error:"
				    " gunzip_outlen (%d) not aligned\n",
		       bp->dev->name, bp->gunzip_outlen);
	bp->gunzip_outlen >>= 2;

	zlib_inflateEnd(bp->strm);

	if (rc == Z_STREAM_END)
		return 0;

	return rc;
}
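/*
 * Header handling note: the 0x1f/0x8b magic and Z_DEFLATED method byte
 * checked above form the RFC 1952 gzip header; the FNAME flag (bit 3 of
 * the flags byte) means a NUL-terminated file name follows the fixed
 * 10-byte header, which the while loop skips before handing the raw
 * deflate stream to zlib.  The negative windowBits argument to
 * zlib_inflateInit2() (-MAX_WBITS) selects raw deflate decoding with no
 * zlib/gzip wrapper.
 */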
/* nic load/unload */

/*
 * General service functions
 */
/* send a NIG loopback debug packet */
static void bnx2x_lb_pckt(struct bnx2x *bp)
{
	u32 wb_write[3];

	/* Ethernet source and destination addresses */
	wb_write[0] = 0x55555555;
	wb_write[1] = 0x55555555;
	wb_write[2] = 0x20;		/* SOP */
	REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);

	/* NON-IP protocol */
	wb_write[0] = 0x09000000;
	wb_write[1] = 0x55555555;
	wb_write[2] = 0x10;		/* EOP, eop_bvalid = 0 */
	REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
}
/* some of the internal memories
 * are not directly readable from the driver
 * to test them we send debug packets
 */
static int bnx2x_int_mem_test(struct bnx2x *bp)
{
	int factor;
	int count, i;
	u32 val = 0;

	if (CHIP_REV_IS_FPGA(bp))
		factor = 120;
	else if (CHIP_REV_IS_EMUL(bp))
		factor = 200;
	else
		factor = 1;

	DP(NETIF_MSG_HW, "start part1\n");

	/* Disable inputs of parser neighbor blocks */
	REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
	REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
	REG_WR(bp, CFC_REG_DEBUG0, 0x1);
	REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);

	/* Write 0 to parser credits for CFC search request */
	REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);

	/* send Ethernet packet */
	bnx2x_lb_pckt(bp);

	/* TODO do i reset NIG statistic? */
	/* Wait until NIG register shows 1 packet of size 0x10 */
	count = 1000 * factor;
	while (count) {

		bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
		val = *bnx2x_sp(bp, wb_data[0]);
		if (val == 0x10)
			break;

		msleep(10);
		count--;
	}
	if (val != 0x10) {
		BNX2X_ERR("NIG timeout  val = 0x%x\n", val);
		return -1;
	}

	/* Wait until PRS register shows 1 packet */
	count = 1000 * factor;
	while (count) {
		val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
		if (val == 1)
			break;

		msleep(10);
		count--;
	}
	if (val != 0x1) {
		BNX2X_ERR("PRS timeout  val = 0x%x\n", val);
		return -2;
	}

	/* Reset and init BRB, PRS */
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
	msleep(50);
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
	msleep(50);
	bnx2x_init_block(bp, BRB1_COMMON_START, BRB1_COMMON_END);
	bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END);

	DP(NETIF_MSG_HW, "part2\n");

	/* Disable inputs of parser neighbor blocks */
	REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
	REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
	REG_WR(bp, CFC_REG_DEBUG0, 0x1);
	REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);

	/* Write 0 to parser credits for CFC search request */
	REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);

	/* send 10 Ethernet packets */
	for (i = 0; i < 10; i++)
		bnx2x_lb_pckt(bp);

	/* Wait until NIG register shows 10 + 1
	   packets of size 11*0x10 = 0xb0 */
	count = 1000 * factor;
	while (count) {

		bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
		val = *bnx2x_sp(bp, wb_data[0]);
		if (val == 0xb0)
			break;

		msleep(10);
		count--;
	}
	if (val != 0xb0) {
		BNX2X_ERR("NIG timeout  val = 0x%x\n", val);
		return -3;
	}

	/* Wait until PRS register shows 2 packets */
	val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
	if (val != 2)
		BNX2X_ERR("PRS timeout  val = 0x%x\n", val);

	/* Write 1 to parser credits for CFC search request */
	REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x1);

	/* Wait until PRS register shows 3 packets */
	msleep(10 * factor);
	/* Wait until NIG register shows 1 packet of size 0x10 */
	val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
	if (val != 3)
		BNX2X_ERR("PRS timeout  val = 0x%x\n", val);

	/* clear NIG EOP FIFO */
	for (i = 0; i < 11; i++)
		REG_RD(bp, NIG_REG_INGRESS_EOP_LB_FIFO);
	val = REG_RD(bp, NIG_REG_INGRESS_EOP_LB_EMPTY);
	if (val != 1) {
		BNX2X_ERR("clear of NIG failed\n");
		return -4;
	}

	/* Reset and init BRB, PRS, NIG */
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
	msleep(50);
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
	msleep(50);
	bnx2x_init_block(bp, BRB1_COMMON_START, BRB1_COMMON_END);
	bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END);
#ifndef BCM_ISCSI
	/* set NIC mode */
	REG_WR(bp, PRS_REG_NIC_MODE, 1);
#endif

	/* Enable inputs of parser neighbor blocks */
	REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x7fffffff);
	REG_WR(bp, TCM_REG_PRS_IFEN, 0x1);
	REG_WR(bp, CFC_REG_DEBUG0, 0x0);
	REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x1);

	DP(NETIF_MSG_HW, "done\n");

	return 0; /* OK */
}
static void enable_blocks_attention(struct bnx2x *bp)
{
	REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
	REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0);
	REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
	REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
	REG_WR(bp, QM_REG_QM_INT_MASK, 0);
	REG_WR(bp, TM_REG_TM_INT_MASK, 0);
	REG_WR(bp, XSDM_REG_XSDM_INT_MASK_0, 0);
	REG_WR(bp, XSDM_REG_XSDM_INT_MASK_1, 0);
	REG_WR(bp, XCM_REG_XCM_INT_MASK, 0);
/*	REG_WR(bp, XSEM_REG_XSEM_INT_MASK_0, 0); */
/*	REG_WR(bp, XSEM_REG_XSEM_INT_MASK_1, 0); */
	REG_WR(bp, USDM_REG_USDM_INT_MASK_0, 0);
	REG_WR(bp, USDM_REG_USDM_INT_MASK_1, 0);
	REG_WR(bp, UCM_REG_UCM_INT_MASK, 0);
/*	REG_WR(bp, USEM_REG_USEM_INT_MASK_0, 0); */
/*	REG_WR(bp, USEM_REG_USEM_INT_MASK_1, 0); */
	REG_WR(bp, GRCBASE_UPB + PB_REG_PB_INT_MASK, 0);
	REG_WR(bp, CSDM_REG_CSDM_INT_MASK_0, 0);
	REG_WR(bp, CSDM_REG_CSDM_INT_MASK_1, 0);
	REG_WR(bp, CCM_REG_CCM_INT_MASK, 0);
/*	REG_WR(bp, CSEM_REG_CSEM_INT_MASK_0, 0); */
/*	REG_WR(bp, CSEM_REG_CSEM_INT_MASK_1, 0); */
	if (CHIP_REV_IS_FPGA(bp))
		REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x580000);
	else
		REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x480000);
	REG_WR(bp, TSDM_REG_TSDM_INT_MASK_0, 0);
	REG_WR(bp, TSDM_REG_TSDM_INT_MASK_1, 0);
	REG_WR(bp, TCM_REG_TCM_INT_MASK, 0);
/*	REG_WR(bp, TSEM_REG_TSEM_INT_MASK_0, 0); */
/*	REG_WR(bp, TSEM_REG_TSEM_INT_MASK_1, 0); */
	REG_WR(bp, CDU_REG_CDU_INT_MASK, 0);
	REG_WR(bp, DMAE_REG_DMAE_INT_MASK, 0);
/*	REG_WR(bp, MISC_REG_MISC_INT_MASK, 0); */
	REG_WR(bp, PBF_REG_PBF_INT_MASK, 0x18);		/* bit 3,4 masked */
}
static void bnx2x_reset_common(struct bnx2x *bp)
{
	/* reset_common */
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
	       0xd3ffff7f);
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, 0x1403);
}
static int bnx2x_init_common(struct bnx2x *bp)
{
	u32 val, i;

	DP(BNX2X_MSG_MCP, "starting common init  func %d\n", BP_FUNC(bp));

	bnx2x_reset_common(bp);
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0xffffffff);
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, 0xfffc);

	bnx2x_init_block(bp, MISC_COMMON_START, MISC_COMMON_END);
	if (CHIP_IS_E1H(bp))
		REG_WR(bp, MISC_REG_E1HMF_MODE, IS_E1HMF(bp));

	REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x100);
	msleep(30);
	REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x0);

	bnx2x_init_block(bp, PXP_COMMON_START, PXP_COMMON_END);
	if (CHIP_IS_E1(bp)) {
		/* enable HW interrupt from PXP on USDM overflow
		   bit 16 on INT_MASK_0 */
		REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
	}

	bnx2x_init_block(bp, PXP2_COMMON_START, PXP2_COMMON_END);
	bnx2x_init_pxp(bp);

#ifdef __BIG_ENDIAN
	REG_WR(bp, PXP2_REG_RQ_QM_ENDIAN_M, 1);
	REG_WR(bp, PXP2_REG_RQ_TM_ENDIAN_M, 1);
	REG_WR(bp, PXP2_REG_RQ_SRC_ENDIAN_M, 1);
	REG_WR(bp, PXP2_REG_RQ_CDU_ENDIAN_M, 1);
	REG_WR(bp, PXP2_REG_RQ_DBG_ENDIAN_M, 1);
	/* make sure this value is 0 */
	REG_WR(bp, PXP2_REG_RQ_HC_ENDIAN_M, 0);

/*	REG_WR(bp, PXP2_REG_RD_PBF_SWAP_MODE, 1); */
	REG_WR(bp, PXP2_REG_RD_QM_SWAP_MODE, 1);
	REG_WR(bp, PXP2_REG_RD_TM_SWAP_MODE, 1);
	REG_WR(bp, PXP2_REG_RD_SRC_SWAP_MODE, 1);
	REG_WR(bp, PXP2_REG_RD_CDURD_SWAP_MODE, 1);
#endif

	REG_WR(bp, PXP2_REG_RQ_CDU_P_SIZE, 2);
#ifdef BCM_ISCSI
	REG_WR(bp, PXP2_REG_RQ_TM_P_SIZE, 5);
	REG_WR(bp, PXP2_REG_RQ_QM_P_SIZE, 5);
	REG_WR(bp, PXP2_REG_RQ_SRC_P_SIZE, 5);
#endif

	if (CHIP_REV_IS_FPGA(bp) && CHIP_IS_E1H(bp))
		REG_WR(bp, PXP2_REG_PGL_TAGS_LIMIT, 0x1);

	/* let the HW do its magic ... */
	msleep(100);
	/* finish PXP init */
	val = REG_RD(bp, PXP2_REG_RQ_CFG_DONE);
	if (val != 1) {
		BNX2X_ERR("PXP2 CFG failed\n");
		return -EBUSY;
	}
	val = REG_RD(bp, PXP2_REG_RD_INIT_DONE);
	if (val != 1) {
		BNX2X_ERR("PXP2 RD_INIT failed\n");
		return -EBUSY;
	}

	REG_WR(bp, PXP2_REG_RQ_DISABLE_INPUTS, 0);
	REG_WR(bp, PXP2_REG_RD_DISABLE_INPUTS, 0);

	bnx2x_init_block(bp, DMAE_COMMON_START, DMAE_COMMON_END);

	/* clean the DMAE memory */
	bp->dmae_ready = 1;
	bnx2x_init_fill(bp, TSEM_REG_PRAM, 0, 8);

	bnx2x_init_block(bp, TCM_COMMON_START, TCM_COMMON_END);
	bnx2x_init_block(bp, UCM_COMMON_START, UCM_COMMON_END);
	bnx2x_init_block(bp, CCM_COMMON_START, CCM_COMMON_END);
	bnx2x_init_block(bp, XCM_COMMON_START, XCM_COMMON_END);

	bnx2x_read_dmae(bp, XSEM_REG_PASSIVE_BUFFER, 3);
	bnx2x_read_dmae(bp, CSEM_REG_PASSIVE_BUFFER, 3);
	bnx2x_read_dmae(bp, TSEM_REG_PASSIVE_BUFFER, 3);
	bnx2x_read_dmae(bp, USEM_REG_PASSIVE_BUFFER, 3);

	bnx2x_init_block(bp, QM_COMMON_START, QM_COMMON_END);
	/* soft reset pulse */
	REG_WR(bp, QM_REG_SOFT_RESET, 1);
	REG_WR(bp, QM_REG_SOFT_RESET, 0);

#ifdef BCM_ISCSI
	bnx2x_init_block(bp, TIMERS_COMMON_START, TIMERS_COMMON_END);
#endif

	bnx2x_init_block(bp, DQ_COMMON_START, DQ_COMMON_END);
	REG_WR(bp, DORQ_REG_DPM_CID_OFST, BCM_PAGE_SHIFT);
	if (!CHIP_REV_IS_SLOW(bp)) {
		/* enable hw interrupt from doorbell Q */
		REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
	}

	bnx2x_init_block(bp, BRB1_COMMON_START, BRB1_COMMON_END);
	bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END);
	REG_WR(bp, PRS_REG_A_PRSU_20, 0xf);
	/* set NIC mode */
	REG_WR(bp, PRS_REG_NIC_MODE, 1);
	if (CHIP_IS_E1H(bp))
		REG_WR(bp, PRS_REG_E1HOV_MODE, IS_E1HMF(bp));

	bnx2x_init_block(bp, TSDM_COMMON_START, TSDM_COMMON_END);
	bnx2x_init_block(bp, CSDM_COMMON_START, CSDM_COMMON_END);
	bnx2x_init_block(bp, USDM_COMMON_START, USDM_COMMON_END);
	bnx2x_init_block(bp, XSDM_COMMON_START, XSDM_COMMON_END);

	if (CHIP_IS_E1H(bp)) {
		bnx2x_init_fill(bp, TSTORM_INTMEM_ADDR, 0,
				STORM_INTMEM_SIZE_E1H/2);
		bnx2x_init_fill(bp,
				TSTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
				0, STORM_INTMEM_SIZE_E1H/2);
		bnx2x_init_fill(bp, CSTORM_INTMEM_ADDR, 0,
				STORM_INTMEM_SIZE_E1H/2);
		bnx2x_init_fill(bp,
				CSTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
				0, STORM_INTMEM_SIZE_E1H/2);
		bnx2x_init_fill(bp, XSTORM_INTMEM_ADDR, 0,
				STORM_INTMEM_SIZE_E1H/2);
		bnx2x_init_fill(bp,
				XSTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
				0, STORM_INTMEM_SIZE_E1H/2);
		bnx2x_init_fill(bp, USTORM_INTMEM_ADDR, 0,
				STORM_INTMEM_SIZE_E1H/2);
		bnx2x_init_fill(bp,
				USTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
				0, STORM_INTMEM_SIZE_E1H/2);
	} else { /* E1 */
		bnx2x_init_fill(bp, TSTORM_INTMEM_ADDR, 0,
				STORM_INTMEM_SIZE_E1);
		bnx2x_init_fill(bp, CSTORM_INTMEM_ADDR, 0,
				STORM_INTMEM_SIZE_E1);
		bnx2x_init_fill(bp, XSTORM_INTMEM_ADDR, 0,
				STORM_INTMEM_SIZE_E1);
		bnx2x_init_fill(bp, USTORM_INTMEM_ADDR, 0,
				STORM_INTMEM_SIZE_E1);
	}

	bnx2x_init_block(bp, TSEM_COMMON_START, TSEM_COMMON_END);
	bnx2x_init_block(bp, USEM_COMMON_START, USEM_COMMON_END);
	bnx2x_init_block(bp, CSEM_COMMON_START, CSEM_COMMON_END);
	bnx2x_init_block(bp, XSEM_COMMON_START, XSEM_COMMON_END);

	/* sync semi rtc */
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
	       0x80000000);
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
	       0x80000000);

	bnx2x_init_block(bp, UPB_COMMON_START, UPB_COMMON_END);
	bnx2x_init_block(bp, XPB_COMMON_START, XPB_COMMON_END);
	bnx2x_init_block(bp, PBF_COMMON_START, PBF_COMMON_END);

	REG_WR(bp, SRC_REG_SOFT_RST, 1);
	for (i = SRC_REG_KEYRSS0_0; i <= SRC_REG_KEYRSS1_9; i += 4) {
		REG_WR(bp, i, 0xc0cac01a);
		/* TODO: replace with something meaningful */
	}
	bnx2x_init_block(bp, SRCH_COMMON_START, SRCH_COMMON_END);
	REG_WR(bp, SRC_REG_SOFT_RST, 0);

	if (sizeof(union cdu_context) != 1024)
		/* we currently assume that a context is 1024 bytes */
		printk(KERN_ALERT PFX "please adjust the size of"
		       " cdu_context(%ld)\n", (long)sizeof(union cdu_context));

	bnx2x_init_block(bp, CDU_COMMON_START, CDU_COMMON_END);
	val = (4 << 24) + (0 << 12) + 1024;
	REG_WR(bp, CDU_REG_CDU_GLOBAL_PARAMS, val);
	if (CHIP_IS_E1(bp)) {
		/* !!! fix pxp client credit until excel update */
		REG_WR(bp, CDU_REG_CDU_DEBUG, 0x264);
		REG_WR(bp, CDU_REG_CDU_DEBUG, 0);
	}

	bnx2x_init_block(bp, CFC_COMMON_START, CFC_COMMON_END);
	REG_WR(bp, CFC_REG_INIT_REG, 0x7FF);
	/* enable context validation interrupt from CFC */
	REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);

	/* set the thresholds to prevent CFC/CDU race */
	REG_WR(bp, CFC_REG_DEBUG0, 0x20020000);

	bnx2x_init_block(bp, HC_COMMON_START, HC_COMMON_END);
	bnx2x_init_block(bp, MISC_AEU_COMMON_START, MISC_AEU_COMMON_END);

	/* PXPCS COMMON comes here */
	/* Reset PCIE errors for debug */
	REG_WR(bp, 0x2814, 0xffffffff);
	REG_WR(bp, 0x3820, 0xffffffff);

	/* EMAC0 COMMON comes here */
	/* EMAC1 COMMON comes here */
	/* DBU COMMON comes here */
	/* DBG COMMON comes here */

	bnx2x_init_block(bp, NIG_COMMON_START, NIG_COMMON_END);
	if (CHIP_IS_E1H(bp)) {
		REG_WR(bp, NIG_REG_LLH_MF_MODE, IS_E1HMF(bp));
		REG_WR(bp, NIG_REG_LLH_E1HOV_MODE, IS_E1HMF(bp));
	}

	if (CHIP_REV_IS_SLOW(bp))
		msleep(200);

	/* finish CFC init */
	val = reg_poll(bp, CFC_REG_LL_INIT_DONE, 1, 100, 10);
	if (val != 1) {
		BNX2X_ERR("CFC LL_INIT failed\n");
		return -EBUSY;
	}
	val = reg_poll(bp, CFC_REG_AC_INIT_DONE, 1, 100, 10);
	if (val != 1) {
		BNX2X_ERR("CFC AC_INIT failed\n");
		return -EBUSY;
	}
	val = reg_poll(bp, CFC_REG_CAM_INIT_DONE, 1, 100, 10);
	if (val != 1) {
		BNX2X_ERR("CFC CAM_INIT failed\n");
		return -EBUSY;
	}
	REG_WR(bp, CFC_REG_DEBUG0, 0);

	/* read NIG statistic
	   to see if this is our first up since powerup */
	bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
	val = *bnx2x_sp(bp, wb_data[0]);

	/* do internal memory self test */
	if ((CHIP_IS_E1(bp)) && (val == 0) && bnx2x_int_mem_test(bp)) {
		BNX2X_ERR("internal mem self test failed\n");
		return -EBUSY;
	}

	switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
		bp->port.need_hw_lock = 1;
		break;

	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
		/* Fan failure is indicated by SPIO 5 */
		bnx2x_set_spio(bp, MISC_REGISTERS_SPIO_5,
			       MISC_REGISTERS_SPIO_INPUT_HI_Z);

		/* set to active low mode */
		val = REG_RD(bp, MISC_REG_SPIO_INT);
		val |= ((1 << MISC_REGISTERS_SPIO_5) <<
					MISC_REGISTERS_SPIO_INT_OLD_SET_POS);
		REG_WR(bp, MISC_REG_SPIO_INT, val);

		/* enable interrupt to signal the IGU */
		val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN);
		val |= (1 << MISC_REGISTERS_SPIO_5);
		REG_WR(bp, MISC_REG_SPIO_EVENT_EN, val);
		break;

	default:
		break;
	}

	/* clear PXP2 attentions */
	REG_RD(bp, PXP2_REG_PXP2_INT_STS_CLR_0);

	enable_blocks_attention(bp);

	if (!BP_NOMCP(bp)) {
		bnx2x_acquire_phy_lock(bp);
		bnx2x_common_init_phy(bp, bp->common.shmem_base);
		bnx2x_release_phy_lock(bp);
	} else
		BNX2X_ERR("Bootcode is missing - can not initialize link\n");

	return 0;
}
static int bnx2x_init_port(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 low, high;
	u32 val;

	DP(BNX2X_MSG_MCP, "starting port init  port %x\n", port);

	REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);

	/* Port PXP comes here */
	/* Port PXP2 comes here */
#ifdef BCM_ISCSI
	/* Port0  1
	 * Port1  385 */
	i++;
	wb_write[0] = ONCHIP_ADDR1(bp->timers_mapping);
	wb_write[1] = ONCHIP_ADDR2(bp->timers_mapping);
	REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
	REG_WR(bp, PXP2_REG_PSWRQ_TM0_L2P + func*4, PXP_ONE_ILT(i));

	/* Port0  2
	 * Port1  386 */
	i++;
	wb_write[0] = ONCHIP_ADDR1(bp->qm_mapping);
	wb_write[1] = ONCHIP_ADDR2(bp->qm_mapping);
	REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
	REG_WR(bp, PXP2_REG_PSWRQ_QM0_L2P + func*4, PXP_ONE_ILT(i));

	/* Port0  3
	 * Port1  387 */
	i++;
	wb_write[0] = ONCHIP_ADDR1(bp->t1_mapping);
	wb_write[1] = ONCHIP_ADDR2(bp->t1_mapping);
	REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
	REG_WR(bp, PXP2_REG_PSWRQ_SRC0_L2P + func*4, PXP_ONE_ILT(i));
#endif

	/* Port CMs come here */
	bnx2x_init_block(bp, (port ? XCM_PORT1_START : XCM_PORT0_START),
			     (port ? XCM_PORT1_END : XCM_PORT0_END));

	/* Port QM comes here */
#ifdef BCM_ISCSI
	REG_WR(bp, TM_REG_LIN0_SCAN_TIME + func*4, 1024/64*20);
	REG_WR(bp, TM_REG_LIN0_MAX_ACTIVE_CID + func*4, 31);

	bnx2x_init_block(bp, func ? TIMERS_PORT1_START : TIMERS_PORT0_START,
			     func ? TIMERS_PORT1_END : TIMERS_PORT0_END);
#endif
	/* Port DQ comes here */

	bnx2x_init_block(bp, (port ? BRB1_PORT1_START : BRB1_PORT0_START),
			     (port ? BRB1_PORT1_END : BRB1_PORT0_END));
	if (CHIP_REV_IS_SLOW(bp) && !CHIP_IS_E1H(bp)) {
		/* no pause for emulation and FPGA */
		low = 0;
		high = 513;
	} else {
		if (IS_E1HMF(bp))
			low = ((bp->flags & ONE_PORT_FLAG) ? 160 : 246);
		else if (bp->dev->mtu > 4096) {
			if (bp->flags & ONE_PORT_FLAG)
				low = 160;
			else {
				val = bp->dev->mtu;
				/* (24*1024 + val*4)/256 */
				low = 96 + (val/64) + ((val % 64) ? 1 : 0);
			}
		} else
			low = ((bp->flags & ONE_PORT_FLAG) ? 80 : 160);
		high = low + 56;	/* 14*1024/256 */
	}
	REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_0 + port*4, low);
	REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_0 + port*4, high);

	/* Port PRS comes here */
	/* Port TSDM comes here */
	/* Port CSDM comes here */
	/* Port USDM comes here */
	/* Port XSDM comes here */
	bnx2x_init_block(bp, port ? TSEM_PORT1_START : TSEM_PORT0_START,
			     port ? TSEM_PORT1_END : TSEM_PORT0_END);
	bnx2x_init_block(bp, port ? USEM_PORT1_START : USEM_PORT0_START,
			     port ? USEM_PORT1_END : USEM_PORT0_END);
	bnx2x_init_block(bp, port ? CSEM_PORT1_START : CSEM_PORT0_START,
			     port ? CSEM_PORT1_END : CSEM_PORT0_END);
	bnx2x_init_block(bp, port ? XSEM_PORT1_START : XSEM_PORT0_START,
			     port ? XSEM_PORT1_END : XSEM_PORT0_END);
	/* Port UPB comes here */
	/* Port XPB comes here */

	bnx2x_init_block(bp, port ? PBF_PORT1_START : PBF_PORT0_START,
			     port ? PBF_PORT1_END : PBF_PORT0_END);
	/* configure PBF to work without PAUSE mtu 9000 */
	REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 0);

	/* update threshold */
	REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, (9040/16));
	/* update init credit */
	REG_WR(bp, PBF_REG_P0_INIT_CRD + port*4, (9040/16) + 553 - 22);

	/* probe changes */
	REG_WR(bp, PBF_REG_INIT_P0 + port*4, 1);
	msleep(5);
	REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0);

#ifdef BCM_ISCSI
	/* tell the searcher where the T2 table is */
	REG_WR(bp, SRC_REG_COUNTFREE0 + func*4, 16*1024/64);

	wb_write[0] = U64_LO(bp->t2_mapping);
	wb_write[1] = U64_HI(bp->t2_mapping);
	REG_WR_DMAE(bp, SRC_REG_FIRSTFREE0 + func*4, wb_write, 2);
	wb_write[0] = U64_LO((u64)bp->t2_mapping + 16*1024 - 64);
	wb_write[1] = U64_HI((u64)bp->t2_mapping + 16*1024 - 64);
	REG_WR_DMAE(bp, SRC_REG_LASTFREE0 + func*4, wb_write, 2);

	REG_WR(bp, SRC_REG_NUMBER_HASH_BITS0 + func*4, 10);
	/* Port SRCH comes here */
#endif
	/* Port CDU comes here */
	/* Port CFC comes here */

	if (CHIP_IS_E1(bp)) {
		REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
		REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
	}
	bnx2x_init_block(bp, port ? HC_PORT1_START : HC_PORT0_START,
			     port ? HC_PORT1_END : HC_PORT0_END);

	bnx2x_init_block(bp, port ? MISC_AEU_PORT1_START :
				    MISC_AEU_PORT0_START,
			     port ? MISC_AEU_PORT1_END : MISC_AEU_PORT0_END);
	/* init aeu_mask_attn_func_0/1:
	 *  - SF mode: bits 3-7 are masked. only bits 0-2 are in use
	 *  - MF mode: bit 3 is masked. bits 0-2 are in use as in SF
	 *             bits 4-7 are used for "per vn group attention" */
	REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4,
	       (IS_E1HMF(bp) ? 0xF7 : 0x7));

	/* Port PXPCS comes here */
	/* Port EMAC0 comes here */
	/* Port EMAC1 comes here */
	/* Port DBU comes here */
	/* Port DBG comes here */
	bnx2x_init_block(bp, port ? NIG_PORT1_START : NIG_PORT0_START,
			     port ? NIG_PORT1_END : NIG_PORT0_END);

	REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1);

	if (CHIP_IS_E1H(bp)) {
		/* 0x2 disable e1hov, 0x1 enable */
		REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK_MF + port*4,
		       (IS_E1HMF(bp) ? 0x1 : 0x2));

		/* support pause requests from USDM, TSDM and BRB */
		REG_WR(bp, NIG_REG_LLFC_EGRESS_SRC_ENABLE_0 + port*4, 0x7);

		{
			REG_WR(bp, NIG_REG_LLFC_ENABLE_0 + port*4, 0);
			REG_WR(bp, NIG_REG_LLFC_OUT_EN_0 + port*4, 0);
			REG_WR(bp, NIG_REG_PAUSE_ENABLE_0 + port*4, 1);
		}
	}

	/* Port MCP comes here */
	/* Port DMAE comes here */

	switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
		{
		u32 swap_val, swap_override, aeu_gpio_mask, offset;

		bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_3,
			       MISC_REGISTERS_GPIO_INPUT_HI_Z, port);

		/* The GPIO should be swapped if the swap register is
		   set and active */
		swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
		swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);

		/* Select function upon port-swap configuration */
		if (port == 0) {
			offset = MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0;
			aeu_gpio_mask = (swap_val && swap_override) ?
				AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1 :
				AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0;
		} else {
			offset = MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0;
			aeu_gpio_mask = (swap_val && swap_override) ?
				AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 :
				AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1;
		}
		val = REG_RD(bp, offset);
		/* add GPIO3 to group */
		val |= aeu_gpio_mask;
		REG_WR(bp, offset, val);
		}
		break;

	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
		/* add SPIO 5 to group 0 */
		val = REG_RD(bp, MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
		val |= AEU_INPUTS_ATTN_BITS_SPIO5;
		REG_WR(bp, MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0, val);
		break;

	default:
		break;
	}

	bnx2x__link_reset(bp);

	return 0;
}
#define ILT_PER_FUNC		(768/2)
#define FUNC_ILT_BASE(func)	(func * ILT_PER_FUNC)
/* the phys address is shifted right 12 bits and has a
   1=valid bit added to the 53rd bit
   then since this is a wide register(TM)
   we split it into two 32 bit writes
 */
#define ONCHIP_ADDR1(x)		((u32)(((u64)x >> 12) & 0xFFFFFFFF))
#define ONCHIP_ADDR2(x)		((u32)((1 << 20) | ((u64)x >> 44)))
#define PXP_ONE_ILT(x)		(((x) << 10) | x)
#define PXP_ILT_RANGE(f, l)	(((l) << 10) | f)

#define CNIC_ILT_LINES		0
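/*
 * ONCHIP_ADDR example: for a DMA address of 0x123456000, ONCHIP_ADDR1()
 * yields 0x00123456 (bits 12..43 of the address) and ONCHIP_ADDR2()
 * yields 0x00100000 (the valid bit at position 20, plus bits 44..63,
 * here zero); the two 32-bit writes together fill one wide ILT entry.
 */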
static void bnx2x_ilt_wr(struct bnx2x *bp, u32 index, dma_addr_t addr)
{
	int reg;

	if (CHIP_IS_E1H(bp))
		reg = PXP2_REG_RQ_ONCHIP_AT_B0 + index*8;
	else /* E1 */
		reg = PXP2_REG_RQ_ONCHIP_AT + index*8;

	bnx2x_wb_wr(bp, reg, ONCHIP_ADDR1(addr), ONCHIP_ADDR2(addr));
}
static int bnx2x_init_func(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int func = BP_FUNC(bp);
	u32 addr, val;
	int i;

	DP(BNX2X_MSG_MCP, "starting func init  func %x\n", func);

	/* set MSI reconfigure capability */
	addr = (port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0);
	val = REG_RD(bp, addr);
	val |= HC_CONFIG_0_REG_MSI_ATTN_EN_0;
	REG_WR(bp, addr, val);

	i = FUNC_ILT_BASE(func);

	bnx2x_ilt_wr(bp, i, bnx2x_sp_mapping(bp, context));
	if (CHIP_IS_E1H(bp)) {
		REG_WR(bp, PXP2_REG_RQ_CDU_FIRST_ILT, i);
		REG_WR(bp, PXP2_REG_RQ_CDU_LAST_ILT, i + CNIC_ILT_LINES);
	} else /* E1 */
		REG_WR(bp, PXP2_REG_PSWRQ_CDU0_L2P + func*4,
		       PXP_ILT_RANGE(i, i + CNIC_ILT_LINES));

	if (CHIP_IS_E1H(bp)) {
		for (i = 0; i < 9; i++)
			bnx2x_init_block(bp,
					 cm_start[func][i], cm_end[func][i]);

		REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
		REG_WR(bp, NIG_REG_LLH0_FUNC_VLAN_ID + port*8, bp->e1hov);
	}

	/* HC init per function */
	if (CHIP_IS_E1H(bp)) {
		REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);

		REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
		REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
	}
	bnx2x_init_block(bp, hc_limits[func][0], hc_limits[func][1]);

	/* Reset PCIE errors for debug */
	REG_WR(bp, 0x2114, 0xffffffff);
	REG_WR(bp, 0x2120, 0xffffffff);

	return 0;
}
static int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
{
	int i, rc = 0;

	DP(BNX2X_MSG_MCP, "function %d  load_code %x\n",
	   BP_FUNC(bp), load_code);

	bp->dmae_ready = 0;
	mutex_init(&bp->dmae_mutex);
	bnx2x_gunzip_init(bp);

	switch (load_code) {
	case FW_MSG_CODE_DRV_LOAD_COMMON:
		rc = bnx2x_init_common(bp);
		if (rc)
			goto init_hw_err;
		/* no break */

	case FW_MSG_CODE_DRV_LOAD_PORT:
		bp->dmae_ready = 1;
		rc = bnx2x_init_port(bp);
		if (rc)
			goto init_hw_err;
		/* no break */

	case FW_MSG_CODE_DRV_LOAD_FUNCTION:
		bp->dmae_ready = 1;
		rc = bnx2x_init_func(bp);
		if (rc)
			goto init_hw_err;
		break;

	default:
		BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
		break;
	}

	if (!BP_NOMCP(bp)) {
		int func = BP_FUNC(bp);

		bp->fw_drv_pulse_wr_seq =
				(SHMEM_RD(bp, func_mb[func].drv_pulse_mb) &
				 DRV_PULSE_SEQ_MASK);
		bp->func_stx = SHMEM_RD(bp, func_mb[func].fw_mb_param);
		DP(BNX2X_MSG_MCP, "drv_pulse 0x%x  func_stx 0x%x\n",
		   bp->fw_drv_pulse_wr_seq, bp->func_stx);
	}

	/* this needs to be done before gunzip end */
	bnx2x_zero_def_sb(bp);
	for_each_queue(bp, i)
		bnx2x_zero_sb(bp, BP_L_ID(bp) + i);

init_hw_err:
	bnx2x_gunzip_end(bp);

	return rc;
}
/* send the MCP a request, block until there is a reply */
static u32 bnx2x_fw_command(struct bnx2x *bp, u32 command)
{
	int func = BP_FUNC(bp);
	u32 seq = ++bp->fw_seq;
	u32 rc = 0;
	u32 cnt = 1;
	u8 delay = CHIP_REV_IS_SLOW(bp) ? 100 : 10;

	SHMEM_WR(bp, func_mb[func].drv_mb_header, (command | seq));
	DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB\n", (command | seq));

	do {
		/* let the FW do its magic ... */
		msleep(delay);

		rc = SHMEM_RD(bp, func_mb[func].fw_mb_header);

		/* Give the FW up to 2 second (200*10ms) */
	} while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 200));

	DP(BNX2X_MSG_MCP, "[after %d ms] read (%x) seq is (%x) from FW MB\n",
	   cnt*delay, rc, seq);

	/* is this a reply to our command? */
	if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK)) {
		rc &= FW_MSG_CODE_MASK;

	} else {
		/* FW BUG! */
		BNX2X_ERR("FW failed to respond!\n");
		bnx2x_fw_dump(bp);
		rc = 0;
	}

	return rc;
}
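/*
 * Sequencing sketch: the low bits of the mailbox header carry the
 * sequence number, so a command is written as (command | seq) and a
 * reply is only accepted once the FW echoes the same seq back under
 * FW_MSG_SEQ_NUMBER_MASK.  With the default 10ms delay the do-while
 * above polls up to 200 times, i.e. the 2 second budget mentioned in
 * the comment; on timeout the function dumps FW state and returns 0.
 */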
static void bnx2x_free_mem(struct bnx2x *bp)
{

#define BNX2X_PCI_FREE(x, y, size) \
	do { \
		if (x) { \
			pci_free_consistent(bp->pdev, size, x, y); \
			x = NULL; \
			y = 0; \
		} \
	} while (0)

#define BNX2X_FREE(x) \
	do { \
		if (x) { \
			vfree(x); \
			x = NULL; \
		} \
	} while (0)

	int i;

	/* fastpath */
	/* Common */
	for_each_queue(bp, i) {

		/* status blocks */
		BNX2X_PCI_FREE(bnx2x_fp(bp, i, status_blk),
			       bnx2x_fp(bp, i, status_blk_mapping),
			       sizeof(struct host_status_block) +
			       sizeof(struct eth_tx_db_data));
	}
	/* Rx */
	for_each_rx_queue(bp, i) {

		/* fastpath rx rings: rx_buf rx_desc rx_comp */
		BNX2X_FREE(bnx2x_fp(bp, i, rx_buf_ring));
		BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_desc_ring),
			       bnx2x_fp(bp, i, rx_desc_mapping),
			       sizeof(struct eth_rx_bd) * NUM_RX_BD);

		BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_comp_ring),
			       bnx2x_fp(bp, i, rx_comp_mapping),
			       sizeof(struct eth_fast_path_rx_cqe) *
			       NUM_RCQ_BD);

		/* SGE ring */
		BNX2X_FREE(bnx2x_fp(bp, i, rx_page_ring));
		BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_sge_ring),
			       bnx2x_fp(bp, i, rx_sge_mapping),
			       BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
	}
	/* Tx */
	for_each_tx_queue(bp, i) {

		/* fastpath tx rings: tx_buf tx_desc */
		BNX2X_FREE(bnx2x_fp(bp, i, tx_buf_ring));
		BNX2X_PCI_FREE(bnx2x_fp(bp, i, tx_desc_ring),
			       bnx2x_fp(bp, i, tx_desc_mapping),
			       sizeof(struct eth_tx_bd) * NUM_TX_BD);
	}
	/* end of fastpath */

	BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping,
		       sizeof(struct host_def_status_block));

	BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping,
		       sizeof(struct bnx2x_slowpath));

#ifdef BCM_ISCSI
	BNX2X_PCI_FREE(bp->t1, bp->t1_mapping, 64*1024);
	BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, 16*1024);
	BNX2X_PCI_FREE(bp->timers, bp->timers_mapping, 8*1024);
	BNX2X_PCI_FREE(bp->qm, bp->qm_mapping, 128*1024);
#endif
	BNX2X_PCI_FREE(bp->spq, bp->spq_mapping, BCM_PAGE_SIZE);

#undef BNX2X_PCI_FREE
#undef BNX2X_FREE
}
static int bnx2x_alloc_mem(struct bnx2x *bp)
{

#define BNX2X_PCI_ALLOC(x, y, size) \
	do { \
		x = pci_alloc_consistent(bp->pdev, size, y); \
		if (x == NULL) \
			goto alloc_mem_err; \
		memset(x, 0, size); \
	} while (0)

#define BNX2X_ALLOC(x, size) \
	do { \
		x = vmalloc(size); \
		if (x == NULL) \
			goto alloc_mem_err; \
		memset(x, 0, size); \
	} while (0)

	int i;

	/* fastpath */
	for_each_queue(bp, i) {
		bnx2x_fp(bp, i, bp) = bp;

		/* status blocks */
		BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, status_blk),
				&bnx2x_fp(bp, i, status_blk_mapping),
				sizeof(struct host_status_block) +
				sizeof(struct eth_tx_db_data));
	}

	for_each_rx_queue(bp, i) {

		/* fastpath rx rings: rx_buf rx_desc rx_comp */
		BNX2X_ALLOC(bnx2x_fp(bp, i, rx_buf_ring),
				sizeof(struct sw_rx_bd) * NUM_RX_BD);
		BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_desc_ring),
				&bnx2x_fp(bp, i, rx_desc_mapping),
				sizeof(struct eth_rx_bd) * NUM_RX_BD);

		BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_comp_ring),
				&bnx2x_fp(bp, i, rx_comp_mapping),
				sizeof(struct eth_fast_path_rx_cqe) *
				NUM_RCQ_BD);

		/* SGE ring */
		BNX2X_ALLOC(bnx2x_fp(bp, i, rx_page_ring),
				sizeof(struct sw_rx_page) * NUM_RX_SGE);
		BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_sge_ring),
				&bnx2x_fp(bp, i, rx_sge_mapping),
				BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
	}

	for_each_tx_queue(bp, i) {

		bnx2x_fp(bp, i, hw_tx_prods) =
				(void *)(bnx2x_fp(bp, i, status_blk) + 1);

		bnx2x_fp(bp, i, tx_prods_mapping) =
				bnx2x_fp(bp, i, status_blk_mapping) +
				sizeof(struct host_status_block);

		/* fastpath tx rings: tx_buf tx_desc */
		BNX2X_ALLOC(bnx2x_fp(bp, i, tx_buf_ring),
				sizeof(struct sw_tx_bd) * NUM_TX_BD);
		BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, tx_desc_ring),
				&bnx2x_fp(bp, i, tx_desc_mapping),
				sizeof(struct eth_tx_bd) * NUM_TX_BD);
	}
	/* end of fastpath */

	BNX2X_PCI_ALLOC(bp->def_status_blk, &bp->def_status_blk_mapping,
			sizeof(struct host_def_status_block));

	BNX2X_PCI_ALLOC(bp->slowpath, &bp->slowpath_mapping,
			sizeof(struct bnx2x_slowpath));

#ifdef BCM_ISCSI
	BNX2X_PCI_ALLOC(bp->t1, &bp->t1_mapping, 64*1024);

	for (i = 0; i < 64*1024; i += 64) {
		*(u64 *)((char *)bp->t1 + i + 56) = 0x0UL;
		*(u64 *)((char *)bp->t1 + i + 3) = 0x0UL;
	}

	/* allocate searcher T2 table: 1/4 of the T1 allocation;
	   T2 is not entered into the ILT */
	BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, 16*1024);

	for (i = 0; i < 16*1024; i += 64)
		*(u64 *)((char *)bp->t2 + i + 56) = bp->t2_mapping + i + 64;

	/* now fixup the last line in the block to point to the next block */
	*(u64 *)((char *)bp->t2 + 1024*16 - 8) = bp->t2_mapping;
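	/*
	 * Annotation (not in the original source): each 64-byte T2 entry
	 * uses its last 8 bytes as a physical-address link to the next
	 * entry, so the loop above threads the table into a list and the
	 * fixup write closes it into a ring:
	 *
	 *   entry[0] -> entry[1] -> ... -> entry[255] -> entry[0]
	 *
	 * (16*1024 bytes / 64 bytes per entry = 256 entries)
	 */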
	/* Timer block array (MAX_CONN*8) phys uncached for now 1024 conns */
	BNX2X_PCI_ALLOC(bp->timers, &bp->timers_mapping, 8*1024);

	/* QM queues (128*MAX_CONN) */
	BNX2X_PCI_ALLOC(bp->qm, &bp->qm_mapping, 128*1024);
#endif

	/* Slow path ring */
	BNX2X_PCI_ALLOC(bp->spq, &bp->spq_mapping, BCM_PAGE_SIZE);

	return 0;

alloc_mem_err:
	bnx2x_free_mem(bp);
	return -ENOMEM;

#undef BNX2X_PCI_ALLOC
#undef BNX2X_ALLOC
}
static void bnx2x_free_tx_skbs(struct bnx2x *bp)
{
	int i;

	for_each_tx_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		u16 bd_cons = fp->tx_bd_cons;
		u16 sw_prod = fp->tx_pkt_prod;
		u16 sw_cons = fp->tx_pkt_cons;

		while (sw_cons != sw_prod) {
			bd_cons = bnx2x_free_tx_pkt(bp, fp, TX_BD(sw_cons));
			sw_cons++;
		}
	}
}

static void bnx2x_free_rx_skbs(struct bnx2x *bp)
{
	int i, j;

	for_each_rx_queue(bp, j) {
		struct bnx2x_fastpath *fp = &bp->fp[j];

		for (i = 0; i < NUM_RX_BD; i++) {
			struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
			struct sk_buff *skb = rx_buf->skb;

			if (skb == NULL)
				continue;

			pci_unmap_single(bp->pdev,
					 pci_unmap_addr(rx_buf, mapping),
					 bp->rx_buf_size,
					 PCI_DMA_FROMDEVICE);

			rx_buf->skb = NULL;
			dev_kfree_skb(skb);
		}
		if (!fp->disable_tpa)
			bnx2x_free_tpa_pool(bp, fp, CHIP_IS_E1(bp) ?
					    ETH_MAX_AGGREGATION_QUEUES_E1 :
					    ETH_MAX_AGGREGATION_QUEUES_E1H);
	}
}

static void bnx2x_free_skbs(struct bnx2x *bp)
{
	bnx2x_free_tx_skbs(bp);
	bnx2x_free_rx_skbs(bp);
}
static void bnx2x_free_msix_irqs(struct bnx2x *bp)
{
	int i, offset = 1;

	free_irq(bp->msix_table[0].vector, bp->dev);
	DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
	   bp->msix_table[0].vector);

	for_each_queue(bp, i) {
		DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq "
		   "state %x\n", i, bp->msix_table[i + offset].vector,
		   bnx2x_fp(bp, i, state));

		free_irq(bp->msix_table[i + offset].vector, &bp->fp[i]);
	}
}

static void bnx2x_free_irq(struct bnx2x *bp)
{
	if (bp->flags & USING_MSIX_FLAG) {
		bnx2x_free_msix_irqs(bp);
		pci_disable_msix(bp->pdev);
		bp->flags &= ~USING_MSIX_FLAG;

	} else if (bp->flags & USING_MSI_FLAG) {
		free_irq(bp->pdev->irq, bp->dev);
		pci_disable_msi(bp->pdev);
		bp->flags &= ~USING_MSI_FLAG;

	} else
		free_irq(bp->pdev->irq, bp->dev);
}

static int bnx2x_enable_msix(struct bnx2x *bp)
{
	int i, rc, offset = 1;
	int igu_vec = 0;

	bp->msix_table[0].entry = igu_vec;
	DP(NETIF_MSG_IFUP, "msix_table[0].entry = %d (slowpath)\n", igu_vec);

	for_each_queue(bp, i) {
		igu_vec = BP_L_ID(bp) + offset + i;
		bp->msix_table[i + offset].entry = igu_vec;
		DP(NETIF_MSG_IFUP, "msix_table[%d].entry = %d "
		   "(fastpath #%u)\n", i + offset, igu_vec, i);
	}

	rc = pci_enable_msix(bp->pdev, &bp->msix_table[0],
			     BNX2X_NUM_QUEUES(bp) + offset);
	if (rc) {
		DP(NETIF_MSG_IFUP, "MSI-X is not attainable  rc %d\n", rc);
		return rc;
	}

	bp->flags |= USING_MSIX_FLAG;

	return 0;
}
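/*
 * Annotation (not in the original source): the MSI-X vector layout is
 * fixed by the loop above: entry 0 carries slowpath/default status block
 * events and entries 1..BNX2X_NUM_QUEUES(bp) map one fastpath queue each,
 * which is why every fastpath lookup below indexes msix_table[i + 1].
 */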
static int bnx2x_req_msix_irqs(struct bnx2x *bp)
{
	int i, rc, offset = 1;

	rc = request_irq(bp->msix_table[0].vector, bnx2x_msix_sp_int, 0,
			 bp->dev->name, bp->dev);
	if (rc) {
		BNX2X_ERR("request sp irq failed\n");
		return -EBUSY;
	}

	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		sprintf(fp->name, "%s.fp%d", bp->dev->name, i);
		rc = request_irq(bp->msix_table[i + offset].vector,
				 bnx2x_msix_fp_int, 0, fp->name, fp);
		if (rc) {
			BNX2X_ERR("request fp #%d irq failed  rc %d\n", i, rc);
			bnx2x_free_msix_irqs(bp);
			return -EBUSY;
		}

		fp->state = BNX2X_FP_STATE_IRQ;
	}

	i = BNX2X_NUM_QUEUES(bp);
	if (is_multi(bp))
		printk(KERN_INFO PFX
		       "%s: using MSI-X IRQs: sp %d fp %d - %d\n",
		       bp->dev->name, bp->msix_table[0].vector,
		       bp->msix_table[offset].vector,
		       bp->msix_table[offset + i - 1].vector);
	else
		printk(KERN_INFO PFX "%s: using MSI-X IRQs: sp %d fp %d\n",
		       bp->dev->name, bp->msix_table[0].vector,
		       bp->msix_table[offset + i - 1].vector);

	return 0;
}
static int bnx2x_enable_msi(struct bnx2x *bp)
{
	int rc;

	rc = pci_enable_msi(bp->pdev);
	if (rc) {
		DP(NETIF_MSG_IFUP, "MSI is not attainable\n");
		return -1;
	}
	bp->flags |= USING_MSI_FLAG;

	return 0;
}

static int bnx2x_req_irq(struct bnx2x *bp)
{
	unsigned long flags;
	int rc;

	if (bp->flags & USING_MSI_FLAG)
		flags = 0;
	else
		flags = IRQF_SHARED;

	rc = request_irq(bp->pdev->irq, bnx2x_interrupt, flags,
			 bp->dev->name, bp->dev);
	if (!rc)
		bnx2x_fp(bp, 0, state) = BNX2X_FP_STATE_IRQ;

	return rc;
}
static void bnx2x_napi_enable(struct bnx2x *bp)
{
	int i;

	for_each_rx_queue(bp, i)
		napi_enable(&bnx2x_fp(bp, i, napi));
}

static void bnx2x_napi_disable(struct bnx2x *bp)
{
	int i;

	for_each_rx_queue(bp, i)
		napi_disable(&bnx2x_fp(bp, i, napi));
}

static void bnx2x_netif_start(struct bnx2x *bp)
{
	if (atomic_dec_and_test(&bp->intr_sem)) {
		if (netif_running(bp->dev)) {
			bnx2x_napi_enable(bp);
			bnx2x_int_enable(bp);
			if (bp->state == BNX2X_STATE_OPEN)
				netif_tx_wake_all_queues(bp->dev);
		}
	}
}

static void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
{
	bnx2x_int_disable_sync(bp, disable_hw);
	bnx2x_napi_disable(bp);
	if (netif_running(bp->dev)) {
		netif_tx_disable(bp->dev);
		bp->dev->trans_start = jiffies;	/* prevent tx timeout */
	}
}
/*
 * Init service functions
 */

static void bnx2x_set_mac_addr_e1(struct bnx2x *bp, int set)
{
	struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
	int port = BP_PORT(bp);

	/* CAM allocation
	 * unicasts 0-31:port0 32-63:port1
	 * multicast 64-127:port0 128-191:port1
	 */
	config->hdr.length = 2;
	config->hdr.offset = port ? 32 : 0;
	config->hdr.client_id = bp->fp->cl_id;
	config->hdr.reserved1 = 0;

	/* primary MAC */
	config->config_table[0].cam_entry.msb_mac_addr =
					swab16(*(u16 *)&bp->dev->dev_addr[0]);
	config->config_table[0].cam_entry.middle_mac_addr =
					swab16(*(u16 *)&bp->dev->dev_addr[2]);
	config->config_table[0].cam_entry.lsb_mac_addr =
					swab16(*(u16 *)&bp->dev->dev_addr[4]);
	config->config_table[0].cam_entry.flags = cpu_to_le16(port);
	if (set)
		config->config_table[0].target_table_entry.flags = 0;
	else
		CAM_INVALIDATE(config->config_table[0]);
	config->config_table[0].target_table_entry.client_id = 0;
	config->config_table[0].target_table_entry.vlan_id = 0;

	DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x)\n",
	   (set ? "setting" : "clearing"),
	   config->config_table[0].cam_entry.msb_mac_addr,
	   config->config_table[0].cam_entry.middle_mac_addr,
	   config->config_table[0].cam_entry.lsb_mac_addr);

	/* broadcast */
	config->config_table[1].cam_entry.msb_mac_addr = cpu_to_le16(0xffff);
	config->config_table[1].cam_entry.middle_mac_addr = cpu_to_le16(0xffff);
	config->config_table[1].cam_entry.lsb_mac_addr = cpu_to_le16(0xffff);
	config->config_table[1].cam_entry.flags = cpu_to_le16(port);
	if (set)
		config->config_table[1].target_table_entry.flags =
				TSTORM_CAM_TARGET_TABLE_ENTRY_BROADCAST;
	else
		CAM_INVALIDATE(config->config_table[1]);
	config->config_table[1].target_table_entry.client_id = 0;
	config->config_table[1].target_table_entry.vlan_id = 0;

	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
		      U64_HI(bnx2x_sp_mapping(bp, mac_config)),
		      U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
}
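/*
 * Annotation (not in the original source): the CAM entry holds the MAC as
 * three big-endian 16-bit words, hence the swab16() on each u16 loaded
 * from dev_addr.  Example for 00:10:18:aa:bb:cc on a little-endian host:
 *   *(u16 *)&dev_addr[0] == 0x1000  ->  swab16() == 0x0010
 *   *(u16 *)&dev_addr[2] == 0xaa18  ->  swab16() == 0x18aa
 *   *(u16 *)&dev_addr[4] == 0xccbb  ->  swab16() == 0xbbcc
 * matching the %04x:%04x:%04x layout printed by the DP() above.
 */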
static void bnx2x_set_mac_addr_e1h(struct bnx2x *bp, int set)
{
	struct mac_configuration_cmd_e1h *config =
		(struct mac_configuration_cmd_e1h *)bnx2x_sp(bp, mac_config);

	if (set && (bp->state != BNX2X_STATE_OPEN)) {
		DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
		return;
	}

	/* CAM allocation for E1H
	 * unicasts: by func number
	 * multicast: 20+FUNC*20, 20 each
	 */
	config->hdr.length = 1;
	config->hdr.offset = BP_FUNC(bp);
	config->hdr.client_id = bp->fp->cl_id;
	config->hdr.reserved1 = 0;

	/* primary MAC */
	config->config_table[0].msb_mac_addr =
					swab16(*(u16 *)&bp->dev->dev_addr[0]);
	config->config_table[0].middle_mac_addr =
					swab16(*(u16 *)&bp->dev->dev_addr[2]);
	config->config_table[0].lsb_mac_addr =
					swab16(*(u16 *)&bp->dev->dev_addr[4]);
	config->config_table[0].client_id = BP_L_ID(bp);
	config->config_table[0].vlan_id = 0;
	config->config_table[0].e1hov_id = cpu_to_le16(bp->e1hov);
	if (set)
		config->config_table[0].flags = BP_PORT(bp);
	else
		config->config_table[0].flags =
				MAC_CONFIGURATION_ENTRY_E1H_ACTION_TYPE;

	DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x)  E1HOV %d  CLID %d\n",
	   (set ? "setting" : "clearing"),
	   config->config_table[0].msb_mac_addr,
	   config->config_table[0].middle_mac_addr,
	   config->config_table[0].lsb_mac_addr, bp->e1hov, BP_L_ID(bp));

	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
		      U64_HI(bnx2x_sp_mapping(bp, mac_config)),
		      U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
}
static int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
			     int *state_p, int poll)
{
	/* can take a while if any port is running */
	int cnt = 5000;

	DP(NETIF_MSG_IFUP, "%s for state to become %x on IDX [%d]\n",
	   poll ? "polling" : "waiting", state, idx);

	might_sleep();
	while (cnt--) {
		if (poll) {
			bnx2x_rx_int(bp->fp, 10);
			/* if index is different from 0
			 * the reply for some commands will
			 * be on the non default queue
			 */
			if (idx)
				bnx2x_rx_int(&bp->fp[idx], 10);
		}

		mb(); /* state is changed by bnx2x_sp_event() */
		if (*state_p == state) {
#ifdef BNX2X_STOP_ON_ERROR
			DP(NETIF_MSG_IFUP, "exit  (cnt %d)\n", 5000 - cnt);
#endif
			return 0;
		}

		msleep(1);
	}

	/* timeout! */
	BNX2X_ERR("timeout %s for state %x on IDX [%d]\n",
		  poll ? "polling" : "waiting", state, idx);
#ifdef BNX2X_STOP_ON_ERROR
	bnx2x_panic();
#endif

	return -EBUSY;
}
static int bnx2x_setup_leading(struct bnx2x *bp)
{
	int rc;

	/* reset IGU state */
	bnx2x_ack_sb(bp, bp->fp[0].sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);

	/* SETUP ramrod */
	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_SETUP, 0, 0, 0, 0);

	/* Wait for completion */
	rc = bnx2x_wait_ramrod(bp, BNX2X_STATE_OPEN, 0, &(bp->state), 0);

	return rc;
}

static int bnx2x_setup_multi(struct bnx2x *bp, int index)
{
	struct bnx2x_fastpath *fp = &bp->fp[index];

	/* reset IGU state */
	bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);

	/* SETUP ramrod */
	fp->state = BNX2X_FP_STATE_OPENING;
	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CLIENT_SETUP, index, 0,
		      fp->cl_id, 0);

	/* Wait for completion */
	return bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_OPEN, index,
				 &(fp->state), 0);
}
static int bnx2x_poll(struct napi_struct *napi, int budget);

static void bnx2x_set_int_mode(struct bnx2x *bp)
{
	int num_queues;

	switch (int_mode) {
	case INT_MODE_INTx:
	case INT_MODE_MSI:
		num_queues = 1;
		bp->num_rx_queues = num_queues;
		bp->num_tx_queues = num_queues;
		DP(NETIF_MSG_IFUP,
		   "set number of queues to %d\n", num_queues);
		break;

	default:
		if (bp->multi_mode == ETH_RSS_MODE_REGULAR)
			num_queues = min_t(u32, num_online_cpus(),
					   BNX2X_MAX_QUEUES(bp));
		else
			num_queues = 1;
		bp->num_rx_queues = num_queues;
		bp->num_tx_queues = num_queues;
		DP(NETIF_MSG_IFUP, "set number of rx queues to %d"
		   " number of tx queues to %d\n",
		   bp->num_rx_queues, bp->num_tx_queues);
		/* if we can't use MSI-X we only need one fp,
		 * so try to enable MSI-X with the requested number of fp's
		 * and fallback to MSI or legacy INTx with one fp
		 */
		if (bnx2x_enable_msix(bp)) {
			/* failed to enable MSI-X */
			num_queues = 1;
			bp->num_rx_queues = num_queues;
			bp->num_tx_queues = num_queues;
			if (bp->multi_mode)
				BNX2X_ERR("Multi requested but failed to "
					  "enable MSI-X  set number of "
					  "queues to %d\n", num_queues);
		}
		break;
	}
	bp->dev->real_num_tx_queues = bp->num_tx_queues;
}
static void bnx2x_set_rx_mode(struct net_device *dev);

/* must be called with rtnl_lock */
static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
{
	u32 load_code;
	int i, rc = 0;
#ifdef BNX2X_STOP_ON_ERROR
	DP(NETIF_MSG_IFUP, "enter  load_mode %d\n", load_mode);
	if (unlikely(bp->panic))
		return -EPERM;
#endif

	bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;

	bnx2x_set_int_mode(bp);

	if (bnx2x_alloc_mem(bp))
		return -ENOMEM;

	for_each_rx_queue(bp, i)
		bnx2x_fp(bp, i, disable_tpa) =
					((bp->flags & TPA_ENABLE_FLAG) == 0);

	for_each_rx_queue(bp, i)
		netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
			       bnx2x_poll, 128);

#ifdef BNX2X_STOP_ON_ERROR
	for_each_rx_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		fp->poll_no_work = 0;
		fp->poll_calls = 0;
		fp->poll_max_calls = 0;
		fp->poll_complete = 0;
		fp->poll_exit = 0;
	}
#endif
	bnx2x_napi_enable(bp);

	if (bp->flags & USING_MSIX_FLAG) {
		rc = bnx2x_req_msix_irqs(bp);
		if (rc) {
			pci_disable_msix(bp->pdev);
			goto load_error1;
		}
	} else {
		if ((rc != -ENOMEM) && (int_mode != INT_MODE_INTx))
			bnx2x_enable_msi(bp);
		bnx2x_ack_int(bp);
		rc = bnx2x_req_irq(bp);
		if (rc) {
			BNX2X_ERR("IRQ request failed  rc %d, aborting\n", rc);
			if (bp->flags & USING_MSI_FLAG)
				pci_disable_msi(bp->pdev);
			goto load_error1;
		}
		if (bp->flags & USING_MSI_FLAG) {
			bp->dev->irq = bp->pdev->irq;
			printk(KERN_INFO PFX "%s: using MSI  IRQ %d\n",
			       bp->dev->name, bp->pdev->irq);
		}
	}

	/* Send LOAD_REQUEST command to MCP
	   Returns the type of LOAD command:
	   if it is the first port to be initialized
	   common blocks should be initialized, otherwise - not
	*/
	if (!BP_NOMCP(bp)) {
		load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ);
		if (!load_code) {
			BNX2X_ERR("MCP response failure, aborting\n");
			rc = -EBUSY;
			goto load_error2;
		}
		if (load_code == FW_MSG_CODE_DRV_LOAD_REFUSED) {
			rc = -EBUSY; /* other port in diagnostic mode */
			goto load_error2;
		}

	} else {
		int port = BP_PORT(bp);

		DP(NETIF_MSG_IFUP, "NO MCP  load counts before us %d, %d, %d\n",
		   load_count[0], load_count[1], load_count[2]);
		load_count[0]++;
		load_count[1 + port]++;
		DP(NETIF_MSG_IFUP, "NO MCP  new load counts       %d, %d, %d\n",
		   load_count[0], load_count[1], load_count[2]);
		if (load_count[0] == 1)
			load_code = FW_MSG_CODE_DRV_LOAD_COMMON;
		else if (load_count[1 + port] == 1)
			load_code = FW_MSG_CODE_DRV_LOAD_PORT;
		else
			load_code = FW_MSG_CODE_DRV_LOAD_FUNCTION;
	}
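	/*
	 * Annotation (not in the original source): without an MCP the
	 * driver arbitrates load order itself.  load_count[0] counts all
	 * loaded functions and load_count[1 + port] those on this port,
	 * so the first load overall performs COMMON init, the first load
	 * on a port performs PORT init, and any other load only performs
	 * FUNCTION init.
	 */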
	if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
	    (load_code == FW_MSG_CODE_DRV_LOAD_PORT))
		bp->port.pmf = 1;
	else
		bp->port.pmf = 0;
	DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);

	/* Initialize HW */
	rc = bnx2x_init_hw(bp, load_code);
	if (rc) {
		BNX2X_ERR("HW init failed, aborting\n");
		goto load_error2;
	}

	/* Setup NIC internals and enable interrupts */
	bnx2x_nic_init(bp, load_code);

	/* Send LOAD_DONE command to MCP */
	if (!BP_NOMCP(bp)) {
		load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE);
		if (!load_code) {
			BNX2X_ERR("MCP response failure, aborting\n");
			rc = -EBUSY;
			goto load_error3;
		}
	}

	bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;

	rc = bnx2x_setup_leading(bp);
	if (rc) {
		BNX2X_ERR("Setup leading failed!\n");
		goto load_error3;
	}

	if (CHIP_IS_E1H(bp))
		if (bp->mf_config & FUNC_MF_CFG_FUNC_DISABLED) {
			BNX2X_ERR("!!!  mf_cfg function disabled\n");
			bp->state = BNX2X_STATE_DISABLED;
		}

	if (bp->state == BNX2X_STATE_OPEN)
		for_each_nondefault_queue(bp, i) {
			rc = bnx2x_setup_multi(bp, i);
			if (rc)
				goto load_error3;
		}

	if (CHIP_IS_E1(bp))
		bnx2x_set_mac_addr_e1(bp, 1);
	else
		bnx2x_set_mac_addr_e1h(bp, 1);

	if (bp->port.pmf)
		bnx2x_initial_phy_init(bp, load_mode);

	/* Start fast path */
	switch (load_mode) {
	case LOAD_NORMAL:
		/* Tx queue should be only reenabled */
		netif_tx_wake_all_queues(bp->dev);
		/* Initialize the receive filter. */
		bnx2x_set_rx_mode(bp->dev);
		break;

	case LOAD_OPEN:
		netif_tx_start_all_queues(bp->dev);
		/* Initialize the receive filter. */
		bnx2x_set_rx_mode(bp->dev);
		break;

	case LOAD_DIAG:
		/* Initialize the receive filter. */
		bnx2x_set_rx_mode(bp->dev);
		bp->state = BNX2X_STATE_DIAG;
		break;

	default:
		break;
	}

	if (!bp->port.pmf)
		bnx2x__link_status_update(bp);

	/* start the timer */
	mod_timer(&bp->timer, jiffies + bp->current_interval);

	return 0;
load_error3:
	bnx2x_int_disable_sync(bp, 1);
	if (!BP_NOMCP(bp)) {
		bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP);
		bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
	}
	bp->port.pmf = 0;
	/* Free SKBs, SGEs, TPA pool and driver internals */
	bnx2x_free_skbs(bp);
	for_each_rx_queue(bp, i)
		bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
load_error2:
	/* Release IRQs */
	bnx2x_free_irq(bp);
load_error1:
	bnx2x_napi_disable(bp);
	for_each_rx_queue(bp, i)
		netif_napi_del(&bnx2x_fp(bp, i, napi));
	bnx2x_free_mem(bp);

	/* TBD we really need to reset the chip
	   if we want to recover from this */
	return rc;
}
static int bnx2x_stop_multi(struct bnx2x *bp, int index)
{
	struct bnx2x_fastpath *fp = &bp->fp[index];
	int rc;

	/* halt the connection */
	fp->state = BNX2X_FP_STATE_HALTING;
	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, index, 0, fp->cl_id, 0);

	/* Wait for completion */
	rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, index,
			       &(fp->state), 1);
	if (rc) /* timeout */
		return rc;

	/* delete cfc entry */
	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CFC_DEL, index, 0, 0, 1);

	/* Wait for completion */
	rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_CLOSED, index,
			       &(fp->state), 1);
	return rc;
}

static int bnx2x_stop_leading(struct bnx2x *bp)
{
	__le16 dsb_sp_prod_idx;
	/* if the other port is handling traffic,
	   this can take a lot of time */
	int cnt = 500;
	int rc;

	might_sleep();

	/* Send HALT ramrod */
	bp->fp[0].state = BNX2X_FP_STATE_HALTING;
	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, 0, 0, bp->fp->cl_id, 0);

	/* Wait for completion */
	rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, 0,
			       &(bp->fp[0].state), 1);
	if (rc) /* timeout */
		return rc;

	dsb_sp_prod_idx = *bp->dsb_sp_prod;

	/* Send PORT_DELETE ramrod */
	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_DEL, 0, 0, 0, 1);

	/* Wait for completion to arrive on default status block
	   we are going to reset the chip anyway
	   so there is not much to do if this times out
	 */
	while (dsb_sp_prod_idx == *bp->dsb_sp_prod) {
		if (!cnt) {
			DP(NETIF_MSG_IFDOWN, "timeout waiting for port del "
			   "dsb_sp_prod 0x%x != dsb_sp_prod_idx 0x%x\n",
			   *bp->dsb_sp_prod, dsb_sp_prod_idx);
#ifdef BNX2X_STOP_ON_ERROR
			bnx2x_panic();
#else
			rc = -EBUSY;
			break;
#endif
		}
		cnt--;
		msleep(1);
		rmb(); /* Refresh the dsb_sp_prod */
	}
	bp->state = BNX2X_STATE_CLOSING_WAIT4_UNLOAD;
	bp->fp[0].state = BNX2X_FP_STATE_CLOSED;

	return rc;
}
static void bnx2x_reset_func(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int func = BP_FUNC(bp);
	int base, i;

	/* Configure IGU */
	REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
	REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);

	/* Clear ILT */
	base = FUNC_ILT_BASE(func);
	for (i = base; i < base + ILT_PER_FUNC; i++)
		bnx2x_ilt_wr(bp, i, 0);
}

static void bnx2x_reset_port(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 val;

	REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);

	/* Do not rcv packets to BRB */
	REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK + port*4, 0x0);
	/* Do not direct rcv packets that are not for MCP to the BRB */
	REG_WR(bp, (port ? NIG_REG_LLH1_BRB1_NOT_MCP :
			   NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);

	/* Configure AEU */
	REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, 0);

	msleep(100);
	/* Check for BRB port occupancy */
	val = REG_RD(bp, BRB1_REG_PORT_NUM_OCC_BLOCKS_0 + port*4);
	if (val)
		DP(NETIF_MSG_IFDOWN,
		   "BRB1 is not empty  %d blocks are occupied\n", val);

	/* TODO: Close Doorbell port? */
}
static void bnx2x_reset_chip(struct bnx2x *bp, u32 reset_code)
{
	DP(BNX2X_MSG_MCP, "function %d  reset_code %x\n",
	   BP_FUNC(bp), reset_code);

	switch (reset_code) {
	case FW_MSG_CODE_DRV_UNLOAD_COMMON:
		bnx2x_reset_port(bp);
		bnx2x_reset_func(bp);
		bnx2x_reset_common(bp);
		break;

	case FW_MSG_CODE_DRV_UNLOAD_PORT:
		bnx2x_reset_port(bp);
		bnx2x_reset_func(bp);
		break;

	case FW_MSG_CODE_DRV_UNLOAD_FUNCTION:
		bnx2x_reset_func(bp);
		break;

	default:
		BNX2X_ERR("Unknown reset_code (0x%x) from MCP\n", reset_code);
		break;
	}
}
/* must be called with rtnl_lock */
static int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
{
	int port = BP_PORT(bp);
	u32 reset_code = 0;
	int i, cnt, rc;

	bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;

	bp->rx_mode = BNX2X_RX_MODE_NONE;
	bnx2x_set_storm_rx_mode(bp);

	bnx2x_netif_stop(bp, 1);

	del_timer_sync(&bp->timer);
	SHMEM_WR(bp, func_mb[BP_FUNC(bp)].drv_pulse_mb,
		 (DRV_PULSE_ALWAYS_ALIVE | bp->fw_drv_pulse_wr_seq));
	bnx2x_stats_handle(bp, STATS_EVENT_STOP);

	/* Release IRQs */
	bnx2x_free_irq(bp);

	/* Wait until tx fastpath tasks complete */
	for_each_tx_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		cnt = 1000;
		smp_rmb();
		while (bnx2x_has_tx_work_unload(fp)) {

			bnx2x_tx_int(fp, 1000);
			if (!cnt) {
				BNX2X_ERR("timeout waiting for queue[%d]\n",
					  i);
#ifdef BNX2X_STOP_ON_ERROR
				bnx2x_panic();
				return -EBUSY;
#else
				break;
#endif
			}
			cnt--;
			msleep(1);
			smp_rmb();
		}
	}
	/* Give HW time to discard old tx messages */
	msleep(1);

	if (CHIP_IS_E1(bp)) {
		struct mac_configuration_cmd *config =
						bnx2x_sp(bp, mcast_config);

		bnx2x_set_mac_addr_e1(bp, 0);

		for (i = 0; i < config->hdr.length; i++)
			CAM_INVALIDATE(config->config_table[i]);

		config->hdr.length = i;
		if (CHIP_REV_IS_SLOW(bp))
			config->hdr.offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
		else
			config->hdr.offset = BNX2X_MAX_MULTICAST*(1 + port);
		config->hdr.client_id = bp->fp->cl_id;
		config->hdr.reserved1 = 0;

		bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
			      U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
			      U64_LO(bnx2x_sp_mapping(bp, mcast_config)), 0);

	} else { /* E1H */
		REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);

		bnx2x_set_mac_addr_e1h(bp, 0);

		for (i = 0; i < MC_HASH_SIZE; i++)
			REG_WR(bp, MC_HASH_OFFSET(bp, i), 0);
	}

	if (unload_mode == UNLOAD_NORMAL)
		reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;

	else if (bp->flags & NO_WOL_FLAG) {
		reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP;
		if (CHIP_IS_E1H(bp))
			REG_WR(bp, MISC_REG_E1HMF_MODE, 0);

	} else if (bp->wol) {
		u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
		u8 *mac_addr = bp->dev->dev_addr;
		u32 val;
		/* The mac address is written to entries 1-4 to
		   preserve entry 0 which is used by the PMF */
		u8 entry = (BP_E1HVN(bp) + 1)*8;

		val = (mac_addr[0] << 8) | mac_addr[1];
		EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry, val);

		val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
		      (mac_addr[4] << 8) | mac_addr[5];
		EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry + 4, val);
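		/*
		 * Annotation (not in the original source): the EMAC MAC
		 * match registers take the station address as two
		 * big-endian words, high 16 bits first.  For
		 * 00:10:18:aa:bb:cc the two writes above are:
		 *   MAC_MATCH + entry     = 0x00000010  (bytes 0-1)
		 *   MAC_MATCH + entry + 4 = 0x18aabbcc  (bytes 2-5)
		 */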
		reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN;

	} else
		reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;

	/* Close multi and leading connections
	   Completions for ramrods are collected in a synchronous way */
	for_each_nondefault_queue(bp, i)
		if (bnx2x_stop_multi(bp, i))
			goto unload_error;

	rc = bnx2x_stop_leading(bp);
	if (rc) {
		BNX2X_ERR("Stop leading failed!\n");
#ifdef BNX2X_STOP_ON_ERROR
		return -EBUSY;
#else
		goto unload_error;
#endif
	}

unload_error:
	if (!BP_NOMCP(bp))
		reset_code = bnx2x_fw_command(bp, reset_code);
	else {
		DP(NETIF_MSG_IFDOWN, "NO MCP  load counts      %d, %d, %d\n",
		   load_count[0], load_count[1], load_count[2]);
		load_count[0]--;
		load_count[1 + port]--;
		DP(NETIF_MSG_IFDOWN, "NO MCP  new load counts  %d, %d, %d\n",
		   load_count[0], load_count[1], load_count[2]);
		if (load_count[0] == 0)
			reset_code = FW_MSG_CODE_DRV_UNLOAD_COMMON;
		else if (load_count[1 + port] == 0)
			reset_code = FW_MSG_CODE_DRV_UNLOAD_PORT;
		else
			reset_code = FW_MSG_CODE_DRV_UNLOAD_FUNCTION;
	}

	if ((reset_code == FW_MSG_CODE_DRV_UNLOAD_COMMON) ||
	    (reset_code == FW_MSG_CODE_DRV_UNLOAD_PORT))
		bnx2x__link_reset(bp);

	/* Reset the chip */
	bnx2x_reset_chip(bp, reset_code);

	/* Report UNLOAD_DONE to MCP */
	if (!BP_NOMCP(bp))
		bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
	bp->port.pmf = 0;

	/* Free SKBs, SGEs, TPA pool and driver internals */
	bnx2x_free_skbs(bp);
	for_each_rx_queue(bp, i)
		bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
	for_each_rx_queue(bp, i)
		netif_napi_del(&bnx2x_fp(bp, i, napi));
	bnx2x_free_mem(bp);

	bp->state = BNX2X_STATE_CLOSED;

	netif_carrier_off(bp->dev);

	return 0;
}
static void bnx2x_reset_task(struct work_struct *work)
{
	struct bnx2x *bp = container_of(work, struct bnx2x, reset_task);

#ifdef BNX2X_STOP_ON_ERROR
	BNX2X_ERR("reset task called but STOP_ON_ERROR defined"
		  " so reset not done to allow debug dump,\n"
	 KERN_ERR " you will need to reboot when done\n");
	return;
#endif

	rtnl_lock();

	if (!netif_running(bp->dev))
		goto reset_task_exit;

	bnx2x_nic_unload(bp, UNLOAD_NORMAL);
	bnx2x_nic_load(bp, LOAD_NORMAL);

reset_task_exit:
	rtnl_unlock();
}

/* end of nic load/unload */
/*
 * Init service functions
 */

static inline u32 bnx2x_get_pretend_reg(struct bnx2x *bp, int func)
{
	switch (func) {
	case 0: return PXP2_REG_PGL_PRETEND_FUNC_F0;
	case 1: return PXP2_REG_PGL_PRETEND_FUNC_F1;
	case 2: return PXP2_REG_PGL_PRETEND_FUNC_F2;
	case 3: return PXP2_REG_PGL_PRETEND_FUNC_F3;
	case 4: return PXP2_REG_PGL_PRETEND_FUNC_F4;
	case 5: return PXP2_REG_PGL_PRETEND_FUNC_F5;
	case 6: return PXP2_REG_PGL_PRETEND_FUNC_F6;
	case 7: return PXP2_REG_PGL_PRETEND_FUNC_F7;
	default:
		BNX2X_ERR("Unsupported function index: %d\n", func);
		return (u32)(-1);
	}
}
static void bnx2x_undi_int_disable_e1h(struct bnx2x *bp, int orig_func)
{
	u32 reg = bnx2x_get_pretend_reg(bp, orig_func), new_val;

	/* Flush all outstanding writes */
	mmiowb();

	/* Pretend to be function 0 */
	REG_WR(bp, reg, 0);
	/* Flush the GRC transaction (in the chip) */
	new_val = REG_RD(bp, reg);
	if (new_val != 0) {
		BNX2X_ERR("Hmmm... Pretend register wasn't updated: (0,%d)!\n",
			  new_val);
		BUG();
	}

	/* From now we are in the "like-E1" mode */
	bnx2x_int_disable(bp);

	/* Flush all outstanding writes */
	mmiowb();

	/* Restore the original function settings */
	REG_WR(bp, reg, orig_func);
	new_val = REG_RD(bp, reg);
	if (new_val != orig_func) {
		BNX2X_ERR("Hmmm... Pretend register wasn't updated: (%d,%d)!\n",
			  orig_func, new_val);
		BUG();
	}
}
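/*
 * Annotation (not in the original source): the PXP2 "pretend" register
 * makes subsequent GRC accesses from this PCI function appear to come
 * from another function.  Pretending to be function 0 lets an E1H
 * function reach the function-0 view of the IGU so interrupts can be
 * disabled the E1 way; the read-back after each write is what guarantees
 * the pretend setting has actually landed before dependent accesses.
 */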
static inline void bnx2x_undi_int_disable(struct bnx2x *bp, int func)
{
	if (CHIP_IS_E1H(bp))
		bnx2x_undi_int_disable_e1h(bp, func);
	else
		bnx2x_int_disable(bp);
}
static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
{
	u32 val;

	/* Check if there is any driver already loaded */
	val = REG_RD(bp, MISC_REG_UNPREPARED);
	if (val == 0x1) {
		/* Check if it is the UNDI driver
		 * UNDI driver initializes CID offset for normal bell to 0x7
		 */
		bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
		val = REG_RD(bp, DORQ_REG_NORM_CID_OFST);
		if (val == 0x7) {
			u32 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
			/* save our func */
			int func = BP_FUNC(bp);
			u32 swap_en;
			u32 swap_val;

			/* clear the UNDI indication */
			REG_WR(bp, DORQ_REG_NORM_CID_OFST, 0);

			BNX2X_DEV_INFO("UNDI is active! reset device\n");

			/* try unload UNDI on port 0 */
			bp->func = 0;
			bp->fw_seq =
			       (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
				DRV_MSG_SEQ_NUMBER_MASK);
			reset_code = bnx2x_fw_command(bp, reset_code);

			/* if UNDI is loaded on the other port */
			if (reset_code != FW_MSG_CODE_DRV_UNLOAD_COMMON) {

				/* send "DONE" for previous unload */
				bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);

				/* unload UNDI on port 1 */
				bp->func = 1;
				bp->fw_seq =
			       (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
					DRV_MSG_SEQ_NUMBER_MASK);
				reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;

				bnx2x_fw_command(bp, reset_code);
			}

			/* now it's safe to release the lock */
			bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);

			bnx2x_undi_int_disable(bp, func);

			/* close input traffic and wait for it */
			/* Do not rcv packets to BRB */
			REG_WR(bp,
			      (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_DRV_MASK :
					     NIG_REG_LLH0_BRB1_DRV_MASK), 0x0);
			/* Do not direct rcv packets that are not for MCP to
			   the BRB */
			REG_WR(bp,
			      (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_NOT_MCP :
					     NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
			/* clear AEU */
			REG_WR(bp,
			      (BP_PORT(bp) ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
					     MISC_REG_AEU_MASK_ATTN_FUNC_0), 0);
			msleep(10);

			/* save NIG port swap info */
			swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
			swap_en = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
			/* reset device */
			REG_WR(bp,
			       GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
			       0xd3ffffff);
			REG_WR(bp,
			       GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
			       0x1403);
			/* take the NIG out of reset and restore swap values */
			REG_WR(bp,
			       GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
			       MISC_REGISTERS_RESET_REG_1_RST_NIG);
			REG_WR(bp, NIG_REG_PORT_SWAP, swap_val);
			REG_WR(bp, NIG_REG_STRAP_OVERRIDE, swap_en);

			/* send unload done to the MCP */
			bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);

			/* restore our func and fw_seq */
			bp->func = func;
			bp->fw_seq =
			       (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
				DRV_MSG_SEQ_NUMBER_MASK);

		} else
			bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
	}
}
static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
{
	u32 val, val2, val3, val4, id;
	u16 pmc;

	/* Get the chip revision id and number. */
	/* chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3 */
	val = REG_RD(bp, MISC_REG_CHIP_NUM);
	id = ((val & 0xffff) << 16);
	val = REG_RD(bp, MISC_REG_CHIP_REV);
	id |= ((val & 0xf) << 12);
	val = REG_RD(bp, MISC_REG_CHIP_METAL);
	id |= ((val & 0xff) << 4);
	val = REG_RD(bp, MISC_REG_BOND_ID);
	id |= (val & 0xf);
	bp->common.chip_id = id;
	bp->link_params.chip_id = bp->common.chip_id;
	BNX2X_DEV_INFO("chip ID is 0x%x\n", id);

	val = (REG_RD(bp, 0x2874) & 0x55);
	if ((bp->common.chip_id & 0x1) ||
	    (CHIP_IS_E1(bp) && val) || (CHIP_IS_E1H(bp) && (val == 0x55))) {
		bp->flags |= ONE_PORT_FLAG;
		BNX2X_DEV_INFO("single port device\n");
	}

	val = REG_RD(bp, MCP_REG_MCPR_NVM_CFG4);
	bp->common.flash_size = (NVRAM_1MB_SIZE <<
				 (val & MCPR_NVM_CFG4_FLASH_SIZE));
	BNX2X_DEV_INFO("flash_size 0x%x (%d)\n",
		       bp->common.flash_size, bp->common.flash_size);

	bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
	bp->link_params.shmem_base = bp->common.shmem_base;
	BNX2X_DEV_INFO("shmem offset is 0x%x\n", bp->common.shmem_base);

	if (!bp->common.shmem_base ||
	    (bp->common.shmem_base < 0xA0000) ||
	    (bp->common.shmem_base >= 0xC0000)) {
		BNX2X_DEV_INFO("MCP not active\n");
		bp->flags |= NO_MCP_FLAG;
		return;
	}

	val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
	if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
		!= (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
		BNX2X_ERR("BAD MCP validity signature\n");

	bp->common.hw_config = SHMEM_RD(bp, dev_info.shared_hw_config.config);
	BNX2X_DEV_INFO("hw_config 0x%08x\n", bp->common.hw_config);

	bp->link_params.hw_led_mode = ((bp->common.hw_config &
					SHARED_HW_CFG_LED_MODE_MASK) >>
				       SHARED_HW_CFG_LED_MODE_SHIFT);

	bp->link_params.feature_config_flags = 0;
	val = SHMEM_RD(bp, dev_info.shared_feature_config.config);
	if (val & SHARED_FEAT_CFG_OVERRIDE_PREEMPHASIS_CFG_ENABLED)
		bp->link_params.feature_config_flags |=
				FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
	else
		bp->link_params.feature_config_flags &=
				~FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;

	val = SHMEM_RD(bp, dev_info.bc_rev) >> 8;
	bp->common.bc_ver = val;
	BNX2X_DEV_INFO("bc_ver %X\n", val);
	if (val < BNX2X_BC_VER) {
		/* for now only warn
		 * later we might need to enforce this */
		BNX2X_ERR("This driver needs bc_ver %X but found %X,"
			  " please upgrade BC\n", BNX2X_BC_VER, val);
	}

	if (BP_E1HVN(bp) == 0) {
		pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_PMC, &pmc);
		bp->flags |= (pmc & PCI_PM_CAP_PME_D3cold) ? 0 : NO_WOL_FLAG;
	} else
		/* no WOL capability for E1HVN != 0 */
		bp->flags |= NO_WOL_FLAG;

	BNX2X_DEV_INFO("%sWoL capable\n",
		       (bp->flags & NO_WOL_FLAG) ? "Not " : "");

	val = SHMEM_RD(bp, dev_info.shared_hw_config.part_num);
	val2 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[4]);
	val3 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[8]);
	val4 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[12]);

	printk(KERN_INFO PFX "part number %X-%X-%X-%X\n",
	       val, val2, val3, val4);
}
static void __devinit bnx2x_link_settings_supported(struct bnx2x *bp,
						    u32 switch_cfg)
{
	int port = BP_PORT(bp);
	u32 ext_phy_type;

	switch (switch_cfg) {
	case SWITCH_CFG_1G:
		BNX2X_DEV_INFO("switch_cfg 0x%x (1G)\n", switch_cfg);

		ext_phy_type =
			SERDES_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
		switch (ext_phy_type) {
		case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10baseT_Half |
					       SUPPORTED_10baseT_Full |
					       SUPPORTED_100baseT_Half |
					       SUPPORTED_100baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_2500baseX_Full |
					       SUPPORTED_TP |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Autoneg |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_BCM5482:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (5482)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10baseT_Half |
					       SUPPORTED_10baseT_Full |
					       SUPPORTED_100baseT_Half |
					       SUPPORTED_100baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_TP |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Autoneg |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		default:
			BNX2X_ERR("NVRAM config error. "
				  "BAD SerDes ext_phy_config 0x%x\n",
				  bp->link_params.ext_phy_config);
			return;
		}

		bp->port.phy_addr = REG_RD(bp, NIG_REG_SERDES0_CTRL_PHY_ADDR +
					   port*0x10);
		BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
		break;

	case SWITCH_CFG_10G:
		BNX2X_DEV_INFO("switch_cfg 0x%x (10G)\n", switch_cfg);

		ext_phy_type =
			XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
		switch (ext_phy_type) {
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10baseT_Half |
					       SUPPORTED_10baseT_Full |
					       SUPPORTED_100baseT_Half |
					       SUPPORTED_100baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_2500baseX_Full |
					       SUPPORTED_10000baseT_Full |
					       SUPPORTED_TP |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Autoneg |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (8072)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10000baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Autoneg |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (8073)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10000baseT_Full |
					       SUPPORTED_2500baseX_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Autoneg |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (8705)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10000baseT_Full |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (8706)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10000baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (8726)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10000baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Autoneg |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (SFX7101)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10000baseT_Full |
					       SUPPORTED_TP |
					       SUPPORTED_Autoneg |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (BCM8481)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10baseT_Half |
					       SUPPORTED_10baseT_Full |
					       SUPPORTED_100baseT_Half |
					       SUPPORTED_100baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_10000baseT_Full |
					       SUPPORTED_TP |
					       SUPPORTED_Autoneg |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
			BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
				  bp->link_params.ext_phy_config);
			break;

		default:
			BNX2X_ERR("NVRAM config error. "
				  "BAD XGXS ext_phy_config 0x%x\n",
				  bp->link_params.ext_phy_config);
			return;
		}

		bp->port.phy_addr = REG_RD(bp, NIG_REG_XGXS0_CTRL_PHY_ADDR +
					   port*0x18);
		BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
		break;

	default:
		BNX2X_ERR("BAD switch_cfg link_config 0x%x\n",
			  bp->port.link_config);
		return;
	}
	bp->link_params.phy_addr = bp->port.phy_addr;

	/* mask what we support according to speed_cap_mask */
	if (!(bp->link_params.speed_cap_mask &
	      PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF))
		bp->port.supported &= ~SUPPORTED_10baseT_Half;

	if (!(bp->link_params.speed_cap_mask &
	      PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL))
		bp->port.supported &= ~SUPPORTED_10baseT_Full;

	if (!(bp->link_params.speed_cap_mask &
	      PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF))
		bp->port.supported &= ~SUPPORTED_100baseT_Half;

	if (!(bp->link_params.speed_cap_mask &
	      PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL))
		bp->port.supported &= ~SUPPORTED_100baseT_Full;

	if (!(bp->link_params.speed_cap_mask &
	      PORT_HW_CFG_SPEED_CAPABILITY_D0_1G))
		bp->port.supported &= ~(SUPPORTED_1000baseT_Half |
					SUPPORTED_1000baseT_Full);

	if (!(bp->link_params.speed_cap_mask &
	      PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G))
		bp->port.supported &= ~SUPPORTED_2500baseX_Full;

	if (!(bp->link_params.speed_cap_mask &
	      PORT_HW_CFG_SPEED_CAPABILITY_D0_10G))
		bp->port.supported &= ~SUPPORTED_10000baseT_Full;

	BNX2X_DEV_INFO("supported 0x%x\n", bp->port.supported);
}
static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
{
	bp->link_params.req_duplex = DUPLEX_FULL;

	switch (bp->port.link_config & PORT_FEATURE_LINK_SPEED_MASK) {
	case PORT_FEATURE_LINK_SPEED_AUTO:
		if (bp->port.supported & SUPPORTED_Autoneg) {
			bp->link_params.req_line_speed = SPEED_AUTO_NEG;
			bp->port.advertising = bp->port.supported;
		} else {
			u32 ext_phy_type =
			    XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);

			if ((ext_phy_type ==
			     PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705) ||
			    (ext_phy_type ==
			     PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706)) {
				/* force 10G, no AN */
				bp->link_params.req_line_speed = SPEED_10000;
				bp->port.advertising =
						(ADVERTISED_10000baseT_Full |
						 ADVERTISED_FIBRE);
				break;
			}
			BNX2X_ERR("NVRAM config error. "
				  "Invalid link_config 0x%x"
				  "  Autoneg not supported\n",
				  bp->port.link_config);
			return;
		}
		break;

	case PORT_FEATURE_LINK_SPEED_10M_FULL:
		if (bp->port.supported & SUPPORTED_10baseT_Full) {
			bp->link_params.req_line_speed = SPEED_10;
			bp->port.advertising = (ADVERTISED_10baseT_Full |
						ADVERTISED_TP);
		} else {
			BNX2X_ERR("NVRAM config error. "
				  "Invalid link_config 0x%x"
				  "  speed_cap_mask 0x%x\n",
				  bp->port.link_config,
				  bp->link_params.speed_cap_mask);
			return;
		}
		break;

	case PORT_FEATURE_LINK_SPEED_10M_HALF:
		if (bp->port.supported & SUPPORTED_10baseT_Half) {
			bp->link_params.req_line_speed = SPEED_10;
			bp->link_params.req_duplex = DUPLEX_HALF;
			bp->port.advertising = (ADVERTISED_10baseT_Half |
						ADVERTISED_TP);
		} else {
			BNX2X_ERR("NVRAM config error. "
				  "Invalid link_config 0x%x"
				  "  speed_cap_mask 0x%x\n",
				  bp->port.link_config,
				  bp->link_params.speed_cap_mask);
			return;
		}
		break;

	case PORT_FEATURE_LINK_SPEED_100M_FULL:
		if (bp->port.supported & SUPPORTED_100baseT_Full) {
			bp->link_params.req_line_speed = SPEED_100;
			bp->port.advertising = (ADVERTISED_100baseT_Full |
						ADVERTISED_TP);
		} else {
			BNX2X_ERR("NVRAM config error. "
				  "Invalid link_config 0x%x"
				  "  speed_cap_mask 0x%x\n",
				  bp->port.link_config,
				  bp->link_params.speed_cap_mask);
			return;
		}
		break;

	case PORT_FEATURE_LINK_SPEED_100M_HALF:
		if (bp->port.supported & SUPPORTED_100baseT_Half) {
			bp->link_params.req_line_speed = SPEED_100;
			bp->link_params.req_duplex = DUPLEX_HALF;
			bp->port.advertising = (ADVERTISED_100baseT_Half |
						ADVERTISED_TP);
		} else {
			BNX2X_ERR("NVRAM config error. "
				  "Invalid link_config 0x%x"
				  "  speed_cap_mask 0x%x\n",
				  bp->port.link_config,
				  bp->link_params.speed_cap_mask);
			return;
		}
		break;

	case PORT_FEATURE_LINK_SPEED_1G:
		if (bp->port.supported & SUPPORTED_1000baseT_Full) {
			bp->link_params.req_line_speed = SPEED_1000;
			bp->port.advertising = (ADVERTISED_1000baseT_Full |
						ADVERTISED_TP);
		} else {
			BNX2X_ERR("NVRAM config error. "
				  "Invalid link_config 0x%x"
				  "  speed_cap_mask 0x%x\n",
				  bp->port.link_config,
				  bp->link_params.speed_cap_mask);
			return;
		}
		break;

	case PORT_FEATURE_LINK_SPEED_2_5G:
		if (bp->port.supported & SUPPORTED_2500baseX_Full) {
			bp->link_params.req_line_speed = SPEED_2500;
			bp->port.advertising = (ADVERTISED_2500baseX_Full |
						ADVERTISED_TP);
		} else {
			BNX2X_ERR("NVRAM config error. "
				  "Invalid link_config 0x%x"
				  "  speed_cap_mask 0x%x\n",
				  bp->port.link_config,
				  bp->link_params.speed_cap_mask);
			return;
		}
		break;

	case PORT_FEATURE_LINK_SPEED_10G_CX4:
	case PORT_FEATURE_LINK_SPEED_10G_KX4:
	case PORT_FEATURE_LINK_SPEED_10G_KR:
		if (bp->port.supported & SUPPORTED_10000baseT_Full) {
			bp->link_params.req_line_speed = SPEED_10000;
			bp->port.advertising = (ADVERTISED_10000baseT_Full |
						ADVERTISED_FIBRE);
		} else {
			BNX2X_ERR("NVRAM config error. "
				  "Invalid link_config 0x%x"
				  "  speed_cap_mask 0x%x\n",
				  bp->port.link_config,
				  bp->link_params.speed_cap_mask);
			return;
		}
		break;

	default:
		BNX2X_ERR("NVRAM config error. "
			  "BAD link speed link_config 0x%x\n",
			  bp->port.link_config);
		bp->link_params.req_line_speed = SPEED_AUTO_NEG;
		bp->port.advertising = bp->port.supported;
		break;
	}

	bp->link_params.req_flow_ctrl = (bp->port.link_config &
					 PORT_FEATURE_FLOW_CONTROL_MASK);
	if ((bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO) &&
	    !(bp->port.supported & SUPPORTED_Autoneg))
		bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_NONE;

	BNX2X_DEV_INFO("req_line_speed %d  req_duplex %d  req_flow_ctrl 0x%x"
		       "  advertising 0x%x\n",
		       bp->link_params.req_line_speed,
		       bp->link_params.req_duplex,
		       bp->link_params.req_flow_ctrl, bp->port.advertising);
}
static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 val, val2;
	u32 config;
	u16 i;

	bp->link_params.bp = bp;
	bp->link_params.port = port;

	bp->link_params.lane_config =
		SHMEM_RD(bp, dev_info.port_hw_config[port].lane_config);
	bp->link_params.ext_phy_config =
		SHMEM_RD(bp,
			 dev_info.port_hw_config[port].external_phy_config);
	bp->link_params.speed_cap_mask =
		SHMEM_RD(bp,
			 dev_info.port_hw_config[port].speed_capability_mask);

	bp->port.link_config =
		SHMEM_RD(bp, dev_info.port_feature_config[port].link_config);

	/* Get the 4 lanes xgxs config rx and tx */
	for (i = 0; i < 2; i++) {
		val = SHMEM_RD(bp,
			   dev_info.port_hw_config[port].xgxs_config_rx[i<<1]);
		bp->link_params.xgxs_config_rx[i << 1] = ((val>>16) & 0xffff);
		bp->link_params.xgxs_config_rx[(i << 1) + 1] = (val & 0xffff);

		val = SHMEM_RD(bp,
			   dev_info.port_hw_config[port].xgxs_config_tx[i<<1]);
		bp->link_params.xgxs_config_tx[i << 1] = ((val>>16) & 0xffff);
		bp->link_params.xgxs_config_tx[(i << 1) + 1] = (val & 0xffff);
	}

	config = SHMEM_RD(bp, dev_info.port_feature_config[port].config);
	if (config & PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_ENABLED)
		bp->link_params.feature_config_flags |=
				FEATURE_CONFIG_MODULE_ENFORCMENT_ENABLED;
	else
		bp->link_params.feature_config_flags &=
				~FEATURE_CONFIG_MODULE_ENFORCMENT_ENABLED;

	/* If the device is capable of WoL, set the default state according
	 * to the HW
	 */
	bp->wol = (!(bp->flags & NO_WOL_FLAG) &&
		   (config & PORT_FEATURE_WOL_ENABLED));

	BNX2X_DEV_INFO("lane_config 0x%08x  ext_phy_config 0x%08x"
		       "  speed_cap_mask 0x%08x  link_config 0x%08x\n",
		       bp->link_params.lane_config,
		       bp->link_params.ext_phy_config,
		       bp->link_params.speed_cap_mask, bp->port.link_config);

	bp->link_params.switch_cfg = (bp->port.link_config &
				      PORT_FEATURE_CONNECTED_SWITCH_MASK);
	bnx2x_link_settings_supported(bp, bp->link_params.switch_cfg);

	bnx2x_link_settings_requested(bp);

	val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper);
	val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower);
	bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
	bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
	bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
	bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
	bp->dev->dev_addr[4] = (u8)(val >> 8 & 0xff);
	bp->dev->dev_addr[5] = (u8)(val & 0xff);
	memcpy(bp->link_params.mac_addr, bp->dev->dev_addr, ETH_ALEN);
	memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
}
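/*
 * Annotation (not in the original source): shmem stores the station
 * address big-endian across two words.  For 00:10:18:aa:bb:cc, mac_upper
 * holds bytes 0-1 in its low 16 bits (0x00000010) and mac_lower holds
 * bytes 2-5 (0x18aabbcc), which is exactly how the shifts above unpack
 * it into dev_addr[].
 */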
static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);
	u32 val, val2;
	int rc = 0;

	bnx2x_get_common_hwinfo(bp);

	bp->e1hov = 0;
	bp->e1hmf = 0;
	if (CHIP_IS_E1H(bp)) {
		bp->mf_config =
			SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);

		val = (SHMEM_RD(bp, mf_cfg.func_mf_config[func].e1hov_tag) &
		       FUNC_MF_CFG_E1HOV_TAG_MASK);
		if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {

			bp->e1hov = val;
			bp->e1hmf = 1;
			BNX2X_DEV_INFO("MF mode  E1HOV for func %d is %d "
				       "(0x%04x)\n",
				       func, bp->e1hov, bp->e1hov);
		} else {
			BNX2X_DEV_INFO("Single function mode\n");
			if (BP_E1HVN(bp)) {
				BNX2X_ERR("!!!  No valid E1HOV for func %d,"
					  " aborting\n", func);
				rc = -EPERM;
			}
		}
	}

	if (!BP_NOMCP(bp)) {
		bnx2x_get_port_hwinfo(bp);

		bp->fw_seq = (SHMEM_RD(bp, func_mb[func].drv_mb_header) &
			      DRV_MSG_SEQ_NUMBER_MASK);
		BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
	}

	if (IS_E1HMF(bp)) {
		val2 = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_upper);
		val = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_lower);
		if ((val2 != FUNC_MF_CFG_UPPERMAC_DEFAULT) &&
		    (val != FUNC_MF_CFG_LOWERMAC_DEFAULT)) {
			bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
			bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
			bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
			bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
			bp->dev->dev_addr[4] = (u8)(val >> 8 & 0xff);
			bp->dev->dev_addr[5] = (u8)(val & 0xff);
			memcpy(bp->link_params.mac_addr, bp->dev->dev_addr,
			       ETH_ALEN);
			memcpy(bp->dev->perm_addr, bp->dev->dev_addr,
			       ETH_ALEN);
		}

		return rc;
	}

	if (BP_NOMCP(bp)) {
		/* only supposed to happen on emulation/FPGA */
		BNX2X_ERR("warning random MAC workaround active\n");
		random_ether_addr(bp->dev->dev_addr);
		memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
	}

	return rc;
}
static int __devinit bnx2x_init_bp(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);
	int timer_interval;
	int rc;

	/* Disable interrupt handling until HW is initialized */
	atomic_set(&bp->intr_sem, 1);

	mutex_init(&bp->port.phy_mutex);

	INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task);
	INIT_WORK(&bp->reset_task, bnx2x_reset_task);

	rc = bnx2x_get_hwinfo(bp);

	/* need to reset chip if undi was active */
	if (!BP_NOMCP(bp))
		bnx2x_undi_unload(bp);

	if (CHIP_REV_IS_FPGA(bp))
		printk(KERN_ERR PFX "FPGA detected\n");

	if (BP_NOMCP(bp) && (func == 0))
		printk(KERN_ERR PFX
		       "MCP disabled, must load devices in order!\n");

	/* Set multi queue mode */
	if ((multi_mode != ETH_RSS_MODE_DISABLED) &&
	    ((int_mode == INT_MODE_INTx) || (int_mode == INT_MODE_MSI))) {
		printk(KERN_ERR PFX
		      "Multi disabled since int_mode requested is not MSI-X\n");
		multi_mode = ETH_RSS_MODE_DISABLED;
	}
	bp->multi_mode = multi_mode;

	/* Set TPA flags */
	if (disable_tpa) {
		bp->flags &= ~TPA_ENABLE_FLAG;
		bp->dev->features &= ~NETIF_F_LRO;
	} else {
		bp->flags |= TPA_ENABLE_FLAG;
		bp->dev->features |= NETIF_F_LRO;
	}

	bp->tx_ring_size = MAX_TX_AVAIL;
	bp->rx_ring_size = MAX_RX_AVAIL;

	timer_interval = (CHIP_REV_IS_SLOW(bp) ? 5*HZ : HZ);
	bp->current_interval = (poll ? poll : timer_interval);

	init_timer(&bp->timer);
	bp->timer.expires = jiffies + bp->current_interval;
	bp->timer.data = (unsigned long) bp;
	bp->timer.function = bnx2x_timer;

	return rc;
}
/*
 * ethtool service functions
 */

/* All ethtool functions called with rtnl_lock */

static int bnx2x_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct bnx2x *bp = netdev_priv(dev);

	cmd->supported = bp->port.supported;
	cmd->advertising = bp->port.advertising;

	if (netif_carrier_ok(dev)) {
		cmd->speed = bp->link_vars.line_speed;
		cmd->duplex = bp->link_vars.duplex;
	} else {
		cmd->speed = bp->link_params.req_line_speed;
		cmd->duplex = bp->link_params.req_duplex;
	}
	if (IS_E1HMF(bp)) {
		u16 vn_max_rate;

		vn_max_rate = ((bp->mf_config & FUNC_MF_CFG_MAX_BW_MASK) >>
				FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
		if (vn_max_rate < cmd->speed)
			cmd->speed = vn_max_rate;
	}

	if (bp->link_params.switch_cfg == SWITCH_CFG_10G) {
		u32 ext_phy_type =
			XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);

		switch (ext_phy_type) {
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
			cmd->port = PORT_FIBRE;
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481:
			cmd->port = PORT_TP;
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
			BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
				  bp->link_params.ext_phy_config);
			break;

		default:
			DP(NETIF_MSG_LINK, "BAD XGXS ext_phy_config 0x%x\n",
			   bp->link_params.ext_phy_config);
			break;
		}
	} else
		cmd->port = PORT_TP;

	cmd->phy_address = bp->port.phy_addr;
	cmd->transceiver = XCVR_INTERNAL;

	if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
		cmd->autoneg = AUTONEG_ENABLE;
	else
		cmd->autoneg = AUTONEG_DISABLE;

	cmd->maxtxpkt = 0;
	cmd->maxrxpkt = 0;

	DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
	   DP_LEVEL "  supported 0x%x  advertising 0x%x  speed %d\n"
	   DP_LEVEL "  duplex %d  port %d  phy_address %d  transceiver %d\n"
	   DP_LEVEL "  autoneg %d  maxtxpkt %d  maxrxpkt %d\n",
	   cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
	   cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
	   cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);

	return 0;
}
static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct bnx2x *bp = netdev_priv(dev);
	u32 advertising;

	if (IS_E1HMF(bp))
		return 0;

	DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
	   DP_LEVEL "  supported 0x%x  advertising 0x%x  speed %d\n"
	   DP_LEVEL "  duplex %d  port %d  phy_address %d  transceiver %d\n"
	   DP_LEVEL "  autoneg %d  maxtxpkt %d  maxrxpkt %d\n",
	   cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
	   cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
	   cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);

	if (cmd->autoneg == AUTONEG_ENABLE) {
		if (!(bp->port.supported & SUPPORTED_Autoneg)) {
			DP(NETIF_MSG_LINK, "Autoneg not supported\n");
			return -EINVAL;
		}

		/* advertise the requested speed and duplex if supported */
		cmd->advertising &= bp->port.supported;

		bp->link_params.req_line_speed = SPEED_AUTO_NEG;
		bp->link_params.req_duplex = DUPLEX_FULL;
		bp->port.advertising |= (ADVERTISED_Autoneg |
					 cmd->advertising);

	} else { /* forced speed */
		/* advertise the requested speed and duplex if supported */
		switch (cmd->speed) {
		case SPEED_10:
			if (cmd->duplex == DUPLEX_FULL) {
				if (!(bp->port.supported &
				      SUPPORTED_10baseT_Full)) {
					DP(NETIF_MSG_LINK,
					   "10M full not supported\n");
					return -EINVAL;
				}

				advertising = (ADVERTISED_10baseT_Full |
					       ADVERTISED_TP);
			} else {
				if (!(bp->port.supported &
				      SUPPORTED_10baseT_Half)) {
					DP(NETIF_MSG_LINK,
					   "10M half not supported\n");
					return -EINVAL;
				}

				advertising = (ADVERTISED_10baseT_Half |
					       ADVERTISED_TP);
			}
			break;

		case SPEED_100:
			if (cmd->duplex == DUPLEX_FULL) {
				if (!(bp->port.supported &
				      SUPPORTED_100baseT_Full)) {
					DP(NETIF_MSG_LINK,
					   "100M full not supported\n");
					return -EINVAL;
				}

				advertising = (ADVERTISED_100baseT_Full |
					       ADVERTISED_TP);
			} else {
				if (!(bp->port.supported &
				      SUPPORTED_100baseT_Half)) {
					DP(NETIF_MSG_LINK,
					   "100M half not supported\n");
					return -EINVAL;
				}

				advertising = (ADVERTISED_100baseT_Half |
					       ADVERTISED_TP);
			}
			break;

		case SPEED_1000:
			if (cmd->duplex != DUPLEX_FULL) {
				DP(NETIF_MSG_LINK, "1G half not supported\n");
				return -EINVAL;
			}

			if (!(bp->port.supported & SUPPORTED_1000baseT_Full)) {
				DP(NETIF_MSG_LINK, "1G full not supported\n");
				return -EINVAL;
			}

			advertising = (ADVERTISED_1000baseT_Full |
				       ADVERTISED_TP);
			break;

		case SPEED_2500:
			if (cmd->duplex != DUPLEX_FULL) {
				DP(NETIF_MSG_LINK,
				   "2.5G half not supported\n");
				return -EINVAL;
			}

			if (!(bp->port.supported & SUPPORTED_2500baseX_Full)) {
				DP(NETIF_MSG_LINK,
				   "2.5G full not supported\n");
				return -EINVAL;
			}

			advertising = (ADVERTISED_2500baseX_Full |
				       ADVERTISED_TP);
			break;

		case SPEED_10000:
			if (cmd->duplex != DUPLEX_FULL) {
				DP(NETIF_MSG_LINK, "10G half not supported\n");
				return -EINVAL;
			}

			if (!(bp->port.supported & SUPPORTED_10000baseT_Full)) {
				DP(NETIF_MSG_LINK, "10G full not supported\n");
				return -EINVAL;
			}

			advertising = (ADVERTISED_10000baseT_Full |
				       ADVERTISED_FIBRE);
			break;

		default:
			DP(NETIF_MSG_LINK, "Unsupported speed\n");
			return -EINVAL;
		}

		bp->link_params.req_line_speed = cmd->speed;
		bp->link_params.req_duplex = cmd->duplex;
		bp->port.advertising = advertising;
	}

	DP(NETIF_MSG_LINK, "req_line_speed %d\n"
	   DP_LEVEL "  req_duplex %d  advertising 0x%x\n",
	   bp->link_params.req_line_speed, bp->link_params.req_duplex,
	   bp->port.advertising);

	if (netif_running(dev)) {
		bnx2x_stats_handle(bp, STATS_EVENT_STOP);
		bnx2x_link_set(bp);
	}

	return 0;
}
#define PHY_FW_VER_LEN			10

static void bnx2x_get_drvinfo(struct net_device *dev,
			      struct ethtool_drvinfo *info)
{
	struct bnx2x *bp = netdev_priv(dev);
	u8 phy_fw_ver[PHY_FW_VER_LEN];

	strcpy(info->driver, DRV_MODULE_NAME);
	strcpy(info->version, DRV_MODULE_VERSION);

	phy_fw_ver[0] = '\0';
	if (bp->port.pmf) {
		bnx2x_acquire_phy_lock(bp);
		bnx2x_get_ext_phy_fw_version(&bp->link_params,
					     (bp->state != BNX2X_STATE_CLOSED),
					     phy_fw_ver, PHY_FW_VER_LEN);
		bnx2x_release_phy_lock(bp);
	}

	snprintf(info->fw_version, 32, "BC:%d.%d.%d%s%s",
		 (bp->common.bc_ver & 0xff0000) >> 16,
		 (bp->common.bc_ver & 0xff00) >> 8,
		 (bp->common.bc_ver & 0xff),
		 ((phy_fw_ver[0] != '\0') ? " PHY:" : ""), phy_fw_ver);
	strcpy(info->bus_info, pci_name(bp->pdev));
	info->n_stats = BNX2X_NUM_STATS;
	info->testinfo_len = BNX2X_NUM_TESTS;
	info->eedump_len = bp->common.flash_size;
	info->regdump_len = 0;
}
static void bnx2x_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct bnx2x *bp = netdev_priv(dev);

	if (bp->flags & NO_WOL_FLAG) {
		wol->supported = 0;
		wol->wolopts = 0;
	} else {
		wol->supported = WAKE_MAGIC;
		if (bp->wol)
			wol->wolopts = WAKE_MAGIC;
		else
			wol->wolopts = 0;
	}
	memset(&wol->sopass, 0, sizeof(wol->sopass));
}

static int bnx2x_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct bnx2x *bp = netdev_priv(dev);

	if (wol->wolopts & ~WAKE_MAGIC)
		return -EINVAL;

	if (wol->wolopts & WAKE_MAGIC) {
		if (bp->flags & NO_WOL_FLAG)
			return -EINVAL;

		bp->wol = 1;
	} else
		bp->wol = 0;

	return 0;
}

static u32 bnx2x_get_msglevel(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	return bp->msglevel;
}

static void bnx2x_set_msglevel(struct net_device *dev, u32 level)
{
	struct bnx2x *bp = netdev_priv(dev);

	if (capable(CAP_NET_ADMIN))
		bp->msglevel = level;
}

static int bnx2x_nway_reset(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	if (!bp->port.pmf)
		return 0;

	if (netif_running(dev)) {
		bnx2x_stats_handle(bp, STATS_EVENT_STOP);
		bnx2x_link_set(bp);
	}

	return 0;
}

static int bnx2x_get_eeprom_len(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	return bp->common.flash_size;
}
static int bnx2x_acquire_nvram_lock(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int count, i;
	u32 val = 0;

	/* adjust timeout for emulation/FPGA */
	count = NVRAM_TIMEOUT_COUNT;
	if (CHIP_REV_IS_SLOW(bp))
		count *= 100;

	/* request access to nvram interface */
	REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
	       (MCPR_NVM_SW_ARB_ARB_REQ_SET1 << port));

	for (i = 0; i < count*10; i++) {
		val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
		if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))
			break;

		udelay(5);
	}

	if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))) {
		DP(BNX2X_MSG_NVM, "cannot get access to nvram interface\n");
		return -EBUSY;
	}

	return 0;
}

static int bnx2x_release_nvram_lock(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int count, i;
	u32 val = 0;

	/* adjust timeout for emulation/FPGA */
	count = NVRAM_TIMEOUT_COUNT;
	if (CHIP_REV_IS_SLOW(bp))
		count *= 100;

	/* relinquish nvram interface */
	REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
	       (MCPR_NVM_SW_ARB_ARB_REQ_CLR1 << port));

	for (i = 0; i < count*10; i++) {
		val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
		if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)))
			break;

		udelay(5);
	}

	if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)) {
		DP(BNX2X_MSG_NVM, "cannot free access to nvram interface\n");
		return -EBUSY;
	}

	return 0;
}

static void bnx2x_enable_nvram_access(struct bnx2x *bp)
{
	u32 val;

	val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);

	/* enable both bits, even on read */
	REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
	       (val | MCPR_NVM_ACCESS_ENABLE_EN |
		      MCPR_NVM_ACCESS_ENABLE_WR_EN));
}

static void bnx2x_disable_nvram_access(struct bnx2x *bp)
{
	u32 val;

	val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);

	/* disable both bits, even after read */
	REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
	       (val & ~(MCPR_NVM_ACCESS_ENABLE_EN |
			MCPR_NVM_ACCESS_ENABLE_WR_EN)));
}
8652 static int bnx2x_nvram_read_dword(struct bnx2x *bp, u32 offset, __be32 *ret_val,
8658 /* build the command word */
8659 cmd_flags |= MCPR_NVM_COMMAND_DOIT;
8661 /* need to clear DONE bit separately */
8662 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);
8664 /* address of the NVRAM to read from */
8665 REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
8666 (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));
8668 /* issue a read command */
8669 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);
8671 /* adjust timeout for emulation/FPGA */
8672 count = NVRAM_TIMEOUT_COUNT;
8673 if (CHIP_REV_IS_SLOW(bp))
8676 /* wait for completion */
	rc = -EBUSY;
	for (i = 0; i < count; i++) {
		udelay(5);
		val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);

		if (val & MCPR_NVM_COMMAND_DONE) {
			val = REG_RD(bp, MCP_REG_MCPR_NVM_READ);
			/* we read nvram data in cpu order, but ethtool
			 * sees it as an array of bytes; converting to
			 * big-endian does the work */
			*ret_val = cpu_to_be32(val);
			rc = 0;
			break;
		}
	}

	return rc;
}
static int bnx2x_nvram_read(struct bnx2x *bp, u32 offset, u8 *ret_buf,
			    int buf_size)
{
	int rc;
	u32 cmd_flags;
	__be32 val;

	if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
		DP(BNX2X_MSG_NVM,
		   "Invalid parameter: offset 0x%x  buf_size 0x%x\n",
		   offset, buf_size);
		return -EINVAL;
	}

	if (offset + buf_size > bp->common.flash_size) {
		DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
		   " buf_size (0x%x) > flash_size (0x%x)\n",
		   offset, buf_size, bp->common.flash_size);
		return -EINVAL;
	}
8718 /* request access to nvram interface */
	rc = bnx2x_acquire_nvram_lock(bp);
	if (rc)
		return rc;
8723 /* enable access to nvram interface */
8724 bnx2x_enable_nvram_access(bp);
8726 /* read the first word(s) */
8727 cmd_flags = MCPR_NVM_COMMAND_FIRST;
8728 while ((buf_size > sizeof(u32)) && (rc == 0)) {
8729 rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
8730 memcpy(ret_buf, &val, 4);
8732 /* advance to the next dword */
8733 offset += sizeof(u32);
8734 ret_buf += sizeof(u32);
		buf_size -= sizeof(u32);
		cmd_flags = 0;
	}

	if (rc == 0) {
		cmd_flags |= MCPR_NVM_COMMAND_LAST;
		rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
		memcpy(ret_buf, &val, 4);
	}
8745 /* disable access to nvram interface */
8746 bnx2x_disable_nvram_access(bp);
	bnx2x_release_nvram_lock(bp);

	return rc;
}
8752 static int bnx2x_get_eeprom(struct net_device *dev,
8753 struct ethtool_eeprom *eeprom, u8 *eebuf)
{
	struct bnx2x *bp = netdev_priv(dev);
	int rc;

	if (!netif_running(dev))
		return -EAGAIN;
8761 DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
8762 DP_LEVEL " magic 0x%x offset 0x%x (%d) len 0x%x (%d)\n",
8763 eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
8764 eeprom->len, eeprom->len);
8766 /* parameters already validated in ethtool_get_eeprom */
	rc = bnx2x_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);

	return rc;
}
static int bnx2x_nvram_write_dword(struct bnx2x *bp, u32 offset, u32 val,
				   u32 cmd_flags)
{
	int count, i, rc;
8778 /* build the command word */
8779 cmd_flags |= MCPR_NVM_COMMAND_DOIT | MCPR_NVM_COMMAND_WR;
8781 /* need to clear DONE bit separately */
8782 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);
8784 /* write the data */
8785 REG_WR(bp, MCP_REG_MCPR_NVM_WRITE, val);
8787 /* address of the NVRAM to write to */
8788 REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
8789 (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));
8791 /* issue the write command */
8792 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);
8794 /* adjust timeout for emulation/FPGA */
8795 count = NVRAM_TIMEOUT_COUNT;
	if (CHIP_REV_IS_SLOW(bp))
		count *= 100;
	/* wait for completion */
	rc = -EBUSY;
	for (i = 0; i < count; i++) {
		udelay(5);
		val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
		if (val & MCPR_NVM_COMMAND_DONE) {
			rc = 0;
			break;
		}
	}

	return rc;
}
8813 #define BYTE_OFFSET(offset) (8 * (offset & 0x03))
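
/* BYTE_OFFSET() turns a byte address into the bit position of that
 * byte within its aligned dword: e.g. offset 0x102 -> byte lane 2 ->
 * a shift of 16 bits.  bnx2x_nvram_write1() below uses it to splice a
 * single byte into a read-modify-write of the containing dword.
 */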
static int bnx2x_nvram_write1(struct bnx2x *bp, u32 offset, u8 *data_buf,
			      int buf_size)
{
	int rc;
	u32 cmd_flags;
	u32 align_offset;
	__be32 val;

	if (offset + buf_size > bp->common.flash_size) {
		DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
		   " buf_size (0x%x) > flash_size (0x%x)\n",
		   offset, buf_size, bp->common.flash_size);
		return -EINVAL;
	}
8830 /* request access to nvram interface */
	rc = bnx2x_acquire_nvram_lock(bp);
	if (rc)
		return rc;
8835 /* enable access to nvram interface */
8836 bnx2x_enable_nvram_access(bp);
8838 cmd_flags = (MCPR_NVM_COMMAND_FIRST | MCPR_NVM_COMMAND_LAST);
8839 align_offset = (offset & ~0x03);
8840 rc = bnx2x_nvram_read_dword(bp, align_offset, &val, cmd_flags);
	if (rc == 0) {
		val &= ~(0xff << BYTE_OFFSET(offset));
		val |= (*data_buf << BYTE_OFFSET(offset));

		/* nvram data is returned as an array of bytes;
		 * convert it back to cpu order */
		val = be32_to_cpu(val);

		rc = bnx2x_nvram_write_dword(bp, align_offset, val,
					     cmd_flags);
	}
8854 /* disable access to nvram interface */
8855 bnx2x_disable_nvram_access(bp);
	bnx2x_release_nvram_lock(bp);

	return rc;
}
static int bnx2x_nvram_write(struct bnx2x *bp, u32 offset, u8 *data_buf,
			     int buf_size)
{
	int rc;
	u32 cmd_flags;
	u32 val;
	int written_so_far;

	if (buf_size == 1)	/* ethtool */
		return bnx2x_nvram_write1(bp, offset, data_buf, buf_size);

	if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
		DP(BNX2X_MSG_NVM,
		   "Invalid parameter: offset 0x%x  buf_size 0x%x\n",
		   offset, buf_size);
		return -EINVAL;
	}

	if (offset + buf_size > bp->common.flash_size) {
		DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
		   " buf_size (0x%x) > flash_size (0x%x)\n",
		   offset, buf_size, bp->common.flash_size);
		return -EINVAL;
	}
8886 /* request access to nvram interface */
	rc = bnx2x_acquire_nvram_lock(bp);
	if (rc)
		return rc;
8891 /* enable access to nvram interface */
8892 bnx2x_enable_nvram_access(bp);
	written_so_far = 0;
	cmd_flags = MCPR_NVM_COMMAND_FIRST;
8896 while ((written_so_far < buf_size) && (rc == 0)) {
8897 if (written_so_far == (buf_size - sizeof(u32)))
8898 cmd_flags |= MCPR_NVM_COMMAND_LAST;
8899 else if (((offset + 4) % NVRAM_PAGE_SIZE) == 0)
8900 cmd_flags |= MCPR_NVM_COMMAND_LAST;
8901 else if ((offset % NVRAM_PAGE_SIZE) == 0)
8902 cmd_flags |= MCPR_NVM_COMMAND_FIRST;
8904 memcpy(&val, data_buf, 4);
8906 rc = bnx2x_nvram_write_dword(bp, offset, val, cmd_flags);
8908 /* advance to the next dword */
8909 offset += sizeof(u32);
8910 data_buf += sizeof(u32);
		written_so_far += sizeof(u32);
		cmd_flags = 0;
	}
8915 /* disable access to nvram interface */
8916 bnx2x_disable_nvram_access(bp);
	bnx2x_release_nvram_lock(bp);

	return rc;
}
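
/* Framing example for the loop above (assuming NVRAM_PAGE_SIZE is
 * 256): the dword written at offset 0xfc is the last one of its page
 * and gets MCPR_NVM_COMMAND_LAST, while the next dword at 0x100
 * starts a new page and gets MCPR_NVM_COMMAND_FIRST; this is why the
 * flags are re-derived from the offset on every iteration.
 */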
8922 static int bnx2x_set_eeprom(struct net_device *dev,
8923 struct ethtool_eeprom *eeprom, u8 *eebuf)
{
	struct bnx2x *bp = netdev_priv(dev);
	int rc;

	if (!netif_running(dev))
		return -EAGAIN;
8931 DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
8932 DP_LEVEL " magic 0x%x offset 0x%x (%d) len 0x%x (%d)\n",
8933 eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
8934 eeprom->len, eeprom->len);
8936 /* parameters already validated in ethtool_set_eeprom */
8938 /* If the magic number is PHY (0x00504859) upgrade the PHY FW */
	if (eeprom->magic == 0x00504859)
		if (bp->port.pmf) {

			bnx2x_acquire_phy_lock(bp);
			rc = bnx2x_flash_download(bp, BP_PORT(bp),
					     bp->link_params.ext_phy_config,
					     (bp->state != BNX2X_STATE_CLOSED),
					     eebuf, eeprom->len);
			if ((bp->state == BNX2X_STATE_OPEN) ||
			    (bp->state == BNX2X_STATE_DISABLED)) {
				rc |= bnx2x_link_reset(&bp->link_params,
						       &bp->link_vars);
				rc |= bnx2x_phy_init(&bp->link_params,
						     &bp->link_vars);
			}
			bnx2x_release_phy_lock(bp);

		} else /* Only the PMF can access the PHY */
			return -EINVAL;
	else
		rc = bnx2x_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);

	return rc;
}
8964 static int bnx2x_get_coalesce(struct net_device *dev,
8965 struct ethtool_coalesce *coal)
8967 struct bnx2x *bp = netdev_priv(dev);
8969 memset(coal, 0, sizeof(struct ethtool_coalesce));
8971 coal->rx_coalesce_usecs = bp->rx_ticks;
	coal->tx_coalesce_usecs = bp->tx_ticks;

	return 0;
}
8977 static int bnx2x_set_coalesce(struct net_device *dev,
8978 struct ethtool_coalesce *coal)
8980 struct bnx2x *bp = netdev_priv(dev);
8982 bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
8983 if (bp->rx_ticks > 3000)
8984 bp->rx_ticks = 3000;
8986 bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
8987 if (bp->tx_ticks > 0x3000)
8988 bp->tx_ticks = 0x3000;
8990 if (netif_running(dev))
		bnx2x_update_coalesce(bp);

	return 0;
}
8996 static void bnx2x_get_ringparam(struct net_device *dev,
8997 struct ethtool_ringparam *ering)
8999 struct bnx2x *bp = netdev_priv(dev);
9001 ering->rx_max_pending = MAX_RX_AVAIL;
9002 ering->rx_mini_max_pending = 0;
9003 ering->rx_jumbo_max_pending = 0;
9005 ering->rx_pending = bp->rx_ring_size;
9006 ering->rx_mini_pending = 0;
9007 ering->rx_jumbo_pending = 0;
9009 ering->tx_max_pending = MAX_TX_AVAIL;
9010 ering->tx_pending = bp->tx_ring_size;
9013 static int bnx2x_set_ringparam(struct net_device *dev,
9014 struct ethtool_ringparam *ering)
{
	struct bnx2x *bp = netdev_priv(dev);
	int rc = 0;
9019 if ((ering->rx_pending > MAX_RX_AVAIL) ||
9020 (ering->tx_pending > MAX_TX_AVAIL) ||
	    (ering->tx_pending <= MAX_SKB_FRAGS + 4))
		return -EINVAL;
9024 bp->rx_ring_size = ering->rx_pending;
9025 bp->tx_ring_size = ering->tx_pending;
9027 if (netif_running(dev)) {
9028 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
		rc = bnx2x_nic_load(bp, LOAD_NORMAL);
	}

	return rc;
}
9035 static void bnx2x_get_pauseparam(struct net_device *dev,
9036 struct ethtool_pauseparam *epause)
9038 struct bnx2x *bp = netdev_priv(dev);
9040 epause->autoneg = (bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO) &&
9041 (bp->link_params.req_line_speed == SPEED_AUTO_NEG);
9043 epause->rx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) ==
9044 BNX2X_FLOW_CTRL_RX);
9045 epause->tx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX) ==
9046 BNX2X_FLOW_CTRL_TX);
9048 DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
9049 DP_LEVEL " autoneg %d rx_pause %d tx_pause %d\n",
9050 epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
9053 static int bnx2x_set_pauseparam(struct net_device *dev,
9054 struct ethtool_pauseparam *epause)
{
	struct bnx2x *bp = netdev_priv(dev);

	if (IS_E1HMF(bp))
		return 0;
9061 DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
9062 DP_LEVEL " autoneg %d rx_pause %d tx_pause %d\n",
9063 epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
9065 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO;
9067 if (epause->rx_pause)
9068 bp->link_params.req_flow_ctrl |= BNX2X_FLOW_CTRL_RX;
9070 if (epause->tx_pause)
9071 bp->link_params.req_flow_ctrl |= BNX2X_FLOW_CTRL_TX;
9073 if (bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO)
9074 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_NONE;
9076 if (epause->autoneg) {
9077 if (!(bp->port.supported & SUPPORTED_Autoneg)) {
			DP(NETIF_MSG_LINK, "autoneg not supported\n");
			return -EINVAL;
		}
9082 if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
9083 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO;
9087 "req_flow_ctrl 0x%x\n", bp->link_params.req_flow_ctrl);
9089 if (netif_running(dev)) {
		bnx2x_stats_handle(bp, STATS_EVENT_STOP);
		bnx2x_link_set(bp);
	}

	return 0;
}
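
/* TPA (the HW LRO aggregation) is only allowed while Rx checksum
 * offload is active, since aggregated frames are validated by their
 * TCP checksum in hardware.  Toggling the LRO flag on a running
 * interface requires a full unload/load cycle so the Rx rings are
 * re-allocated with a matching buffer layout.
 */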
9097 static int bnx2x_set_flags(struct net_device *dev, u32 data)
{
	struct bnx2x *bp = netdev_priv(dev);
	int changed = 0;
	int rc = 0;
9103 /* TPA requires Rx CSUM offloading */
9104 if ((data & ETH_FLAG_LRO) && bp->rx_csum) {
9105 if (!(dev->features & NETIF_F_LRO)) {
			dev->features |= NETIF_F_LRO;
			bp->flags |= TPA_ENABLE_FLAG;
			changed = 1;
		}

	} else if (dev->features & NETIF_F_LRO) {
		dev->features &= ~NETIF_F_LRO;
		bp->flags &= ~TPA_ENABLE_FLAG;
		changed = 1;
	}
9117 if (changed && netif_running(dev)) {
9118 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
		rc = bnx2x_nic_load(bp, LOAD_NORMAL);
	}

	return rc;
}
9125 static u32 bnx2x_get_rx_csum(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	return bp->rx_csum;
}
9132 static int bnx2x_set_rx_csum(struct net_device *dev, u32 data)
{
	struct bnx2x *bp = netdev_priv(dev);
	int rc = 0;

	bp->rx_csum = data;

	/* Disable TPA when Rx CSUM is disabled; otherwise all
	   TPA'ed packets will be discarded due to a wrong TCP CSUM */
	if (!data) {
		u32 flags = ethtool_op_get_flags(dev);

		rc = bnx2x_set_flags(dev, (flags & ~ETH_FLAG_LRO));
	}

	return rc;
}
static int bnx2x_set_tso(struct net_device *dev, u32 data)
{
	if (data) {
		dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
		dev->features |= NETIF_F_TSO6;
	} else {
		dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO_ECN);
		dev->features &= ~NETIF_F_TSO6;
	}

	return 0;
}
9163 static const struct {
9164 char string[ETH_GSTRING_LEN];
9165 } bnx2x_tests_str_arr[BNX2X_NUM_TESTS] = {
9166 { "register_test (offline)" },
9167 { "memory_test (offline)" },
9168 { "loopback_test (offline)" },
9169 { "nvram_test (online)" },
9170 { "interrupt_test (online)" },
9171 { "link_test (online)" },
9172 { "idle check (online)" }
9175 static int bnx2x_self_test_count(struct net_device *dev)
	return BNX2X_NUM_TESTS;
}
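
/* The offline register test walks reg_tbl[]: each entry gives a base
 * offset, the per-port stride (offset0 + port*offset1) and a mask of
 * implemented bits.  Every register is written with 0x00000000 and
 * then 0xffffffff; the masked read-back must equal the masked written
 * value, and the original content is restored afterwards, so the
 * test is non-destructive.
 */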
9180 static int bnx2x_test_registers(struct bnx2x *bp)
{
	int idx, i, rc = -ENODEV;
	u32 wr_val = 0;
9184 int port = BP_PORT(bp);
	static const struct {
		u32 offset0;
		u32 offset1;
		u32 mask;
	} reg_tbl[] = {
9190 /* 0 */ { BRB1_REG_PAUSE_LOW_THRESHOLD_0, 4, 0x000003ff },
9191 { DORQ_REG_DB_ADDR0, 4, 0xffffffff },
9192 { HC_REG_AGG_INT_0, 4, 0x000003ff },
9193 { PBF_REG_MAC_IF0_ENABLE, 4, 0x00000001 },
9194 { PBF_REG_P0_INIT_CRD, 4, 0x000007ff },
9195 { PRS_REG_CID_PORT_0, 4, 0x00ffffff },
9196 { PXP2_REG_PSWRQ_CDU0_L2P, 4, 0x000fffff },
9197 { PXP2_REG_RQ_CDU0_EFIRST_MEM_ADDR, 8, 0x0003ffff },
9198 { PXP2_REG_PSWRQ_TM0_L2P, 4, 0x000fffff },
9199 { PXP2_REG_RQ_USDM0_EFIRST_MEM_ADDR, 8, 0x0003ffff },
9200 /* 10 */ { PXP2_REG_PSWRQ_TSDM0_L2P, 4, 0x000fffff },
9201 { QM_REG_CONNNUM_0, 4, 0x000fffff },
9202 { TM_REG_LIN0_MAX_ACTIVE_CID, 4, 0x0003ffff },
9203 { SRC_REG_KEYRSS0_0, 40, 0xffffffff },
9204 { SRC_REG_KEYRSS0_7, 40, 0xffffffff },
9205 { XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD00, 4, 0x00000001 },
9206 { XCM_REG_WU_DA_CNT_CMD00, 4, 0x00000003 },
9207 { XCM_REG_GLB_DEL_ACK_MAX_CNT_0, 4, 0x000000ff },
9208 { NIG_REG_EGRESS_MNG0_FIFO, 20, 0xffffffff },
9209 { NIG_REG_LLH0_T_BIT, 4, 0x00000001 },
9210 /* 20 */ { NIG_REG_EMAC0_IN_EN, 4, 0x00000001 },
9211 { NIG_REG_BMAC0_IN_EN, 4, 0x00000001 },
9212 { NIG_REG_XCM0_OUT_EN, 4, 0x00000001 },
9213 { NIG_REG_BRB0_OUT_EN, 4, 0x00000001 },
9214 { NIG_REG_LLH0_XCM_MASK, 4, 0x00000007 },
9215 { NIG_REG_LLH0_ACPI_PAT_6_LEN, 68, 0x000000ff },
9216 { NIG_REG_LLH0_ACPI_PAT_0_CRC, 68, 0xffffffff },
9217 { NIG_REG_LLH0_DEST_MAC_0_0, 160, 0xffffffff },
9218 { NIG_REG_LLH0_DEST_IP_0_1, 160, 0xffffffff },
9219 { NIG_REG_LLH0_IPV4_IPV6_0, 160, 0x00000001 },
9220 /* 30 */ { NIG_REG_LLH0_DEST_UDP_0, 160, 0x0000ffff },
9221 { NIG_REG_LLH0_DEST_TCP_0, 160, 0x0000ffff },
9222 { NIG_REG_LLH0_VLAN_ID_0, 160, 0x00000fff },
9223 { NIG_REG_XGXS_SERDES0_MODE_SEL, 4, 0x00000001 },
9224 { NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0, 4, 0x00000001 },
9225 { NIG_REG_STATUS_INTERRUPT_PORT0, 4, 0x07ffffff },
9226 { NIG_REG_XGXS0_CTRL_EXTREMOTEMDIOST, 24, 0x00000001 },
9227 { NIG_REG_SERDES0_CTRL_PHY_ADDR, 16, 0x0000001f },
		{ 0xffffffff, 0, 0x00000000 }
	};

	if (!netif_running(bp->dev))
		return rc;
9235 /* Repeat the test twice:
9236 First by writing 0x00000000, second by writing 0xffffffff */
	for (idx = 0; idx < 2; idx++) {

		switch (idx) {
		case 0:
			wr_val = 0;
			break;
		case 1:
			wr_val = 0xffffffff;
			break;
		}
9248 for (i = 0; reg_tbl[i].offset0 != 0xffffffff; i++) {
9249 u32 offset, mask, save_val, val;
9251 offset = reg_tbl[i].offset0 + port*reg_tbl[i].offset1;
9252 mask = reg_tbl[i].mask;
9254 save_val = REG_RD(bp, offset);
9256 REG_WR(bp, offset, wr_val);
9257 val = REG_RD(bp, offset);
9259 /* Restore the original register's value */
9260 REG_WR(bp, offset, save_val);
			/* verify the value is as expected */
			if ((val & mask) != (wr_val & mask))
				goto test_reg_exit;
		}
	}

	rc = 0;

test_reg_exit:
	return rc;
}
9274 static int bnx2x_test_memory(struct bnx2x *bp)
{
	int i, j, rc = -ENODEV;
	u32 val;
	static const struct {
		u32 offset;
		int size;
	} mem_tbl[] = {
9282 { CCM_REG_XX_DESCR_TABLE, CCM_REG_XX_DESCR_TABLE_SIZE },
9283 { CFC_REG_ACTIVITY_COUNTER, CFC_REG_ACTIVITY_COUNTER_SIZE },
9284 { CFC_REG_LINK_LIST, CFC_REG_LINK_LIST_SIZE },
9285 { DMAE_REG_CMD_MEM, DMAE_REG_CMD_MEM_SIZE },
9286 { TCM_REG_XX_DESCR_TABLE, TCM_REG_XX_DESCR_TABLE_SIZE },
9287 { UCM_REG_XX_DESCR_TABLE, UCM_REG_XX_DESCR_TABLE_SIZE },
		{ XCM_REG_XX_DESCR_TABLE, XCM_REG_XX_DESCR_TABLE_SIZE },

		{ 0xffffffff, 0 }
	};
	static const struct {
		char *name;
		u32 offset;
		u32 e1_mask;
		u32 e1h_mask;
	} prty_tbl[] = {
9298 { "CCM_PRTY_STS", CCM_REG_CCM_PRTY_STS, 0x3ffc0, 0 },
9299 { "CFC_PRTY_STS", CFC_REG_CFC_PRTY_STS, 0x2, 0x2 },
9300 { "DMAE_PRTY_STS", DMAE_REG_DMAE_PRTY_STS, 0, 0 },
9301 { "TCM_PRTY_STS", TCM_REG_TCM_PRTY_STS, 0x3ffc0, 0 },
9302 { "UCM_PRTY_STS", UCM_REG_UCM_PRTY_STS, 0x3ffc0, 0 },
9303 { "XCM_PRTY_STS", XCM_REG_XCM_PRTY_STS, 0x3ffc1, 0 },
		{ NULL, 0xffffffff, 0, 0 }
	};

	if (!netif_running(bp->dev))
		return rc;
9311 /* Go through all the memories */
9312 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++)
9313 for (j = 0; j < mem_tbl[i].size; j++)
9314 REG_RD(bp, mem_tbl[i].offset + j*4);
9316 /* Check the parity status */
9317 for (i = 0; prty_tbl[i].offset != 0xffffffff; i++) {
9318 val = REG_RD(bp, prty_tbl[i].offset);
9319 if ((CHIP_IS_E1(bp) && (val & ~(prty_tbl[i].e1_mask))) ||
9320 (CHIP_IS_E1H(bp) && (val & ~(prty_tbl[i].e1h_mask)))) {
9322 "%s is 0x%x\n", prty_tbl[i].name, val);
9333 static void bnx2x_wait_for_link(struct bnx2x *bp, u8 link_up)
{
	int cnt = 1000;

	if (link_up)
		while (bnx2x_link_test(bp) && cnt--)
			msleep(10);
}
9342 static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode, u8 link_up)
{
	unsigned int pkt_size, num_pkts, i;
9345 struct sk_buff *skb;
9346 unsigned char *packet;
9347 struct bnx2x_fastpath *fp = &bp->fp[0];
9348 u16 tx_start_idx, tx_idx;
9349 u16 rx_start_idx, rx_idx;
9351 struct sw_tx_bd *tx_buf;
9352 struct eth_tx_bd *tx_bd;
9354 union eth_rx_cqe *cqe;
9356 struct sw_rx_bd *rx_buf;
9360 /* check the loopback mode */
9361 switch (loopback_mode) {
9362 case BNX2X_PHY_LOOPBACK:
		if (bp->link_params.loopback_mode != LOOPBACK_XGXS_10)
			return -EINVAL;
		break;
9366 case BNX2X_MAC_LOOPBACK:
9367 bp->link_params.loopback_mode = LOOPBACK_BMAC;
		bnx2x_phy_init(&bp->link_params, &bp->link_vars);
		break;
	default:
		return -EINVAL;
	}
9374 /* prepare the loopback packet */
9375 pkt_size = (((bp->dev->mtu < ETH_MAX_PACKET_SIZE) ?
9376 bp->dev->mtu : ETH_MAX_PACKET_SIZE) + ETH_HLEN);
9377 skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
	if (!skb) {
		rc = -ENOMEM;
		goto test_loopback_exit;
	}
9382 packet = skb_put(skb, pkt_size);
9383 memcpy(packet, bp->dev->dev_addr, ETH_ALEN);
9384 memset(packet + ETH_ALEN, 0, (ETH_HLEN - ETH_ALEN));
9385 for (i = ETH_HLEN; i < pkt_size; i++)
9386 packet[i] = (unsigned char) (i & 0xff);
9388 /* send the loopback packet */
9390 tx_start_idx = le16_to_cpu(*fp->tx_cons_sb);
9391 rx_start_idx = le16_to_cpu(*fp->rx_cons_sb);
9393 pkt_prod = fp->tx_pkt_prod++;
9394 tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)];
9395 tx_buf->first_bd = fp->tx_bd_prod;
9398 tx_bd = &fp->tx_desc_ring[TX_BD(fp->tx_bd_prod)];
9399 mapping = pci_map_single(bp->pdev, skb->data,
9400 skb_headlen(skb), PCI_DMA_TODEVICE);
9401 tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
9402 tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
9403 tx_bd->nbd = cpu_to_le16(1);
9404 tx_bd->nbytes = cpu_to_le16(skb_headlen(skb));
9405 tx_bd->vlan = cpu_to_le16(pkt_prod);
9406 tx_bd->bd_flags.as_bitfield = (ETH_TX_BD_FLAGS_START_BD |
9407 ETH_TX_BD_FLAGS_END_BD);
9408 tx_bd->general_data = ((UNICAST_ADDRESS <<
9409 ETH_TX_BD_ETH_ADDR_TYPE_SHIFT) | 1);
9413 le16_add_cpu(&fp->hw_tx_prods->bds_prod, 1);
9414 mb(); /* FW restriction: must not reorder writing nbd and packets */
9415 le32_add_cpu(&fp->hw_tx_prods->packets_prod, 1);
	DOORBELL(bp, fp->index, 0);

	mmiowb();

	num_pkts++;
	fp->tx_bd_prod++;
	bp->dev->trans_start = jiffies;

	udelay(100);
9426 tx_idx = le16_to_cpu(*fp->tx_cons_sb);
9427 if (tx_idx != tx_start_idx + num_pkts)
9428 goto test_loopback_exit;
9430 rx_idx = le16_to_cpu(*fp->rx_cons_sb);
9431 if (rx_idx != rx_start_idx + num_pkts)
9432 goto test_loopback_exit;
9434 cqe = &fp->rx_comp_ring[RCQ_BD(fp->rx_comp_cons)];
9435 cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
9436 if (CQE_TYPE(cqe_fp_flags) || (cqe_fp_flags & ETH_RX_ERROR_FALGS))
9437 goto test_loopback_rx_exit;
9439 len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
9440 if (len != pkt_size)
9441 goto test_loopback_rx_exit;
9443 rx_buf = &fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)];
	skb = rx_buf->skb;
	skb_reserve(skb, cqe->fast_path_cqe.placement_offset);
9446 for (i = ETH_HLEN; i < pkt_size; i++)
9447 if (*(skb->data + i) != (unsigned char) (i & 0xff))
			goto test_loopback_rx_exit;

	rc = 0;
9452 test_loopback_rx_exit:
9454 fp->rx_bd_cons = NEXT_RX_IDX(fp->rx_bd_cons);
9455 fp->rx_bd_prod = NEXT_RX_IDX(fp->rx_bd_prod);
9456 fp->rx_comp_cons = NEXT_RCQ_IDX(fp->rx_comp_cons);
9457 fp->rx_comp_prod = NEXT_RCQ_IDX(fp->rx_comp_prod);
9459 /* Update producers */
	bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
			     fp->rx_sge_prod);

test_loopback_exit:
	bp->link_params.loopback_mode = LOOPBACK_NONE;

	return rc;
}
9469 static int bnx2x_test_loopback(struct bnx2x *bp, u8 link_up)
{
	int rc = 0, res;

	if (!netif_running(bp->dev))
		return BNX2X_LOOPBACK_FAILED;
9476 bnx2x_netif_stop(bp, 1);
9477 bnx2x_acquire_phy_lock(bp);
9479 res = bnx2x_run_loopback(bp, BNX2X_PHY_LOOPBACK, link_up);
	if (res) {
		DP(NETIF_MSG_PROBE, "  PHY loopback failed  (res %d)\n", res);
		rc |= BNX2X_PHY_LOOPBACK_FAILED;
	}
9485 res = bnx2x_run_loopback(bp, BNX2X_MAC_LOOPBACK, link_up);
	if (res) {
		DP(NETIF_MSG_PROBE, "  MAC loopback failed  (res %d)\n", res);
		rc |= BNX2X_MAC_LOOPBACK_FAILED;
	}
9491 bnx2x_release_phy_lock(bp);
	bnx2x_netif_start(bp);

	return rc;
}
9497 #define CRC32_RESIDUAL 0xdebb20e3
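
/* Every NVRAM section in nvram_tbl[] is stored together with its
 * (inverted) CRC32.  A property of CRC32 is that running the CRC over
 * a valid block including its stored checksum always yields the same
 * constant residual, 0xdebb20e3, so one compare per section is enough
 * to validate it.
 */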
9499 static int bnx2x_test_nvram(struct bnx2x *bp)
{
	static const struct {
		int offset;
		int size;
	} nvram_tbl[] = {
9505 { 0, 0x14 }, /* bootstrap */
9506 { 0x14, 0xec }, /* dir */
9507 { 0x100, 0x350 }, /* manuf_info */
9508 { 0x450, 0xf0 }, /* feature_info */
9509 { 0x640, 0x64 }, /* upgrade_key_info */
		{ 0x708,  0x70 }, /* manuf_key_info */
		{     0,     0 }
	};
9515 __be32 buf[0x350 / 4];
	u8 *data = (u8 *)buf;
	int i, rc;
	u32 magic, csum;
	rc = bnx2x_nvram_read(bp, 0, data, 4);
	if (rc) {
		DP(NETIF_MSG_PROBE, "magic value read (rc -%d)\n", -rc);
		goto test_nvram_exit;
	}
9526 magic = be32_to_cpu(buf[0]);
	if (magic != 0x669955aa) {
		DP(NETIF_MSG_PROBE, "magic value (0x%08x)\n", magic);
		rc = -ENODEV;
		goto test_nvram_exit;
	}
9533 for (i = 0; nvram_tbl[i].size; i++) {
		rc = bnx2x_nvram_read(bp, nvram_tbl[i].offset, data,
				      nvram_tbl[i].size);
		if (rc) {
			DP(NETIF_MSG_PROBE,
			   "nvram_tbl[%d] read data (rc -%d)\n", i, -rc);
			goto test_nvram_exit;
		}
		csum = ether_crc_le(nvram_tbl[i].size, data);
		if (csum != CRC32_RESIDUAL) {
			DP(NETIF_MSG_PROBE,
			   "nvram_tbl[%d] csum value (0x%08x)\n", i, csum);
			rc = -ENODEV;
			goto test_nvram_exit;
		}
	}

test_nvram_exit:
	return rc;
}
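
/* The interrupt test never touches the IGU directly: it posts a
 * harmless zero-length SET_MAC ramrod on the slowpath queue and waits
 * for set_mac_pending to be cleared by the completion handler, which
 * proves the whole slowpath interrupt delivery chain is alive.
 */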
9556 static int bnx2x_test_intr(struct bnx2x *bp)
{
	struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
	int i, rc;

	if (!netif_running(bp->dev))
		return -ENODEV;
	config->hdr.length = 0;
	if (CHIP_IS_E1(bp))
		config->hdr.offset = (BP_PORT(bp) ? 32 : 0);
	else
		config->hdr.offset = BP_FUNC(bp);
9569 config->hdr.client_id = bp->fp->cl_id;
9570 config->hdr.reserved1 = 0;
9572 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
9573 U64_HI(bnx2x_sp_mapping(bp, mac_config)),
9574 U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
	if (rc == 0) {
		bp->set_mac_pending++;
9577 for (i = 0; i < 10; i++) {
			if (!bp->set_mac_pending)
				break;
			msleep_interruptible(10);
		}
	}

	return rc;
}
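
/* Self-test ordering: the offline tests (registers, memory, loopback)
 * need exclusive ownership of the chip, so the NIC is reloaded in
 * LOAD_DIAG mode around them and restored to LOAD_NORMAL afterwards.
 * The online tests (nvram, interrupt, link) run against the live
 * configuration.  Results land in buf[0]..buf[5] in the order of
 * bnx2x_tests_str_arr[], a non-zero entry marking a failure.
 */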
9589 static void bnx2x_self_test(struct net_device *dev,
9590 struct ethtool_test *etest, u64 *buf)
{
	struct bnx2x *bp = netdev_priv(dev);
9594 memset(buf, 0, sizeof(u64) * BNX2X_NUM_TESTS);
	if (!netif_running(dev))
		return;
	/* offline tests are not supported in MF mode */
	if (IS_E1HMF(bp))
		etest->flags &= ~ETH_TEST_FL_OFFLINE;
9603 if (etest->flags & ETH_TEST_FL_OFFLINE) {
		u8 link_up;

		link_up = bp->link_vars.link_up;
9607 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
9608 bnx2x_nic_load(bp, LOAD_DIAG);
9609 /* wait until link state is restored */
9610 bnx2x_wait_for_link(bp, link_up);
		if (bnx2x_test_registers(bp) != 0) {
			buf[0] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}
		if (bnx2x_test_memory(bp) != 0) {
			buf[1] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}
		buf[2] = bnx2x_test_loopback(bp, link_up);
		if (buf[2] != 0)
			etest->flags |= ETH_TEST_FL_FAILED;

		bnx2x_nic_unload(bp, UNLOAD_NORMAL);
		bnx2x_nic_load(bp, LOAD_NORMAL);
		/* wait until link state is restored */
		bnx2x_wait_for_link(bp, link_up);
	}
	if (bnx2x_test_nvram(bp) != 0) {
		buf[3] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}
	if (bnx2x_test_intr(bp) != 0) {
		buf[4] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}
	if (bp->port.pmf)
		if (bnx2x_link_test(bp) != 0) {
			buf[5] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}
9643 #ifdef BNX2X_EXTRA_DEBUG
	bnx2x_panic_dump(bp);
#endif
}
static const struct {
	long offset;
	int size;
9651 u8 string[ETH_GSTRING_LEN];
9652 } bnx2x_q_stats_arr[BNX2X_NUM_Q_STATS] = {
9653 /* 1 */ { Q_STATS_OFFSET32(total_bytes_received_hi), 8, "[%d]: rx_bytes" },
9654 { Q_STATS_OFFSET32(error_bytes_received_hi),
9655 8, "[%d]: rx_error_bytes" },
9656 { Q_STATS_OFFSET32(total_unicast_packets_received_hi),
9657 8, "[%d]: rx_ucast_packets" },
9658 { Q_STATS_OFFSET32(total_multicast_packets_received_hi),
9659 8, "[%d]: rx_mcast_packets" },
9660 { Q_STATS_OFFSET32(total_broadcast_packets_received_hi),
9661 8, "[%d]: rx_bcast_packets" },
9662 { Q_STATS_OFFSET32(no_buff_discard_hi), 8, "[%d]: rx_discards" },
9663 { Q_STATS_OFFSET32(rx_err_discard_pkt),
9664 4, "[%d]: rx_phy_ip_err_discards"},
9665 { Q_STATS_OFFSET32(rx_skb_alloc_failed),
9666 4, "[%d]: rx_skb_alloc_discard" },
9667 { Q_STATS_OFFSET32(hw_csum_err), 4, "[%d]: rx_csum_offload_errors" },
9669 /* 10 */{ Q_STATS_OFFSET32(total_bytes_transmitted_hi), 8, "[%d]: tx_bytes" },
9670 { Q_STATS_OFFSET32(total_unicast_packets_transmitted_hi),
9671 8, "[%d]: tx_packets" }
static const struct {
	long offset;
	int size;
	u32 flags;
9678 #define STATS_FLAGS_PORT 1
9679 #define STATS_FLAGS_FUNC 2
9680 #define STATS_FLAGS_BOTH (STATS_FLAGS_FUNC | STATS_FLAGS_PORT)
9681 u8 string[ETH_GSTRING_LEN];
9682 } bnx2x_stats_arr[BNX2X_NUM_STATS] = {
9683 /* 1 */ { STATS_OFFSET32(total_bytes_received_hi),
9684 8, STATS_FLAGS_BOTH, "rx_bytes" },
9685 { STATS_OFFSET32(error_bytes_received_hi),
9686 8, STATS_FLAGS_BOTH, "rx_error_bytes" },
9687 { STATS_OFFSET32(total_unicast_packets_received_hi),
9688 8, STATS_FLAGS_BOTH, "rx_ucast_packets" },
9689 { STATS_OFFSET32(total_multicast_packets_received_hi),
9690 8, STATS_FLAGS_BOTH, "rx_mcast_packets" },
9691 { STATS_OFFSET32(total_broadcast_packets_received_hi),
9692 8, STATS_FLAGS_BOTH, "rx_bcast_packets" },
9693 { STATS_OFFSET32(rx_stat_dot3statsfcserrors_hi),
9694 8, STATS_FLAGS_PORT, "rx_crc_errors" },
9695 { STATS_OFFSET32(rx_stat_dot3statsalignmenterrors_hi),
9696 8, STATS_FLAGS_PORT, "rx_align_errors" },
9697 { STATS_OFFSET32(rx_stat_etherstatsundersizepkts_hi),
9698 8, STATS_FLAGS_PORT, "rx_undersize_packets" },
9699 { STATS_OFFSET32(etherstatsoverrsizepkts_hi),
9700 8, STATS_FLAGS_PORT, "rx_oversize_packets" },
9701 /* 10 */{ STATS_OFFSET32(rx_stat_etherstatsfragments_hi),
9702 8, STATS_FLAGS_PORT, "rx_fragments" },
9703 { STATS_OFFSET32(rx_stat_etherstatsjabbers_hi),
9704 8, STATS_FLAGS_PORT, "rx_jabbers" },
9705 { STATS_OFFSET32(no_buff_discard_hi),
9706 8, STATS_FLAGS_BOTH, "rx_discards" },
9707 { STATS_OFFSET32(mac_filter_discard),
9708 4, STATS_FLAGS_PORT, "rx_filtered_packets" },
9709 { STATS_OFFSET32(xxoverflow_discard),
9710 4, STATS_FLAGS_PORT, "rx_fw_discards" },
9711 { STATS_OFFSET32(brb_drop_hi),
9712 8, STATS_FLAGS_PORT, "rx_brb_discard" },
9713 { STATS_OFFSET32(brb_truncate_hi),
9714 8, STATS_FLAGS_PORT, "rx_brb_truncate" },
9715 { STATS_OFFSET32(pause_frames_received_hi),
9716 8, STATS_FLAGS_PORT, "rx_pause_frames" },
9717 { STATS_OFFSET32(rx_stat_maccontrolframesreceived_hi),
9718 8, STATS_FLAGS_PORT, "rx_mac_ctrl_frames" },
9719 { STATS_OFFSET32(nig_timer_max),
9720 4, STATS_FLAGS_PORT, "rx_constant_pause_events" },
9721 /* 20 */{ STATS_OFFSET32(rx_err_discard_pkt),
9722 4, STATS_FLAGS_BOTH, "rx_phy_ip_err_discards"},
9723 { STATS_OFFSET32(rx_skb_alloc_failed),
9724 4, STATS_FLAGS_BOTH, "rx_skb_alloc_discard" },
9725 { STATS_OFFSET32(hw_csum_err),
9726 4, STATS_FLAGS_BOTH, "rx_csum_offload_errors" },
9728 { STATS_OFFSET32(total_bytes_transmitted_hi),
9729 8, STATS_FLAGS_BOTH, "tx_bytes" },
9730 { STATS_OFFSET32(tx_stat_ifhcoutbadoctets_hi),
9731 8, STATS_FLAGS_PORT, "tx_error_bytes" },
9732 { STATS_OFFSET32(total_unicast_packets_transmitted_hi),
9733 8, STATS_FLAGS_BOTH, "tx_packets" },
9734 { STATS_OFFSET32(tx_stat_dot3statsinternalmactransmiterrors_hi),
9735 8, STATS_FLAGS_PORT, "tx_mac_errors" },
9736 { STATS_OFFSET32(rx_stat_dot3statscarriersenseerrors_hi),
9737 8, STATS_FLAGS_PORT, "tx_carrier_errors" },
9738 { STATS_OFFSET32(tx_stat_dot3statssinglecollisionframes_hi),
9739 8, STATS_FLAGS_PORT, "tx_single_collisions" },
9740 { STATS_OFFSET32(tx_stat_dot3statsmultiplecollisionframes_hi),
9741 8, STATS_FLAGS_PORT, "tx_multi_collisions" },
9742 /* 30 */{ STATS_OFFSET32(tx_stat_dot3statsdeferredtransmissions_hi),
9743 8, STATS_FLAGS_PORT, "tx_deferred" },
9744 { STATS_OFFSET32(tx_stat_dot3statsexcessivecollisions_hi),
9745 8, STATS_FLAGS_PORT, "tx_excess_collisions" },
9746 { STATS_OFFSET32(tx_stat_dot3statslatecollisions_hi),
9747 8, STATS_FLAGS_PORT, "tx_late_collisions" },
9748 { STATS_OFFSET32(tx_stat_etherstatscollisions_hi),
9749 8, STATS_FLAGS_PORT, "tx_total_collisions" },
9750 { STATS_OFFSET32(tx_stat_etherstatspkts64octets_hi),
9751 8, STATS_FLAGS_PORT, "tx_64_byte_packets" },
9752 { STATS_OFFSET32(tx_stat_etherstatspkts65octetsto127octets_hi),
9753 8, STATS_FLAGS_PORT, "tx_65_to_127_byte_packets" },
9754 { STATS_OFFSET32(tx_stat_etherstatspkts128octetsto255octets_hi),
9755 8, STATS_FLAGS_PORT, "tx_128_to_255_byte_packets" },
9756 { STATS_OFFSET32(tx_stat_etherstatspkts256octetsto511octets_hi),
9757 8, STATS_FLAGS_PORT, "tx_256_to_511_byte_packets" },
9758 { STATS_OFFSET32(tx_stat_etherstatspkts512octetsto1023octets_hi),
9759 8, STATS_FLAGS_PORT, "tx_512_to_1023_byte_packets" },
9760 { STATS_OFFSET32(etherstatspkts1024octetsto1522octets_hi),
9761 8, STATS_FLAGS_PORT, "tx_1024_to_1522_byte_packets" },
9762 /* 40 */{ STATS_OFFSET32(etherstatspktsover1522octets_hi),
9763 8, STATS_FLAGS_PORT, "tx_1523_to_9022_byte_packets" },
9764 { STATS_OFFSET32(pause_frames_sent_hi),
9765 8, STATS_FLAGS_PORT, "tx_pause_frames" }
9768 #define IS_PORT_STAT(i) \
9769 ((bnx2x_stats_arr[i].flags & STATS_FLAGS_BOTH) == STATS_FLAGS_PORT)
9770 #define IS_FUNC_STAT(i) (bnx2x_stats_arr[i].flags & STATS_FLAGS_FUNC)
9771 #define IS_E1HMF_MODE_STAT(bp) \
9772 (IS_E1HMF(bp) && !(bp->msglevel & BNX2X_MSG_STATS))
9774 static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
{
	struct bnx2x *bp = netdev_priv(dev);
	int i, j, k;

	switch (stringset) {
	case ETH_SS_STATS:
		if (is_multi(bp)) {
			k = 0;
			for_each_queue(bp, i) {
				for (j = 0; j < BNX2X_NUM_Q_STATS; j++)
					sprintf(buf + (k + j)*ETH_GSTRING_LEN,
						bnx2x_q_stats_arr[j].string, i);
				k += BNX2X_NUM_Q_STATS;
			}
			if (IS_E1HMF_MODE_STAT(bp))
				break;
			for (j = 0; j < BNX2X_NUM_STATS; j++)
				strcpy(buf + (k + j)*ETH_GSTRING_LEN,
				       bnx2x_stats_arr[j].string);
		} else {
			for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
				if (IS_E1HMF_MODE_STAT(bp) && IS_PORT_STAT(i))
					continue;
				strcpy(buf + j*ETH_GSTRING_LEN,
				       bnx2x_stats_arr[i].string);
				j++;
			}
		}
		break;

	case ETH_SS_TEST:
		memcpy(buf, bnx2x_tests_str_arr, sizeof(bnx2x_tests_str_arr));
		break;
	}
}
9811 static int bnx2x_get_stats_count(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);
	int i, num_stats;

	if (is_multi(bp)) {
		num_stats = BNX2X_NUM_Q_STATS * BNX2X_NUM_QUEUES(bp);
		if (!IS_E1HMF_MODE_STAT(bp))
			num_stats += BNX2X_NUM_STATS;
	} else {
		if (IS_E1HMF_MODE_STAT(bp)) {
			num_stats = 0;
			for (i = 0; i < BNX2X_NUM_STATS; i++)
				if (IS_FUNC_STAT(i))
					num_stats++;
		} else
			num_stats = BNX2X_NUM_STATS;
	}

	return num_stats;
}
9833 static void bnx2x_get_ethtool_stats(struct net_device *dev,
9834 struct ethtool_stats *stats, u64 *buf)
{
	struct bnx2x *bp = netdev_priv(dev);
	u32 *hw_stats, *offset;
	int i, j, k;

	if (is_multi(bp)) {
		k = 0;
		for_each_queue(bp, i) {
			hw_stats = (u32 *)&bp->fp[i].eth_q_stats;
			for (j = 0; j < BNX2X_NUM_Q_STATS; j++) {
				if (bnx2x_q_stats_arr[j].size == 0) {
					/* skip this counter */
					buf[k + j] = 0;
					continue;
				}
				offset = (hw_stats +
					  bnx2x_q_stats_arr[j].offset);
				if (bnx2x_q_stats_arr[j].size == 4) {
					/* 4-byte counter */
					buf[k + j] = (u64) *offset;
					continue;
				}
				/* 8-byte counter */
				buf[k + j] = HILO_U64(*offset, *(offset + 1));
			}
			k += BNX2X_NUM_Q_STATS;
		}
		if (IS_E1HMF_MODE_STAT(bp))
			return;
		hw_stats = (u32 *)&bp->eth_stats;
		for (j = 0; j < BNX2X_NUM_STATS; j++) {
			if (bnx2x_stats_arr[j].size == 0) {
				/* skip this counter */
				buf[k + j] = 0;
				continue;
			}
			offset = (hw_stats + bnx2x_stats_arr[j].offset);
			if (bnx2x_stats_arr[j].size == 4) {
				/* 4-byte counter */
				buf[k + j] = (u64) *offset;
				continue;
			}
			/* 8-byte counter */
			buf[k + j] = HILO_U64(*offset, *(offset + 1));
		}
	} else {
		hw_stats = (u32 *)&bp->eth_stats;
		for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
			if (IS_E1HMF_MODE_STAT(bp) && IS_PORT_STAT(i))
				continue;
			if (bnx2x_stats_arr[i].size == 0) {
				/* skip this counter */
				buf[j] = 0;
				j++;
				continue;
			}
			offset = (hw_stats + bnx2x_stats_arr[i].offset);
			if (bnx2x_stats_arr[i].size == 4) {
				/* 4-byte counter */
				buf[j] = (u64) *offset;
				j++;
				continue;
			}
			/* 8-byte counter */
			buf[j] = HILO_U64(*offset, *(offset + 1));
			j++;
		}
	}
}
9905 static int bnx2x_phys_id(struct net_device *dev, u32 data)
{
	struct bnx2x *bp = netdev_priv(dev);
	int port = BP_PORT(bp);
	int i;

	if (!netif_running(dev))
		return 0;

	if (!bp->port.pmf)
		return 0;

	if (data == 0)
		data = 2;

	for (i = 0; i < (data * 2); i++) {
		if ((i % 2) == 0)
			bnx2x_set_led(bp, port, LED_MODE_OPER, SPEED_1000,
				      bp->link_params.hw_led_mode,
				      bp->link_params.chip_id);
		else
			bnx2x_set_led(bp, port, LED_MODE_OFF, 0,
				      bp->link_params.hw_led_mode,
				      bp->link_params.chip_id);

		msleep_interruptible(500);
		if (signal_pending(current))
			break;
	}

	if (bp->link_vars.link_up)
		bnx2x_set_led(bp, port, LED_MODE_OPER,
			      bp->link_vars.line_speed,
			      bp->link_params.hw_led_mode,
			      bp->link_params.chip_id);

	return 0;
}
9944 static struct ethtool_ops bnx2x_ethtool_ops = {
9945 .get_settings = bnx2x_get_settings,
9946 .set_settings = bnx2x_set_settings,
9947 .get_drvinfo = bnx2x_get_drvinfo,
9948 .get_wol = bnx2x_get_wol,
9949 .set_wol = bnx2x_set_wol,
9950 .get_msglevel = bnx2x_get_msglevel,
9951 .set_msglevel = bnx2x_set_msglevel,
9952 .nway_reset = bnx2x_nway_reset,
9953 .get_link = ethtool_op_get_link,
9954 .get_eeprom_len = bnx2x_get_eeprom_len,
9955 .get_eeprom = bnx2x_get_eeprom,
9956 .set_eeprom = bnx2x_set_eeprom,
9957 .get_coalesce = bnx2x_get_coalesce,
9958 .set_coalesce = bnx2x_set_coalesce,
9959 .get_ringparam = bnx2x_get_ringparam,
9960 .set_ringparam = bnx2x_set_ringparam,
9961 .get_pauseparam = bnx2x_get_pauseparam,
9962 .set_pauseparam = bnx2x_set_pauseparam,
9963 .get_rx_csum = bnx2x_get_rx_csum,
9964 .set_rx_csum = bnx2x_set_rx_csum,
9965 .get_tx_csum = ethtool_op_get_tx_csum,
9966 .set_tx_csum = ethtool_op_set_tx_hw_csum,
9967 .set_flags = bnx2x_set_flags,
9968 .get_flags = ethtool_op_get_flags,
9969 .get_sg = ethtool_op_get_sg,
9970 .set_sg = ethtool_op_set_sg,
9971 .get_tso = ethtool_op_get_tso,
9972 .set_tso = bnx2x_set_tso,
9973 .self_test_count = bnx2x_self_test_count,
9974 .self_test = bnx2x_self_test,
9975 .get_strings = bnx2x_get_strings,
9976 .phys_id = bnx2x_phys_id,
9977 .get_stats_count = bnx2x_get_stats_count,
9978 .get_ethtool_stats = bnx2x_get_ethtool_stats,
9981 /* end of ethtool_ops */
9983 /****************************************************************************
9984 * General service functions
9985 ****************************************************************************/
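
/* Power states are driven directly through the PM capability's PMCSR
 * register instead of pci_set_power_state(), so the PME enable/status
 * bits can be set in the same config write.  Note the mandatory delay
 * when coming out of D3hot before any memory-mapped access, and that
 * nothing may touch the BARs once the device has entered D3hot.
 */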
static int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
{
	u16 pmcsr;

	pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);

	switch (state) {
	case PCI_D0:
		pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
				      ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
				       PCI_PM_CTRL_PME_STATUS));

		if (pmcsr & PCI_PM_CTRL_STATE_MASK)
			/* delay required during transition out of D3hot */
			msleep(20);
		break;

	case PCI_D3hot:
		pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
		pmcsr |= 3;

		if (bp->wol)
			pmcsr |= PCI_PM_CTRL_PME_ENABLE;

		pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
				      pmcsr);

		/* No more memory access after this point until
		 * device is brought back to D0.
		 */
		break;

	default:
		return -EINVAL;
	}
	return 0;
}
static inline int bnx2x_has_rx_work(struct bnx2x_fastpath *fp)
{
	u16 rx_cons_sb;

	/* Tell compiler that status block fields can change */
	barrier();
	rx_cons_sb = le16_to_cpu(*fp->rx_cons_sb);
	if ((rx_cons_sb & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
		rx_cons_sb++;
	return (fp->rx_comp_cons != rx_cons_sb);
}
10038 * net_device service functions
10041 static int bnx2x_poll(struct napi_struct *napi, int budget)
{
	struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
						 napi);
	struct bnx2x *bp = fp->bp;
	int work_done = 0;
10048 #ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		goto poll_panic;
#endif
10053 prefetch(fp->tx_buf_ring[TX_BD(fp->tx_pkt_cons)].skb);
10054 prefetch(fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)].skb);
10055 prefetch((char *)(fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)].skb) + 256);
10057 bnx2x_update_fpsb_idx(fp);
10059 if (bnx2x_has_tx_work(fp))
10060 bnx2x_tx_int(fp, budget);
10062 if (bnx2x_has_rx_work(fp))
10063 work_done = bnx2x_rx_int(fp, budget);
10064 rmb(); /* BNX2X_HAS_WORK() reads the status block */
10066 /* must not complete if we consumed full budget */
10067 if ((work_done < budget) && !BNX2X_HAS_WORK(fp)) {
#ifdef BNX2X_STOP_ON_ERROR
poll_panic:
#endif
		napi_complete(napi);
10074 bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID,
10075 le16_to_cpu(fp->fp_u_idx), IGU_INT_NOP, 1);
10076 bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID,
			     le16_to_cpu(fp->fp_c_idx), IGU_INT_ENABLE, 1);
	}

	return work_done;
}
/* we split the first BD into header and data BDs
 * to ease the pain of our fellow microcode engineers;
 * we use one mapping for both BDs.
 * So far this has only been observed to happen
 * in Other Operating Systems(TM)
 */
10089 static noinline u16 bnx2x_tx_split(struct bnx2x *bp,
10090 struct bnx2x_fastpath *fp,
10091 struct eth_tx_bd **tx_bd, u16 hlen,
10092 u16 bd_prod, int nbd)
10094 struct eth_tx_bd *h_tx_bd = *tx_bd;
10095 struct eth_tx_bd *d_tx_bd;
10096 dma_addr_t mapping;
10097 int old_len = le16_to_cpu(h_tx_bd->nbytes);
10099 /* first fix first BD */
10100 h_tx_bd->nbd = cpu_to_le16(nbd);
10101 h_tx_bd->nbytes = cpu_to_le16(hlen);
10103 DP(NETIF_MSG_TX_QUEUED, "TSO split header size is %d "
10104 "(%x:%x) nbd %d\n", h_tx_bd->nbytes, h_tx_bd->addr_hi,
10105 h_tx_bd->addr_lo, h_tx_bd->nbd);
10107 /* now get a new data BD
10108 * (after the pbd) and fill it */
10109 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
10110 d_tx_bd = &fp->tx_desc_ring[bd_prod];
10112 mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi),
10113 le32_to_cpu(h_tx_bd->addr_lo)) + hlen;
10115 d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
10116 d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
10117 d_tx_bd->nbytes = cpu_to_le16(old_len - hlen);
10119 /* this marks the BD as one that has no individual mapping
10120 * the FW ignores this flag in a BD not marked start
10122 d_tx_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_SW_LSO;
10123 DP(NETIF_MSG_TX_QUEUED,
10124 "TSO split data size is %d (%x:%x)\n",
10125 d_tx_bd->nbytes, d_tx_bd->addr_hi, d_tx_bd->addr_lo);
	/* update tx_bd for marking the last BD flag */
	*tx_bd = d_tx_bd;

	return bd_prod;
}
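
/* bnx2x_csum_fix() repairs a checksum that was computed from a
 * different starting offset than the one the HW expects.  The Internet
 * checksum is a plain ones-complement sum, so the bytes between the
 * two start points can be summed separately with csum_partial() and
 * then subtracted from (fix > 0) or added to (fix < 0) the running
 * value; the result is byte-swapped into the order the parsing BD
 * expects.
 */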
static inline u16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
{
	if (fix > 0)
		csum = (u16) ~csum_fold(csum_sub(csum,
				csum_partial(t_header - fix, fix, 0)));

	else if (fix < 0)
		csum = (u16) ~csum_fold(csum_add(csum,
				csum_partial(t_header, -fix, 0)));

	return swab16(csum);
}
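
/* bnx2x_xmit_type() condenses the skb offload state into XMIT_* flag
 * bits: XMIT_CSUM_V4/V6 plus XMIT_CSUM_TCP for CHECKSUM_PARTIAL
 * packets, and XMIT_GSO_V4/V6 keyed off gso_type for TSO.  The rest
 * of the transmit path branches on these bits instead of re-parsing
 * the packet headers.
 */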
static inline u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
{
	u32 rc = XMIT_PLAIN;

	if (skb->ip_summed != CHECKSUM_PARTIAL)
		rc = XMIT_PLAIN;

	else {
		if (skb->protocol == htons(ETH_P_IPV6)) {
			rc = XMIT_CSUM_V6;
			if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
				rc |= XMIT_CSUM_TCP;

		} else {
			rc = XMIT_CSUM_V4;
			if (ip_hdr(skb)->protocol == IPPROTO_TCP)
				rc |= XMIT_CSUM_TCP;
		}
	}

	if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
		rc |= XMIT_GSO_V4;

	else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
		rc |= XMIT_GSO_V6;

	return rc;
}
10175 #if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
10176 /* check if packet requires linearization (packet is too fragmented) */
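/* Worked example of the sliding-window check below, with hypothetical
 * numbers (say MAX_FETCH_BD = 13, giving a window of 10 BDs, and an
 * MSS of 1400): the FW fetches at most wnd_size data BDs per MSS, so
 * every run of 10 consecutive fragments must carry at least 1400
 * bytes; if any window sums to less, the skb must be linearized
 * before transmission.
 */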
static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
			     u32 xmit_type)
{
	int to_copy = 0;
	int hlen = 0;
	int first_bd_sz = 0;
10184 /* 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */
10185 if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - 3)) {
10187 if (xmit_type & XMIT_GSO) {
10188 unsigned short lso_mss = skb_shinfo(skb)->gso_size;
10189 /* Check if LSO packet needs to be copied:
10190 3 = 1 (for headers BD) + 2 (for PBD and last BD) */
			int wnd_size = MAX_FETCH_BD - 3;
			/* Number of windows to check */
			int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size;
			int wnd_idx = 0;
			int frag_idx = 0;
			u32 wnd_sum = 0;
10198 /* Headers length */
			hlen = (int)(skb_transport_header(skb) - skb->data) +
				tcp_hdrlen(skb);
10202 /* Amount of data (w/o headers) on linear part of SKB*/
10203 first_bd_sz = skb_headlen(skb) - hlen;
			wnd_sum = first_bd_sz;

			/* Calculate the first sum - it's special */
			for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++)
				wnd_sum +=
					skb_shinfo(skb)->frags[frag_idx].size;
			/* If there was data on linear skb data - check it */
			if (first_bd_sz > 0) {
				if (unlikely(wnd_sum < lso_mss)) {
					to_copy = 1;
					goto exit_lbl;
				}

				wnd_sum -= first_bd_sz;
			}
10222 /* Others are easier: run through the frag list and
10223 check all windows */
			for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) {
				wnd_sum +=
			  skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1].size;

				if (unlikely(wnd_sum < lso_mss)) {
					to_copy = 1;
					break;
				}
				wnd_sum -=
					skb_shinfo(skb)->frags[wnd_idx].size;
			}
		} else {
			/* in non-LSO too fragmented packet should always
			   be linearized */
			to_copy = 1;
		}
	}

exit_lbl:
	if (unlikely(to_copy))
10245 DP(NETIF_MSG_TX_QUEUED,
10246 "Linearization IS REQUIRED for %s packet. "
10247 "num_frags %d hlen %d first_bd_sz %d\n",
10248 (xmit_type & XMIT_GSO) ? "LSO" : "non-LSO",
			   skb_shinfo(skb)->nr_frags, hlen, first_bd_sz);

	return to_copy;
}
#endif
10255 /* called with netif_tx_lock
10256 * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
 * netif_wake_queue()
 */
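
/* BD layout produced below, in ring order: one START BD carrying the
 * linear part of the skb, a parsing BD (reserved for every packet),
 * then one BD per page fragment, with the final BD flagged END.  nbd
 * counts the start BD, the frag BDs and the parsing BD, which is why
 * it is nr_frags + 2 whenever a parsing BD exists.
 */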
10259 static int bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
10261 struct bnx2x *bp = netdev_priv(dev);
10262 struct bnx2x_fastpath *fp;
10263 struct netdev_queue *txq;
10264 struct sw_tx_bd *tx_buf;
10265 struct eth_tx_bd *tx_bd;
10266 struct eth_tx_parse_bd *pbd = NULL;
	u16 pkt_prod, bd_prod;
	int nbd, fp_index;
10269 dma_addr_t mapping;
10270 u32 xmit_type = bnx2x_xmit_type(bp, skb);
	int vlan_off = (bp->e1hov ? 4 : 0);
	int i;
	int hlen = 0;
10275 #ifdef BNX2X_STOP_ON_ERROR
10276 if (unlikely(bp->panic))
10277 return NETDEV_TX_BUSY;
10280 fp_index = skb_get_queue_mapping(skb);
10281 txq = netdev_get_tx_queue(dev, fp_index);
10283 fp = &bp->fp[fp_index];
10285 if (unlikely(bnx2x_tx_avail(fp) < (skb_shinfo(skb)->nr_frags + 3))) {
		fp->eth_q_stats.driver_xoff++;
10287 netif_tx_stop_queue(txq);
10288 BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
10289 return NETDEV_TX_BUSY;
10292 DP(NETIF_MSG_TX_QUEUED, "SKB: summed %x protocol %x protocol(%x,%x)"
10293 " gso type %x xmit_type %x\n",
10294 skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
10295 ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type);
10297 #if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
10298 /* First, check if we need to linearize the skb
10299 (due to FW restrictions) */
10300 if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) {
10301 /* Statistics of linearization */
10303 if (skb_linearize(skb) != 0) {
10304 DP(NETIF_MSG_TX_QUEUED, "SKB linearization failed - "
10305 "silently dropping this SKB\n");
10306 dev_kfree_skb_any(skb);
10307 return NETDEV_TX_OK;
	/*
	Please read carefully. First we use one BD which we mark as start,
	then for TSO or xsum we have a parsing info BD,
	and only then we have the rest of the TSO BDs.
	(don't forget to mark the last one as last,
	and to unmap only AFTER you write to the BD ...)
	And above all, all PBD sizes are in words - NOT DWORDS!
	*/
10321 pkt_prod = fp->tx_pkt_prod++;
10322 bd_prod = TX_BD(fp->tx_bd_prod);
10324 /* get a tx_buf and first BD */
10325 tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)];
10326 tx_bd = &fp->tx_desc_ring[bd_prod];
10328 tx_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
10329 tx_bd->general_data = (UNICAST_ADDRESS <<
10330 ETH_TX_BD_ETH_ADDR_TYPE_SHIFT);
10332 tx_bd->general_data |= (1 << ETH_TX_BD_HDR_NBDS_SHIFT);
10334 /* remember the first BD of the packet */
	tx_buf->first_bd = fp->tx_bd_prod;
	tx_buf->skb = skb;
10338 DP(NETIF_MSG_TX_QUEUED,
10339 "sending pkt %u @%p next_idx %u bd %u @%p\n",
10340 pkt_prod, tx_buf, fp->tx_pkt_prod, bd_prod, tx_bd);
#ifdef BCM_VLAN
	if ((bp->vlgrp != NULL) && vlan_tx_tag_present(skb) &&
	    (bp->flags & HW_VLAN_TX_FLAG)) {
		tx_bd->vlan = cpu_to_le16(vlan_tx_tag_get(skb));
		tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_VLAN_TAG;
		vlan_off += 4;
	} else
#endif
		tx_bd->vlan = cpu_to_le16(pkt_prod);
10353 /* turn on parsing and get a BD */
10354 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
10355 pbd = (void *)&fp->tx_desc_ring[bd_prod];
10357 memset(pbd, 0, sizeof(struct eth_tx_parse_bd));
10360 if (xmit_type & XMIT_CSUM) {
10361 hlen = (skb_network_header(skb) - skb->data + vlan_off) / 2;
10363 /* for now NS flag is not used in Linux */
		pbd->global_data =
			(hlen | ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
				 ETH_TX_PARSE_BD_LLC_SNAP_EN_SHIFT));
10368 pbd->ip_hlen = (skb_transport_header(skb) -
10369 skb_network_header(skb)) / 2;
10371 hlen += pbd->ip_hlen + tcp_hdrlen(skb) / 2;
10373 pbd->total_hlen = cpu_to_le16(hlen);
10374 hlen = hlen*2 - vlan_off;
10376 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_TCP_CSUM;
10378 if (xmit_type & XMIT_CSUM_V4)
10379 tx_bd->bd_flags.as_bitfield |=
10380 ETH_TX_BD_FLAGS_IP_CSUM;
10382 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IPV6;
10384 if (xmit_type & XMIT_CSUM_TCP) {
10385 pbd->tcp_pseudo_csum = swab16(tcp_hdr(skb)->check);
10388 s8 fix = SKB_CS_OFF(skb); /* signed! */
10390 pbd->global_data |= ETH_TX_PARSE_BD_CS_ANY_FLG;
10391 pbd->cs_offset = fix / 2;
10393 DP(NETIF_MSG_TX_QUEUED,
10394 "hlen %d offset %d fix %d csum before fix %x\n",
			   le16_to_cpu(pbd->total_hlen), pbd->cs_offset, fix,
			   SKB_CS(skb));
10398 /* HW bug: fixup the CSUM */
10399 pbd->tcp_pseudo_csum =
				bnx2x_csum_fix(skb_transport_header(skb),
					       SKB_CS(skb), fix);
10403 DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n",
			   pbd->tcp_pseudo_csum);
		}
	}
10408 mapping = pci_map_single(bp->pdev, skb->data,
10409 skb_headlen(skb), PCI_DMA_TODEVICE);
10411 tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
10412 tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
10413 nbd = skb_shinfo(skb)->nr_frags + ((pbd == NULL) ? 1 : 2);
10414 tx_bd->nbd = cpu_to_le16(nbd);
10415 tx_bd->nbytes = cpu_to_le16(skb_headlen(skb));
10417 DP(NETIF_MSG_TX_QUEUED, "first bd @%p addr (%x:%x) nbd %d"
10418 " nbytes %d flags %x vlan %x\n",
10419 tx_bd, tx_bd->addr_hi, tx_bd->addr_lo, le16_to_cpu(tx_bd->nbd),
10420 le16_to_cpu(tx_bd->nbytes), tx_bd->bd_flags.as_bitfield,
10421 le16_to_cpu(tx_bd->vlan));
10423 if (xmit_type & XMIT_GSO) {
10425 DP(NETIF_MSG_TX_QUEUED,
10426 "TSO packet len %d hlen %d total len %d tso size %d\n",
10427 skb->len, hlen, skb_headlen(skb),
10428 skb_shinfo(skb)->gso_size);
10430 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;
10432 if (unlikely(skb_headlen(skb) > hlen))
			bd_prod = bnx2x_tx_split(bp, fp, &tx_bd, hlen,
						 bd_prod, ++nbd);
10436 pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
10437 pbd->tcp_send_seq = swab32(tcp_hdr(skb)->seq);
10438 pbd->tcp_flags = pbd_tcp_flags(skb);
10440 if (xmit_type & XMIT_GSO_V4) {
10441 pbd->ip_id = swab16(ip_hdr(skb)->id);
10442 pbd->tcp_pseudo_csum =
10443 swab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
10444 ip_hdr(skb)->daddr,
10445 0, IPPROTO_TCP, 0));
		} else
			pbd->tcp_pseudo_csum =
10449 swab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
10450 &ipv6_hdr(skb)->daddr,
10451 0, IPPROTO_TCP, 0));
10453 pbd->global_data |= ETH_TX_PARSE_BD_PSEUDO_CS_WITHOUT_LEN;
10456 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
10457 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
10459 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
10460 tx_bd = &fp->tx_desc_ring[bd_prod];
10462 mapping = pci_map_page(bp->pdev, frag->page, frag->page_offset,
10463 frag->size, PCI_DMA_TODEVICE);
10465 tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
10466 tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
10467 tx_bd->nbytes = cpu_to_le16(frag->size);
10468 tx_bd->vlan = cpu_to_le16(pkt_prod);
10469 tx_bd->bd_flags.as_bitfield = 0;
10471 DP(NETIF_MSG_TX_QUEUED,
10472 "frag %d bd @%p addr (%x:%x) nbytes %d flags %x\n",
10473 i, tx_bd, tx_bd->addr_hi, tx_bd->addr_lo,
10474 le16_to_cpu(tx_bd->nbytes), tx_bd->bd_flags.as_bitfield);
10477 /* now at last mark the BD as the last BD */
10478 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_END_BD;
10480 DP(NETIF_MSG_TX_QUEUED, "last bd @%p flags %x\n",
10481 tx_bd, tx_bd->bd_flags.as_bitfield);
10483 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
10485 /* now send a tx doorbell, counting the next BD
10486 * if the packet contains or ends with it
	if (TX_BD_POFF(bd_prod) < nbd)
		nbd++;
	if (pbd)
		DP(NETIF_MSG_TX_QUEUED,
10493 "PBD @%p ip_data %x ip_hlen %u ip_id %u lso_mss %u"
10494 " tcp_flags %x xsum %x seq %u hlen %u\n",
10495 pbd, pbd->global_data, pbd->ip_hlen, pbd->ip_id,
10496 pbd->lso_mss, pbd->tcp_flags, pbd->tcp_pseudo_csum,
10497 pbd->tcp_send_seq, le16_to_cpu(pbd->total_hlen));
10499 DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d bd %u\n", nbd, bd_prod);
	/*
	 * Make sure that the BD data is updated before updating the producer
	 * since FW might read the BD right after the producer is updated.
	 * This is only applicable for weak-ordered memory model archs such
	 * as IA-64.  The following barrier is also mandatory since the FW
	 * assumes packets always have BDs.
	 */
10510 le16_add_cpu(&fp->hw_tx_prods->bds_prod, nbd);
10511 mb(); /* FW restriction: must not reorder writing nbd and packets */
10512 le32_add_cpu(&fp->hw_tx_prods->packets_prod, 1);
	DOORBELL(bp, fp->index, 0);

	mmiowb();
10517 fp->tx_bd_prod += nbd;
10518 dev->trans_start = jiffies;
10520 if (unlikely(bnx2x_tx_avail(fp) < MAX_SKB_FRAGS + 3)) {
10521 /* We want bnx2x_tx_int to "see" the updated tx_bd_prod
		   if we put Tx into XOFF state. */
		smp_mb();
		netif_tx_stop_queue(txq);
10525 fp->eth_q_stats.driver_xoff++;
10526 if (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3)
			netif_tx_wake_queue(txq);
	}
	fp->tx_pkt++;
10531 return NETDEV_TX_OK;
10534 /* called with rtnl_lock */
10535 static int bnx2x_open(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);
10539 netif_carrier_off(dev);
10541 bnx2x_set_power_state(bp, PCI_D0);
	return bnx2x_nic_load(bp, LOAD_OPEN);
}
10546 /* called with rtnl_lock */
10547 static int bnx2x_close(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);
10551 /* Unload the driver, release IRQs */
10552 bnx2x_nic_unload(bp, UNLOAD_CLOSE);
10553 if (atomic_read(&bp->pdev->enable_cnt) == 1)
10554 if (!CHIP_REV_IS_SLOW(bp))
			bnx2x_set_power_state(bp, PCI_D3hot);

	return 0;
}
10560 /* called with netif_tx_lock from set_multicast */
10561 static void bnx2x_set_rx_mode(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);
10564 u32 rx_mode = BNX2X_RX_MODE_NORMAL;
10565 int port = BP_PORT(bp);
10567 if (bp->state != BNX2X_STATE_OPEN) {
		DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
		return;
	}
10572 DP(NETIF_MSG_IFUP, "dev->flags = %x\n", dev->flags);
10574 if (dev->flags & IFF_PROMISC)
10575 rx_mode = BNX2X_RX_MODE_PROMISC;
10577 else if ((dev->flags & IFF_ALLMULTI) ||
10578 ((dev->mc_count > BNX2X_MAX_MULTICAST) && CHIP_IS_E1(bp)))
10579 rx_mode = BNX2X_RX_MODE_ALLMULTI;
10581 else { /* some multicasts */
10582 if (CHIP_IS_E1(bp)) {
10583 int i, old, offset;
10584 struct dev_mc_list *mclist;
10585 struct mac_configuration_cmd *config =
10586 bnx2x_sp(bp, mcast_config);
10588 for (i = 0, mclist = dev->mc_list;
10589 mclist && (i < dev->mc_count);
10590 i++, mclist = mclist->next) {
10592 config->config_table[i].
10593 cam_entry.msb_mac_addr =
10594 swab16(*(u16 *)&mclist->dmi_addr[0]);
10595 config->config_table[i].
10596 cam_entry.middle_mac_addr =
10597 swab16(*(u16 *)&mclist->dmi_addr[2]);
10598 config->config_table[i].
10599 cam_entry.lsb_mac_addr =
10600 swab16(*(u16 *)&mclist->dmi_addr[4]);
				config->config_table[i].cam_entry.flags =
							cpu_to_le16(port);
10603 config->config_table[i].
10604 target_table_entry.flags = 0;
10605 config->config_table[i].
10606 target_table_entry.client_id = 0;
10607 config->config_table[i].
10608 target_table_entry.vlan_id = 0;
10611 "setting MCAST[%d] (%04x:%04x:%04x)\n", i,
10612 config->config_table[i].
10613 cam_entry.msb_mac_addr,
10614 config->config_table[i].
10615 cam_entry.middle_mac_addr,
10616 config->config_table[i].
10617 cam_entry.lsb_mac_addr);
			old = config->hdr.length;
			if (old > i) {
				for (; i < old; i++) {
					if (CAM_IS_INVALID(config->
							   config_table[i])) {
						/* already invalidated */
						break;
					}
					/* invalidate */
					CAM_INVALIDATE(config->
						       config_table[i]);
				}
			}
10633 if (CHIP_REV_IS_SLOW(bp))
10634 offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
10636 offset = BNX2X_MAX_MULTICAST*(1 + port);
10638 config->hdr.length = i;
10639 config->hdr.offset = offset;
10640 config->hdr.client_id = bp->fp->cl_id;
10641 config->hdr.reserved1 = 0;
10643 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
10644 U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
10645 U64_LO(bnx2x_sp_mapping(bp, mcast_config)),
10648 /* Accept one or more multicasts */
10649 struct dev_mc_list *mclist;
10650 u32 mc_filter[MC_HASH_SIZE];
			u32 crc, bit, regidx;
			int i;
10654 memset(mc_filter, 0, 4 * MC_HASH_SIZE);
10656 for (i = 0, mclist = dev->mc_list;
10657 mclist && (i < dev->mc_count);
10658 i++, mclist = mclist->next) {
10660 DP(NETIF_MSG_IFUP, "Adding mcast MAC: %pM\n",
10663 crc = crc32c_le(0, mclist->dmi_addr, ETH_ALEN);
10664 bit = (crc >> 24) & 0xff;
				regidx = bit >> 5;
				bit &= 0x1f;
				mc_filter[regidx] |= (1 << bit);
			}

			for (i = 0; i < MC_HASH_SIZE; i++)
				REG_WR(bp, MC_HASH_OFFSET(bp, i),
				       mc_filter[i]);
		}
	}
10676 bp->rx_mode = rx_mode;
10677 bnx2x_set_storm_rx_mode(bp);
10680 /* called with rtnl_lock */
10681 static int bnx2x_change_mac_addr(struct net_device *dev, void *p)
{
	struct sockaddr *addr = p;
10684 struct bnx2x *bp = netdev_priv(dev);
	if (!is_valid_ether_addr((u8 *)(addr->sa_data)))
		return -EINVAL;
10689 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
10690 if (netif_running(dev)) {
10691 if (CHIP_IS_E1(bp))
10692 bnx2x_set_mac_addr_e1(bp, 1);
			bnx2x_set_mac_addr_e1h(bp, 1);
	}

	return 0;
}
10700 /* called with rtnl_lock */
10701 static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct mii_ioctl_data *data = if_mii(ifr);
10704 struct bnx2x *bp = netdev_priv(dev);
	int port = BP_PORT(bp);
	int err;

	switch (cmd) {
	case SIOCGMIIPHY:
		data->phy_id = bp->port.phy_addr;

		/* fallthrough */

	case SIOCGMIIREG: {
		u16 mii_regval;

		if (!netif_running(dev))
			return -EAGAIN;
10720 mutex_lock(&bp->port.phy_mutex);
10721 err = bnx2x_cl45_read(bp, port, 0, bp->port.phy_addr,
10722 DEFAULT_PHY_DEV_ADDR,
10723 (data->reg_num & 0x1f), &mii_regval);
10724 data->val_out = mii_regval;
		mutex_unlock(&bp->port.phy_mutex);
		return err;
	}

	case SIOCSMIIREG:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;

		if (!netif_running(dev))
			return -EAGAIN;
10736 mutex_lock(&bp->port.phy_mutex);
10737 err = bnx2x_cl45_write(bp, port, 0, bp->port.phy_addr,
10738 DEFAULT_PHY_DEV_ADDR,
10739 (data->reg_num & 0x1f), data->val_in);
		mutex_unlock(&bp->port.phy_mutex);
		return err;

	default:
		/* do nothing */
		break;
	}

	return -EOPNOTSUPP;
}
10751 /* called with rtnl_lock */
10752 static int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
{
	struct bnx2x *bp = netdev_priv(dev);
	int rc = 0;
10757 if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
	    ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE))
		return -EINVAL;
10761 /* This does not race with packet allocation
10762 * because the actual alloc size is
10763 * only updated as part of load
10765 dev->mtu = new_mtu;
10767 if (netif_running(dev)) {
10768 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
		rc = bnx2x_nic_load(bp, LOAD_NORMAL);
	}

	return rc;
}
10775 static void bnx2x_tx_timeout(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

#ifdef BNX2X_STOP_ON_ERROR
	if (!bp->panic)
		bnx2x_panic();
#endif

	/* This allows the netif to be shut down gracefully before resetting */
	schedule_work(&bp->reset_task);
}
#ifdef BCM_VLAN
/* called with rtnl_lock */
10789 static void bnx2x_vlan_rx_register(struct net_device *dev,
10790 struct vlan_group *vlgrp)
{
	struct bnx2x *bp = netdev_priv(dev);

	bp->vlgrp = vlgrp;
10796 /* Set flags according to the required capabilities */
10797 bp->flags &= ~(HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG);
10799 if (dev->features & NETIF_F_HW_VLAN_TX)
10800 bp->flags |= HW_VLAN_TX_FLAG;
10802 if (dev->features & NETIF_F_HW_VLAN_RX)
10803 bp->flags |= HW_VLAN_RX_FLAG;
10805 if (netif_running(dev))
		bnx2x_set_client_config(bp);
}
#endif
10811 #if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
10812 static void poll_bnx2x(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);
10816 disable_irq(bp->pdev->irq);
10817 bnx2x_interrupt(bp->pdev->irq, dev);
	enable_irq(bp->pdev->irq);
}
#endif
10822 static const struct net_device_ops bnx2x_netdev_ops = {
10823 .ndo_open = bnx2x_open,
10824 .ndo_stop = bnx2x_close,
10825 .ndo_start_xmit = bnx2x_start_xmit,
10826 .ndo_set_multicast_list = bnx2x_set_rx_mode,
10827 .ndo_set_mac_address = bnx2x_change_mac_addr,
10828 .ndo_validate_addr = eth_validate_addr,
10829 .ndo_do_ioctl = bnx2x_ioctl,
10830 .ndo_change_mtu = bnx2x_change_mtu,
10831 .ndo_tx_timeout = bnx2x_tx_timeout,
#ifdef BCM_VLAN
	.ndo_vlan_rx_register	= bnx2x_vlan_rx_register,
#endif
10835 #if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
	.ndo_poll_controller	= poll_bnx2x,
#endif
};
10841 static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
				    struct net_device *dev)
{
	struct bnx2x *bp;
	int rc;
10847 SET_NETDEV_DEV(dev, &pdev->dev);
	bp = netdev_priv(dev);

	bp->dev = dev;
	bp->pdev = pdev;
	bp->flags = 0;
10853 bp->func = PCI_FUNC(pdev->devfn);
10855 rc = pci_enable_device(pdev);
10857 printk(KERN_ERR PFX "Cannot enable PCI device, aborting\n");
10861 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
10862 printk(KERN_ERR PFX "Cannot find PCI device base address,"
10865 goto err_out_disable;
10868 if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
10869 printk(KERN_ERR PFX "Cannot find second PCI device"
10870 " base address, aborting\n");
10872 goto err_out_disable;
10875 if (atomic_read(&pdev->enable_cnt) == 1) {
		rc = pci_request_regions(pdev, DRV_MODULE_NAME);
		if (rc) {
			printk(KERN_ERR PFX "Cannot obtain PCI resources,"
			       " aborting\n");
			goto err_out_disable;
		}
10883 pci_set_master(pdev);
		pci_save_state(pdev);
	}
10887 bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
10888 if (bp->pm_cap == 0) {
10889 printk(KERN_ERR PFX "Cannot find power management"
10890 " capability, aborting\n");
10892 goto err_out_release;
10895 bp->pcie_cap = pci_find_capability(pdev, PCI_CAP_ID_EXP);
10896 if (bp->pcie_cap == 0) {
10897 printk(KERN_ERR PFX "Cannot find PCI Express capability,"
10900 goto err_out_release;
10903 if (pci_set_dma_mask(pdev, DMA_64BIT_MASK) == 0) {
10904 bp->flags |= USING_DAC_FLAG;
10905 if (pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK) != 0) {
10906 printk(KERN_ERR PFX "pci_set_consistent_dma_mask"
10907 " failed, aborting\n");
10909 goto err_out_release;
10912 } else if (pci_set_dma_mask(pdev, DMA_32BIT_MASK) != 0) {
10913 printk(KERN_ERR PFX "System does not support DMA,"
10916 goto err_out_release;
	dev->mem_start = pci_resource_start(pdev, 0);
	dev->base_addr = dev->mem_start;
	dev->mem_end = pci_resource_end(pdev, 0);

	dev->irq = pdev->irq;

	bp->regview = pci_ioremap_bar(pdev, 0);
	if (!bp->regview) {
		printk(KERN_ERR PFX "Cannot map register space, aborting\n");
		rc = -ENOMEM;
		goto err_out_release;
	}
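	/* The doorbell BAR may be larger than what the driver uses;
	 * map at most BNX2X_DB_SIZE bytes of it */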
	bp->doorbells = ioremap_nocache(pci_resource_start(pdev, 2),
					min_t(u64, BNX2X_DB_SIZE,
					      pci_resource_len(pdev, 2)));
	if (!bp->doorbells) {
		printk(KERN_ERR PFX "Cannot map doorbell space, aborting\n");
		rc = -ENOMEM;
		goto err_out_unmap;
	}
	bnx2x_set_power_state(bp, PCI_D0);

	/* clean indirect addresses */
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);
	REG_WR(bp, PXP2_REG_PGL_ADDR_88_F0 + BP_PORT(bp)*16, 0);
	REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F0 + BP_PORT(bp)*16, 0);
	REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0 + BP_PORT(bp)*16, 0);
	REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0 + BP_PORT(bp)*16, 0);
	dev->watchdog_timeo = TX_TIMEOUT;

	dev->netdev_ops = &bnx2x_netdev_ops;
	dev->ethtool_ops = &bnx2x_ethtool_ops;
	dev->features |= NETIF_F_SG;
	dev->features |= NETIF_F_HW_CSUM;
	if (bp->flags & USING_DAC_FLAG)
		dev->features |= NETIF_F_HIGHDMA;
#ifdef BCM_VLAN
	dev->features |= (NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX);
	bp->flags |= (HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG);
#endif
	dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
	dev->features |= NETIF_F_TSO6;

	return 0;
err_out_unmap:
	if (bp->regview) {
		iounmap(bp->regview);
		bp->regview = NULL;
	}

	if (bp->doorbells) {
		iounmap(bp->doorbells);
		bp->doorbells = NULL;
	}

err_out_release:
	if (atomic_read(&pdev->enable_cnt) == 1)
		pci_release_regions(pdev);

err_out_disable:
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);

err_out:
	return rc;
}
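/* Decode the negotiated PCIe link width/speed from the Link Control
 * register; note the read goes through the driver's REG_RD window
 * rather than the standard PCI config accessors */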
static int __devinit bnx2x_get_pcie_width(struct bnx2x *bp)
{
	u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);

	val = (val & PCICFG_LINK_WIDTH) >> PCICFG_LINK_WIDTH_SHIFT;
	return val;
}

/* return value of 1=2.5GHz 2=5GHz */
static int __devinit bnx2x_get_pcie_speed(struct bnx2x *bp)
{
	u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);

	val = (val & PCICFG_LINK_SPEED) >> PCICFG_LINK_SPEED_SHIFT;
	return val;
}
static int __devinit bnx2x_init_one(struct pci_dev *pdev,
				    const struct pci_device_id *ent)
{
	static int version_printed;
	struct net_device *dev = NULL;
	struct bnx2x *bp;
	int rc;

	if (version_printed++ == 0)
		printk(KERN_INFO "%s", version);

	/* dev zeroed in init_etherdev */
	dev = alloc_etherdev_mq(sizeof(*bp), MAX_CONTEXT);
	if (!dev) {
		printk(KERN_ERR PFX "Cannot allocate net device\n");
		return -ENOMEM;
	}

	bp = netdev_priv(dev);
	bp->msglevel = debug;

	rc = bnx2x_init_dev(pdev, dev);
	if (rc < 0) {
		free_netdev(dev);
		return rc;
	}

	pci_set_drvdata(pdev, dev);

	rc = bnx2x_init_bp(bp);
	if (rc)
		goto init_one_exit;

	rc = register_netdev(dev);
	if (rc) {
		dev_err(&pdev->dev, "Cannot register net device\n");
		goto init_one_exit;
	}

	printk(KERN_INFO "%s: %s (%c%d) PCI-E x%d %s found at mem %lx,"
	       " IRQ %d, ", dev->name, board_info[ent->driver_data].name,
	       (CHIP_REV(bp) >> 12) + 'A', (CHIP_METAL(bp) >> 4),
	       bnx2x_get_pcie_width(bp),
	       (bnx2x_get_pcie_speed(bp) == 2) ? "5GHz (Gen2)" : "2.5GHz",
	       dev->base_addr, bp->pdev->irq);
	printk(KERN_CONT "node addr %pM\n", dev->dev_addr);

	return 0;
init_one_exit:
	if (bp->regview)
		iounmap(bp->regview);

	if (bp->doorbells)
		iounmap(bp->doorbells);

	free_netdev(dev);

	if (atomic_read(&pdev->enable_cnt) == 1)
		pci_release_regions(pdev);

	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);

	return rc;
}
static void __devexit bnx2x_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp;

	if (!dev) {
		printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
		return;
	}
	bp = netdev_priv(dev);

	unregister_netdev(dev);

	if (bp->regview)
		iounmap(bp->regview);

	if (bp->doorbells)
		iounmap(bp->doorbells);

	free_netdev(dev);

	if (atomic_read(&pdev->enable_cnt) == 1)
		pci_release_regions(pdev);

	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
}
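/* Legacy PM entry points: both take rtnl_lock to serialize against
 * open/close and the reset task while the NIC is unloaded/reloaded */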
static int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp;

	if (!dev) {
		printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
		return -ENODEV;
	}
	bp = netdev_priv(dev);

	rtnl_lock();

	pci_save_state(pdev);

	if (!netif_running(dev)) {
		rtnl_unlock();
		return 0;
	}

	netif_device_detach(dev);

	bnx2x_nic_unload(bp, UNLOAD_CLOSE);

	bnx2x_set_power_state(bp, pci_choose_state(pdev, state));

	rtnl_unlock();

	return 0;
}
static int bnx2x_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp;
	int rc;

	if (!dev) {
		printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
		return -ENODEV;
	}
	bp = netdev_priv(dev);

	rtnl_lock();

	pci_restore_state(pdev);

	if (!netif_running(dev)) {
		rtnl_unlock();
		return 0;
	}

	bnx2x_set_power_state(bp, PCI_D0);
	netif_device_attach(dev);

	rc = bnx2x_nic_load(bp, LOAD_OPEN);

	rtnl_unlock();

	return rc;
}
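/* EEH variant of nic_unload: tear down driver state and free resources
 * without the usual MAC/firmware handshake, since the device may be
 * inaccessible after a bus error */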
static int bnx2x_eeh_nic_unload(struct bnx2x *bp)
{
	int i;

	bp->state = BNX2X_STATE_ERROR;

	bp->rx_mode = BNX2X_RX_MODE_NONE;

	bnx2x_netif_stop(bp, 0);

	del_timer_sync(&bp->timer);
	bp->stats_state = STATS_STATE_DISABLED;
	DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");

	/* Release IRQs */
	bnx2x_free_irq(bp);

	if (CHIP_IS_E1(bp)) {
		struct mac_configuration_cmd *config =
						bnx2x_sp(bp, mcast_config);

		for (i = 0; i < config->hdr.length; i++)
			CAM_INVALIDATE(config->config_table[i]);
	}

	/* Free SKBs, SGEs, TPA pool and driver internals */
	bnx2x_free_skbs(bp);
	for_each_rx_queue(bp, i)
		bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
	for_each_rx_queue(bp, i)
		netif_napi_del(&bnx2x_fp(bp, i, napi));
	bnx2x_free_mem(bp);

	bp->state = BNX2X_STATE_CLOSED;

	netif_carrier_off(bp->dev);

	return 0;
}
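/* Re-discover the shmem base and MCP state after a slot reset so a
 * subsequent bnx2x_nic_load() can talk to the bootcode again */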
static void bnx2x_eeh_recover(struct bnx2x *bp)
{
	u32 val;

	mutex_init(&bp->port.phy_mutex);

	bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
	bp->link_params.shmem_base = bp->common.shmem_base;
	BNX2X_DEV_INFO("shmem offset is 0x%x\n", bp->common.shmem_base);

	if (!bp->common.shmem_base ||
	    (bp->common.shmem_base < 0xA0000) ||
	    (bp->common.shmem_base >= 0xC0000)) {
		BNX2X_DEV_INFO("MCP not active\n");
		bp->flags |= NO_MCP_FLAG;
		return;
	}

	val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
	if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
		!= (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
		BNX2X_ERR("BAD MCP validity signature\n");

	if (!BP_NOMCP(bp)) {
		bp->fw_seq = (SHMEM_RD(bp, func_mb[BP_FUNC(bp)].drv_mb_header)
			      & DRV_MSG_SEQ_NUMBER_MASK);
		BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
	}
}
/**
 * bnx2x_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t bnx2x_io_error_detected(struct pci_dev *pdev,
						pci_channel_state_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp = netdev_priv(dev);

	rtnl_lock();

	netif_device_detach(dev);

	if (netif_running(dev))
		bnx2x_eeh_nic_unload(bp);

	pci_disable_device(pdev);

	rtnl_unlock();

	/* Request a slot reset */
	return PCI_ERS_RESULT_NEED_RESET;
}
/**
 * bnx2x_io_slot_reset - called after the PCI bus has been reset
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot.
 */
static pci_ers_result_t bnx2x_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp = netdev_priv(dev);

	rtnl_lock();

	if (pci_enable_device(pdev)) {
		dev_err(&pdev->dev,
			"Cannot re-enable PCI device after reset\n");
		rtnl_unlock();
		return PCI_ERS_RESULT_DISCONNECT;
	}

	pci_set_master(pdev);
	pci_restore_state(pdev);

	if (netif_running(dev))
		bnx2x_set_power_state(bp, PCI_D0);

	rtnl_unlock();

	return PCI_ERS_RESULT_RECOVERED;
}
/**
 * bnx2x_io_resume - called when traffic can start flowing again
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells us that
 * it's OK to resume normal operation.
 */
static void bnx2x_io_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp = netdev_priv(dev);

	rtnl_lock();

	bnx2x_eeh_recover(bp);

	if (netif_running(dev))
		bnx2x_nic_load(bp, LOAD_NORMAL);

	netif_device_attach(dev);

	rtnl_unlock();
}
static struct pci_error_handlers bnx2x_err_handler = {
	.error_detected = bnx2x_io_error_detected,
	.slot_reset     = bnx2x_io_slot_reset,
	.resume         = bnx2x_io_resume,
};

static struct pci_driver bnx2x_pci_driver = {
	.name        = DRV_MODULE_NAME,
	.id_table    = bnx2x_pci_tbl,
	.probe       = bnx2x_init_one,
	.remove      = __devexit_p(bnx2x_remove_one),
	.suspend     = bnx2x_suspend,
	.resume      = bnx2x_resume,
	.err_handler = &bnx2x_err_handler,
};
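/* The slow-path workqueue is created before the PCI driver registers,
 * presumably so probe and the handlers above can queue work as soon as
 * a device binds */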
static int __init bnx2x_init(void)
{
	bnx2x_wq = create_singlethread_workqueue("bnx2x");
	if (bnx2x_wq == NULL) {
		printk(KERN_ERR PFX "Cannot create workqueue\n");
		return -ENOMEM;
	}

	return pci_register_driver(&bnx2x_pci_driver);
}

static void __exit bnx2x_cleanup(void)
{
	pci_unregister_driver(&bnx2x_pci_driver);

	destroy_workqueue(bnx2x_wq);
}

module_init(bnx2x_init);
module_exit(bnx2x_cleanup);