1 /* bnx2x_main.c: Broadcom Everest network driver.
3 * Copyright (c) 2007-2009 Broadcom Corporation
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation.
9 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
10 * Written by: Eliezer Tamir
11 * Based on code from Michael Chan's bnx2 driver
12 * UDP CSUM errata workaround by Arik Gendelman
13 * Slowpath rework by Vladislav Zolotarov
14 * Statistics and Link management by Yitchak Gertner
18 #include <linux/module.h>
19 #include <linux/moduleparam.h>
20 #include <linux/kernel.h>
21 #include <linux/device.h> /* for dev_info() */
22 #include <linux/timer.h>
23 #include <linux/errno.h>
24 #include <linux/ioport.h>
25 #include <linux/slab.h>
26 #include <linux/vmalloc.h>
27 #include <linux/interrupt.h>
28 #include <linux/pci.h>
29 #include <linux/init.h>
30 #include <linux/netdevice.h>
31 #include <linux/etherdevice.h>
32 #include <linux/skbuff.h>
33 #include <linux/dma-mapping.h>
34 #include <linux/bitops.h>
35 #include <linux/irq.h>
36 #include <linux/delay.h>
37 #include <asm/byteorder.h>
38 #include <linux/time.h>
39 #include <linux/ethtool.h>
40 #include <linux/mii.h>
41 #include <linux/if_vlan.h>
44 #include <net/checksum.h>
45 #include <net/ip6_checksum.h>
46 #include <linux/workqueue.h>
47 #include <linux/crc32.h>
48 #include <linux/crc32c.h>
49 #include <linux/prefetch.h>
50 #include <linux/zlib.h>
53 #include "bnx2x_reg.h"
54 #include "bnx2x_fw_defs.h"
55 #include "bnx2x_hsi.h"
56 #include "bnx2x_link.h"
58 #include "bnx2x_init.h"
/* Driver version strings reported through ethtool and modinfo. */
60 #define DRV_MODULE_VERSION "1.45.26"
61 #define DRV_MODULE_RELDATE "2009/01/26"
/* Minimum required bootcode (MCP firmware) version. */
62 #define BNX2X_BC_VER 0x040200
64 /* Time in jiffies before concluding the transmitter is hung */
65 #define TX_TIMEOUT (5*HZ)
/* One-shot banner printed at module load (__devinitdata: discarded after init). */
67 static char version[] __devinitdata =
68 "Broadcom NetXtreme II 5771x 10Gigabit Ethernet Driver "
69 DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
71 MODULE_AUTHOR("Eliezer Tamir");
72 MODULE_DESCRIPTION("Broadcom NetXtreme II BCM57710/57711/57711E Driver");
73 MODULE_LICENSE("GPL");
74 MODULE_VERSION(DRV_MODULE_VERSION);
/* Module parameters.  NOTE(review): the declarations of int_mode, poll and
 * debug referenced by module_param() below are not visible in this chunk —
 * presumably 'static int' file-scope variables; confirm against full file. */
76 static int multi_mode = 1;
77 module_param(multi_mode, int, 0);
79 static int disable_tpa;
/* Reference counts for shared init: 0-common, 1-port0, 2-port1. */
82 static int load_count[3]; /* 0-common, 1-port0, 2-port1 */
84 module_param(disable_tpa, int, 0);
87 module_param(int_mode, int, 0);
88 MODULE_PARM_DESC(int_mode, " Force interrupt mode (1 INT#x; 2 MSI)");
90 module_param(poll, int, 0);
91 module_param(debug, int, 0);
92 MODULE_PARM_DESC(disable_tpa, "disable the TPA (LRO) feature");
93 MODULE_PARM_DESC(poll, "use polling (for debug)");
94 MODULE_PARM_DESC(debug, "default debug msglevel");
/* Single-threaded workqueue used for the slowpath task (see bnx2x_int_disable_sync). */
96 static struct workqueue_struct *bnx2x_wq;
/* Supported board variants; the enumerators themselves are not visible in
 * this chunk (dropped lines) — presumably BCM57710/BCM57711/BCM57711E, as
 * used in the PCI table below.  TODO confirm against full file. */
98 enum bnx2x_board_type {
104 /* indexed by board_type, above */
/* Human-readable board names, indexed by enum bnx2x_board_type. */
107 } board_info[] __devinitdata = {
108 { "Broadcom NetXtreme II BCM57710 XGb" },
109 { "Broadcom NetXtreme II BCM57711 XGb" },
110 { "Broadcom NetXtreme II BCM57711E XGb" }
/* PCI IDs this driver binds to; driver_data carries the board_type index
 * used to look up board_info[].  NOTE(review): the terminating { 0 } entry
 * and closing brace are not visible in this chunk (dropped lines). */
114 static const struct pci_device_id bnx2x_pci_tbl[] = {
115 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57710,
116 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57710 },
117 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57711,
118 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57711 },
119 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57711E,
120 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57711E },
/* Export the ID table so hotplug/modprobe can match devices. */
124 MODULE_DEVICE_TABLE(pci, bnx2x_pci_tbl);
126 /****************************************************************************
127 * General service functions
128 ****************************************************************************/
131 * locking is done by mcp
133 static void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val)
135 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
136 pci_write_config_dword(bp->pdev, PCICFG_GRC_DATA, val);
137 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
138 PCICFG_VENDOR_ID_OFFSET);
141 static u32 bnx2x_reg_rd_ind(struct bnx2x *bp, u32 addr)
145 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
146 pci_read_config_dword(bp->pdev, PCICFG_GRC_DATA, &val);
147 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
148 PCICFG_VENDOR_ID_OFFSET);
153 static const u32 dmae_reg_go_c[] = {
154 DMAE_REG_GO_C0, DMAE_REG_GO_C1, DMAE_REG_GO_C2, DMAE_REG_GO_C3,
155 DMAE_REG_GO_C4, DMAE_REG_GO_C5, DMAE_REG_GO_C6, DMAE_REG_GO_C7,
156 DMAE_REG_GO_C8, DMAE_REG_GO_C9, DMAE_REG_GO_C10, DMAE_REG_GO_C11,
157 DMAE_REG_GO_C12, DMAE_REG_GO_C13, DMAE_REG_GO_C14, DMAE_REG_GO_C15
160 /* copy command into DMAE command memory and set DMAE command go */
161 static void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae,
167 cmd_offset = (DMAE_REG_CMD_MEM + sizeof(struct dmae_command) * idx);
168 for (i = 0; i < (sizeof(struct dmae_command)/4); i++) {
169 REG_WR(bp, cmd_offset + i*4, *(((u32 *)dmae) + i));
171 DP(BNX2X_MSG_OFF, "DMAE cmd[%d].%d (0x%08x) : 0x%08x\n",
172 idx, i, cmd_offset + i*4, *(((u32 *)dmae) + i));
174 REG_WR(bp, dmae_reg_go_c[idx], 1);
/*
 * DMA 'len32' dwords from host memory at 'dma_addr' into device GRC
 * space at 'dst_addr'.  Falls back to indirect register writes while
 * the DMAE block is not yet ready.  Serialized by bp->dmae_mutex;
 * completion is detected by polling the slowpath wb_comp word.
 *
 * NOTE(review): several lines were dropped in extraction (the brace
 * lines, the dmae->len assignment, the early return on the indirect
 * path, the #ifdef __BIG_ENDIAN/__LITTLE_ENDIAN selection around the
 * two ENDIANITY flags, and the timeout/udelay polling scaffolding in
 * the wait loop) — confirm against the full file before editing.
 */
177 void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
180 struct dmae_command *dmae = &bp->init_dmae;
181 u32 *wb_comp = bnx2x_sp(bp, wb_comp);
/* DMAE not yet usable: write dwords one-by-one through the GRC window. */
184 if (!bp->dmae_ready) {
185 u32 *data = bnx2x_sp(bp, wb_data[0]);
187 DP(BNX2X_MSG_OFF, "DMAE is not ready (dst_addr %08x len32 %d)"
188 " using indirect\n", dst_addr, len32);
189 bnx2x_init_ind_wr(bp, dst_addr, data, len32);
193 mutex_lock(&bp->dmae_mutex);
195 memset(dmae, 0, sizeof(struct dmae_command));
/* PCI -> GRC copy; completion written back to PCI (wb_comp). */
197 dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
198 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
199 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
201 DMAE_CMD_ENDIANITY_B_DW_SWAP |
203 DMAE_CMD_ENDIANITY_DW_SWAP |
205 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
206 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
207 dmae->src_addr_lo = U64_LO(dma_addr);
208 dmae->src_addr_hi = U64_HI(dma_addr);
/* GRC destination is addressed in dwords, hence the >> 2. */
209 dmae->dst_addr_lo = dst_addr >> 2;
210 dmae->dst_addr_hi = 0;
212 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
213 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
214 dmae->comp_val = DMAE_COMP_VAL;
216 DP(BNX2X_MSG_OFF, "dmae: opcode 0x%08x\n"
217 DP_LEVEL "src_addr [%x:%08x] len [%d *4] "
218 "dst_addr [%x:%08x (%08x)]\n"
219 DP_LEVEL "comp_addr [%x:%08x] comp_val 0x%08x\n",
220 dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
221 dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo, dst_addr,
222 dmae->comp_addr_hi, dmae->comp_addr_lo, dmae->comp_val);
223 DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
224 bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
225 bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);
229 bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
/* Poll the write-back word until the engine signals completion. */
233 while (*wb_comp != DMAE_COMP_VAL) {
234 DP(BNX2X_MSG_OFF, "wb_comp 0x%08x\n", *wb_comp);
237 BNX2X_ERR("dmae timeout!\n");
241 /* adjust delay for emulation/FPGA */
242 if (CHIP_REV_IS_SLOW(bp))
248 mutex_unlock(&bp->dmae_mutex);
/*
 * DMA 'len32' dwords from device GRC space at 'src_addr' into the
 * slowpath wb_data scratch buffer; mirror image of bnx2x_write_dmae().
 * Falls back to indirect register reads while DMAE is not ready.
 *
 * NOTE(review): dropped lines here mirror bnx2x_write_dmae(): braces,
 * 'int i' decl, dmae->len, the early return, the endianness #ifdefs
 * and the timeout/udelay polling scaffolding — confirm against the
 * full file before editing.
 */
251 void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
253 struct dmae_command *dmae = &bp->init_dmae;
254 u32 *wb_comp = bnx2x_sp(bp, wb_comp);
257 if (!bp->dmae_ready) {
258 u32 *data = bnx2x_sp(bp, wb_data[0]);
261 DP(BNX2X_MSG_OFF, "DMAE is not ready (src_addr %08x len32 %d)"
262 " using indirect\n", src_addr, len32);
263 for (i = 0; i < len32; i++)
264 data[i] = bnx2x_reg_rd_ind(bp, src_addr + i*4);
268 mutex_lock(&bp->dmae_mutex);
/* Clear the landing buffer so stale data is never mistaken for a result. */
270 memset(bnx2x_sp(bp, wb_data[0]), 0, sizeof(u32) * 4);
271 memset(dmae, 0, sizeof(struct dmae_command));
/* GRC -> PCI copy; completion written back to PCI (wb_comp). */
273 dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
274 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
275 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
277 DMAE_CMD_ENDIANITY_B_DW_SWAP |
279 DMAE_CMD_ENDIANITY_DW_SWAP |
281 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
282 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
/* GRC source is addressed in dwords, hence the >> 2. */
283 dmae->src_addr_lo = src_addr >> 2;
284 dmae->src_addr_hi = 0;
285 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_data));
286 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_data));
288 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
289 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
290 dmae->comp_val = DMAE_COMP_VAL;
292 DP(BNX2X_MSG_OFF, "dmae: opcode 0x%08x\n"
293 DP_LEVEL "src_addr [%x:%08x] len [%d *4] "
294 "dst_addr [%x:%08x (%08x)]\n"
295 DP_LEVEL "comp_addr [%x:%08x] comp_val 0x%08x\n",
296 dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
297 dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo, src_addr,
298 dmae->comp_addr_hi, dmae->comp_addr_lo, dmae->comp_val);
302 bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
/* Poll the write-back word until the engine signals completion. */
306 while (*wb_comp != DMAE_COMP_VAL) {
309 BNX2X_ERR("dmae timeout!\n");
313 /* adjust delay for emulation/FPGA */
314 if (CHIP_REV_IS_SLOW(bp))
319 DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
320 bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
321 bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);
323 mutex_unlock(&bp->dmae_mutex);
326 /* used only for slowpath so not inlined */
327 static void bnx2x_wb_wr(struct bnx2x *bp, int reg, u32 val_hi, u32 val_lo)
331 wb_write[0] = val_hi;
332 wb_write[1] = val_lo;
333 REG_WR_DMAE(bp, reg, wb_write, 2);
337 static u64 bnx2x_wb_rd(struct bnx2x *bp, int reg)
341 REG_RD_DMAE(bp, reg, wb_data, 2);
343 return HILO_U64(wb_data[0], wb_data[1]);
/*
 * Dump the firmware assert lists of all four storm processors
 * (X/T/C/U) to the kernel log.  For each storm: read the last assert
 * index, then walk the assert array printing every valid (non
 * COMMON_ASM_INVALID_ASSERT_OPCODE) 4-dword entry.
 *
 * NOTE(review): the return-value accounting ('rc' declaration and its
 * updates) and several brace/blank lines were dropped in extraction —
 * the function is declared to return int; confirm against full file.
 */
347 static int bnx2x_mc_assert(struct bnx2x *bp)
351 u32 row0, row1, row2, row3;
/* XSTORM */
354 last_idx = REG_RD8(bp, BAR_XSTRORM_INTMEM +
355 XSTORM_ASSERT_LIST_INDEX_OFFSET);
357 BNX2X_ERR("XSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
359 /* print the asserts */
360 for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
362 row0 = REG_RD(bp, BAR_XSTRORM_INTMEM +
363 XSTORM_ASSERT_LIST_OFFSET(i));
364 row1 = REG_RD(bp, BAR_XSTRORM_INTMEM +
365 XSTORM_ASSERT_LIST_OFFSET(i) + 4);
366 row2 = REG_RD(bp, BAR_XSTRORM_INTMEM +
367 XSTORM_ASSERT_LIST_OFFSET(i) + 8);
368 row3 = REG_RD(bp, BAR_XSTRORM_INTMEM +
369 XSTORM_ASSERT_LIST_OFFSET(i) + 12);
371 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
372 BNX2X_ERR("XSTORM_ASSERT_INDEX 0x%x = 0x%08x"
373 " 0x%08x 0x%08x 0x%08x\n",
374 i, row3, row2, row1, row0);
/* TSTORM */
382 last_idx = REG_RD8(bp, BAR_TSTRORM_INTMEM +
383 TSTORM_ASSERT_LIST_INDEX_OFFSET);
385 BNX2X_ERR("TSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
387 /* print the asserts */
388 for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
390 row0 = REG_RD(bp, BAR_TSTRORM_INTMEM +
391 TSTORM_ASSERT_LIST_OFFSET(i));
392 row1 = REG_RD(bp, BAR_TSTRORM_INTMEM +
393 TSTORM_ASSERT_LIST_OFFSET(i) + 4);
394 row2 = REG_RD(bp, BAR_TSTRORM_INTMEM +
395 TSTORM_ASSERT_LIST_OFFSET(i) + 8);
396 row3 = REG_RD(bp, BAR_TSTRORM_INTMEM +
397 TSTORM_ASSERT_LIST_OFFSET(i) + 12);
399 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
400 BNX2X_ERR("TSTORM_ASSERT_INDEX 0x%x = 0x%08x"
401 " 0x%08x 0x%08x 0x%08x\n",
402 i, row3, row2, row1, row0);
/* CSTORM */
410 last_idx = REG_RD8(bp, BAR_CSTRORM_INTMEM +
411 CSTORM_ASSERT_LIST_INDEX_OFFSET);
413 BNX2X_ERR("CSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
415 /* print the asserts */
416 for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
418 row0 = REG_RD(bp, BAR_CSTRORM_INTMEM +
419 CSTORM_ASSERT_LIST_OFFSET(i));
420 row1 = REG_RD(bp, BAR_CSTRORM_INTMEM +
421 CSTORM_ASSERT_LIST_OFFSET(i) + 4);
422 row2 = REG_RD(bp, BAR_CSTRORM_INTMEM +
423 CSTORM_ASSERT_LIST_OFFSET(i) + 8);
424 row3 = REG_RD(bp, BAR_CSTRORM_INTMEM +
425 CSTORM_ASSERT_LIST_OFFSET(i) + 12);
427 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
428 BNX2X_ERR("CSTORM_ASSERT_INDEX 0x%x = 0x%08x"
429 " 0x%08x 0x%08x 0x%08x\n",
430 i, row3, row2, row1, row0);
/* USTORM */
438 last_idx = REG_RD8(bp, BAR_USTRORM_INTMEM +
439 USTORM_ASSERT_LIST_INDEX_OFFSET);
441 BNX2X_ERR("USTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
443 /* print the asserts */
444 for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
446 row0 = REG_RD(bp, BAR_USTRORM_INTMEM +
447 USTORM_ASSERT_LIST_OFFSET(i));
448 row1 = REG_RD(bp, BAR_USTRORM_INTMEM +
449 USTORM_ASSERT_LIST_OFFSET(i) + 4);
450 row2 = REG_RD(bp, BAR_USTRORM_INTMEM +
451 USTORM_ASSERT_LIST_OFFSET(i) + 8);
452 row3 = REG_RD(bp, BAR_USTRORM_INTMEM +
453 USTORM_ASSERT_LIST_OFFSET(i) + 12);
455 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
456 BNX2X_ERR("USTORM_ASSERT_INDEX 0x%x = 0x%08x"
457 " 0x%08x 0x%08x 0x%08x\n",
458 i, row3, row2, row1, row0);
/*
 * Dump the MCP firmware trace ring buffer (in MCP scratchpad memory)
 * to the kernel log.  The 'mark' word at 0xf104 points at the current
 * write position; the buffer is printed in two halves so output comes
 * out in chronological order.
 *
 * NOTE(review): local declarations (mark/offset/data[]/word), the
 * inner REG_RD offset expressions and the brace lines were dropped in
 * extraction — confirm against the full file.
 */
468 static void bnx2x_fw_dump(struct bnx2x *bp)
474 mark = REG_RD(bp, MCP_REG_MCPR_SCRATCH + 0xf104);
/* round up to a dword boundary */
475 mark = ((mark + 0x3) & ~0x3);
476 printk(KERN_ERR PFX "begin fw dump (mark 0x%x)\n" KERN_ERR, mark);
/* older half of the ring: from mark to end of trace area */
478 for (offset = mark - 0x08000000; offset <= 0xF900; offset += 0x8*4) {
479 for (word = 0; word < 8; word++)
480 data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
483 printk(KERN_CONT "%s", (char *)data);
/* newer half: from start of trace area up to mark */
485 for (offset = 0xF108; offset <= mark - 0x08000000; offset += 0x8*4) {
486 for (word = 0; word < 8; word++)
487 data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
490 printk(KERN_CONT "%s", (char *)data);
492 printk("\n" KERN_ERR PFX "end of fw dump\n");
/*
 * Emergency state dump: for every fastpath queue print the TX/RX
 * producer/consumer indices, then windows of the TX BD ring, RX BD
 * ring, SGE ring and completion (CQE) ring around the current
 * consumers, followed by the default status-block indices.  Statistics
 * are disabled first so they stop mutating state mid-dump.
 *
 * NOTE(review): loop-variable declarations, brace lines and a few
 * statement continuations were dropped in extraction — confirm
 * against the full file before editing.
 */
495 static void bnx2x_panic_dump(struct bnx2x *bp)
500 bp->stats_state = STATS_STATE_DISABLED;
501 DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");
503 BNX2X_ERR("begin crash dump -----------------\n");
505 for_each_queue(bp, i) {
506 struct bnx2x_fastpath *fp = &bp->fp[i];
507 struct eth_tx_db_data *hw_prods = fp->hw_tx_prods;
/* per-queue indices */
509 BNX2X_ERR("queue[%d]: tx_pkt_prod(%x) tx_pkt_cons(%x)"
510 " tx_bd_prod(%x) tx_bd_cons(%x) *tx_cons_sb(%x)\n",
511 i, fp->tx_pkt_prod, fp->tx_pkt_cons, fp->tx_bd_prod,
512 fp->tx_bd_cons, le16_to_cpu(*fp->tx_cons_sb));
513 BNX2X_ERR(" rx_bd_prod(%x) rx_bd_cons(%x)"
514 " *rx_bd_cons_sb(%x) rx_comp_prod(%x)"
515 " rx_comp_cons(%x) *rx_cons_sb(%x)\n",
516 fp->rx_bd_prod, fp->rx_bd_cons,
517 le16_to_cpu(*fp->rx_bd_cons_sb), fp->rx_comp_prod,
518 fp->rx_comp_cons, le16_to_cpu(*fp->rx_cons_sb));
519 BNX2X_ERR(" rx_sge_prod(%x) last_max_sge(%x)"
520 " fp_c_idx(%x) *sb_c_idx(%x) fp_u_idx(%x)"
521 " *sb_u_idx(%x) bd data(%x,%x)\n",
522 fp->rx_sge_prod, fp->last_max_sge, fp->fp_c_idx,
523 fp->status_blk->c_status_block.status_block_index,
525 fp->status_blk->u_status_block.status_block_index,
526 hw_prods->packets_prod, hw_prods->bds_prod);
/* TX packet ring window around the SB consumer */
528 start = TX_BD(le16_to_cpu(*fp->tx_cons_sb) - 10);
529 end = TX_BD(le16_to_cpu(*fp->tx_cons_sb) + 245);
530 for (j = start; j < end; j++) {
531 struct sw_tx_bd *sw_bd = &fp->tx_buf_ring[j];
533 BNX2X_ERR("packet[%x]=[%p,%x]\n", j,
534 sw_bd->skb, sw_bd->first_bd);
/* TX BD ring window */
537 start = TX_BD(fp->tx_bd_cons - 10);
538 end = TX_BD(fp->tx_bd_cons + 254);
539 for (j = start; j < end; j++) {
540 u32 *tx_bd = (u32 *)&fp->tx_desc_ring[j];
542 BNX2X_ERR("tx_bd[%x]=[%x:%x:%x:%x]\n",
543 j, tx_bd[0], tx_bd[1], tx_bd[2], tx_bd[3]);
/* RX BD ring window */
546 start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10);
547 end = RX_BD(le16_to_cpu(*fp->rx_cons_sb) + 503);
548 for (j = start; j < end; j++) {
549 u32 *rx_bd = (u32 *)&fp->rx_desc_ring[j];
550 struct sw_rx_bd *sw_bd = &fp->rx_buf_ring[j];
552 BNX2X_ERR("rx_bd[%x]=[%x:%x] sw_bd=[%p]\n",
553 j, rx_bd[1], rx_bd[0], sw_bd->skb);
/* SGE ring window between producer and last max */
556 start = RX_SGE(fp->rx_sge_prod);
557 end = RX_SGE(fp->last_max_sge);
558 for (j = start; j < end; j++) {
559 u32 *rx_sge = (u32 *)&fp->rx_sge_ring[j];
560 struct sw_rx_page *sw_page = &fp->rx_page_ring[j];
562 BNX2X_ERR("rx_sge[%x]=[%x:%x] sw_page=[%p]\n",
563 j, rx_sge[1], rx_sge[0], sw_page->page);
/* RX completion-queue window */
566 start = RCQ_BD(fp->rx_comp_cons - 10);
567 end = RCQ_BD(fp->rx_comp_cons + 503);
568 for (j = start; j < end; j++) {
569 u32 *cqe = (u32 *)&fp->rx_comp_ring[j];
571 BNX2X_ERR("cqe[%x]=[%x:%x:%x:%x]\n",
572 j, cqe[0], cqe[1], cqe[2], cqe[3]);
/* default status-block and slowpath state */
576 BNX2X_ERR("def_c_idx(%u) def_u_idx(%u) def_x_idx(%u)"
577 " def_t_idx(%u) def_att_idx(%u) attn_state(%u)"
578 " spq_prod_idx(%u)\n",
579 bp->def_c_idx, bp->def_u_idx, bp->def_x_idx, bp->def_t_idx,
580 bp->def_att_idx, bp->attn_state, bp->spq_prod_idx);
584 BNX2X_ERR("end crash dump -----------------\n");
/*
 * Enable host-coalescing interrupts for this port, selecting the bit
 * mask according to the active interrupt mode (MSI-X, MSI or INTx),
 * and program the leading/trailing attention edges on E1H chips.
 *
 * NOTE(review): the if/else lines that select between the three mode
 * branches, plus an MSI-related intermediate write sequence, were
 * dropped in extraction — the bare val-manipulation arms below belong
 * to those branches; confirm against the full file.
 */
587 static void bnx2x_int_enable(struct bnx2x *bp)
589 int port = BP_PORT(bp);
590 u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
591 u32 val = REG_RD(bp, addr);
592 int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
593 int msi = (bp->flags & USING_MSI_FLAG) ? 1 : 0;
/* MSI-X arm: single-ISR and INT line off, MSI/MSI-X + attention on */
596 val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
597 HC_CONFIG_0_REG_INT_LINE_EN_0);
598 val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
599 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
/* MSI arm */
601 val &= ~HC_CONFIG_0_REG_INT_LINE_EN_0;
602 val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
603 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
604 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
/* INTx arm: everything on */
606 val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
607 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
608 HC_CONFIG_0_REG_INT_LINE_EN_0 |
609 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
611 DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
614 REG_WR(bp, addr, val);
616 val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0;
619 DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x) mode %s\n",
620 val, port, addr, (msix ? "MSI-X" : (msi ? "MSI" : "INTx")));
622 REG_WR(bp, addr, val);
624 if (CHIP_IS_E1H(bp)) {
625 /* init leading/trailing edge */
/* enable this VN's bit in addition to the base mask */
627 val = (0xee0f | (1 << (BP_E1HVN(bp) + 4)));
629 /* enable nig attention */
634 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
635 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
/*
 * Mask all host-coalescing interrupt sources for this port (MSI/MSI-X,
 * INT line, single-ISR and attention bits), then read the register
 * back to both flush the posted write and verify it stuck.
 */
639 static void bnx2x_int_disable(struct bnx2x *bp)
641 int port = BP_PORT(bp);
642 u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
643 u32 val = REG_RD(bp, addr);
645 val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
646 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
647 HC_CONFIG_0_REG_INT_LINE_EN_0 |
648 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
650 DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
653 /* flush all outstanding writes */
656 REG_WR(bp, addr, val);
/* read-back: detect a wedged IGU early */
657 if (REG_RD(bp, addr) != val)
658 BNX2X_ERR("BUG! proper val not read from IGU!\n")\x3b
/*
 * Quiesce interrupt processing: bump intr_sem so handlers bail out,
 * mask the hardware sources (optionally, via bnx2x_int_disable), wait
 * for any in-flight ISRs on every vector, and drain the slowpath work.
 *
 * NOTE(review): the 'offset' initialization and the if/else selecting
 * between the MSI-X and INTx synchronize paths were dropped in
 * extraction — confirm against the full file.
 */
661 static void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw)
663 int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
666 /* disable interrupt handling */
667 atomic_inc(&bp->intr_sem);
669 /* prevent the HW from sending interrupts */
670 bnx2x_int_disable(bp);
672 /* make sure all ISRs are done */
/* MSI-X: slowpath vector first, then one vector per queue */
674 synchronize_irq(bp->msix_table[0].vector);
676 for_each_queue(bp, i)
677 synchronize_irq(bp->msix_table[i + offset].vector);
/* INTx/MSI: single shared vector */
679 synchronize_irq(bp->pdev->irq);
681 /* make sure sp_task is not running */
682 cancel_delayed_work(&bp->sp_task);
683 flush_workqueue(bnx2x_wq);
689 * General service functions
/*
 * Acknowledge a status block to the IGU: build an igu_ack_register
 * from (sb_id, storm, op, update) plus the consumed index and write it
 * as a single dword to this port's INT_ACK command register.
 *
 * NOTE(review): the brace lines and the trailing mmiowb()/barrier()
 * that upstream uses to order the MMIO write were dropped in
 * extraction — confirm against the full file.
 */
692 static inline void bnx2x_ack_sb(struct bnx2x *bp, u8 sb_id,
693 u8 storm, u16 index, u8 op, u8 update)
695 u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
696 COMMAND_REG_INT_ACK);
697 struct igu_ack_register igu_ack;
699 igu_ack.status_block_index = index;
700 igu_ack.sb_id_and_flags =
701 ((sb_id << IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT) |
702 (storm << IGU_ACK_REGISTER_STORM_ID_SHIFT) |
703 (update << IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT) |
704 (op << IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT));
706 DP(BNX2X_MSG_OFF, "write 0x%08x to HC addr 0x%x\n",
707 (*(u32 *)&igu_ack), hc_addr);
/* the whole struct is exactly one dword — written in one access */
708 REG_WR(bp, hc_addr, (*(u32 *)&igu_ack));
/*
 * Refresh the cached fastpath status-block indices (CSTORM and USTORM)
 * from the chip-written status block.  Returns a bitmask of which
 * indices changed (per upstream; the 'rc' accumulation and return
 * lines were dropped in extraction — TODO confirm).
 */
711 static inline u16 bnx2x_update_fpsb_idx(struct bnx2x_fastpath *fp)
713 struct host_status_block *fpsb = fp->status_blk;
716 barrier(); /* status block is written to by the chip */
717 if (fp->fp_c_idx != fpsb->c_status_block.status_block_index) {
718 fp->fp_c_idx = fpsb->c_status_block.status_block_index;
721 if (fp->fp_u_idx != fpsb->u_status_block.status_block_index) {
722 fp->fp_u_idx = fpsb->u_status_block.status_block_index;
728 static u16 bnx2x_ack_int(struct bnx2x *bp)
730 u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
731 COMMAND_REG_SIMD_MASK);
732 u32 result = REG_RD(bp, hc_addr);
734 DP(BNX2X_MSG_OFF, "read 0x%08x from HC addr 0x%x\n",
742 * fast path service functions
745 static inline int bnx2x_has_tx_work(struct bnx2x_fastpath *fp)
749 /* Tell compiler that status block fields can change */
751 tx_cons_sb = le16_to_cpu(*fp->tx_cons_sb);
752 return (fp->tx_pkt_cons != tx_cons_sb);
755 static inline int bnx2x_has_tx_work_unload(struct bnx2x_fastpath *fp)
757 /* Tell compiler that consumer and producer can change */
759 return (fp->tx_pkt_prod != fp->tx_pkt_cons);
763 /* free skb in the packet ring at pos idx
764 * return idx of last bd freed
/*
 * Unmaps and releases every BD belonging to the TX packet at ring
 * position 'idx' (first/parse/TSO-header/fragment BDs), frees the skb
 * and clears the ring slot.  Returns the new BD consumer.
 *
 * NOTE(review): the 'nbd' declaration, the loop over fragment BDs,
 * the dev_kfree_skb()/slot-clear tail and the braces were dropped in
 * extraction — confirm against the full file before editing.
 */
766 static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fastpath *fp,
769 struct sw_tx_bd *tx_buf = &fp->tx_buf_ring[idx];
770 struct eth_tx_bd *tx_bd;
771 struct sk_buff *skb = tx_buf->skb;
772 u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
775 DP(BNX2X_MSG_OFF, "pkt_idx %d buff @(%p)->skb %p\n",
/* unmap the first (header) BD */
779 DP(BNX2X_MSG_OFF, "free bd_idx %d\n", bd_idx);
780 tx_bd = &fp->tx_desc_ring[bd_idx];
781 pci_unmap_single(bp->pdev, BD_UNMAP_ADDR(tx_bd),
782 BD_UNMAP_LEN(tx_bd), PCI_DMA_TODEVICE);
/* nbd counts all BDs of the packet; -1 for the one just handled */
784 nbd = le16_to_cpu(tx_bd->nbd) - 1;
785 new_cons = nbd + tx_buf->first_bd;
786 #ifdef BNX2X_STOP_ON_ERROR
787 if (nbd > (MAX_SKB_FRAGS + 2)) {
788 BNX2X_ERR("BAD nbd!\n");
793 /* Skip a parse bd and the TSO split header bd
794 since they have no mapping */
796 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
798 if (tx_bd->bd_flags.as_bitfield & (ETH_TX_BD_FLAGS_IP_CSUM |
799 ETH_TX_BD_FLAGS_TCP_CSUM |
800 ETH_TX_BD_FLAGS_SW_LSO)) {
/* parse BD present — skip it */
802 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
803 tx_bd = &fp->tx_desc_ring[bd_idx];
804 /* is this a TSO split header bd? */
805 if (tx_bd->bd_flags.as_bitfield & ETH_TX_BD_FLAGS_SW_LSO) {
807 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
/* unmap the remaining fragment BDs */
814 DP(BNX2X_MSG_OFF, "free frag bd_idx %d\n", bd_idx);
815 tx_bd = &fp->tx_desc_ring[bd_idx];
816 pci_unmap_page(bp->pdev, BD_UNMAP_ADDR(tx_bd),
817 BD_UNMAP_LEN(tx_bd), PCI_DMA_TODEVICE);
819 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
/* release the skb and mark the ring slot empty */
825 tx_buf->first_bd = 0;
/*
 * Number of free TX BDs in the ring.  The NUM_TX_RINGS "next-page"
 * entries are counted as used so they are never handed out.
 *
 * NOTE(review): the local declarations (prod/cons/used) and the
 * #endif matching BNX2X_STOP_ON_ERROR were dropped in extraction.
 */
831 static inline u16 bnx2x_tx_avail(struct bnx2x_fastpath *fp)
837 barrier(); /* Tell compiler that prod and cons can change */
838 prod = fp->tx_bd_prod;
839 cons = fp->tx_bd_cons;
841 /* NUM_TX_RINGS = number of "next-page" entries
842 It will be used as a threshold */
/* SUB_S16 handles 16-bit wraparound of prod - cons */
843 used = SUB_S16(prod, cons) + (s16)NUM_TX_RINGS;
845 #ifdef BNX2X_STOP_ON_ERROR
847 WARN_ON(used > fp->bp->tx_ring_size);
848 WARN_ON((fp->bp->tx_ring_size - used) > MAX_TX_AVAIL);
851 return (s16)(fp->bp->tx_ring_size) - used;
/*
 * TX completion processing: walk from the cached sw consumer up to the
 * status-block consumer, freeing each completed packet via
 * bnx2x_free_tx_pkt(), then publish the new consumers and wake the
 * netdev TX queue if it was stopped and room is available again.
 *
 * NOTE(review): loop-local declarations, the sw_cons increment, the
 * smp_mb() referred to by the comment at lines 896-899, and several
 * brace lines were dropped in extraction — confirm against full file.
 */
854 static void bnx2x_tx_int(struct bnx2x_fastpath *fp, int work)
856 struct bnx2x *bp = fp->bp;
857 struct netdev_queue *txq;
858 u16 hw_cons, sw_cons, bd_cons = fp->tx_bd_cons;
861 #ifdef BNX2X_STOP_ON_ERROR
862 if (unlikely(bp->panic))
866 txq = netdev_get_tx_queue(bp->dev, fp->index);
867 hw_cons = le16_to_cpu(*fp->tx_cons_sb);
868 sw_cons = fp->tx_pkt_cons;
870 while (sw_cons != hw_cons) {
873 pkt_cons = TX_BD(sw_cons);
875 /* prefetch(bp->tx_buf_ring[pkt_cons].skb); */
877 DP(NETIF_MSG_TX_DONE, "hw_cons %u sw_cons %u pkt_cons %u\n",
878 hw_cons, sw_cons, pkt_cons);
880 /* if (NEXT_TX_IDX(sw_cons) != hw_cons) {
882 prefetch(fp->tx_buf_ring[NEXT_TX_IDX(sw_cons)].skb);
885 bd_cons = bnx2x_free_tx_pkt(bp, fp, pkt_cons);
/* publish updated consumers */
893 fp->tx_pkt_cons = sw_cons;
894 fp->tx_bd_cons = bd_cons;
896 /* Need to make the tx_bd_cons update visible to start_xmit()
897 * before checking for netif_tx_queue_stopped(). Without the
898 * memory barrier, there is a small possibility that start_xmit()
899 * will miss it and cause the queue to be stopped forever.
903 /* TBD need a thresh? */
904 if (unlikely(netif_tx_queue_stopped(txq))) {
/* re-check under the tx lock to close the race with start_xmit() */
906 __netif_tx_lock(txq, smp_processor_id());
908 if ((netif_tx_queue_stopped(txq)) &&
909 (bp->state == BNX2X_STATE_OPEN) &&
910 (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3))
911 netif_tx_wake_queue(txq);
913 __netif_tx_unlock(txq);
/*
 * Slowpath (ramrod) completion handler: advances the fastpath or
 * global driver state machine according to which ramrod completed.
 * Multi-queue (per-fastpath) completions are dispatched on
 * (command | fp->state); everything else on (command | bp->state).
 *
 * NOTE(review): the if() that separates the per-fastpath branch from
 * the global branch, the break statements and the closing braces were
 * dropped in extraction — confirm against the full file.
 */
918 static void bnx2x_sp_event(struct bnx2x_fastpath *fp,
919 union eth_rx_cqe *rr_cqe)
921 struct bnx2x *bp = fp->bp;
922 int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data);
923 int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data);
926 "fp %d cid %d got ramrod #%d state is %x type is %d\n",
927 FP_IDX(fp), cid, command, bp->state,
928 rr_cqe->ramrod_cqe.ramrod_type);
/* per-fastpath ramrods */
933 switch (command | fp->state) {
934 case (RAMROD_CMD_ID_ETH_CLIENT_SETUP |
935 BNX2X_FP_STATE_OPENING):
936 DP(NETIF_MSG_IFUP, "got MULTI[%d] setup ramrod\n",
938 fp->state = BNX2X_FP_STATE_OPEN;
941 case (RAMROD_CMD_ID_ETH_HALT | BNX2X_FP_STATE_HALTING):
942 DP(NETIF_MSG_IFDOWN, "got MULTI[%d] halt ramrod\n",
944 fp->state = BNX2X_FP_STATE_HALTED;
948 BNX2X_ERR("unexpected MC reply (%d) "
949 "fp->state is %x\n", command, fp->state);
952 mb(); /* force bnx2x_wait_ramrod() to see the change */
/* global (leading-queue) ramrods */
956 switch (command | bp->state) {
957 case (RAMROD_CMD_ID_ETH_PORT_SETUP | BNX2X_STATE_OPENING_WAIT4_PORT):
958 DP(NETIF_MSG_IFUP, "got setup ramrod\n");
959 bp->state = BNX2X_STATE_OPEN;
962 case (RAMROD_CMD_ID_ETH_HALT | BNX2X_STATE_CLOSING_WAIT4_HALT):
963 DP(NETIF_MSG_IFDOWN, "got halt ramrod\n");
964 bp->state = BNX2X_STATE_CLOSING_WAIT4_DELETE;
965 fp->state = BNX2X_FP_STATE_HALTED;
968 case (RAMROD_CMD_ID_ETH_CFC_DEL | BNX2X_STATE_CLOSING_WAIT4_HALT):
969 DP(NETIF_MSG_IFDOWN, "got delete ramrod for MULTI[%d]\n", cid);
970 bnx2x_fp(bp, cid, state) = BNX2X_FP_STATE_CLOSED;
974 case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_OPEN):
975 case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_DIAG):
976 DP(NETIF_MSG_IFUP, "got set mac ramrod\n");
977 bp->set_mac_pending = 0;
980 case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_CLOSING_WAIT4_HALT):
981 DP(NETIF_MSG_IFDOWN, "got (un)set mac ramrod\n");
985 BNX2X_ERR("unexpected MC reply (%d) bp->state is %x\n",
989 mb(); /* force bnx2x_wait_ramrod() to see the change */
/*
 * Release one SGE ring slot: unmap and free its page set and clear
 * the slot.  "Next page" elements (which hold no page) are skipped —
 * the guard condition for that early return was dropped in extraction
 * along with the sge->addr clearing lines; confirm against full file.
 */
992 static inline void bnx2x_free_rx_sge(struct bnx2x *bp,
993 struct bnx2x_fastpath *fp, u16 index)
995 struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
996 struct page *page = sw_buf->page;
997 struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
999 /* Skip "next page" elements */
1003 pci_unmap_page(bp->pdev, pci_unmap_addr(sw_buf, mapping),
1004 SGE_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);
1005 __free_pages(page, PAGES_PER_SGE_SHIFT);
1007 sw_buf->page = NULL;
/*
 * Free SGE ring slots [0, last) via bnx2x_free_rx_sge().
 *
 * Fix: restored the dropped brace lines and the 'int i' declaration
 * per the upstream bnx2x driver.
 */
static inline void bnx2x_free_rx_sge_range(struct bnx2x *bp,
					   struct bnx2x_fastpath *fp, int last)
{
	int i;

	for (i = 0; i < last; i++)
		bnx2x_free_rx_sge(bp, fp, i);
}
/*
 * Allocate a fresh page set for SGE ring slot 'index', DMA-map it and
 * publish the bus address in the hardware descriptor.  Returns 0 on
 * success, negative errno on allocation/mapping failure (the
 * 'dma_addr_t mapping' declaration and the return statements were
 * dropped in extraction — confirm against full file).
 */
1021 static inline int bnx2x_alloc_rx_sge(struct bnx2x *bp,
1022 struct bnx2x_fastpath *fp, u16 index)
1024 struct page *page = alloc_pages(GFP_ATOMIC, PAGES_PER_SGE_SHIFT);
1025 struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
1026 struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
1029 if (unlikely(page == NULL))
1032 mapping = pci_map_page(bp->pdev, page, 0, SGE_PAGE_SIZE*PAGES_PER_SGE,
1033 PCI_DMA_FROMDEVICE);
/* mapping failed — give the pages back and report failure */
1034 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
1035 __free_pages(page, PAGES_PER_SGE_SHIFT);
1039 sw_buf->page = page;
1040 pci_unmap_addr_set(sw_buf, mapping, mapping);
/* hand the bus address to the chip (hi/lo split) */
1042 sge->addr_hi = cpu_to_le32(U64_HI(mapping));
1043 sge->addr_lo = cpu_to_le32(U64_LO(mapping));
/*
 * Allocate and DMA-map a receive skb for RX BD ring slot 'index' and
 * publish its bus address to the hardware descriptor.  Returns 0 on
 * success, negative errno on failure (the 'dma_addr_t mapping' decl,
 * the dev_kfree_skb() on mapping failure, the rx_buf->skb assignment
 * and the return statements were dropped in extraction — confirm
 * against the full file).
 */
1048 static inline int bnx2x_alloc_rx_skb(struct bnx2x *bp,
1049 struct bnx2x_fastpath *fp, u16 index)
1051 struct sk_buff *skb;
1052 struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
1053 struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
1056 skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
1057 if (unlikely(skb == NULL))
1060 mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_size,
1061 PCI_DMA_FROMDEVICE);
1062 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
1068 pci_unmap_addr_set(rx_buf, mapping, mapping);
/* hand the bus address to the chip (hi/lo split) */
1070 rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
1071 rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
1076 /* note that we are not allocating a new skb,
1077 * we are just moving one from cons to prod
1078 * we are not creating a new mapping,
1079 * so there is no need to check for dma_mapping_error().
1081 static void bnx2x_reuse_rx_skb(struct bnx2x_fastpath *fp,
1082 struct sk_buff *skb, u16 cons, u16 prod)
1084 struct bnx2x *bp = fp->bp;
1085 struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
1086 struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
1087 struct eth_rx_bd *cons_bd = &fp->rx_desc_ring[cons];
1088 struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
1090 pci_dma_sync_single_for_device(bp->pdev,
1091 pci_unmap_addr(cons_rx_buf, mapping),
1092 bp->rx_offset + RX_COPY_THRESH,
1093 PCI_DMA_FROMDEVICE);
1095 prod_rx_buf->skb = cons_rx_buf->skb;
1096 pci_unmap_addr_set(prod_rx_buf, mapping,
1097 pci_unmap_addr(cons_rx_buf, mapping));
1098 *prod_bd = *cons_bd;
1101 static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
1104 u16 last_max = fp->last_max_sge;
1106 if (SUB_S16(idx, last_max) > 0)
1107 fp->last_max_sge = idx;
1110 static void bnx2x_clear_sge_mask_next_elems(struct bnx2x_fastpath *fp)
1114 for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
1115 int idx = RX_SGE_CNT * i - 1;
1117 for (j = 0; j < 2; j++) {
1118 SGE_MASK_CLEAR_BIT(fp, idx);
/*
 * After a TPA packet consumed SGEs, clear their mask bits, advance
 * last_max_sge, and push rx_sge_prod forward over every fully-consumed
 * 64-bit mask element, re-arming those elements to all-ones.
 *
 * NOTE(review): local declarations (i/delta), a few brace lines and
 * the early-return when sge_len == 0 were dropped in extraction —
 * confirm against the full file before editing.
 */
1124 static void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
1125 struct eth_fast_path_rx_cqe *fp_cqe)
1127 struct bnx2x *bp = fp->bp;
/* number of SGEs this aggregation actually used */
1128 u16 sge_len = SGE_PAGE_ALIGN(le16_to_cpu(fp_cqe->pkt_len) -
1129 le16_to_cpu(fp_cqe->len_on_bd)) >>
1131 u16 last_max, last_elem, first_elem;
1138 /* First mark all used pages */
1139 for (i = 0; i < sge_len; i++)
1140 SGE_MASK_CLEAR_BIT(fp, RX_SGE(le16_to_cpu(fp_cqe->sgl[i])));
1142 DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
1143 sge_len - 1, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));
1145 /* Here we assume that the last SGE index is the biggest */
1146 prefetch((void *)(fp->sge_mask));
1147 bnx2x_update_last_max_sge(fp, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));
1149 last_max = RX_SGE(fp->last_max_sge);
1150 last_elem = last_max >> RX_SGE_MASK_ELEM_SHIFT;
1151 first_elem = RX_SGE(fp->rx_sge_prod) >> RX_SGE_MASK_ELEM_SHIFT;
1153 /* If ring is not full */
1154 if (last_elem + 1 != first_elem)
1157 /* Now update the prod */
1158 for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
/* stop at the first element that still has outstanding SGEs */
1159 if (likely(fp->sge_mask[i]))
1162 fp->sge_mask[i] = RX_SGE_MASK_ELEM_ONE_MASK;
1163 delta += RX_SGE_MASK_ELEM_SZ;
1167 fp->rx_sge_prod += delta;
1168 /* clear page-end entries */
1169 bnx2x_clear_sge_mask_next_elems(fp);
1172 DP(NETIF_MSG_RX_STATUS,
1173 "fp->last_max_sge = %d fp->rx_sge_prod = %d\n",
1174 fp->last_max_sge, fp->rx_sge_prod);
1177 static inline void bnx2x_init_sge_ring_bit_mask(struct bnx2x_fastpath *fp)
1179 /* Set the mask to all 1-s: it's faster to compare to 0 than to 0xf-s */
1180 memset(fp->sge_mask, 0xff,
1181 (NUM_RX_SGE >> RX_SGE_MASK_ELEM_SHIFT)*sizeof(u64));
1183 /* Clear the two last indices in the page to 1:
1184 these are the indices that correspond to the "next" element,
1185 hence will never be indicated and should be removed from
1186 the calculations. */
1187 bnx2x_clear_sge_mask_next_elems(fp);
/*
 * Begin a TPA (LRO) aggregation in bin 'queue': the pre-allocated
 * empty skb from the pool takes over the producer slot (and is mapped
 * fresh), while the partially-filled cons buffer is parked in the pool
 * until bnx2x_tpa_stop() completes the aggregation.
 *
 * NOTE(review): the 'dma_addr_t mapping' declaration, brace lines and
 * the #else of the __powerpc64__ format-string split were dropped in
 * extraction — confirm against the full file.
 */
1190 static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
1191 struct sk_buff *skb, u16 cons, u16 prod)
1193 struct bnx2x *bp = fp->bp;
1194 struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
1195 struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
1196 struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
1199 /* move empty skb from pool to prod and map it */
1200 prod_rx_buf->skb = fp->tpa_pool[queue].skb;
1201 mapping = pci_map_single(bp->pdev, fp->tpa_pool[queue].skb->data,
1202 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
1203 pci_unmap_addr_set(prod_rx_buf, mapping, mapping);
1205 /* move partial skb from cons to pool (don't unmap yet) */
1206 fp->tpa_pool[queue] = *cons_rx_buf;
1208 /* mark bin state as start - print error if current state != stop */
1209 if (fp->tpa_state[queue] != BNX2X_TPA_STOP)
1210 BNX2X_ERR("start of bin not in stop [%d]\n", queue);
1212 fp->tpa_state[queue] = BNX2X_TPA_START;
1214 /* point prod_bd to new skb */
1215 prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
1216 prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
1218 #ifdef BNX2X_STOP_ON_ERROR
/* debug-only: track which TPA bins are active */
1219 fp->tpa_queue_used |= (1 << queue);
1220 #ifdef __powerpc64__
1221 DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n",
1223 DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
1225 fp->tpa_queue_used);
/*
 * Attach the SGE pages of a completed TPA aggregation to 'skb' as page
 * fragments.  For each SGE: allocate a replacement page first (so the
 * ring never loses a slot), then unmap the old page and append it via
 * skb_fill_page_desc().  Returns 0 on success; on replacement-page
 * allocation failure bumps rx_skb_alloc_failed and returns the error,
 * dropping the rest of the packet.
 *
 * NOTE(review): the 'cqe_idx' parameter line, 'int err/j' decls, the
 * gso_type assignment, brace/return lines and the #endif of
 * BNX2X_STOP_ON_ERROR were dropped in extraction — confirm against
 * the full file.
 */
1229 static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
1230 struct sk_buff *skb,
1231 struct eth_fast_path_rx_cqe *fp_cqe,
1234 struct sw_rx_page *rx_pg, old_rx_pg;
1235 u16 len_on_bd = le16_to_cpu(fp_cqe->len_on_bd);
1236 u32 i, frag_len, frag_size, pages;
/* bytes carried by SGEs = total packet minus what is on the BD */
1240 frag_size = le16_to_cpu(fp_cqe->pkt_len) - len_on_bd;
1241 pages = SGE_PAGE_ALIGN(frag_size) >> SGE_PAGE_SHIFT;
1243 /* This is needed in order to enable forwarding support */
1245 skb_shinfo(skb)->gso_size = min((u32)SGE_PAGE_SIZE,
1246 max(frag_size, (u32)len_on_bd));
1248 #ifdef BNX2X_STOP_ON_ERROR
1250 min((u32)8, (u32)MAX_SKB_FRAGS) * SGE_PAGE_SIZE * PAGES_PER_SGE) {
1251 BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
1253 BNX2X_ERR("fp_cqe->pkt_len = %d fp_cqe->len_on_bd = %d\n",
1254 fp_cqe->pkt_len, len_on_bd);
1260 /* Run through the SGL and compose the fragmented skb */
1261 for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
1262 u16 sge_idx = RX_SGE(le16_to_cpu(fp_cqe->sgl[j]));
1264 /* FW gives the indices of the SGE as if the ring is an array
1265 (meaning that "next" element will consume 2 indices) */
1266 frag_len = min(frag_size, (u32)(SGE_PAGE_SIZE*PAGES_PER_SGE));
1267 rx_pg = &fp->rx_page_ring[sge_idx];
1270 /* If we fail to allocate a substitute page, we simply stop
1271 where we are and drop the whole packet */
1272 err = bnx2x_alloc_rx_sge(bp, fp, sge_idx);
1273 if (unlikely(err)) {
1274 bp->eth_stats.rx_skb_alloc_failed++;
1278 /* Unmap the page as we r going to pass it to the stack */
1279 pci_unmap_page(bp->pdev, pci_unmap_addr(&old_rx_pg, mapping),
1280 SGE_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);
1282 /* Add one frag and update the appropriate fields in the skb */
1283 skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);
1285 skb->data_len += frag_len;
1286 skb->truesize += frag_len;
1287 skb->len += frag_len;
1289 frag_size -= frag_len;
/* bnx2x_tpa_stop() - terminate the TPA aggregation in bin @queue and pass
 * the aggregated packet to the stack.  The pooled skb is always unmapped
 * (the bin goes back to BNX2X_TPA_STOP regardless of the outcome).  If a
 * replacement skb can be allocated: the IP checksum is recomputed (the HW
 * aggregated the payload, so the header checksum is stale), SGL pages are
 * attached via bnx2x_fill_frag_skb(), and the skb is delivered - through
 * vlan_hwaccel_receive_skb() when HW VLAN stripping applies, otherwise via
 * netif_receive_skb().  On any allocation failure the packet is dropped and
 * the old buffer stays in the bin. */
1295 static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
1296 u16 queue, int pad, int len, union eth_rx_cqe *cqe,
1299 struct sw_rx_bd *rx_buf = &fp->tpa_pool[queue];
1300 struct sk_buff *skb = rx_buf->skb;
1302 struct sk_buff *new_skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
1304 /* Unmap skb in the pool anyway, as we are going to change
1305 pool entry status to BNX2X_TPA_STOP even if new skb allocation
1307 pci_unmap_single(bp->pdev, pci_unmap_addr(rx_buf, mapping),
1308 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
1310 if (likely(new_skb)) {
1311 /* fix ip xsum and give it to the stack */
1312 /* (no need to map the new skb) */
/* is_vlan_cqe: FW parsed a VLAN tag; is_not_hwaccel_vlan_cqe: tag present
 * but HW VLAN stripping is off, so the tag is still inline in the frame */
1315 (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
1316 PARSING_FLAGS_VLAN);
1317 int is_not_hwaccel_vlan_cqe =
1318 (is_vlan_cqe && (!(bp->flags & HW_VLAN_RX_FLAG)));
1322 prefetch(((char *)(skb)) + 128);
1324 #ifdef BNX2X_STOP_ON_ERROR
1325 if (pad + len > bp->rx_buf_size) {
1326 BNX2X_ERR("skb_put is about to fail... "
1327 "pad %d len %d rx_buf_size %d\n",
1328 pad, len, bp->rx_buf_size);
1334 skb_reserve(skb, pad);
1337 skb->protocol = eth_type_trans(skb, bp->dev);
/* TPA only aggregates TCP, checksum was validated by HW */
1338 skb->ip_summed = CHECKSUM_UNNECESSARY;
1339 skb_record_rx_queue(skb, queue);
1344 iph = (struct iphdr *)skb->data;
1346 /* If there is no Rx VLAN offloading -
1347 take VLAN tag into an account */
1348 if (unlikely(is_not_hwaccel_vlan_cqe))
1349 iph = (struct iphdr *)((u8 *)iph + VLAN_HLEN);
1352 iph->check = ip_fast_csum((u8 *)iph, iph->ihl);
1355 if (!bnx2x_fill_frag_skb(bp, fp, skb,
1356 &cqe->fast_path_cqe, cqe_idx)) {
1358 if ((bp->vlgrp != NULL) && is_vlan_cqe &&
1359 (!is_not_hwaccel_vlan_cqe))
1360 vlan_hwaccel_receive_skb(skb, bp->vlgrp,
1361 le16_to_cpu(cqe->fast_path_cqe.
1365 netif_receive_skb(skb);
1367 DP(NETIF_MSG_RX_STATUS, "Failed to allocate new pages"
1368 " - dropping packet!\n");
1373 /* put new skb in bin */
1374 fp->tpa_pool[queue].skb = new_skb;
1377 /* else drop the packet and keep the buffer in the bin */
1378 DP(NETIF_MSG_RX_STATUS,
1379 "Failed to allocate new skb - dropping packet!\n");
1380 bp->eth_stats.rx_skb_alloc_failed++;
1383 fp->tpa_state[queue] = BNX2X_TPA_STOP;
/* Publish the new BD/CQE/SGE producer indices to the chip by writing the
 * ustorm_eth_rx_producers structure word-by-word into USTORM internal
 * memory.  A write barrier (see comment below; the barrier statement
 * itself is elided in this extract) must precede the register writes on
 * weakly-ordered archs, and mmiowb() keeps the producer updates ordered. */
1386 static inline void bnx2x_update_rx_prod(struct bnx2x *bp,
1387 struct bnx2x_fastpath *fp,
1388 u16 bd_prod, u16 rx_comp_prod,
1391 struct ustorm_eth_rx_producers rx_prods = {0};
1394 /* Update producers */
1395 rx_prods.bd_prod = bd_prod;
1396 rx_prods.cqe_prod = rx_comp_prod;
1397 rx_prods.sge_prod = rx_sge_prod;
1400 * Make sure that the BD and SGE data is updated before updating the
1401 * producers since FW might read the BD/SGE right after the producer
1403 * This is only applicable for weak-ordered memory model archs such
1404 * as IA-64. The following barrier is also mandatory since FW will
1405 * assumes BDs must have buffers.
1409 for (i = 0; i < sizeof(struct ustorm_eth_rx_producers)/4; i++)
1410 REG_WR(bp, BAR_USTRORM_INTMEM +
1411 USTORM_RX_PRODS_OFFSET(BP_PORT(bp), FP_CL_ID(fp)) + i*4,
1412 ((u32 *)&rx_prods)[i]);
1414 mmiowb(); /* keep prod updates ordered */
1416 DP(NETIF_MSG_RX_STATUS,
1417 "queue[%d]: wrote bd_prod %u cqe_prod %u sge_prod %u\n",
1418 fp->index, bd_prod, rx_comp_prod, rx_sge_prod);
/* bnx2x_rx_int() - fastpath receive processing, bounded by the NAPI @budget.
 * Walks the RX completion queue from sw_comp_cons to the HW completion
 * consumer:
 *   - slowpath CQEs are dispatched to bnx2x_sp_event();
 *   - TPA start/stop CQEs are routed to bnx2x_tpa_start()/bnx2x_tpa_stop()
 *     (a CQE marked both START and END is treated as a normal packet);
 *   - regular packets are either copied into a fresh skb (small packet with
 *     jumbo MTU - no jumbo ring exists), re-allocated in place, or, on
 *     error/allocation failure, the buffer is recycled via
 *     bnx2x_reuse_rx_skb() and the packet dropped.
 * Finally the ring indices are stored back to @fp and the new producers are
 * written to the chip.  Returns the packet count (rx_pkt; its declaration
 * and the return statement are elided in this extract). */
1421 static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
1423 struct bnx2x *bp = fp->bp;
1424 u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
1425 u16 hw_comp_cons, sw_comp_cons, sw_comp_prod;
1428 #ifdef BNX2X_STOP_ON_ERROR
1429 if (unlikely(bp->panic))
1433 /* CQ "next element" is of the size of the regular element,
1434 that's why it's ok here */
1435 hw_comp_cons = le16_to_cpu(*fp->rx_cons_sb);
1436 if ((hw_comp_cons & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
1439 bd_cons = fp->rx_bd_cons;
1440 bd_prod = fp->rx_bd_prod;
1441 bd_prod_fw = bd_prod;
1442 sw_comp_cons = fp->rx_comp_cons;
1443 sw_comp_prod = fp->rx_comp_prod;
1445 /* Memory barrier necessary as speculative reads of the rx
1446 * buffer can be ahead of the index in the status block
1450 DP(NETIF_MSG_RX_STATUS,
1451 "queue[%d]: hw_comp_cons %u sw_comp_cons %u\n",
1452 FP_IDX(fp), hw_comp_cons, sw_comp_cons);
1454 while (sw_comp_cons != hw_comp_cons) {
1455 struct sw_rx_bd *rx_buf = NULL;
1456 struct sk_buff *skb;
1457 union eth_rx_cqe *cqe;
1461 comp_ring_cons = RCQ_BD(sw_comp_cons);
1462 bd_prod = RX_BD(bd_prod);
1463 bd_cons = RX_BD(bd_cons);
1465 cqe = &fp->rx_comp_ring[comp_ring_cons];
1466 cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
1468 DP(NETIF_MSG_RX_STATUS, "CQE type %x err %x status %x"
1469 " queue %x vlan %x len %u\n", CQE_TYPE(cqe_fp_flags),
1470 cqe_fp_flags, cqe->fast_path_cqe.status_flags,
1471 le32_to_cpu(cqe->fast_path_cqe.rss_hash_result),
1472 le16_to_cpu(cqe->fast_path_cqe.vlan_tag),
1473 le16_to_cpu(cqe->fast_path_cqe.pkt_len));
1475 /* is this a slowpath msg? */
1476 if (unlikely(CQE_TYPE(cqe_fp_flags))) {
1477 bnx2x_sp_event(fp, cqe);
1480 /* this is an rx packet */
1482 rx_buf = &fp->rx_buf_ring[bd_cons];
1484 len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
1485 pad = cqe->fast_path_cqe.placement_offset;
1487 /* If CQE is marked both TPA_START and TPA_END
1488 it is a non-TPA CQE */
1489 if ((!fp->disable_tpa) &&
1490 (TPA_TYPE(cqe_fp_flags) !=
1491 (TPA_TYPE_START | TPA_TYPE_END))) {
1492 u16 queue = cqe->fast_path_cqe.queue_index;
1494 if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_START) {
1495 DP(NETIF_MSG_RX_STATUS,
1496 "calling tpa_start on queue %d\n",
1499 bnx2x_tpa_start(fp, queue, skb,
1504 if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_END) {
1505 DP(NETIF_MSG_RX_STATUS,
1506 "calling tpa_stop on queue %d\n",
1509 if (!BNX2X_RX_SUM_FIX(cqe))
1510 BNX2X_ERR("STOP on none TCP "
1513 /* This is a size of the linear data
1515 len = le16_to_cpu(cqe->fast_path_cqe.
1517 bnx2x_tpa_stop(bp, fp, queue, pad,
1518 len, cqe, comp_ring_cons);
1519 #ifdef BNX2X_STOP_ON_ERROR
1524 bnx2x_update_sge_prod(fp,
1525 &cqe->fast_path_cqe);
/* sync only the header area the CPU is about to touch */
1530 pci_dma_sync_single_for_device(bp->pdev,
1531 pci_unmap_addr(rx_buf, mapping),
1532 pad + RX_COPY_THRESH,
1533 PCI_DMA_FROMDEVICE);
1535 prefetch(((char *)(skb)) + 128);
1537 /* is this an error packet? */
1538 if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
1539 DP(NETIF_MSG_RX_ERR,
1540 "ERROR flags %x rx packet %u\n",
1541 cqe_fp_flags, sw_comp_cons);
1542 bp->eth_stats.rx_err_discard_pkt++;
1546 /* Since we don't have a jumbo ring
1547 * copy small packets if mtu > 1500
1549 if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
1550 (len <= RX_COPY_THRESH)) {
1551 struct sk_buff *new_skb;
1553 new_skb = netdev_alloc_skb(bp->dev,
1555 if (new_skb == NULL) {
1556 DP(NETIF_MSG_RX_ERR,
1557 "ERROR packet dropped "
1558 "because of alloc failure\n");
1559 bp->eth_stats.rx_skb_alloc_failed++;
1564 skb_copy_from_linear_data_offset(skb, pad,
1565 new_skb->data + pad, len);
1566 skb_reserve(new_skb, pad);
1567 skb_put(new_skb, len);
/* original buffer goes back on the ring untouched */
1569 bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
1573 } else if (bnx2x_alloc_rx_skb(bp, fp, bd_prod) == 0) {
1574 pci_unmap_single(bp->pdev,
1575 pci_unmap_addr(rx_buf, mapping),
1577 PCI_DMA_FROMDEVICE);
1578 skb_reserve(skb, pad);
1582 DP(NETIF_MSG_RX_ERR,
1583 "ERROR packet dropped because "
1584 "of alloc failure\n");
1585 bp->eth_stats.rx_skb_alloc_failed++;
1587 bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
1591 skb->protocol = eth_type_trans(skb, bp->dev);
1593 skb->ip_summed = CHECKSUM_NONE;
1595 if (likely(BNX2X_RX_CSUM_OK(cqe)))
1596 skb->ip_summed = CHECKSUM_UNNECESSARY;
1598 bp->eth_stats.hw_csum_err++;
1603 if ((bp->vlgrp != NULL) && (bp->flags & HW_VLAN_RX_FLAG) &&
1604 (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
1605 PARSING_FLAGS_VLAN))
1606 vlan_hwaccel_receive_skb(skb, bp->vlgrp,
1607 le16_to_cpu(cqe->fast_path_cqe.vlan_tag));
1610 netif_receive_skb(skb);
1616 bd_cons = NEXT_RX_IDX(bd_cons);
1617 bd_prod = NEXT_RX_IDX(bd_prod);
1618 bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
1621 sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
1622 sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);
1624 if (rx_pkt == budget)
1628 fp->rx_bd_cons = bd_cons;
1629 fp->rx_bd_prod = bd_prod_fw;
1630 fp->rx_comp_cons = sw_comp_cons;
1631 fp->rx_comp_prod = sw_comp_prod;
1633 /* Update producers */
1634 bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
1637 fp->rx_pkt += rx_pkt;
/* MSI-X fastpath interrupt handler: if interrupts are enabled (intr_sem is
 * zero), ack the fastpath status block with IGU_INT_DISABLE and schedule
 * NAPI for this queue.  Return statements (presumably IRQ_HANDLED) are
 * elided in this extract. */
1643 static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
1645 struct bnx2x_fastpath *fp = fp_cookie;
1646 struct bnx2x *bp = fp->bp;
1647 int index = FP_IDX(fp);
1649 /* Return here if interrupt is disabled */
1650 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
1651 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
1655 DP(BNX2X_MSG_FP, "got an MSI-X interrupt on IDX:SB [%d:%d]\n",
1656 index, FP_SB_ID(fp));
/* ack the SB and leave fastpath interrupts disabled until NAPI completes */
1657 bnx2x_ack_sb(bp, FP_SB_ID(fp), USTORM_ID, 0, IGU_INT_DISABLE, 0);
1659 #ifdef BNX2X_STOP_ON_ERROR
1660 if (unlikely(bp->panic))
/* warm the cache lines the poll loop reads first */
1664 prefetch(fp->rx_cons_sb);
1665 prefetch(fp->tx_cons_sb);
1666 prefetch(&fp->status_blk->c_status_block.status_block_index);
1667 prefetch(&fp->status_blk->u_status_block.status_block_index);
1669 napi_schedule(&bnx2x_fp(bp, index, napi));
/* Legacy/shared (INTx) interrupt handler.  Acks the IGU to get the status
 * bits; returns early (presumably IRQ_NONE; return lines elided) when the
 * interrupt is not ours or when interrupts are disabled via intr_sem.
 * Bit (0x2 << sb_id) means fastpath 0 work -> schedule NAPI; bit 0x1 means
 * slowpath work -> queue the sp_task. */
1674 static irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
1676 struct bnx2x *bp = netdev_priv(dev_instance);
1677 u16 status = bnx2x_ack_int(bp);
1680 /* Return here if interrupt is shared and it's not for us */
1681 if (unlikely(status == 0)) {
1682 DP(NETIF_MSG_INTR, "not our interrupt!\n");
1685 DP(NETIF_MSG_INTR, "got an interrupt status %u\n", status);
1687 /* Return here if interrupt is disabled */
1688 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
1689 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
1693 #ifdef BNX2X_STOP_ON_ERROR
1694 if (unlikely(bp->panic))
1698 mask = 0x2 << bp->fp[0].sb_id;
1699 if (status & mask) {
1700 struct bnx2x_fastpath *fp = &bp->fp[0];
1702 prefetch(fp->rx_cons_sb);
1703 prefetch(fp->tx_cons_sb);
1704 prefetch(&fp->status_blk->c_status_block.status_block_index);
1705 prefetch(&fp->status_blk->u_status_block.status_block_index);
1707 napi_schedule(&bnx2x_fp(bp, 0, napi));
1713 if (unlikely(status & 0x1)) {
/* slowpath events are handled out of interrupt context */
1714 queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
1722 DP(NETIF_MSG_INTR, "got an unknown interrupt! (status %u)\n",
1728 /* end of fast path */
1730 static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event);
1735 * General service functions
/* Acquire a HW resource lock through the MISC driver-control registers.
 * Validates the resource id, picks the control register by function number
 * (functions 0-5 use DRIVER_CONTROL_1, higher ones DRIVER_CONTROL_7),
 * rejects a lock we already hold, then polls up to 1000 times trying to
 * set the bit ("Try for 5 second every 5ms"; the delay call and return
 * statements are elided in this extract - returns 0 on success, negative
 * on invalid resource / already taken / timeout). */
1738 static int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource)
1741 u32 resource_bit = (1 << resource);
1742 int func = BP_FUNC(bp);
1743 u32 hw_lock_control_reg;
1746 /* Validating that the resource is within range */
1747 if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1749 "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1750 resource, HW_LOCK_MAX_RESOURCE_VALUE);
1755 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1757 hw_lock_control_reg =
1758 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1761 /* Validating that the resource is not already taken */
1762 lock_status = REG_RD(bp, hw_lock_control_reg);
1763 if (lock_status & resource_bit) {
1764 DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n",
1765 lock_status, resource_bit);
1769 /* Try for 5 second every 5ms */
1770 for (cnt = 0; cnt < 1000; cnt++) {
1771 /* Try to acquire the lock */
/* writing the bit to reg+4 requests the lock; reading it back
 * confirms whether we got it */
1772 REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
1773 lock_status = REG_RD(bp, hw_lock_control_reg);
1774 if (lock_status & resource_bit)
1779 DP(NETIF_MSG_HW, "Timeout\n");
/* Release a HW resource lock previously taken by bnx2x_acquire_hw_lock().
 * Validates the resource id, checks the lock is actually held, then writes
 * the bit to the control register to drop it.  Return statements are
 * elided in this extract (presumably 0 on success, negative on error). */
1783 static int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource)
1786 u32 resource_bit = (1 << resource);
1787 int func = BP_FUNC(bp);
1788 u32 hw_lock_control_reg;
1790 /* Validating that the resource is within range */
1791 if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1793 "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1794 resource, HW_LOCK_MAX_RESOURCE_VALUE);
1799 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1801 hw_lock_control_reg =
1802 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1805 /* Validating that the resource is currently taken */
1806 lock_status = REG_RD(bp, hw_lock_control_reg);
1807 if (!(lock_status & resource_bit)) {
1808 DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n",
1809 lock_status, resource_bit);
/* writing the bit to the base register releases the lock */
1813 REG_WR(bp, hw_lock_control_reg, resource_bit);
1817 /* HW Lock for shared dual port PHYs */
/* Take the software phy_mutex; for BCM8072/8073 external PHYs (whose MDIO
 * bus is shared between ports) additionally take the HW 8072_MDIO lock. */
1818 static void bnx2x_acquire_phy_lock(struct bnx2x *bp)
1820 u32 ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
1822 mutex_lock(&bp->port.phy_mutex);
1824 if ((ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072) ||
1825 (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073))
1826 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_8072_MDIO);
/* Mirror of bnx2x_acquire_phy_lock(): drop the HW 8072_MDIO lock first
 * (for BCM8072/8073 external PHYs), then release the phy_mutex. */
1829 static void bnx2x_release_phy_lock(struct bnx2x *bp)
1831 u32 ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
1833 if ((ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072) ||
1834 (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073))
1835 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_8072_MDIO);
1837 mutex_unlock(&bp->port.phy_mutex);
/* bnx2x_set_gpio() - drive one GPIO pin to @mode (output low/high or
 * floating input).  The pin is shifted for the other port when the NIG
 * port-swap straps are active.  Protected by the GPIO HW lock.  Return
 * statements are elided in this extract. */
1840 int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
1842 /* The GPIO should be swapped if swap register is set and active */
1843 int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
1844 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
1845 int gpio_shift = gpio_num +
1846 (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
1847 u32 gpio_mask = (1 << gpio_shift);
1850 if (gpio_num > MISC_REGISTERS_GPIO_3) {
1851 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
1855 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
1856 /* read GPIO and mask except the float bits */
1857 gpio_reg = (REG_RD(bp, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT);
1860 case MISC_REGISTERS_GPIO_OUTPUT_LOW:
1861 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output low\n",
1862 gpio_num, gpio_shift);
1863 /* clear FLOAT and set CLR */
1864 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1865 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS);
1868 case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
1869 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output high\n",
1870 gpio_num, gpio_shift);
1871 /* clear FLOAT and set SET */
1872 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1873 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_SET_POS);
1876 case MISC_REGISTERS_GPIO_INPUT_HI_Z:
1877 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> input\n",
1878 gpio_num, gpio_shift);
/* set FLOAT - pin becomes a high-impedance input */
1880 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1887 REG_WR(bp, MISC_REG_GPIO, gpio_reg);
1888 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
/* bnx2x_set_spio() - drive one SPIO pin (valid range SPIO_4..SPIO_7) to
 * @mode, analogous to bnx2x_set_gpio() but without port swapping.
 * Protected by the SPIO HW lock; return statements are elided in this
 * extract. */
1893 static int bnx2x_set_spio(struct bnx2x *bp, int spio_num, u32 mode)
1895 u32 spio_mask = (1 << spio_num);
1898 if ((spio_num < MISC_REGISTERS_SPIO_4) ||
1899 (spio_num > MISC_REGISTERS_SPIO_7)) {
1900 BNX2X_ERR("Invalid SPIO %d\n", spio_num);
1904 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
1905 /* read SPIO and mask except the float bits */
1906 spio_reg = (REG_RD(bp, MISC_REG_SPIO) & MISC_REGISTERS_SPIO_FLOAT);
1909 case MISC_REGISTERS_SPIO_OUTPUT_LOW:
1910 DP(NETIF_MSG_LINK, "Set SPIO %d -> output low\n", spio_num);
1911 /* clear FLOAT and set CLR */
1912 spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
1913 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_CLR_POS);
1916 case MISC_REGISTERS_SPIO_OUTPUT_HIGH:
1917 DP(NETIF_MSG_LINK, "Set SPIO %d -> output high\n", spio_num);
1918 /* clear FLOAT and set SET */
1919 spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
1920 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_SET_POS);
1923 case MISC_REGISTERS_SPIO_INPUT_HI_Z:
1924 DP(NETIF_MSG_LINK, "Set SPIO %d -> input\n", spio_num);
/* set FLOAT - pin becomes a high-impedance input */
1926 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
1933 REG_WR(bp, MISC_REG_SPIO, spio_reg);
1934 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
/* Translate the negotiated IEEE pause bits (link_vars.ieee_fc) into the
 * ethtool advertising flags (ADVERTISED_Asym_Pause / ADVERTISED_Pause;
 * the Pause flag lines are elided in this extract). */
1939 static void bnx2x_calc_fc_adv(struct bnx2x *bp)
1941 switch (bp->link_vars.ieee_fc &
1942 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK) {
1943 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE:
1944 bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
1947 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH:
1948 bp->port.advertising |= (ADVERTISED_Asym_Pause |
1951 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC:
1952 bp->port.advertising |= ADVERTISED_Asym_Pause;
/* default: clear the pause advertisement */
1955 bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
/* Report the current link state to the kernel log and toggle the netdev
 * carrier: on link-up print speed, duplex and flow-control status; on
 * link-down drop the carrier and log it. */
1961 static void bnx2x_link_report(struct bnx2x *bp)
1963 if (bp->link_vars.link_up) {
1964 if (bp->state == BNX2X_STATE_OPEN)
1965 netif_carrier_on(bp->dev);
1966 printk(KERN_INFO PFX "%s NIC Link is Up, ", bp->dev->name);
1968 printk("%d Mbps ", bp->link_vars.line_speed);
1970 if (bp->link_vars.duplex == DUPLEX_FULL)
1971 printk("full duplex");
1973 printk("half duplex");
1975 if (bp->link_vars.flow_ctrl != BNX2X_FLOW_CTRL_NONE) {
1976 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) {
1977 printk(", receive ");
1978 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
1979 printk("& transmit ");
1981 printk(", transmit ");
1983 printk("flow control ON");
1987 } else { /* link_down */
1988 netif_carrier_off(bp->dev);
1989 printk(KERN_ERR PFX "%s NIC Link is Down\n", bp->dev->name);
/* First-time PHY bring-up.  Requires a working bootcode (MCP); chooses the
 * requested auto-negotiated flow control (RX FC is dropped for large MTUs
 * for better performance - the MTU threshold line is elided here), runs
 * bnx2x_phy_init() under the PHY lock, derives the pause advertisement and
 * reports the link if already up.  Returns the phy_init result (return
 * lines elided in this extract). */
1993 static u8 bnx2x_initial_phy_init(struct bnx2x *bp)
1995 if (!BP_NOMCP(bp)) {
1998 /* Initialize link parameters structure variables */
1999 /* It is recommended to turn off RX FC for jumbo frames
2000 for better performance */
2002 bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;
2003 else if (bp->dev->mtu > 5000)
2004 bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_TX;
2006 bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;
2008 bnx2x_acquire_phy_lock(bp);
2009 rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars);
2010 bnx2x_release_phy_lock(bp);
2012 bnx2x_calc_fc_adv(bp);
2014 if (bp->link_vars.link_up)
2015 bnx2x_link_report(bp);
2020 BNX2X_ERR("Bootcode is missing -not initializing link\n");
/* (Re)apply the current link parameters: run bnx2x_phy_init() under the
 * PHY lock and refresh the pause advertisement.  No-op with an error log
 * when the bootcode (MCP) is absent. */
2024 static void bnx2x_link_set(struct bnx2x *bp)
2026 if (!BP_NOMCP(bp)) {
2027 bnx2x_acquire_phy_lock(bp);
2028 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
2029 bnx2x_release_phy_lock(bp);
2031 bnx2x_calc_fc_adv(bp);
2033 BNX2X_ERR("Bootcode is missing -not setting link\n");
/* Reset the link under the PHY lock.  No-op with an error log when the
 * bootcode (MCP) is absent. */
2036 static void bnx2x__link_reset(struct bnx2x *bp)
2038 if (!BP_NOMCP(bp)) {
2039 bnx2x_acquire_phy_lock(bp);
2040 bnx2x_link_reset(&bp->link_params, &bp->link_vars);
2041 bnx2x_release_phy_lock(bp);
2043 BNX2X_ERR("Bootcode is missing -not resetting link\n");
/* Run the link self-test under the PHY lock; returns the result of
 * bnx2x_test_link() (return line elided in this extract). */
2046 static u8 bnx2x_link_test(struct bnx2x *bp)
2050 bnx2x_acquire_phy_lock(bp);
2051 rc = bnx2x_test_link(&bp->link_params, &bp->link_vars);
2052 bnx2x_release_phy_lock(bp);
2057 /* Calculates the sum of vn_min_rates.
2058 It's needed for further normalizing of the min_rates.
2063 0 - if all the min_rates are 0.
2064 In the later case fairness algorithm should be deactivated.
2065 If not all min_rates are zero then those that are zeroes will
2068 static u32 bnx2x_calc_vn_wsum(struct bnx2x *bp)
2070 int i, port = BP_PORT(bp);
/* walk all E1H virtual NICs sharing this port; min BW is configured in
 * percent units, scaled by 100 here */
2074 for (i = 0; i < E1HVN_MAX; i++) {
2076 SHMEM_RD(bp, mf_cfg.func_mf_config[2*i + port].config);
2077 u32 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
2078 FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
2079 if (!(vn_cfg & FUNC_MF_CFG_FUNC_HIDE)) {
2080 /* If min rate is zero - set it to 1 */
2082 vn_min_rate = DEF_MIN_RATE;
2086 wsum += vn_min_rate;
2090 /* ... only if all min rates are zeros - disable FAIRNESS */
/* Set up the per-port congestion-management (rate shaping + fairness)
 * structure and store it into XSTORM internal memory.  Rate shaping and
 * fairness are enabled only in E1H multi-function mode (the IS_E1HMF test
 * line is elided in this extract); fairness is additionally disabled when
 * all configured min rates are zero.  All timeouts are converted to SDM
 * ticks (4 usec per tick). */
2097 static void bnx2x_init_port_minmax(struct bnx2x *bp,
2100 struct cmng_struct_per_port *m_cmng_port)
2102 u32 r_param = port_rate / 8;
2103 int port = BP_PORT(bp);
2106 memset(m_cmng_port, 0, sizeof(struct cmng_struct_per_port));
2108 /* Enable minmax only if we are in e1hmf mode */
2110 u32 fair_periodic_timeout_usec;
2113 /* Enable rate shaping and fairness */
2114 m_cmng_port->flags.cmng_vn_enable = 1;
2115 m_cmng_port->flags.fairness_enable = en_fness ? 1 : 0;
2116 m_cmng_port->flags.rate_shaping_enable = 1;
2119 DP(NETIF_MSG_IFUP, "All MIN values are zeroes"
2120 " fairness will be disabled\n");
2122 /* 100 usec in SDM ticks = 25 since each tick is 4 usec */
2123 m_cmng_port->rs_vars.rs_periodic_timeout =
2124 RS_PERIODIC_TIMEOUT_USEC / 4;
2126 /* this is the threshold below which no timer arming will occur
2127 1.25 coefficient is for the threshold to be a little bigger
2128 than the real time, to compensate for timer in-accuracy */
2129 m_cmng_port->rs_vars.rs_threshold =
2130 (RS_PERIODIC_TIMEOUT_USEC * r_param * 5) / 4;
2132 /* resolution of fairness timer */
2133 fair_periodic_timeout_usec = QM_ARB_BYTES / r_param;
2134 /* for 10G it is 1000usec. for 1G it is 10000usec. */
2135 t_fair = T_FAIR_COEF / port_rate;
2137 /* this is the threshold below which we won't arm
2138 the timer anymore */
2139 m_cmng_port->fair_vars.fair_threshold = QM_ARB_BYTES;
2141 /* we multiply by 1e3/8 to get bytes/msec.
2142 We don't want the credits to pass a credit
2143 of the T_FAIR*FAIR_MEM (algorithm resolution) */
2144 m_cmng_port->fair_vars.upper_bound =
2145 r_param * t_fair * FAIR_MEM;
2146 /* since each tick is 4 usec */
2147 m_cmng_port->fair_vars.fairness_timeout =
2148 fair_periodic_timeout_usec / 4;
2151 /* Disable rate shaping and fairness */
2152 m_cmng_port->flags.cmng_vn_enable = 0;
2153 m_cmng_port->flags.fairness_enable = 0;
2154 m_cmng_port->flags.rate_shaping_enable = 0;
2157 "Single function mode minmax will be disabled\n");
2160 /* Store it to internal memory */
2161 for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++)
2162 REG_WR(bp, BAR_XSTRORM_INTMEM +
2163 XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i * 4,
2164 ((u32 *)(m_cmng_port))[i]);
/* Program the per-VN (virtual NIC) rate-shaping and fairness variables for
 * function @func and store them into XSTORM internal memory.  Min/max
 * rates come from the multi-function config in shared memory (percent
 * units scaled by 100); hidden functions get zero rates (the assignments
 * are elided in this extract).  @wsum is the sum of all VN min rates from
 * bnx2x_calc_vn_wsum() - nonzero wsum means fairness is active, in which
 * case a zero min rate is bumped to DEF_MIN_RATE as the algorithm
 * requires.  The BNX2X_PER_PROT_QOS sections are compiled out by default. */
2167 static void bnx2x_init_vn_minmax(struct bnx2x *bp, int func,
2168 u32 wsum, u16 port_rate,
2169 struct cmng_struct_per_port *m_cmng_port)
2171 struct rate_shaping_vars_per_vn m_rs_vn;
2172 struct fairness_vars_per_vn m_fair_vn;
2173 u32 vn_cfg = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
2174 u16 vn_min_rate, vn_max_rate;
2177 /* If function is hidden - set min and max to zeroes */
2178 if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) {
2183 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
2184 FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
2185 /* If FAIRNESS is enabled (not all min rates are zeroes) and
2186 if current min rate is zero - set it to 1.
2187 This is a requirement of the algorithm. */
2188 if ((vn_min_rate == 0) && wsum)
2189 vn_min_rate = DEF_MIN_RATE;
2190 vn_max_rate = ((vn_cfg & FUNC_MF_CFG_MAX_BW_MASK) >>
2191 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
2194 DP(NETIF_MSG_IFUP, "func %d: vn_min_rate=%d vn_max_rate=%d "
2195 "wsum=%d\n", func, vn_min_rate, vn_max_rate, wsum);
2197 memset(&m_rs_vn, 0, sizeof(struct rate_shaping_vars_per_vn));
2198 memset(&m_fair_vn, 0, sizeof(struct fairness_vars_per_vn));
2200 /* global vn counter - maximal Mbps for this vn */
2201 m_rs_vn.vn_counter.rate = vn_max_rate;
2203 /* quota - number of bytes transmitted in this period */
2204 m_rs_vn.vn_counter.quota =
2205 (vn_max_rate * RS_PERIODIC_TIMEOUT_USEC) / 8;
2207 #ifdef BNX2X_PER_PROT_QOS
2208 /* per protocol counter */
2209 for (protocol = 0; protocol < NUM_OF_PROTOCOLS; protocol++) {
2210 /* maximal Mbps for this protocol */
2211 m_rs_vn.protocol_counters[protocol].rate =
2212 protocol_max_rate[protocol];
2213 /* the quota in each timer period -
2214 number of bytes transmitted in this period */
2215 m_rs_vn.protocol_counters[protocol].quota =
2216 (u32)(rs_periodic_timeout_usec *
2218 protocol_counters[protocol].rate/8));
2223 /* credit for each period of the fairness algorithm:
2224 number of bytes in T_FAIR (the vn share the port rate).
2225 wsum should not be larger than 10000, thus
2226 T_FAIR_COEF / (8 * wsum) will always be grater than zero */
2227 m_fair_vn.vn_credit_delta =
2228 max((u64)(vn_min_rate * (T_FAIR_COEF / (8 * wsum))),
2229 (u64)(m_cmng_port->fair_vars.fair_threshold * 2));
2230 DP(NETIF_MSG_IFUP, "m_fair_vn.vn_credit_delta=%d\n",
2231 m_fair_vn.vn_credit_delta);
2234 #ifdef BNX2X_PER_PROT_QOS
2236 u32 protocolWeightSum = 0;
2238 for (protocol = 0; protocol < NUM_OF_PROTOCOLS; protocol++)
2239 protocolWeightSum +=
2240 drvInit.protocol_min_rate[protocol];
2241 /* per protocol counter -
2242 NOT NEEDED IF NO PER-PROTOCOL CONGESTION MANAGEMENT */
2243 if (protocolWeightSum > 0) {
2245 protocol < NUM_OF_PROTOCOLS; protocol++)
2246 /* credit for each period of the
2247 fairness algorithm - number of bytes in
2248 T_FAIR (the protocol share the vn rate) */
2249 m_fair_vn.protocol_credit_delta[protocol] =
2250 (u32)((vn_min_rate / 8) * t_fair *
2251 protocol_min_rate / protocolWeightSum);
2256 /* Store it to internal memory */
2257 for (i = 0; i < sizeof(struct rate_shaping_vars_per_vn)/4; i++)
2258 REG_WR(bp, BAR_XSTRORM_INTMEM +
2259 XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func) + i * 4,
2260 ((u32 *)(&m_rs_vn))[i]);
2262 for (i = 0; i < sizeof(struct fairness_vars_per_vn)/4; i++)
2263 REG_WR(bp, BAR_XSTRORM_INTMEM +
2264 XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func) + i * 4,
2265 ((u32 *)(&m_fair_vn))[i]);
2268 /* This function is called upon link interrupt */
/* Link-change attention handler: stop statistics, re-evaluate the link via
 * bnx2x_link_update(), restart/reset stats on link-up, report the new
 * state, notify the other E1H VNs through general-attention bits, and (on
 * E1H with a live link) re-initialize the port/VN rate-shaping and
 * fairness contexts. */
2269 static void bnx2x_link_attn(struct bnx2x *bp)
2273 /* Make sure that we are synced with the current statistics */
2274 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2276 bnx2x_link_update(&bp->link_params, &bp->link_vars);
2278 if (bp->link_vars.link_up) {
2280 if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
2281 struct host_port_stats *pstats;
2283 pstats = bnx2x_sp(bp, port_stats);
2284 /* reset old bmac stats */
2285 memset(&(pstats->mac_stx[0]), 0,
2286 sizeof(struct mac_stx));
2288 if ((bp->state == BNX2X_STATE_OPEN) ||
2289 (bp->state == BNX2X_STATE_DISABLED))
2290 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2293 /* indicate link status */
2294 bnx2x_link_report(bp);
/* tell the other VNs on this port about the link change */
2299 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
2300 if (vn == BP_E1HVN(bp))
2303 func = ((vn << 1) | BP_PORT(bp));
2305 /* Set the attention towards other drivers
2307 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
2308 (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
2312 if (CHIP_IS_E1H(bp) && (bp->link_vars.line_speed > 0)) {
2313 struct cmng_struct_per_port m_cmng_port;
2315 int port = BP_PORT(bp);
2317 /* Init RATE SHAPING and FAIRNESS contexts */
2318 wsum = bnx2x_calc_vn_wsum(bp);
2319 bnx2x_init_port_minmax(bp, (int)wsum,
2320 bp->link_vars.line_speed,
2323 for (vn = VN_0; vn < E1HVN_MAX; vn++)
2324 bnx2x_init_vn_minmax(bp, 2*vn + port,
2325 wsum, bp->link_vars.line_speed,
/* Refresh the cached link status (only while the device is OPEN), start or
 * stop statistics accordingly, and report the link state. */
2330 static void bnx2x__link_status_update(struct bnx2x *bp)
2332 if (bp->state != BNX2X_STATE_OPEN)
2335 bnx2x_link_status_update(&bp->link_params, &bp->link_vars);
2337 if (bp->link_vars.link_up)
2338 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2340 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2342 /* indicate link status */
2343 bnx2x_link_report(bp);
/* Called when this function becomes the PMF (port management function):
 * enable NIG attention for this VN in the HC trailing/leading edge
 * registers and kick the statistics state machine with a PMF event. */
2346 static void bnx2x_pmf_update(struct bnx2x *bp)
2348 int port = BP_PORT(bp);
2352 DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
2354 /* enable nig attention */
2355 val = (0xff0f | (1 << (BP_E1HVN(bp) + 4)));
2356 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
2357 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
2359 bnx2x_stats_handle(bp, STATS_EVENT_PMF);
2367 * General service functions
2370 /* the slow path queue is odd since completions arrive on the fastpath ring */
/* bnx2x_sp_post() - post one entry on the slowpath (SPQ) ring, protected
 * by spq_lock.  Encodes the command and HW CID into the BD header, flags
 * common ramrods, fills the data address, wraps the producer at the last
 * BD, and writes the new producer index to XSTORM.  Fails when the ring
 * is full (return values are elided in this extract - presumably 0 on
 * success, negative when full or panicked). */
2371 static int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
2372 u32 data_hi, u32 data_lo, int common)
2374 int func = BP_FUNC(bp);
2376 DP(BNX2X_MSG_SP/*NETIF_MSG_TIMER*/,
2377 "SPQE (%x:%x) command %d hw_cid %x data (%x:%x) left %x\n",
2378 (u32)U64_HI(bp->spq_mapping), (u32)(U64_LO(bp->spq_mapping) +
2379 (void *)bp->spq_prod_bd - (void *)bp->spq), command,
2380 HW_CID(bp, cid), data_hi, data_lo, bp->spq_left);
2382 #ifdef BNX2X_STOP_ON_ERROR
2383 if (unlikely(bp->panic))
2387 spin_lock_bh(&bp->spq_lock);
2389 if (!bp->spq_left) {
2390 BNX2X_ERR("BUG! SPQ ring full!\n");
2391 spin_unlock_bh(&bp->spq_lock);
2396 /* CID needs port number to be encoded int it */
2397 bp->spq_prod_bd->hdr.conn_and_cmd_data =
2398 cpu_to_le32(((command << SPE_HDR_CMD_ID_SHIFT) |
2400 bp->spq_prod_bd->hdr.type = cpu_to_le16(ETH_CONNECTION_TYPE);
2402 bp->spq_prod_bd->hdr.type |=
2403 cpu_to_le16((1 << SPE_HDR_COMMON_RAMROD_SHIFT));
2405 bp->spq_prod_bd->data.mac_config_addr.hi = cpu_to_le32(data_hi);
2406 bp->spq_prod_bd->data.mac_config_addr.lo = cpu_to_le32(data_lo);
/* wrap the producer back to the ring start at the last BD */
2410 if (bp->spq_prod_bd == bp->spq_last_bd) {
2411 bp->spq_prod_bd = bp->spq;
2412 bp->spq_prod_idx = 0;
2413 DP(NETIF_MSG_TIMER, "end of spq\n");
2420 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func),
2423 spin_unlock_bh(&bp->spq_lock);
2427 /* acquire split MCP access lock register */
/* Polls the MCP GRC lock register (GRCBASE_MCP + 0x9c) until bit 31 reads
 * back set, i.e. the lock is ours.  Loop bound and delay setup lines are
 * elided in this extract; logs and fails (presumably returns negative) on
 * timeout. */
2428 static int bnx2x_acquire_alr(struct bnx2x *bp)
2435 for (j = 0; j < i*10; j++) {
2437 REG_WR(bp, GRCBASE_MCP + 0x9c, val);
2438 val = REG_RD(bp, GRCBASE_MCP + 0x9c);
2439 if (val & (1L << 31))
2444 if (!(val & (1L << 31))) {
2445 BNX2X_ERR("Cannot acquire MCP access lock register\n");
2452 /* release split MCP access lock register */
/* Writes the release value (set-up line elided in this extract) back to
 * the MCP GRC lock register acquired by bnx2x_acquire_alr(). */
2453 static void bnx2x_release_alr(struct bnx2x *bp)
2457 REG_WR(bp, GRCBASE_MCP + 0x9c, val);
/* Compare the cached default status block indices (attention, cstorm,
 * ustorm, xstorm, tstorm) against the chip-written copies and refresh any
 * that changed.  Returns a bitmask of which indices were updated (the
 * accumulation and return lines are elided in this extract). */
2460 static inline u16 bnx2x_update_dsb_idx(struct bnx2x *bp)
2462 struct host_def_status_block *def_sb = bp->def_status_blk;
2465 barrier(); /* status block is written to by the chip */
2466 if (bp->def_att_idx != def_sb->atten_status_block.attn_bits_index) {
2467 bp->def_att_idx = def_sb->atten_status_block.attn_bits_index;
2470 if (bp->def_c_idx != def_sb->c_def_status_block.status_block_index) {
2471 bp->def_c_idx = def_sb->c_def_status_block.status_block_index;
2474 if (bp->def_u_idx != def_sb->u_def_status_block.status_block_index) {
2475 bp->def_u_idx = def_sb->u_def_status_block.status_block_index;
2478 if (bp->def_x_idx != def_sb->x_def_status_block.status_block_index) {
2479 bp->def_x_idx = def_sb->x_def_status_block.status_block_index;
2482 if (bp->def_t_idx != def_sb->t_def_status_block.status_block_index) {
2483 bp->def_t_idx = def_sb->t_def_status_block.status_block_index;
2490 * slow path service functions
/* Handle newly asserted attention bits: mask them in the AEU, record
 * them in bp->attn_state, service the hard-wired attentions (NIG/link,
 * SW timer, GPIOs, general attentions 1-6), then acknowledge them to
 * the HC and restore the NIG interrupt mask. */
2493 static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
2495 int port = BP_PORT(bp);
2496 u32 hc_addr = (HC_REG_COMMAND_REG + port*32 +
2497 COMMAND_REG_ATTN_BITS_SET);
2498 u32 aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
2499 MISC_REG_AEU_MASK_ATTN_FUNC_0;
2500 u32 nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 :
2501 NIG_REG_MASK_INTERRUPT_PORT0;
/* a bit asserting while already recorded as asserted means the IGU
 * and driver disagree on attention state */
2504 if (bp->attn_state & asserted)
2505 BNX2X_ERR("IGU ERROR\n");
/* the AEU mask is shared with the MCP: modify it under the HW lock */
2507 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2508 aeu_mask = REG_RD(bp, aeu_addr);
2510 DP(NETIF_MSG_HW, "aeu_mask %x newly asserted %x\n",
2511 aeu_mask, asserted);
2512 aeu_mask &= ~(asserted & 0xff);
2513 DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
2515 REG_WR(bp, aeu_addr, aeu_mask);
2516 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2518 DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
2519 bp->attn_state |= asserted;
2520 DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
2522 if (asserted & ATTN_HARD_WIRED_MASK) {
2523 if (asserted & ATTN_NIG_FOR_FUNC) {
2525 bnx2x_acquire_phy_lock(bp);
/* mask NIG interrupts while the link attention is serviced;
 * restored at the bottom of this function */
2527 /* save nig interrupt mask */
2528 bp->nig_mask = REG_RD(bp, nig_int_mask_addr);
2529 REG_WR(bp, nig_int_mask_addr, 0);
2531 bnx2x_link_attn(bp);
2533 /* handle unicore attn? */
2535 if (asserted & ATTN_SW_TIMER_4_FUNC)
2536 DP(NETIF_MSG_HW, "ATTN_SW_TIMER_4_FUNC!\n");
2538 if (asserted & GPIO_2_FUNC)
2539 DP(NETIF_MSG_HW, "GPIO_2_FUNC!\n");
2541 if (asserted & GPIO_3_FUNC)
2542 DP(NETIF_MSG_HW, "GPIO_3_FUNC!\n");
2544 if (asserted & GPIO_4_FUNC)
2545 DP(NETIF_MSG_HW, "GPIO_4_FUNC!\n");
/* general attentions 1-6 are level signals: clear each at its
 * AEU source register after logging it */
2548 if (asserted & ATTN_GENERAL_ATTN_1) {
2549 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_1!\n");
2550 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_1, 0x0);
2552 if (asserted & ATTN_GENERAL_ATTN_2) {
2553 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_2!\n");
2554 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_2, 0x0);
2556 if (asserted & ATTN_GENERAL_ATTN_3) {
2557 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_3!\n");
2558 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_3, 0x0);
2561 if (asserted & ATTN_GENERAL_ATTN_4) {
2562 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_4!\n");
2563 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_4, 0x0);
2565 if (asserted & ATTN_GENERAL_ATTN_5) {
2566 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_5!\n");
2567 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_5, 0x0);
2569 if (asserted & ATTN_GENERAL_ATTN_6) {
2570 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_6!\n");
2571 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_6, 0x0);
2575 } /* if hardwired */
/* acknowledge the asserted bits to the host coalescing block */
2577 DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
2579 REG_WR(bp, hc_addr, asserted);
2581 /* now set back the mask */
2582 if (asserted & ATTN_NIG_FOR_FUNC) {
2583 REG_WR(bp, nig_int_mask_addr, bp->nig_mask);
2584 bnx2x_release_phy_lock(bp);
/* Service group-0 deasserted attentions: the SPIO5 fan-failure event
 * (on the two Dell board types: park the PHY via GPIOs, mark the ext
 * PHY config as FAILURE and log) and fatal HW-block attentions in
 * HW_INTERRUT_ASSERT_SET_0. */
2588 static inline void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
2590 int port = BP_PORT(bp);
2594 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
2595 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
2597 if (attn & AEU_INPUTS_ATTN_BITS_SPIO5) {
/* mask SPIO5 in the AEU enable register so it cannot re-fire */
2599 val = REG_RD(bp, reg_offset);
2600 val &= ~AEU_INPUTS_ATTN_BITS_SPIO5;
2601 REG_WR(bp, reg_offset, val);
2603 BNX2X_ERR("SPIO5 hw attention\n");
2605 switch (bp->common.board & SHARED_HW_CFG_BOARD_TYPE_MASK) {
2606 case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1021G:
2607 case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1022G:
2608 /* Fan failure attention */
2610 /* The PHY reset is controlled by GPIO 1 */
2611 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
2612 MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
2613 /* Low power mode is controlled by GPIO 2 */
2614 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
2615 MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
2616 /* mark the failure */
2617 bp->link_params.ext_phy_config &=
2618 ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
2619 bp->link_params.ext_phy_config |=
2620 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE;
2622 dev_info.port_hw_config[port].
2623 external_phy_config,
2624 bp->link_params.ext_phy_config);
2625 /* log the failure */
2626 printk(KERN_ERR PFX "Fan Failure on Network"
2627 " Controller %s has caused the driver to"
2628 " shutdown the card to prevent permanent"
2629 " damage. Please contact Dell Support for"
2630 " assistance\n", bp->dev->name);
2638 if (attn & HW_INTERRUT_ASSERT_SET_0) {
/* mask the fatal block attentions in the AEU, then report them */
2640 val = REG_RD(bp, reg_offset);
2641 val &= ~(attn & HW_INTERRUT_ASSERT_SET_0);
2642 REG_WR(bp, reg_offset, val);
2644 BNX2X_ERR("FATAL HW block attention set0 0x%x\n",
2645 (attn & HW_INTERRUT_ASSERT_SET_0));
/* Service group-1 deasserted attentions: the doorbell queue (DORQ)
 * discard attention and fatal HW-block attentions in
 * HW_INTERRUT_ASSERT_SET_1. */
2650 static inline void bnx2x_attn_int_deasserted1(struct bnx2x *bp, u32 attn)
2654 if (attn & BNX2X_DOORQ_ASSERT) {
/* reading the clear-on-read status register also acks the event */
2656 val = REG_RD(bp, DORQ_REG_DORQ_INT_STS_CLR);
2657 BNX2X_ERR("DB hw attention 0x%x\n", val);
2658 /* DORQ discard attention */
2660 BNX2X_ERR("FATAL error from DORQ\n");
2663 if (attn & HW_INTERRUT_ASSERT_SET_1) {
2665 int port = BP_PORT(bp);
2668 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 :
2669 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1);
/* mask the fatal block attentions in the AEU, then report them */
2671 val = REG_RD(bp, reg_offset);
2672 val &= ~(attn & HW_INTERRUT_ASSERT_SET_1);
2673 REG_WR(bp, reg_offset, val);
2675 BNX2X_ERR("FATAL HW block attention set1 0x%x\n",
2676 (attn & HW_INTERRUT_ASSERT_SET_1));
/* Service group-2 deasserted attentions: CFC and PXP hardware errors
 * (both acknowledged via their clear-on-read status registers) and
 * fatal HW-block attentions in HW_INTERRUT_ASSERT_SET_2. */
2681 static inline void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn)
2685 if (attn & AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) {
2687 val = REG_RD(bp, CFC_REG_CFC_INT_STS_CLR);
2688 BNX2X_ERR("CFC hw attention 0x%x\n", val);
2689 /* CFC error attention */
2691 BNX2X_ERR("FATAL error from CFC\n");
2694 if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) {
2696 val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_0);
2697 BNX2X_ERR("PXP hw attention 0x%x\n", val);
2698 /* RQ_USDMDP_FIFO_OVERFLOW */
2700 BNX2X_ERR("FATAL error from PXP\n");
2703 if (attn & HW_INTERRUT_ASSERT_SET_2) {
2705 int port = BP_PORT(bp);
2708 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 :
2709 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2);
/* mask the fatal block attentions in the AEU, then report them */
2711 val = REG_RD(bp, reg_offset);
2712 val &= ~(attn & HW_INTERRUT_ASSERT_SET_2);
2713 REG_WR(bp, reg_offset, val);
2715 BNX2X_ERR("FATAL HW block attention set2 0x%x\n",
2716 (attn & HW_INTERRUT_ASSERT_SET_2));
/* Service group-3 deasserted attentions: general attentions (PMF link
 * event, microcode assert, MCP assert) and latched attentions
 * (GRC timeout / GRC reserved), clearing each at its source. */
2721 static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
2725 if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) {
2727 if (attn & BNX2X_PMF_LINK_ASSERT) {
2728 int func = BP_FUNC(bp);
/* clear the per-function general attention, refresh link state,
 * and take over PMF duties if the MCP says so */
2730 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
2731 bnx2x__link_status_update(bp);
2732 if (SHMEM_RD(bp, func_mb[func].drv_status) &
2734 bnx2x_pmf_update(bp);
2736 } else if (attn & BNX2X_MC_ASSERT_BITS) {
/* storm microcode assert: clear all four storm attn sources */
2738 BNX2X_ERR("MC assert!\n");
2739 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_10, 0);
2740 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_9, 0);
2741 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_8, 0);
2742 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_7, 0);
2745 } else if (attn & BNX2X_MCP_ASSERT) {
2747 BNX2X_ERR("MCP assert!\n");
2748 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_11, 0);
2752 BNX2X_ERR("Unknown HW assert! (attn 0x%x)\n", attn);
2755 if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) {
2756 BNX2X_ERR("LATCHED attention 0x%08x (masked)\n", attn);
/* the GRC attention detail registers only exist on E1H */
2757 if (attn & BNX2X_GRC_TIMEOUT) {
2758 val = CHIP_IS_E1H(bp) ?
2759 REG_RD(bp, MISC_REG_GRC_TIMEOUT_ATTN) : 0;
2760 BNX2X_ERR("GRC time-out 0x%08x\n", val);
2762 if (attn & BNX2X_GRC_RSV) {
2763 val = CHIP_IS_E1H(bp) ?
2764 REG_RD(bp, MISC_REG_GRC_RSV_ATTN) : 0;
2765 BNX2X_ERR("GRC reserved 0x%08x\n", val);
/* release all latched attention signals */
2767 REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff);
/* Handle deasserted attention bits: under the split MCP lock read the
 * four after-invert attention signal registers, dispatch each
 * deasserted group to the per-group handlers, then acknowledge the
 * bits to the HC, unmask them in the AEU and update bp->attn_state. */
2771 static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
2773 struct attn_route attn;
2774 struct attn_route group_mask;
2775 int port = BP_PORT(bp);
2781 /* need to take HW lock because MCP or other port might also
2782 try to handle this event */
2783 bnx2x_acquire_alr(bp);
2785 attn.sig[0] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
2786 attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
2787 attn.sig[2] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
2788 attn.sig[3] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
2789 DP(NETIF_MSG_HW, "attn: %08x %08x %08x %08x\n",
2790 attn.sig[0], attn.sig[1], attn.sig[2], attn.sig[3]);
2792 for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
2793 if (deasserted & (1 << index)) {
/* only the signals routed to this group are serviced here */
2794 group_mask = bp->attn_group[index];
2796 DP(NETIF_MSG_HW, "group[%d]: %08x %08x %08x %08x\n",
2797 index, group_mask.sig[0], group_mask.sig[1],
2798 group_mask.sig[2], group_mask.sig[3]);
/* NOTE(review): handlers run in order 3,1,2,0 -- presumably
 * deliberate ordering; confirm before reordering */
2800 bnx2x_attn_int_deasserted3(bp,
2801 attn.sig[3] & group_mask.sig[3]);
2802 bnx2x_attn_int_deasserted1(bp,
2803 attn.sig[1] & group_mask.sig[1]);
2804 bnx2x_attn_int_deasserted2(bp,
2805 attn.sig[2] & group_mask.sig[2]);
2806 bnx2x_attn_int_deasserted0(bp,
2807 attn.sig[0] & group_mask.sig[0]);
2809 if ((attn.sig[0] & group_mask.sig[0] &
2810 HW_PRTY_ASSERT_SET_0) ||
2811 (attn.sig[1] & group_mask.sig[1] &
2812 HW_PRTY_ASSERT_SET_1) ||
2813 (attn.sig[2] & group_mask.sig[2] &
2814 HW_PRTY_ASSERT_SET_2))
2815 BNX2X_ERR("FATAL HW block parity attention\n");
2819 bnx2x_release_alr(bp);
/* acknowledge the deasserted bits to the host coalescing block */
2821 reg_addr = (HC_REG_COMMAND_REG + port*32 + COMMAND_REG_ATTN_BITS_CLR);
2824 DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
2826 REG_WR(bp, reg_addr, val);
/* a bit deasserting that we never recorded as asserted means the
 * IGU and driver disagree on attention state */
2828 if (~bp->attn_state & deasserted)
2829 BNX2X_ERR("IGU ERROR\n");
2831 reg_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
2832 MISC_REG_AEU_MASK_ATTN_FUNC_0;
/* the AEU mask is shared with the MCP: modify it under the HW lock */
2834 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2835 aeu_mask = REG_RD(bp, reg_addr);
2837 DP(NETIF_MSG_HW, "aeu_mask %x newly deasserted %x\n",
2838 aeu_mask, deasserted);
2839 aeu_mask |= (deasserted & 0xff);
2840 DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
2842 REG_WR(bp, reg_addr, aeu_mask);
2843 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2845 DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
2846 bp->attn_state &= ~deasserted;
2847 DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
/* Top-level attention dispatcher: compare the chip's attention bits
 * and ack bits against the cached attn_state to find newly asserted
 * and newly deasserted bits, then route each set to its handler. */
2850 static void bnx2x_attn_int(struct bnx2x *bp)
2852 /* read local copy of bits */
2853 u32 attn_bits = le32_to_cpu(bp->def_status_blk->atten_status_block.
2855 u32 attn_ack = le32_to_cpu(bp->def_status_blk->atten_status_block.
2857 u32 attn_state = bp->attn_state;
2859 /* look for changed bits */
/* asserted: raised in HW, not yet acked, not yet in our state;
 * deasserted: dropped in HW, still acked and still in our state */
2860 u32 asserted = attn_bits & ~attn_ack & ~attn_state;
2861 u32 deasserted = ~attn_bits & attn_ack & attn_state;
2864 "attn_bits %x attn_ack %x asserted %x deasserted %x\n",
2865 attn_bits, attn_ack, asserted, deasserted);
/* bits where HW and ack agree must also agree with our state */
2867 if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state))
2868 BNX2X_ERR("BAD attention state\n");
2870 /* handle bits that were raised */
2872 bnx2x_attn_int_asserted(bp, asserted);
2875 bnx2x_attn_int_deasserted(bp, deasserted);
/* Slow-path work item (scheduled from bnx2x_msix_sp_int): refresh the
 * default status block indices, service attentions/CStorm events, and
 * re-acknowledge every storm index back to the IGU with interrupts
 * re-enabled on the final ack. */
2878 static void bnx2x_sp_task(struct work_struct *work)
2880 struct bnx2x *bp = container_of(work, struct bnx2x, sp_task.work);
2884 /* Return here if interrupt is disabled */
2885 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
2886 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
2890 status = bnx2x_update_dsb_idx(bp);
2891 /* if (status == 0) */
2892 /* BNX2X_ERR("spurious slowpath interrupt!\n"); */
2894 DP(NETIF_MSG_INTR, "got a slowpath interrupt (updated %x)\n", status);
2900 /* CStorm events: query_stats, port delete ramrod */
2902 bp->stats_pending = 0;
/* ack each storm's index; lines between these acks (IGU enable
 * flags) are outside this excerpt */
2904 bnx2x_ack_sb(bp, DEF_SB_ID, ATTENTION_ID, le16_to_cpu(bp->def_att_idx),
2906 bnx2x_ack_sb(bp, DEF_SB_ID, USTORM_ID, le16_to_cpu(bp->def_u_idx),
2908 bnx2x_ack_sb(bp, DEF_SB_ID, CSTORM_ID, le16_to_cpu(bp->def_c_idx),
2910 bnx2x_ack_sb(bp, DEF_SB_ID, XSTORM_ID, le16_to_cpu(bp->def_x_idx),
2912 bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, le16_to_cpu(bp->def_t_idx),
/* MSI-X slow-path interrupt handler: disable further IGU interrupts
 * for the default status block and defer the real work to
 * bnx2x_sp_task on the bnx2x workqueue. */
2917 static irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
2919 struct net_device *dev = dev_instance;
2920 struct bnx2x *bp = netdev_priv(dev);
2922 /* Return here if interrupt is disabled */
2923 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
2924 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
/* mask the SB interrupt until the work item re-enables it */
2928 bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, 0, IGU_INT_DISABLE, 0);
2930 #ifdef BNX2X_STOP_ON_ERROR
2931 if (unlikely(bp->panic))
2935 queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
2940 /* end of slow path */
2944 /****************************************************************************
2946 ****************************************************************************/
2948 /* sum[hi:lo] += add[hi:lo] */
/* 64-bit statistics arithmetic helpers.  The hardware exposes many
 * counters as hi/lo 32-bit pairs; these macros add, subtract and
 * delta-accumulate such pairs with manual carry/borrow handling.
 * (Continuation/brace lines of the multi-line macros are not all
 * visible in this excerpt.) */
2948 /* sum[hi:lo] += add[hi:lo] */
2949 #define ADD_64(s_hi, a_hi, s_lo, a_lo) \
2952 s_hi += a_hi + ((s_lo < a_lo) ? 1 : 0); \
2955 /* difference = minuend - subtrahend */
2956 #define DIFF_64(d_hi, m_hi, s_hi, d_lo, m_lo, s_lo) \
2958 if (m_lo < s_lo) { \
2960 d_hi = m_hi - s_hi; \
2962 /* we can 'loan' 1 */ \
2964 d_lo = m_lo + (UINT_MAX - s_lo) + 1; \
2966 /* m_hi <= s_hi */ \
2971 /* m_lo >= s_lo */ \
2972 if (m_hi < s_hi) { \
2976 /* m_hi >= s_hi */ \
2977 d_hi = m_hi - s_hi; \
2978 d_lo = m_lo - s_lo; \
/* mac_stx[0] holds the last raw MAC reading, mac_stx[1] the
 * accumulated total; the delta between readings is added on */
2983 #define UPDATE_STAT64(s, t) \
2985 DIFF_64(diff.hi, new->s##_hi, pstats->mac_stx[0].t##_hi, \
2986 diff.lo, new->s##_lo, pstats->mac_stx[0].t##_lo); \
2987 pstats->mac_stx[0].t##_hi = new->s##_hi; \
2988 pstats->mac_stx[0].t##_lo = new->s##_lo; \
2989 ADD_64(pstats->mac_stx[1].t##_hi, diff.hi, \
2990 pstats->mac_stx[1].t##_lo, diff.lo); \
2993 #define UPDATE_STAT64_NIG(s, t) \
2995 DIFF_64(diff.hi, new->s##_hi, old->s##_hi, \
2996 diff.lo, new->s##_lo, old->s##_lo); \
2997 ADD_64(estats->t##_hi, diff.hi, \
2998 estats->t##_lo, diff.lo); \
3001 /* sum[hi:lo] += add */
3002 #define ADD_EXTEND_64(s_hi, s_lo, a) \
3005 s_hi += (s_lo < a) ? 1 : 0; \
3008 #define UPDATE_EXTEND_STAT(s) \
3010 ADD_EXTEND_64(pstats->mac_stx[1].s##_hi, \
3011 pstats->mac_stx[1].s##_lo, \
/* extend a 32-bit storm counter into a 64-bit total: diff since the
 * last snapshot is computed, the snapshot refreshed, then added */
3015 #define UPDATE_EXTEND_TSTAT(s, t) \
3017 diff = le32_to_cpu(tclient->s) - old_tclient->s; \
3018 old_tclient->s = le32_to_cpu(tclient->s); \
3019 ADD_EXTEND_64(fstats->t##_hi, fstats->t##_lo, diff); \
3022 #define UPDATE_EXTEND_XSTAT(s, t) \
3024 diff = le32_to_cpu(xclient->s) - old_xclient->s; \
3025 old_xclient->s = le32_to_cpu(xclient->s); \
3026 ADD_EXTEND_64(fstats->t##_hi, fstats->t##_lo, diff); \
3030 * General service functions
/* Combine a hi/lo u32 counter pair (hi at *hiref, lo at hiref+1) into
 * a long; the 64-bit path uses HILO_U64, the 32-bit path (lines not
 * shown here) presumably returns only the low word. */
3033 static inline long bnx2x_hilo(u32 *hiref)
3035 u32 lo = *(hiref + 1);
3036 #if (BITS_PER_LONG == 64)
3039 return HILO_U64(hi, lo);
3046 * Init service functions
/* Post a STAT_QUERY ramrod asking the storms to dump statistics for
 * this client; skipped while a previous query is still pending. */
3049 static void bnx2x_storm_stats_post(struct bnx2x *bp)
3051 if (!bp->stats_pending) {
3052 struct eth_query_ramrod_data ramrod_data = {0};
/* the counter lets the storms tag which query a dump answers */
3055 ramrod_data.drv_counter = bp->stats_counter++;
3056 ramrod_data.collect_port = bp->port.pmf ? 1 : 0;
3057 ramrod_data.ctr_id_vector = (1 << BP_CL_ID(bp));
3059 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_STAT_QUERY, 0,
3060 ((u32 *)&ramrod_data)[1],
3061 ((u32 *)&ramrod_data)[0], 0);
3063 /* stats ramrod has it's own slot on the spq */
3065 bp->stats_pending = 1;
/* One-time statistics initialization: read the shared-memory port
 * stats address, snapshot the NIG baseline counters, zero all cached
 * function stats, and kick a PMF event when this function is the PMF. */
3072 static void bnx2x_stats_init(struct bnx2x *bp)
3074 int port = BP_PORT(bp);
3074 bp->executer_idx = 0;
3075 bp->stats_counter = 0;
3079 bp->port.port_stx = SHMEM_RD(bp, port_mb[port].port_stx);
3081 bp->port.port_stx = 0;
3082 DP(BNX2X_MSG_STATS, "port_stx 0x%x\n", bp->port.port_stx);
/* baseline NIG counters so later updates can compute deltas */
3084 memset(&(bp->port.old_nig_stats), 0, sizeof(struct nig_stats));
3085 bp->port.old_nig_stats.brb_discard =
3086 REG_RD(bp, NIG_REG_STAT0_BRB_DISCARD + port*0x38);
3087 bp->port.old_nig_stats.brb_truncate =
3088 REG_RD(bp, NIG_REG_STAT0_BRB_TRUNCATE + port*0x38);
3089 REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT0 + port*0x50,
3090 &(bp->port.old_nig_stats.egress_mac_pkt0_lo), 2);
3091 REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT1 + port*0x50,
3092 &(bp->port.old_nig_stats.egress_mac_pkt1_lo), 2);
3094 /* function stats */
3095 memset(&bp->dev->stats, 0, sizeof(struct net_device_stats));
3096 memset(&bp->old_tclient, 0, sizeof(struct tstorm_per_client_stats));
3097 memset(&bp->old_xclient, 0, sizeof(struct xstorm_per_client_stats));
3098 memset(&bp->eth_stats, 0, sizeof(struct bnx2x_eth_stats));
3100 bp->stats_state = STATS_STATE_DISABLED;
3101 if (IS_E1HMF(bp) && bp->port.pmf && bp->port.port_stx)
3102 bnx2x_stats_handle(bp, STATS_EVENT_PMF);
/* Fire the previously-built chain of statistics DMAE commands.  When
 * commands were queued (executer_idx != 0) a loader command is built
 * that DMAs the chain into the DMAE command memory and chains the
 * completions; otherwise, with only function stats, the single
 * stats_dmae command is posted directly. */
3107 static void bnx2x_hw_stats_post(struct bnx2x *bp)
3109 struct dmae_command *dmae = &bp->stats_dmae;
3108 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
/* sentinel: completion handler overwrites this when DMAE finishes */
3110 *stats_comp = DMAE_COMP_VAL;
3113 if (bp->executer_idx) {
3114 int loader_idx = PMF_DMAE_C(bp);
3116 memset(dmae, 0, sizeof(struct dmae_command));
3118 dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3119 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3120 DMAE_CMD_DST_RESET |
3122 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3124 DMAE_CMD_ENDIANITY_DW_SWAP |
3126 (BP_PORT(bp) ? DMAE_CMD_PORT_1 :
3128 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
/* source: the queued command array in host memory */
3129 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, dmae[0]));
3130 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, dmae[0]));
/* destination: DMAE command memory, one slot past the loader */
3131 dmae->dst_addr_lo = (DMAE_REG_CMD_MEM +
3132 sizeof(struct dmae_command) *
3133 (loader_idx + 1)) >> 2;
3134 dmae->dst_addr_hi = 0;
3135 dmae->len = sizeof(struct dmae_command) >> 2;
/* completion of the loader triggers the copied command chain */
3138 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx + 1] >> 2;
3139 dmae->comp_addr_hi = 0;
3143 bnx2x_post_dmae(bp, dmae, loader_idx);
3145 } else if (bp->func_stx) {
3147 bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
/* Busy-wait for the statistics DMAE completion word to change from
 * DMAE_COMP_VAL; logs an error on timeout (the countdown/delay lines
 * are outside this excerpt). */
3151 static int bnx2x_stats_comp(struct bnx2x *bp)
3153 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3157 while (*stats_comp != DMAE_COMP_VAL) {
3159 BNX2X_ERR("timeout waiting for stats finished\n");
3169 * Statistics service functions
/* On becoming PMF: DMA the accumulated port statistics from shared
 * memory (port_stx) into the local port_stats buffer, split into two
 * DMAE commands because of the DMAE_LEN32_RD_MAX length limit, then
 * post and wait for completion. */
3172 static void bnx2x_stats_pmf_update(struct bnx2x *bp)
3174 struct dmae_command *dmae;
3176 int loader_idx = PMF_DMAE_C(bp);
3177 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
/* only meaningful for the PMF in multi-function mode */
3180 if (!IS_E1HMF(bp) || !bp->port.pmf || !bp->port.port_stx) {
3181 BNX2X_ERR("BUG!\n");
3185 bp->executer_idx = 0;
3187 opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3189 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3191 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3193 DMAE_CMD_ENDIANITY_DW_SWAP |
3195 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3196 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
/* first chunk: up to DMAE_LEN32_RD_MAX dwords, GRC completion */
3198 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3199 dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
3200 dmae->src_addr_lo = bp->port.port_stx >> 2;
3201 dmae->src_addr_hi = 0;
3202 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3203 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3204 dmae->len = DMAE_LEN32_RD_MAX;
3205 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3206 dmae->comp_addr_hi = 0;
/* second chunk: the remainder, completing to the stats_comp word */
3209 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3210 dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
3211 dmae->src_addr_lo = (bp->port.port_stx >> 2) + DMAE_LEN32_RD_MAX;
3212 dmae->src_addr_hi = 0;
3213 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats) +
3214 DMAE_LEN32_RD_MAX * 4);
3215 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats) +
3216 DMAE_LEN32_RD_MAX * 4);
3217 dmae->len = (sizeof(struct host_port_stats) >> 2) - DMAE_LEN32_RD_MAX;
3218 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3219 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3220 dmae->comp_val = DMAE_COMP_VAL;
3223 bnx2x_hw_stats_post(bp);
3224 bnx2x_stats_comp(bp);
/* Build the full DMAE command chain used for each statistics cycle on
 * the PMF: write host port/function stats out to shared memory, then
 * read the MAC (BMAC or EMAC) and NIG hardware counters into the
 * host buffers.  The final command completes to the stats_comp word;
 * all earlier ones chain via the DMAE "go" registers. */
3227 static void bnx2x_port_stats_init(struct bnx2x *bp)
3229 struct dmae_command *dmae;
3230 int port = BP_PORT(bp);
3231 int vn = BP_E1HVN(bp);
3233 int loader_idx = PMF_DMAE_C(bp);
3235 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
/* only valid for the PMF with link up */
3238 if (!bp->link_vars.link_up || !bp->port.pmf) {
3239 BNX2X_ERR("BUG!\n");
3243 bp->executer_idx = 0;
/* opcode for the host -> chip (PCI -> GRC) write commands */
3246 opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3247 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3248 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3250 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3252 DMAE_CMD_ENDIANITY_DW_SWAP |
3254 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3255 (vn << DMAE_CMD_E1HVN_SHIFT));
/* publish host port stats to shared memory, if an area exists */
3257 if (bp->port.port_stx) {
3259 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3260 dmae->opcode = opcode;
3261 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3262 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3263 dmae->dst_addr_lo = bp->port.port_stx >> 2;
3264 dmae->dst_addr_hi = 0;
3265 dmae->len = sizeof(struct host_port_stats) >> 2;
3266 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3267 dmae->comp_addr_hi = 0;
/* publish host function stats (guard condition not visible here) */
3273 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3274 dmae->opcode = opcode;
3275 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
3276 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
3277 dmae->dst_addr_lo = bp->func_stx >> 2;
3278 dmae->dst_addr_hi = 0;
3279 dmae->len = sizeof(struct host_func_stats) >> 2;
3280 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3281 dmae->comp_addr_hi = 0;
/* opcode for the chip -> host (GRC -> PCI) counter read commands */
3286 opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3287 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3288 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3290 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3292 DMAE_CMD_ENDIANITY_DW_SWAP |
3294 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3295 (vn << DMAE_CMD_E1HVN_SHIFT));
3297 if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
3299 mac_addr = (port ? NIG_REG_INGRESS_BMAC1_MEM :
3300 NIG_REG_INGRESS_BMAC0_MEM);
3302 /* BIGMAC_REGISTER_TX_STAT_GTPKT ..
3303 BIGMAC_REGISTER_TX_STAT_GTBYT */
3304 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3305 dmae->opcode = opcode;
3306 dmae->src_addr_lo = (mac_addr +
3307 BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
3308 dmae->src_addr_hi = 0;
3309 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
3310 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
3311 dmae->len = (8 + BIGMAC_REGISTER_TX_STAT_GTBYT -
3312 BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
3313 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3314 dmae->comp_addr_hi = 0;
3317 /* BIGMAC_REGISTER_RX_STAT_GR64 ..
3318 BIGMAC_REGISTER_RX_STAT_GRIPJ */
3319 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3320 dmae->opcode = opcode;
3321 dmae->src_addr_lo = (mac_addr +
3322 BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
3323 dmae->src_addr_hi = 0;
3324 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
3325 offsetof(struct bmac_stats, rx_stat_gr64_lo));
3326 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
3327 offsetof(struct bmac_stats, rx_stat_gr64_lo));
3328 dmae->len = (8 + BIGMAC_REGISTER_RX_STAT_GRIPJ -
3329 BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
3330 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3331 dmae->comp_addr_hi = 0;
3334 } else if (bp->link_vars.mac_type == MAC_TYPE_EMAC) {
3336 mac_addr = (port ? GRCBASE_EMAC1 : GRCBASE_EMAC0);
3338 /* EMAC_REG_EMAC_RX_STAT_AC (EMAC_REG_EMAC_RX_STAT_AC_COUNT)*/
3339 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3340 dmae->opcode = opcode;
3341 dmae->src_addr_lo = (mac_addr +
3342 EMAC_REG_EMAC_RX_STAT_AC) >> 2;
3343 dmae->src_addr_hi = 0;
3344 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
3345 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
3346 dmae->len = EMAC_REG_EMAC_RX_STAT_AC_COUNT;
3347 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3348 dmae->comp_addr_hi = 0;
3351 /* EMAC_REG_EMAC_RX_STAT_AC_28 */
3352 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3353 dmae->opcode = opcode;
3354 dmae->src_addr_lo = (mac_addr +
3355 EMAC_REG_EMAC_RX_STAT_AC_28) >> 2;
3356 dmae->src_addr_hi = 0;
3357 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
3358 offsetof(struct emac_stats, rx_stat_falsecarriererrors));
3359 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
3360 offsetof(struct emac_stats, rx_stat_falsecarriererrors));
3362 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3363 dmae->comp_addr_hi = 0;
3366 /* EMAC_REG_EMAC_TX_STAT_AC (EMAC_REG_EMAC_TX_STAT_AC_COUNT)*/
3367 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3368 dmae->opcode = opcode;
3369 dmae->src_addr_lo = (mac_addr +
3370 EMAC_REG_EMAC_TX_STAT_AC) >> 2;
3371 dmae->src_addr_hi = 0;
3372 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
3373 offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
3374 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
3375 offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
3376 dmae->len = EMAC_REG_EMAC_TX_STAT_AC_COUNT;
3377 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3378 dmae->comp_addr_hi = 0;
/* NIG counters: everything except the two egress_mac_pkt pairs */
3383 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3384 dmae->opcode = opcode;
3385 dmae->src_addr_lo = (port ? NIG_REG_STAT1_BRB_DISCARD :
3386 NIG_REG_STAT0_BRB_DISCARD) >> 2;
3387 dmae->src_addr_hi = 0;
3388 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats));
3389 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats));
3390 dmae->len = (sizeof(struct nig_stats) - 4*sizeof(u32)) >> 2;
3391 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3392 dmae->comp_addr_hi = 0;
3395 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3396 dmae->opcode = opcode;
3397 dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT0 :
3398 NIG_REG_STAT0_EGRESS_MAC_PKT0) >> 2;
3399 dmae->src_addr_hi = 0;
3400 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
3401 offsetof(struct nig_stats, egress_mac_pkt0_lo));
3402 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
3403 offsetof(struct nig_stats, egress_mac_pkt0_lo));
3404 dmae->len = (2*sizeof(u32)) >> 2;
3405 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3406 dmae->comp_addr_hi = 0;
/* last command in the chain: completes to stats_comp (C_DST_PCI)
 * instead of chaining to another "go" register */
3409 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3410 dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3411 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
3412 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3414 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3416 DMAE_CMD_ENDIANITY_DW_SWAP |
3418 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3419 (vn << DMAE_CMD_E1HVN_SHIFT));
3420 dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT1 :
3421 NIG_REG_STAT0_EGRESS_MAC_PKT1) >> 2;
3422 dmae->src_addr_hi = 0;
3423 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
3424 offsetof(struct nig_stats, egress_mac_pkt1_lo));
3425 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
3426 offsetof(struct nig_stats, egress_mac_pkt1_lo));
3427 dmae->len = (2*sizeof(u32)) >> 2;
3428 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3429 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3430 dmae->comp_val = DMAE_COMP_VAL;
/* Build the single DMAE command that writes host function statistics
 * to the shared-memory area at func_stx; used on non-PMF functions
 * instead of the full port chain. */
3437 static void bnx2x_func_stats_init(struct bnx2x *bp)
3439 struct dmae_command *dmae = &bp->stats_dmae;
3438 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
/* no function stats area in shared memory: nothing to set up */
3441 if (!bp->func_stx) {
3442 BNX2X_ERR("BUG!\n");
3446 bp->executer_idx = 0;
3447 memset(dmae, 0, sizeof(struct dmae_command));
3449 dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3450 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
3451 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3453 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3455 DMAE_CMD_ENDIANITY_DW_SWAP |
3457 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3458 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3459 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
3460 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
3461 dmae->dst_addr_lo = bp->func_stx >> 2;
3462 dmae->dst_addr_hi = 0;
3463 dmae->len = sizeof(struct host_func_stats) >> 2;
3464 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3465 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3466 dmae->comp_val = DMAE_COMP_VAL;
/* Start a statistics cycle: build the DMAE chain (full port chain on
 * the PMF, function-only otherwise), post it, and request a fresh
 * storm statistics dump. */
3471 static void bnx2x_stats_start(struct bnx2x *bp)
3474 bnx2x_port_stats_init(bp);
3476 else if (bp->func_stx)
3477 bnx2x_func_stats_init(bp);
3479 bnx2x_hw_stats_post(bp);
3480 bnx2x_storm_stats_post(bp);
/* PMF handover: wait for any in-flight stats DMAE, pull the
 * accumulated port stats from shared memory, then start a new cycle. */
3483 static void bnx2x_stats_pmf_start(struct bnx2x *bp)
3485 bnx2x_stats_comp(bp);
3486 bnx2x_stats_pmf_update(bp);
3487 bnx2x_stats_start(bp);
/* Restart statistics collection: wait for any in-flight stats DMAE,
 * then start a new cycle. */
3490 static void bnx2x_stats_restart(struct bnx2x *bp)
3492 bnx2x_stats_comp(bp);
3493 bnx2x_stats_start(bp);
/* Fold the freshly DMAed BMAC hardware counters into the host port
 * statistics: UPDATE_STAT64 computes each counter's delta since the
 * previous reading and accumulates it into pstats->mac_stx[1]. */
3496 static void bnx2x_bmac_stats_update(struct bnx2x *bp)
3498 struct bmac_stats *new = bnx2x_sp(bp, mac_stats.bmac_stats);
3499 struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3500 struct regpair diff;
3502 UPDATE_STAT64(rx_stat_grerb, rx_stat_ifhcinbadoctets);
3503 UPDATE_STAT64(rx_stat_grfcs, rx_stat_dot3statsfcserrors);
3504 UPDATE_STAT64(rx_stat_grund, rx_stat_etherstatsundersizepkts);
3505 UPDATE_STAT64(rx_stat_grovr, rx_stat_dot3statsframestoolong);
3506 UPDATE_STAT64(rx_stat_grfrg, rx_stat_etherstatsfragments);
3507 UPDATE_STAT64(rx_stat_grjbr, rx_stat_etherstatsjabbers);
3508 UPDATE_STAT64(rx_stat_grxcf, rx_stat_maccontrolframesreceived);
/* grxpf feeds two host counters: xoff entries and xoff frames */
3509 UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffstateentered);
3510 UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffpauseframesreceived);
3511 UPDATE_STAT64(tx_stat_gtxpf, tx_stat_outxoffsent);
3512 UPDATE_STAT64(tx_stat_gtxpf, tx_stat_flowcontroldone);
3513 UPDATE_STAT64(tx_stat_gt64, tx_stat_etherstatspkts64octets);
3514 UPDATE_STAT64(tx_stat_gt127,
3515 tx_stat_etherstatspkts65octetsto127octets);
3516 UPDATE_STAT64(tx_stat_gt255,
3517 tx_stat_etherstatspkts128octetsto255octets);
3518 UPDATE_STAT64(tx_stat_gt511,
3519 tx_stat_etherstatspkts256octetsto511octets);
3520 UPDATE_STAT64(tx_stat_gt1023,
3521 tx_stat_etherstatspkts512octetsto1023octets);
3522 UPDATE_STAT64(tx_stat_gt1518,
3523 tx_stat_etherstatspkts1024octetsto1522octets);
3524 UPDATE_STAT64(tx_stat_gt2047, tx_stat_bmac_2047);
3525 UPDATE_STAT64(tx_stat_gt4095, tx_stat_bmac_4095);
3526 UPDATE_STAT64(tx_stat_gt9216, tx_stat_bmac_9216);
3527 UPDATE_STAT64(tx_stat_gt16383, tx_stat_bmac_16383);
3528 UPDATE_STAT64(tx_stat_gterr,
3529 tx_stat_dot3statsinternalmactransmiterrors);
3530 UPDATE_STAT64(tx_stat_gtufl, tx_stat_bmac_ufl);
/* Fold the freshly DMAed EMAC hardware counters into the host port
 * statistics: UPDATE_EXTEND_STAT extends each 32-bit EMAC counter
 * into the 64-bit accumulator in pstats->mac_stx[1]. */
3533 static void bnx2x_emac_stats_update(struct bnx2x *bp)
3535 struct emac_stats *new = bnx2x_sp(bp, mac_stats.emac_stats);
3536 struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3538 UPDATE_EXTEND_STAT(rx_stat_ifhcinbadoctets);
3539 UPDATE_EXTEND_STAT(tx_stat_ifhcoutbadoctets);
3540 UPDATE_EXTEND_STAT(rx_stat_dot3statsfcserrors);
3541 UPDATE_EXTEND_STAT(rx_stat_dot3statsalignmenterrors);
3542 UPDATE_EXTEND_STAT(rx_stat_dot3statscarriersenseerrors);
3543 UPDATE_EXTEND_STAT(rx_stat_falsecarriererrors);
3544 UPDATE_EXTEND_STAT(rx_stat_etherstatsundersizepkts);
3545 UPDATE_EXTEND_STAT(rx_stat_dot3statsframestoolong);
3546 UPDATE_EXTEND_STAT(rx_stat_etherstatsfragments);
3547 UPDATE_EXTEND_STAT(rx_stat_etherstatsjabbers);
3548 UPDATE_EXTEND_STAT(rx_stat_maccontrolframesreceived);
3549 UPDATE_EXTEND_STAT(rx_stat_xoffstateentered);
3550 UPDATE_EXTEND_STAT(rx_stat_xonpauseframesreceived);
3551 UPDATE_EXTEND_STAT(rx_stat_xoffpauseframesreceived);
3552 UPDATE_EXTEND_STAT(tx_stat_outxonsent);
3553 UPDATE_EXTEND_STAT(tx_stat_outxoffsent);
3554 UPDATE_EXTEND_STAT(tx_stat_flowcontroldone);
3555 UPDATE_EXTEND_STAT(tx_stat_etherstatscollisions);
3556 UPDATE_EXTEND_STAT(tx_stat_dot3statssinglecollisionframes);
3557 UPDATE_EXTEND_STAT(tx_stat_dot3statsmultiplecollisionframes);
3558 UPDATE_EXTEND_STAT(tx_stat_dot3statsdeferredtransmissions);
3559 UPDATE_EXTEND_STAT(tx_stat_dot3statsexcessivecollisions);
3560 UPDATE_EXTEND_STAT(tx_stat_dot3statslatecollisions);
3561 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts64octets);
3562 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts65octetsto127octets);
3563 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts128octetsto255octets);
3564 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts256octetsto511octets);
3565 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts512octetsto1023octets);
3566 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts1024octetsto1522octets);
3567 UPDATE_EXTEND_STAT(tx_stat_etherstatspktsover1522octets);
3568 UPDATE_EXTEND_STAT(tx_stat_dot3statsinternalmactransmiterrors);
/* Consume the DMAed hardware counters: dispatch to the active MAC's
 * update routine, fold in the NIG BRB discard/truncate and egress
 * packet deltas, snapshot the NIG state for the next round, and copy
 * the accumulated MAC stats into the driver's eth_stats. */
3571 static int bnx2x_hw_stats_update(struct bnx2x *bp)
3573 struct nig_stats *new = bnx2x_sp(bp, nig_stats);
3574 struct nig_stats *old = &(bp->port.old_nig_stats);
3575 struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3576 struct bnx2x_eth_stats *estats = &bp->eth_stats;
3577 struct regpair diff;
3579 if (bp->link_vars.mac_type == MAC_TYPE_BMAC)
3580 bnx2x_bmac_stats_update(bp);
3582 else if (bp->link_vars.mac_type == MAC_TYPE_EMAC)
3583 bnx2x_emac_stats_update(bp);
3585 else { /* unreached */
3586 BNX2X_ERR("stats updated by dmae but no MAC active\n");
/* accumulate the NIG deltas since the last snapshot */
3590 ADD_EXTEND_64(pstats->brb_drop_hi, pstats->brb_drop_lo,
3591 new->brb_discard - old->brb_discard);
3592 ADD_EXTEND_64(estats->brb_truncate_hi, estats->brb_truncate_lo,
3593 new->brb_truncate - old->brb_truncate);
3595 UPDATE_STAT64_NIG(egress_mac_pkt0,
3596 etherstatspkts1024octetsto1522octets);
3597 UPDATE_STAT64_NIG(egress_mac_pkt1, etherstatspktsover1522octets);
/* remember this reading as the baseline for the next update */
3599 memcpy(old, new, sizeof(struct nig_stats));
3601 memcpy(&(estats->rx_stat_ifhcinbadoctets_hi), &(pstats->mac_stx[1]),
3602 sizeof(struct mac_stx));
3603 estats->brb_drop_hi = pstats->brb_drop_hi;
3604 estats->brb_drop_lo = pstats->brb_drop_lo;
/* matching start/end marks the shared-memory snapshot as consistent */
3606 pstats->host_port_stats_start = ++pstats->host_port_stats_end;
/*
 * bnx2x_storm_stats_update - merge the firmware (TSTORM/XSTORM) per-client
 * statistics into the host function stats and eth_stats, after checking
 * that the storm counters match the driver's expected stats_counter.
 *
 * NOTE(review): elided listing — early-return/error paths between the
 * visible lines are not shown here.
 */
3611 static int bnx2x_storm_stats_update(struct bnx2x *bp)
3613 struct eth_stats_query *stats = bnx2x_sp(bp, fw_stats);
3614 int cl_id = BP_CL_ID(bp);
3615 struct tstorm_per_port_stats *tport =
3616 &stats->tstorm_common.port_statistics;
3617 struct tstorm_per_client_stats *tclient =
3618 &stats->tstorm_common.client_statistics[cl_id];
3619 struct tstorm_per_client_stats *old_tclient = &bp->old_tclient;
3620 struct xstorm_per_client_stats *xclient =
3621 &stats->xstorm_common.client_statistics[cl_id];
3622 struct xstorm_per_client_stats *old_xclient = &bp->old_xclient;
3623 struct host_func_stats *fstats = bnx2x_sp(bp, func_stats);
3624 struct bnx2x_eth_stats *estats = &bp->eth_stats;
/* The storms post counter+1 relative to the driver's stats_counter;
 * a mismatch means the snapshot is stale. */
3627 /* are storm stats valid? */
3628 if ((u16)(le16_to_cpu(tclient->stats_counter) + 1) !=
3629 bp->stats_counter) {
3630 DP(BNX2X_MSG_STATS, "stats not updated by tstorm"
3631 " tstorm counter (%d) != stats_counter (%d)\n",
3632 tclient->stats_counter, bp->stats_counter);
3635 if ((u16)(le16_to_cpu(xclient->stats_counter) + 1) !=
3636 bp->stats_counter) {
3637 DP(BNX2X_MSG_STATS, "stats not updated by xstorm"
3638 " xstorm counter (%d) != stats_counter (%d)\n",
3639 xclient->stats_counter, bp->stats_counter);
/* Rx byte counters: valid bytes first, then fold in error bytes
 * (including bad octets seen by the MAC) to get the total. */
3643 fstats->total_bytes_received_hi =
3644 fstats->valid_bytes_received_hi =
3645 le32_to_cpu(tclient->total_rcv_bytes.hi);
3646 fstats->total_bytes_received_lo =
3647 fstats->valid_bytes_received_lo =
3648 le32_to_cpu(tclient->total_rcv_bytes.lo);
3650 estats->error_bytes_received_hi =
3651 le32_to_cpu(tclient->rcv_error_bytes.hi);
3652 estats->error_bytes_received_lo =
3653 le32_to_cpu(tclient->rcv_error_bytes.lo);
3654 ADD_64(estats->error_bytes_received_hi,
3655 estats->rx_stat_ifhcinbadoctets_hi,
3656 estats->error_bytes_received_lo,
3657 estats->rx_stat_ifhcinbadoctets_lo);
3659 ADD_64(fstats->total_bytes_received_hi,
3660 estats->error_bytes_received_hi,
3661 fstats->total_bytes_received_lo,
3662 estats->error_bytes_received_lo);
3664 UPDATE_EXTEND_TSTAT(rcv_unicast_pkts, total_unicast_packets_received);
3665 UPDATE_EXTEND_TSTAT(rcv_multicast_pkts,
3666 total_multicast_packets_received);
3667 UPDATE_EXTEND_TSTAT(rcv_broadcast_pkts,
3668 total_broadcast_packets_received);
3670 fstats->total_bytes_transmitted_hi =
3671 le32_to_cpu(xclient->total_sent_bytes.hi);
3672 fstats->total_bytes_transmitted_lo =
3673 le32_to_cpu(xclient->total_sent_bytes.lo);
3675 UPDATE_EXTEND_XSTAT(unicast_pkts_sent,
3676 total_unicast_packets_transmitted);
3677 UPDATE_EXTEND_XSTAT(multicast_pkts_sent,
3678 total_multicast_packets_transmitted);
3679 UPDATE_EXTEND_XSTAT(broadcast_pkts_sent,
3680 total_broadcast_packets_transmitted);
/* Mirror everything past the two leading u32s of host_func_stats
 * straight into eth_stats (the structs are laid out to match). */
3682 memcpy(estats, &(fstats->total_bytes_received_hi),
3683 sizeof(struct host_func_stats) - 2*sizeof(u32));
3685 estats->mac_filter_discard = le32_to_cpu(tport->mac_filter_discard);
3686 estats->xxoverflow_discard = le32_to_cpu(tport->xxoverflow_discard);
3687 estats->brb_truncate_discard =
3688 le32_to_cpu(tport->brb_truncate_discard);
3689 estats->mac_discard = le32_to_cpu(tport->mac_discard);
/* Snapshot the current storm counters (CPU byte order) so the next
 * update can compute extended deltas against them. */
3691 old_tclient->rcv_unicast_bytes.hi =
3692 le32_to_cpu(tclient->rcv_unicast_bytes.hi);
3693 old_tclient->rcv_unicast_bytes.lo =
3694 le32_to_cpu(tclient->rcv_unicast_bytes.lo);
3695 old_tclient->rcv_broadcast_bytes.hi =
3696 le32_to_cpu(tclient->rcv_broadcast_bytes.hi);
3697 old_tclient->rcv_broadcast_bytes.lo =
3698 le32_to_cpu(tclient->rcv_broadcast_bytes.lo);
3699 old_tclient->rcv_multicast_bytes.hi =
3700 le32_to_cpu(tclient->rcv_multicast_bytes.hi);
3701 old_tclient->rcv_multicast_bytes.lo =
3702 le32_to_cpu(tclient->rcv_multicast_bytes.lo);
3703 old_tclient->total_rcv_pkts = le32_to_cpu(tclient->total_rcv_pkts);
3705 old_tclient->checksum_discard = le32_to_cpu(tclient->checksum_discard);
3706 old_tclient->packets_too_big_discard =
3707 le32_to_cpu(tclient->packets_too_big_discard);
3708 estats->no_buff_discard =
3709 old_tclient->no_buff_discard = le32_to_cpu(tclient->no_buff_discard);
3710 old_tclient->ttl0_discard = le32_to_cpu(tclient->ttl0_discard);
3712 old_xclient->total_sent_pkts = le32_to_cpu(xclient->total_sent_pkts);
3713 old_xclient->unicast_bytes_sent.hi =
3714 le32_to_cpu(xclient->unicast_bytes_sent.hi);
3715 old_xclient->unicast_bytes_sent.lo =
3716 le32_to_cpu(xclient->unicast_bytes_sent.lo);
3717 old_xclient->multicast_bytes_sent.hi =
3718 le32_to_cpu(xclient->multicast_bytes_sent.hi);
3719 old_xclient->multicast_bytes_sent.lo =
3720 le32_to_cpu(xclient->multicast_bytes_sent.lo);
3721 old_xclient->broadcast_bytes_sent.hi =
3722 le32_to_cpu(xclient->broadcast_bytes_sent.hi);
3723 old_xclient->broadcast_bytes_sent.lo =
3724 le32_to_cpu(xclient->broadcast_bytes_sent.lo);
/* Mark the function-stats buffer consumed (start == end + 1 rule,
 * same convention as host_port_stats). */
3726 fstats->host_func_stats_start = ++fstats->host_func_stats_end;
/*
 * bnx2x_net_stats_update - translate the driver's accumulated eth_stats
 * into the generic struct net_device_stats exposed to the stack.
 *
 * Only the low 32 bits of most hi/lo pairs are used for error counters;
 * packet/byte totals go through bnx2x_hilo() for the full 64-bit value.
 */
3731 static void bnx2x_net_stats_update(struct bnx2x *bp)
3733 struct tstorm_per_client_stats *old_tclient = &bp->old_tclient;
3734 struct bnx2x_eth_stats *estats = &bp->eth_stats;
3735 struct net_device_stats *nstats = &bp->dev->stats;
3737 nstats->rx_packets =
3738 bnx2x_hilo(&estats->total_unicast_packets_received_hi) +
3739 bnx2x_hilo(&estats->total_multicast_packets_received_hi) +
3740 bnx2x_hilo(&estats->total_broadcast_packets_received_hi);
3742 nstats->tx_packets =
3743 bnx2x_hilo(&estats->total_unicast_packets_transmitted_hi) +
3744 bnx2x_hilo(&estats->total_multicast_packets_transmitted_hi) +
3745 bnx2x_hilo(&estats->total_broadcast_packets_transmitted_hi);
3747 nstats->rx_bytes = bnx2x_hilo(&estats->valid_bytes_received_hi);
3749 nstats->tx_bytes = bnx2x_hilo(&estats->total_bytes_transmitted_hi);
3751 nstats->rx_dropped = old_tclient->checksum_discard +
3752 estats->mac_discard;
3753 nstats->tx_dropped = 0;
3756 bnx2x_hilo(&estats->total_multicast_packets_transmitted_hi);
3758 nstats->collisions =
3759 estats->tx_stat_dot3statssinglecollisionframes_lo +
3760 estats->tx_stat_dot3statsmultiplecollisionframes_lo +
3761 estats->tx_stat_dot3statslatecollisions_lo +
3762 estats->tx_stat_dot3statsexcessivecollisions_lo;
3764 estats->jabber_packets_received =
3765 old_tclient->packets_too_big_discard +
3766 estats->rx_stat_dot3statsframestoolong_lo;
3768 nstats->rx_length_errors =
3769 estats->rx_stat_etherstatsundersizepkts_lo +
3770 estats->jabber_packets_received;
3771 nstats->rx_over_errors = estats->brb_drop_lo + estats->brb_truncate_lo;
3772 nstats->rx_crc_errors = estats->rx_stat_dot3statsfcserrors_lo;
3773 nstats->rx_frame_errors = estats->rx_stat_dot3statsalignmenterrors_lo;
3774 nstats->rx_fifo_errors = old_tclient->no_buff_discard;
3775 nstats->rx_missed_errors = estats->xxoverflow_discard;
/* rx_errors is the sum of all the specific Rx error buckets above. */
3777 nstats->rx_errors = nstats->rx_length_errors +
3778 nstats->rx_over_errors +
3779 nstats->rx_crc_errors +
3780 nstats->rx_frame_errors +
3781 nstats->rx_fifo_errors +
3782 nstats->rx_missed_errors;
3784 nstats->tx_aborted_errors =
3785 estats->tx_stat_dot3statslatecollisions_lo +
3786 estats->tx_stat_dot3statsexcessivecollisions_lo;
3787 nstats->tx_carrier_errors = estats->rx_stat_falsecarriererrors_lo;
3788 nstats->tx_fifo_errors = 0;
3789 nstats->tx_heartbeat_errors = 0;
3790 nstats->tx_window_errors = 0;
3792 nstats->tx_errors = nstats->tx_aborted_errors +
3793 nstats->tx_carrier_errors;
/*
 * bnx2x_stats_update - STATS_EVENT_UPDATE handler while stats are enabled:
 * waits for DMAE completion, refreshes HW and storm stats, republishes
 * netdev stats, optionally dumps debug state, then re-posts the next
 * stats requests.
 *
 * NOTE(review): elided listing — the early-return on incomplete DMAE and
 * some surrounding braces are not visible here.
 */
3796 static void bnx2x_stats_update(struct bnx2x *bp)
3798 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
/* DMAE completion value not yet written back: stats not ready. */
3801 if (*stats_comp != DMAE_COMP_VAL)
3805 update = (bnx2x_hw_stats_update(bp) == 0);
3807 update |= (bnx2x_storm_stats_update(bp) == 0);
3810 bnx2x_net_stats_update(bp);
/* Track consecutive misses; complain after three stale rounds. */
3813 if (bp->stats_pending) {
3814 bp->stats_pending++;
3815 if (bp->stats_pending == 3) {
3816 BNX2X_ERR("stats not updated for 3 times\n");
/* Verbose per-queue debug dump, gated on NETIF_MSG_TIMER. */
3823 if (bp->msglevel & NETIF_MSG_TIMER) {
3824 struct tstorm_per_client_stats *old_tclient = &bp->old_tclient;
3825 struct bnx2x_eth_stats *estats = &bp->eth_stats;
3826 struct net_device_stats *nstats = &bp->dev->stats;
3829 printk(KERN_DEBUG "%s:\n", bp->dev->name);
3830 printk(KERN_DEBUG " tx avail (%4x) tx hc idx (%x)"
3832 bnx2x_tx_avail(bp->fp),
3833 le16_to_cpu(*bp->fp->tx_cons_sb), nstats->tx_packets);
3834 printk(KERN_DEBUG " rx usage (%4x) rx hc idx (%x)"
3836 (u16)(le16_to_cpu(*bp->fp->rx_cons_sb) -
3837 bp->fp->rx_comp_cons),
3838 le16_to_cpu(*bp->fp->rx_cons_sb), nstats->rx_packets);
3839 printk(KERN_DEBUG " %s (Xoff events %u) brb drops %u\n",
3840 netif_queue_stopped(bp->dev) ? "Xoff" : "Xon",
3841 estats->driver_xoff, estats->brb_drop_lo);
3842 printk(KERN_DEBUG "tstats: checksum_discard %u "
3843 "packets_too_big_discard %u no_buff_discard %u "
3844 "mac_discard %u mac_filter_discard %u "
3845 "xxovrflow_discard %u brb_truncate_discard %u "
3846 "ttl0_discard %u\n",
3847 old_tclient->checksum_discard,
3848 old_tclient->packets_too_big_discard,
3849 old_tclient->no_buff_discard, estats->mac_discard,
3850 estats->mac_filter_discard, estats->xxoverflow_discard,
3851 estats->brb_truncate_discard,
3852 old_tclient->ttl0_discard);
3854 for_each_queue(bp, i) {
3855 printk(KERN_DEBUG "[%d]: %lu\t%lu\t%lu\n", i,
3856 bnx2x_fp(bp, i, tx_pkt),
3857 bnx2x_fp(bp, i, rx_pkt),
3858 bnx2x_fp(bp, i, rx_calls));
/* Kick off the next round of HW (DMAE) and storm (ramrod) queries. */
3862 bnx2x_hw_stats_post(bp);
3863 bnx2x_storm_stats_post(bp);
/*
 * bnx2x_port_stats_stop - build the DMAE commands that flush the final
 * host port/function statistics snapshots back to shared memory (GRC)
 * before statistics are disabled.
 *
 * NOTE(review): elided listing — the branch choosing loader-completion
 * vs PCI-completion and the func_stx guard are partially out of view.
 */
3866 static void bnx2x_port_stats_stop(struct bnx2x *bp)
3868 struct dmae_command *dmae;
3870 int loader_idx = PMF_DMAE_C(bp);
3871 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3873 bp->executer_idx = 0;
/* Common opcode: PCI -> GRC copy, endianness per host byte order,
 * port and E1H vnic encoded in the command. */
3875 opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3877 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3879 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3881 DMAE_CMD_ENDIANITY_DW_SWAP |
3883 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3884 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
/* Port statistics, only when the shmem port_stx address is set. */
3886 if (bp->port.port_stx) {
3888 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3890 dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
3892 dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
3893 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3894 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3895 dmae->dst_addr_lo = bp->port.port_stx >> 2;
3896 dmae->dst_addr_hi = 0;
3897 dmae->len = sizeof(struct host_port_stats) >> 2;
3899 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3900 dmae->comp_addr_hi = 0;
3903 dmae->comp_addr_lo =
3904 U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3905 dmae->comp_addr_hi =
3906 U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3907 dmae->comp_val = DMAE_COMP_VAL;
/* Function statistics: completion written back to host stats_comp. */
3915 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3916 dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
3917 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
3918 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
3919 dmae->dst_addr_lo = bp->func_stx >> 2;
3920 dmae->dst_addr_hi = 0;
3921 dmae->len = sizeof(struct host_func_stats) >> 2;
3922 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3923 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3924 dmae->comp_val = DMAE_COMP_VAL;
/*
 * bnx2x_stats_stop - STATS_EVENT_STOP handler: drain pending stats,
 * do one final update pass, flush the last snapshot to shmem and wait
 * for the flush to complete.
 */
3930 static void bnx2x_stats_stop(struct bnx2x *bp)
3934 bnx2x_stats_comp(bp);
3937 update = (bnx2x_hw_stats_update(bp) == 0);
3939 update |= (bnx2x_storm_stats_update(bp) == 0);
3942 bnx2x_net_stats_update(bp);
3945 bnx2x_port_stats_stop(bp);
3947 bnx2x_hw_stats_post(bp);
3948 bnx2x_stats_comp(bp);
/* No-op action used by state-machine slots that require no work. */
3952 static void bnx2x_stats_do_nothing(struct bnx2x *bp)
/*
 * Statistics state machine: bnx2x_stats_stm[state][event] gives the
 * action to run and the state to transition to. States are DISABLED
 * and ENABLED; events are PMF, LINK_UP, UPDATE and STOP.
 */
3956 static const struct {
3957 void (*action)(struct bnx2x *bp);
3958 enum bnx2x_stats_state next_state;
3959 } bnx2x_stats_stm[STATS_STATE_MAX][STATS_EVENT_MAX] = {
3962 /* DISABLED PMF */ {bnx2x_stats_pmf_update, STATS_STATE_DISABLED},
3963 /* LINK_UP */ {bnx2x_stats_start, STATS_STATE_ENABLED},
3964 /* UPDATE */ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED},
3965 /* STOP */ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED}
3968 /* ENABLED PMF */ {bnx2x_stats_pmf_start, STATS_STATE_ENABLED},
3969 /* LINK_UP */ {bnx2x_stats_restart, STATS_STATE_ENABLED},
3970 /* UPDATE */ {bnx2x_stats_update, STATS_STATE_ENABLED},
3971 /* STOP */ {bnx2x_stats_stop, STATS_STATE_DISABLED}
/*
 * bnx2x_stats_handle - dispatch a statistics event through the state
 * machine: run the action for (current state, event) and advance to
 * the table's next_state. Logs the transition except for the noisy
 * periodic UPDATE event (unless NETIF_MSG_TIMER is set).
 */
3975 static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event)
3977 enum bnx2x_stats_state state = bp->stats_state;
3979 bnx2x_stats_stm[state][event].action(bp);
3980 bp->stats_state = bnx2x_stats_stm[state][event].next_state;
3982 if ((event != STATS_EVENT_UPDATE) || (bp->msglevel & NETIF_MSG_TIMER))
3983 DP(BNX2X_MSG_STATS, "state %d -> event %d -> state %d\n",
3984 state, event, bp->stats_state);
/*
 * bnx2x_timer - periodic driver timer: optionally polls queue 0,
 * exchanges the driver/MCP heartbeat pulse via shared memory, kicks a
 * statistics update when the device is up, and re-arms itself.
 *
 * NOTE(review): elided listing — the early-exit gotos and the polling
 * guard around the fastpath section are not fully visible here.
 */
3987 static void bnx2x_timer(unsigned long data)
3989 struct bnx2x *bp = (struct bnx2x *) data;
3991 if (!netif_running(bp->dev))
/* Interrupts still disabled (intr_sem held): skip this tick. */
3994 if (atomic_read(&bp->intr_sem) != 0)
3998 struct bnx2x_fastpath *fp = &bp->fp[0];
4001 bnx2x_tx_int(fp, 1000);
4002 rc = bnx2x_rx_int(fp, 1000);
/* Heartbeat with the management CPU (MCP), unless running no-MCP. */
4005 if (!BP_NOMCP(bp)) {
4006 int func = BP_FUNC(bp);
4010 ++bp->fw_drv_pulse_wr_seq;
4011 bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK;
4012 /* TBD - add SYSTEM_TIME */
4013 drv_pulse = bp->fw_drv_pulse_wr_seq;
4014 SHMEM_WR(bp, func_mb[func].drv_pulse_mb, drv_pulse);
4016 mcp_pulse = (SHMEM_RD(bp, func_mb[func].mcp_pulse_mb) &
4017 MCP_PULSE_SEQ_MASK);
4018 /* The delta between driver pulse and mcp response
4019 * should be 1 (before mcp response) or 0 (after mcp response)
4021 if ((drv_pulse != mcp_pulse) &&
4022 (drv_pulse != ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK))) {
4023 /* someone lost a heartbeat... */
4024 BNX2X_ERR("drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
4025 drv_pulse, mcp_pulse);
4029 if ((bp->state == BNX2X_STATE_OPEN) ||
4030 (bp->state == BNX2X_STATE_DISABLED))
4031 bnx2x_stats_handle(bp, STATS_EVENT_UPDATE);
/* Re-arm for the next interval. */
4034 mod_timer(&bp->timer, jiffies + bp->current_interval);
4037 /* end of Statistics */
4042 * nic init service functions
/*
 * bnx2x_zero_sb - clear the USTORM and CSTORM host status block images
 * in internal memory for the given status block id.
 */
4045 static void bnx2x_zero_sb(struct bnx2x *bp, int sb_id)
4047 int port = BP_PORT(bp);
4049 bnx2x_init_fill(bp, BAR_USTRORM_INTMEM +
4050 USTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), 0,
4051 sizeof(struct ustorm_status_block)/4);
4052 bnx2x_init_fill(bp, BAR_CSTRORM_INTMEM +
4053 CSTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), 0,
4054 sizeof(struct cstorm_status_block)/4);
/*
 * bnx2x_init_sb - program a non-default status block: write its host
 * DMA address and owning function into USTORM/CSTORM internal memory,
 * disable host coalescing on every index, then ACK/enable the IGU
 * interrupt for this status block.
 */
4057 static void bnx2x_init_sb(struct bnx2x *bp, struct host_status_block *sb,
4058 dma_addr_t mapping, int sb_id)
4060 int port = BP_PORT(bp);
4061 int func = BP_FUNC(bp);
/* USTORM half of the status block. */
4066 section = ((u64)mapping) + offsetof(struct host_status_block,
4068 sb->u_status_block.status_block_id = sb_id;
4070 REG_WR(bp, BAR_USTRORM_INTMEM +
4071 USTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id), U64_LO(section));
4072 REG_WR(bp, BAR_USTRORM_INTMEM +
4073 ((USTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id)) + 4),
4075 REG_WR8(bp, BAR_USTRORM_INTMEM + FP_USB_FUNC_OFF +
4076 USTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), func);
/* Start with host coalescing disabled on all USTORM indices. */
4078 for (index = 0; index < HC_USTORM_SB_NUM_INDICES; index++)
4079 REG_WR16(bp, BAR_USTRORM_INTMEM +
4080 USTORM_SB_HC_DISABLE_OFFSET(port, sb_id, index), 1);
/* CSTORM half of the status block. */
4083 section = ((u64)mapping) + offsetof(struct host_status_block,
4085 sb->c_status_block.status_block_id = sb_id;
4087 REG_WR(bp, BAR_CSTRORM_INTMEM +
4088 CSTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id), U64_LO(section));
4089 REG_WR(bp, BAR_CSTRORM_INTMEM +
4090 ((CSTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id)) + 4),
4092 REG_WR8(bp, BAR_CSTRORM_INTMEM + FP_CSB_FUNC_OFF +
4093 CSTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), func);
4095 for (index = 0; index < HC_CSTORM_SB_NUM_INDICES; index++)
4096 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4097 CSTORM_SB_HC_DISABLE_OFFSET(port, sb_id, index), 1);
4099 bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
/*
 * bnx2x_zero_def_sb - clear the per-function default status block
 * images in all four storm internal memories (U/C/X/T).
 */
4102 static void bnx2x_zero_def_sb(struct bnx2x *bp)
4104 int func = BP_FUNC(bp);
4106 bnx2x_init_fill(bp, BAR_USTRORM_INTMEM +
4107 USTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4108 sizeof(struct ustorm_def_status_block)/4);
4109 bnx2x_init_fill(bp, BAR_CSTRORM_INTMEM +
4110 CSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4111 sizeof(struct cstorm_def_status_block)/4);
4112 bnx2x_init_fill(bp, BAR_XSTRORM_INTMEM +
4113 XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4114 sizeof(struct xstorm_def_status_block)/4);
4115 bnx2x_init_fill(bp, BAR_TSTRORM_INTMEM +
4116 TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4117 sizeof(struct tstorm_def_status_block)/4);
/*
 * bnx2x_init_def_sb - set up the default status block: the attention
 * section (AEU groups, attention message address and count) plus the
 * U/C/T/X storm default sections, each pointed at its slice of the
 * host DMA buffer with coalescing disabled; finally enable the IGU
 * interrupt for it.
 */
4120 static void bnx2x_init_def_sb(struct bnx2x *bp,
4121 struct host_def_status_block *def_sb,
4122 dma_addr_t mapping, int sb_id)
4124 int port = BP_PORT(bp);
4125 int func = BP_FUNC(bp);
4126 int index, val, reg_offset;
/* Attention status block. */
4130 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4131 atten_status_block);
4132 def_sb->atten_status_block.status_block_id = sb_id;
4136 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
4137 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
/* Cache the four AEU enable signatures of each dynamic attn group
 * (registers are laid out 0x10 apart per group, 4 dwords each). */
4139 for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
4140 bp->attn_group[index].sig[0] = REG_RD(bp,
4141 reg_offset + 0x10*index);
4142 bp->attn_group[index].sig[1] = REG_RD(bp,
4143 reg_offset + 0x4 + 0x10*index);
4144 bp->attn_group[index].sig[2] = REG_RD(bp,
4145 reg_offset + 0x8 + 0x10*index);
4146 bp->attn_group[index].sig[3] = REG_RD(bp,
4147 reg_offset + 0xc + 0x10*index);
4150 reg_offset = (port ? HC_REG_ATTN_MSG1_ADDR_L :
4151 HC_REG_ATTN_MSG0_ADDR_L);
4153 REG_WR(bp, reg_offset, U64_LO(section));
4154 REG_WR(bp, reg_offset + 4, U64_HI(section));
4156 reg_offset = (port ? HC_REG_ATTN_NUM_P1 : HC_REG_ATTN_NUM_P0);
4158 val = REG_RD(bp, reg_offset);
4160 REG_WR(bp, reg_offset, val);
/* USTORM default section. */
4163 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4164 u_def_status_block);
4165 def_sb->u_def_status_block.status_block_id = sb_id;
4167 REG_WR(bp, BAR_USTRORM_INTMEM +
4168 USTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
4169 REG_WR(bp, BAR_USTRORM_INTMEM +
4170 ((USTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
4172 REG_WR8(bp, BAR_USTRORM_INTMEM + DEF_USB_FUNC_OFF +
4173 USTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4175 for (index = 0; index < HC_USTORM_DEF_SB_NUM_INDICES; index++)
4176 REG_WR16(bp, BAR_USTRORM_INTMEM +
4177 USTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
/* CSTORM default section. */
4180 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4181 c_def_status_block);
4182 def_sb->c_def_status_block.status_block_id = sb_id;
4184 REG_WR(bp, BAR_CSTRORM_INTMEM +
4185 CSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
4186 REG_WR(bp, BAR_CSTRORM_INTMEM +
4187 ((CSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
4189 REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_CSB_FUNC_OFF +
4190 CSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4192 for (index = 0; index < HC_CSTORM_DEF_SB_NUM_INDICES; index++)
4193 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4194 CSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
/* TSTORM default section. */
4197 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4198 t_def_status_block);
4199 def_sb->t_def_status_block.status_block_id = sb_id;
4201 REG_WR(bp, BAR_TSTRORM_INTMEM +
4202 TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
4203 REG_WR(bp, BAR_TSTRORM_INTMEM +
4204 ((TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
4206 REG_WR8(bp, BAR_TSTRORM_INTMEM + DEF_TSB_FUNC_OFF +
4207 TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4209 for (index = 0; index < HC_TSTORM_DEF_SB_NUM_INDICES; index++)
4210 REG_WR16(bp, BAR_TSTRORM_INTMEM +
4211 TSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
/* XSTORM default section. */
4214 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4215 x_def_status_block);
4216 def_sb->x_def_status_block.status_block_id = sb_id;
4218 REG_WR(bp, BAR_XSTRORM_INTMEM +
4219 XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
4220 REG_WR(bp, BAR_XSTRORM_INTMEM +
4221 ((XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
4223 REG_WR8(bp, BAR_XSTRORM_INTMEM + DEF_XSB_FUNC_OFF +
4224 XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4226 for (index = 0; index < HC_XSTORM_DEF_SB_NUM_INDICES; index++)
4227 REG_WR16(bp, BAR_XSTRORM_INTMEM +
4228 XSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
4230 bp->stats_pending = 0;
4231 bp->set_mac_pending = 0;
4233 bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
/*
 * bnx2x_update_coalesce - program the per-queue host-coalescing
 * timeouts for the Rx CQ (USTORM) and Tx CQ (CSTORM) indices; a tick
 * value of 0 disables coalescing on that index.
 */
4236 static void bnx2x_update_coalesce(struct bnx2x *bp)
4238 int port = BP_PORT(bp);
4241 for_each_queue(bp, i) {
4242 int sb_id = bp->fp[i].sb_id;
4244 /* HC_INDEX_U_ETH_RX_CQ_CONS */
4245 REG_WR8(bp, BAR_USTRORM_INTMEM +
4246 USTORM_SB_HC_TIMEOUT_OFFSET(port, sb_id,
4247 U_SB_ETH_RX_CQ_INDEX),
4249 REG_WR16(bp, BAR_USTRORM_INTMEM +
4250 USTORM_SB_HC_DISABLE_OFFSET(port, sb_id,
4251 U_SB_ETH_RX_CQ_INDEX),
4252 bp->rx_ticks ? 0 : 1);
4254 /* HC_INDEX_C_ETH_TX_CQ_CONS */
4255 REG_WR8(bp, BAR_CSTRORM_INTMEM +
4256 CSTORM_SB_HC_TIMEOUT_OFFSET(port, sb_id,
4257 C_SB_ETH_TX_CQ_INDEX),
4259 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4260 CSTORM_SB_HC_DISABLE_OFFSET(port, sb_id,
4261 C_SB_ETH_TX_CQ_INDEX),
4262 bp->tx_ticks ? 0 : 1);
/*
 * bnx2x_free_tpa_pool - release the first 'last' TPA aggregation skbs
 * of a fastpath; a bin still in BNX2X_TPA_START state also has a live
 * PCI DMA mapping that must be unmapped first.
 *
 * NOTE(review): elided listing — the skb free and NULL-ing of the bin
 * are not visible between these lines.
 */
4266 static inline void bnx2x_free_tpa_pool(struct bnx2x *bp,
4267 struct bnx2x_fastpath *fp, int last)
4271 for (i = 0; i < last; i++) {
4272 struct sw_rx_bd *rx_buf = &(fp->tpa_pool[i]);
4273 struct sk_buff *skb = rx_buf->skb;
4276 DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i);
4280 if (fp->tpa_state[i] == BNX2X_TPA_START)
4281 pci_unmap_single(bp->pdev,
4282 pci_unmap_addr(rx_buf, mapping),
4284 PCI_DMA_FROMDEVICE);
/*
 * bnx2x_init_rx_rings - per-Rx-queue bring-up: pre-allocate the TPA skb
 * pool (when TPA is enabled), link the "next page" elements of the SGE,
 * BD and RCQ rings, fill the rings with buffers, publish the initial
 * producers to the chip, and (for function 0 on E1 — guard elided) apply
 * the USTORM memory workaround.
 *
 * NOTE(review): elided listing — loop-exit breaks, some guards and
 * closing braces are not visible between these lines.
 */
4291 static void bnx2x_init_rx_rings(struct bnx2x *bp)
4293 int func = BP_FUNC(bp);
4294 int max_agg_queues = CHIP_IS_E1(bp) ? ETH_MAX_AGGREGATION_QUEUES_E1 :
4295 ETH_MAX_AGGREGATION_QUEUES_E1H;
4296 u16 ring_prod, cqe_ring_prod;
4299 bp->rx_buf_size += bp->rx_offset + ETH_OVREHEAD + BNX2X_RX_ALIGN;
4301 "mtu %d rx_buf_size %d\n", bp->dev->mtu, bp->rx_buf_size);
/* Pre-allocate one skb per aggregation bin for each Rx queue; on
 * allocation failure TPA is disabled for that queue only. */
4303 if (bp->flags & TPA_ENABLE_FLAG) {
4305 for_each_rx_queue(bp, j) {
4306 struct bnx2x_fastpath *fp = &bp->fp[j];
4308 for (i = 0; i < max_agg_queues; i++) {
4309 fp->tpa_pool[i].skb =
4310 netdev_alloc_skb(bp->dev, bp->rx_buf_size);
4311 if (!fp->tpa_pool[i].skb) {
4312 BNX2X_ERR("Failed to allocate TPA "
4313 "skb pool for queue[%d] - "
4314 "disabling TPA on this "
4316 bnx2x_free_tpa_pool(bp, fp, i);
4317 fp->disable_tpa = 1;
4320 pci_unmap_addr_set((struct sw_rx_bd *)
4321 &bp->fp->tpa_pool[i],
4323 fp->tpa_state[i] = BNX2X_TPA_STOP;
4328 for_each_rx_queue(bp, j) {
4329 struct bnx2x_fastpath *fp = &bp->fp[j];
4332 fp->rx_cons_sb = BNX2X_RX_SB_INDEX;
4333 fp->rx_bd_cons_sb = BNX2X_RX_SB_BD_INDEX;
4335 /* "next page" elements initialization */
/* SGE ring: last element of each page points at the next page. */
4337 for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
4338 struct eth_rx_sge *sge;
4340 sge = &fp->rx_sge_ring[RX_SGE_CNT * i - 2];
4342 cpu_to_le32(U64_HI(fp->rx_sge_mapping +
4343 BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
4345 cpu_to_le32(U64_LO(fp->rx_sge_mapping +
4346 BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
4349 bnx2x_init_sge_ring_bit_mask(fp);
/* BD ring: same next-page chaining as the SGE ring. */
4352 for (i = 1; i <= NUM_RX_RINGS; i++) {
4353 struct eth_rx_bd *rx_bd;
4355 rx_bd = &fp->rx_desc_ring[RX_DESC_CNT * i - 2];
4357 cpu_to_le32(U64_HI(fp->rx_desc_mapping +
4358 BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
4360 cpu_to_le32(U64_LO(fp->rx_desc_mapping +
4361 BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
/* Completion queue: last CQE of each page is a next-page pointer. */
4365 for (i = 1; i <= NUM_RCQ_RINGS; i++) {
4366 struct eth_rx_cqe_next_page *nextpg;
4368 nextpg = (struct eth_rx_cqe_next_page *)
4369 &fp->rx_comp_ring[RCQ_DESC_CNT * i - 1];
4371 cpu_to_le32(U64_HI(fp->rx_comp_mapping +
4372 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
4374 cpu_to_le32(U64_LO(fp->rx_comp_mapping +
4375 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
4378 /* Allocate SGEs and initialize the ring elements */
4379 for (i = 0, ring_prod = 0;
4380 i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {
4382 if (bnx2x_alloc_rx_sge(bp, fp, ring_prod) < 0) {
4383 BNX2X_ERR("was only able to allocate "
4385 BNX2X_ERR("disabling TPA for queue[%d]\n", j);
4386 /* Cleanup already allocated elements */
4387 bnx2x_free_rx_sge_range(bp, fp, ring_prod);
4388 bnx2x_free_tpa_pool(bp, fp, max_agg_queues);
4389 fp->disable_tpa = 1;
4393 ring_prod = NEXT_SGE_IDX(ring_prod);
4395 fp->rx_sge_prod = ring_prod;
4397 /* Allocate BDs and initialize BD ring */
4398 fp->rx_comp_cons = 0;
4399 cqe_ring_prod = ring_prod = 0;
4400 for (i = 0; i < bp->rx_ring_size; i++) {
4401 if (bnx2x_alloc_rx_skb(bp, fp, ring_prod) < 0) {
4402 BNX2X_ERR("was only able to allocate "
4404 bp->eth_stats.rx_skb_alloc_failed++;
4407 ring_prod = NEXT_RX_IDX(ring_prod);
4408 cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
4409 WARN_ON(ring_prod <= i);
4412 fp->rx_bd_prod = ring_prod;
4413 /* must not have more available CQEs than BDs */
4414 fp->rx_comp_prod = min((u16)(NUM_RCQ_RINGS*RCQ_DESC_CNT),
4416 fp->rx_pkt = fp->rx_calls = 0;
4419 * this will generate an interrupt (to the TSTORM)
4420 * must only be done after chip is initialized
4422 bnx2x_update_rx_prod(bp, fp, ring_prod, fp->rx_comp_prod,
/* HW memory workaround: point USTORM at the completion ring. */
4427 REG_WR(bp, BAR_USTRORM_INTMEM +
4428 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
4429 U64_LO(fp->rx_comp_mapping));
4430 REG_WR(bp, BAR_USTRORM_INTMEM +
4431 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
4432 U64_HI(fp->rx_comp_mapping));
/*
 * bnx2x_init_tx_ring - per-Tx-queue bring-up: chain each Tx BD page's
 * last descriptor to the next page and reset the producer/consumer
 * indices and the status-block consumer pointer.
 */
4436 static void bnx2x_init_tx_ring(struct bnx2x *bp)
4440 for_each_tx_queue(bp, j) {
4441 struct bnx2x_fastpath *fp = &bp->fp[j];
4443 for (i = 1; i <= NUM_TX_RINGS; i++) {
4444 struct eth_tx_bd *tx_bd =
4445 &fp->tx_desc_ring[TX_DESC_CNT * i - 1];
4448 cpu_to_le32(U64_HI(fp->tx_desc_mapping +
4449 BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
4451 cpu_to_le32(U64_LO(fp->tx_desc_mapping +
4452 BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
4455 fp->tx_pkt_prod = 0;
4456 fp->tx_pkt_cons = 0;
4459 fp->tx_cons_sb = BNX2X_TX_SB_INDEX;
/*
 * bnx2x_init_sp_ring - initialize the slowpath (SPQ) ring state and
 * tell the XSTORM where the ring lives and what the initial producer
 * index is.
 */
4464 static void bnx2x_init_sp_ring(struct bnx2x *bp)
4466 int func = BP_FUNC(bp);
4468 spin_lock_init(&bp->spq_lock);
4470 bp->spq_left = MAX_SPQ_PENDING;
4471 bp->spq_prod_idx = 0;
4472 bp->dsb_sp_prod = BNX2X_SP_DSB_INDEX;
4473 bp->spq_prod_bd = bp->spq;
4474 bp->spq_last_bd = bp->spq_prod_bd + MAX_SP_DESC_CNT;
4476 REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func),
4477 U64_LO(bp->spq_mapping));
4479 XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func) + 4,
4480 U64_HI(bp->spq_mapping));
4482 REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PROD_OFFSET(func),
/*
 * bnx2x_init_context - fill the per-queue ethernet connection context:
 * USTORM Rx state (client id, status block, BD/SGE page bases, TPA
 * flags), XSTORM Tx state (BD page base, doorbell data address,
 * statistics id) and CSTORM Tx-CQ indexing, plus the CDU reservation
 * values for each connection.
 */
4486 static void bnx2x_init_context(struct bnx2x *bp)
4490 for_each_queue(bp, i) {
4491 struct eth_context *context = bnx2x_sp(bp, context[i].eth);
4492 struct bnx2x_fastpath *fp = &bp->fp[i];
4493 u8 sb_id = FP_SB_ID(fp);
4495 context->ustorm_st_context.common.sb_index_numbers =
4496 BNX2X_RX_SB_INDEX_NUM;
4497 context->ustorm_st_context.common.clientId = FP_CL_ID(fp);
4498 context->ustorm_st_context.common.status_block_id = sb_id;
4499 context->ustorm_st_context.common.flags =
4500 USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_MC_ALIGNMENT;
4501 context->ustorm_st_context.common.mc_alignment_log_size =
4502 BNX2X_RX_ALIGN_SHIFT;
4503 context->ustorm_st_context.common.bd_buff_size =
4505 context->ustorm_st_context.common.bd_page_base_hi =
4506 U64_HI(fp->rx_desc_mapping);
4507 context->ustorm_st_context.common.bd_page_base_lo =
4508 U64_LO(fp->rx_desc_mapping);
/* TPA-capable queues additionally advertise the SGE ring. */
4509 if (!fp->disable_tpa) {
4510 context->ustorm_st_context.common.flags |=
4511 (USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_TPA |
4512 USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_SGE_RING);
4513 context->ustorm_st_context.common.sge_buff_size =
4514 (u16)min((u32)SGE_PAGE_SIZE*PAGES_PER_SGE,
4516 context->ustorm_st_context.common.sge_page_base_hi =
4517 U64_HI(fp->rx_sge_mapping);
4518 context->ustorm_st_context.common.sge_page_base_lo =
4519 U64_LO(fp->rx_sge_mapping);
4522 context->ustorm_ag_context.cdu_usage =
4523 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
4524 CDU_REGION_NUMBER_UCM_AG,
4525 ETH_CONNECTION_TYPE);
4527 context->xstorm_st_context.tx_bd_page_base_hi =
4528 U64_HI(fp->tx_desc_mapping);
4529 context->xstorm_st_context.tx_bd_page_base_lo =
4530 U64_LO(fp->tx_desc_mapping);
4531 context->xstorm_st_context.db_data_addr_hi =
4532 U64_HI(fp->tx_prods_mapping);
4533 context->xstorm_st_context.db_data_addr_lo =
4534 U64_LO(fp->tx_prods_mapping);
4535 context->xstorm_st_context.statistics_data = (fp->cl_id |
4536 XSTORM_ETH_ST_CONTEXT_STATISTICS_ENABLE);
4537 context->cstorm_st_context.sb_index_number =
4538 C_SB_ETH_TX_CQ_INDEX;
4539 context->cstorm_st_context.status_block_id = sb_id;
4541 context->xstorm_ag_context.cdu_reserved =
4542 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
4543 CDU_REGION_NUMBER_XCM_AG,
4544 ETH_CONNECTION_TYPE);
/*
 * bnx2x_init_ind_table - program the TSTORM RSS indirection table,
 * spreading entries round-robin over the Rx queues; no-op when RSS is
 * disabled.
 */
4548 static void bnx2x_init_ind_table(struct bnx2x *bp)
4550 int func = BP_FUNC(bp);
4553 if (bp->multi_mode == ETH_RSS_MODE_DISABLED)
4557 "Initializing indirection table multi_mode %d\n", bp->multi_mode);
4558 for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++)
4559 REG_WR8(bp, BAR_TSTRORM_INTMEM +
4560 TSTORM_INDIRECTION_TABLE_OFFSET(func) + i,
4561 BP_CL_ID(bp) + (i % bp->num_rx_queues));
/*
 * bnx2x_set_client_config - build a tstorm_eth_client_config (MTU,
 * statistics id, optional VLAN stripping and TPA/SGE settings) and
 * write it, as two dwords, into TSTORM internal memory for every
 * queue's client id.
 */
4564 static void bnx2x_set_client_config(struct bnx2x *bp)
4566 struct tstorm_eth_client_config tstorm_client = {0};
4567 int port = BP_PORT(bp);
4570 tstorm_client.mtu = bp->dev->mtu;
4571 tstorm_client.statistics_counter_id = BP_CL_ID(bp);
4572 tstorm_client.config_flags =
4573 TSTORM_ETH_CLIENT_CONFIG_STATSITICS_ENABLE;
/* VLAN stripping only when Rx is enabled, a vlan group is registered
 * and the HW supports Rx VLAN offload. */
4575 if (bp->rx_mode && bp->vlgrp && (bp->flags & HW_VLAN_RX_FLAG)) {
4576 tstorm_client.config_flags |=
4577 TSTORM_ETH_CLIENT_CONFIG_VLAN_REM_ENABLE;
4578 DP(NETIF_MSG_IFUP, "vlan removal enabled\n");
/* TPA: compute max SGEs per packet from the MTU, rounded up to a
 * whole number of SGE pages. */
4582 if (bp->flags & TPA_ENABLE_FLAG) {
4583 tstorm_client.max_sges_for_packet =
4584 SGE_PAGE_ALIGN(tstorm_client.mtu) >> SGE_PAGE_SHIFT;
4585 tstorm_client.max_sges_for_packet =
4586 ((tstorm_client.max_sges_for_packet +
4587 PAGES_PER_SGE - 1) & (~(PAGES_PER_SGE - 1))) >>
4588 PAGES_PER_SGE_SHIFT;
4590 tstorm_client.config_flags |=
4591 TSTORM_ETH_CLIENT_CONFIG_ENABLE_SGE_RING;
4594 for_each_queue(bp, i) {
4595 REG_WR(bp, BAR_TSTRORM_INTMEM +
4596 TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id),
4597 ((u32 *)&tstorm_client)[0]);
4598 REG_WR(bp, BAR_TSTRORM_INTMEM +
4599 TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id) + 4,
4600 ((u32 *)&tstorm_client)[1]);
4603 DP(BNX2X_MSG_OFF, "tstorm_client: 0x%08x 0x%08x\n",
4604 ((u32 *)&tstorm_client)[0], ((u32 *)&tstorm_client)[1]);
/*
 * bnx2x_set_storm_rx_mode - translate bp->rx_mode (none / normal /
 * allmulti / promisc) into the TSTORM MAC filter configuration and
 * write it to internal memory; re-pushes the client config whenever
 * Rx is not fully disabled.
 *
 * NOTE(review): elided listing — the switch's break statements are not
 * visible between these lines.
 */
4607 static void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
4609 struct tstorm_eth_mac_filter_config tstorm_mac_filter = {0};
4610 int mode = bp->rx_mode;
4611 int mask = (1 << BP_L_ID(bp));
4612 int func = BP_FUNC(bp);
4615 DP(NETIF_MSG_IFUP, "rx mode %d mask 0x%x\n", mode, mask);
4618 case BNX2X_RX_MODE_NONE: /* no Rx */
4619 tstorm_mac_filter.ucast_drop_all = mask;
4620 tstorm_mac_filter.mcast_drop_all = mask;
4621 tstorm_mac_filter.bcast_drop_all = mask;
4623 case BNX2X_RX_MODE_NORMAL:
4624 tstorm_mac_filter.bcast_accept_all = mask;
4626 case BNX2X_RX_MODE_ALLMULTI:
4627 tstorm_mac_filter.mcast_accept_all = mask;
4628 tstorm_mac_filter.bcast_accept_all = mask;
4630 case BNX2X_RX_MODE_PROMISC:
4631 tstorm_mac_filter.ucast_accept_all = mask;
4632 tstorm_mac_filter.mcast_accept_all = mask;
4633 tstorm_mac_filter.bcast_accept_all = mask;
4636 BNX2X_ERR("BAD rx mode (%d)\n", mode);
/* Push the filter config dword-by-dword into TSTORM memory. */
4640 for (i = 0; i < sizeof(struct tstorm_eth_mac_filter_config)/4; i++) {
4641 REG_WR(bp, BAR_TSTRORM_INTMEM +
4642 TSTORM_MAC_FILTER_CONFIG_OFFSET(func) + i * 4,
4643 ((u32 *)&tstorm_mac_filter)[i]);
4645 /* DP(NETIF_MSG_IFUP, "tstorm_mac_filter[%d]: 0x%08x\n", i,
4646 ((u32 *)&tstorm_mac_filter)[i]); */
4649 if (mode != BNX2X_RX_MODE_NONE)
4650 bnx2x_set_client_config(bp);
/*
 * bnx2x_init_internal_common - chip-wide internal memory init: declare
 * TPA existence to the TSTORM (when enabled) and zero the USTORM
 * aggregation data area that the init tool does not cover.
 */
4653 static void bnx2x_init_internal_common(struct bnx2x *bp)
4657 if (bp->flags & TPA_ENABLE_FLAG) {
4658 struct tstorm_eth_tpa_exist tpa = {0};
4662 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_TPA_EXIST_OFFSET,
4664 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_TPA_EXIST_OFFSET + 4,
4668 /* Zero this manually as its initialization is
4669 currently missing in the initTool */
4670 for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++)
4671 REG_WR(bp, BAR_USTRORM_INTMEM +
4672 USTORM_AGG_DATA_OFFSET + i * 4, 0);
/*
 * bnx2x_init_internal_port - per-port internal memory init: program the
 * host-coalescing baseline tick rate (BNX2X_BTR) into all four storms.
 */
4675 static void bnx2x_init_internal_port(struct bnx2x *bp)
4677 int port = BP_PORT(bp);
4679 REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
4680 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
4681 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
4682 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
/* Per-function internal storm-RAM init: RSS/multi-queue configuration,
 * per-client statistics reset, statistics collection context, firmware
 * statistics DMA address, E1H multi-function mode, and per-RX-queue CQE
 * page base / max aggregation size for the USTORM.
 * NOTE(review): lines are elided from this extraction; body is partial.
 */
4685 static void bnx2x_init_internal_func(struct bnx2x *bp)
4687 struct tstorm_eth_function_common_config tstorm_config = {0};
4688 struct stats_indication_flags stats_flags = {0};
4689 int port = BP_PORT(bp);
4690 int func = BP_FUNC(bp);
/* RSS configuration for multi-queue receive. */
4695 tstorm_config.config_flags = MULTI_FLAGS(bp);
4696 tstorm_config.rss_result_mask = MULTI_MASK;
/* presumably guarded by an (elided) E1H/multi-function check — confirm */
4699 tstorm_config.config_flags |=
4700 TSTORM_ETH_FUNCTION_COMMON_CONFIG_E1HOV_IN_CAM;
4702 tstorm_config.leading_client_id = BP_L_ID(bp);
/* Struct is exactly one u32; write it to TSTORM in a single access. */
4704 REG_WR(bp, BAR_TSTRORM_INTMEM +
4705 TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(func),
4706 (*(u32 *)&tstorm_config));
4708 bp->rx_mode = BNX2X_RX_MODE_NONE; /* no rx until link is up */
4709 bnx2x_set_storm_rx_mode(bp);
4711 /* reset xstorm per client statistics */
4712 for (i = 0; i < sizeof(struct xstorm_per_client_stats) / 4; i++) {
4713 REG_WR(bp, BAR_XSTRORM_INTMEM +
4714 XSTORM_PER_COUNTER_ID_STATS_OFFSET(port, BP_CL_ID(bp)) +
4717 /* reset tstorm per client statistics */
4718 for (i = 0; i < sizeof(struct tstorm_per_client_stats) / 4; i++) {
4719 REG_WR(bp, BAR_TSTRORM_INTMEM +
4720 TSTORM_PER_COUNTER_ID_STATS_OFFSET(port, BP_CL_ID(bp)) +
4724 /* Init statistics related context */
4725 stats_flags.collect_eth = 1;
/* Each storm gets the same 64-bit flags word, as two 32-bit writes. */
4727 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func),
4728 ((u32 *)&stats_flags)[0]);
4729 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func) + 4,
4730 ((u32 *)&stats_flags)[1]);
4732 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func),
4733 ((u32 *)&stats_flags)[0]);
4734 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func) + 4,
4735 ((u32 *)&stats_flags)[1]);
4737 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func),
4738 ((u32 *)&stats_flags)[0]);
4739 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func) + 4,
4740 ((u32 *)&stats_flags)[1]);
/* Tell X/T storms where to DMA their statistics (fw_stats slowpath buf). */
4742 REG_WR(bp, BAR_XSTRORM_INTMEM +
4743 XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
4744 U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
4745 REG_WR(bp, BAR_XSTRORM_INTMEM +
4746 XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
4747 U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
4749 REG_WR(bp, BAR_TSTRORM_INTMEM +
4750 TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
4751 U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
4752 REG_WR(bp, BAR_TSTRORM_INTMEM +
4753 TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
4754 U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
/* E1H only: publish function mode (SF/MF) and outer-VLAN to all storms. */
4756 if (CHIP_IS_E1H(bp)) {
4757 REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNCTION_MODE_OFFSET,
4759 REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNCTION_MODE_OFFSET,
4761 REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNCTION_MODE_OFFSET,
4763 REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNCTION_MODE_OFFSET,
4766 REG_WR16(bp, BAR_XSTRORM_INTMEM + XSTORM_E1HOV_OFFSET(func),
4770 /* Init CQ ring mapping and aggregation size, the FW limit is 8 frags */
4772 min((u32)(min((u32)8, (u32)MAX_SKB_FRAGS) *
4773 SGE_PAGE_SIZE * PAGES_PER_SGE),
4775 for_each_rx_queue(bp, i) {
4776 struct bnx2x_fastpath *fp = &bp->fp[i];
/* Completion-queue page base for this client, low then high dword. */
4778 REG_WR(bp, BAR_USTRORM_INTMEM +
4779 USTORM_CQE_PAGE_BASE_OFFSET(port, FP_CL_ID(fp)),
4780 U64_LO(fp->rx_comp_mapping));
4781 REG_WR(bp, BAR_USTRORM_INTMEM +
4782 USTORM_CQE_PAGE_BASE_OFFSET(port, FP_CL_ID(fp)) + 4,
4783 U64_HI(fp->rx_comp_mapping));
4785 REG_WR16(bp, BAR_USTRORM_INTMEM +
4786 USTORM_MAX_AGG_SIZE_OFFSET(port, FP_CL_ID(fp)),
/* Dispatch internal storm-RAM init according to the load level granted
 * by the MCP. NOTE(review): no break statements are visible here, so the
 * cases appear to cascade (COMMON also runs PORT and FUNCTION init) —
 * breaks may simply be elided from this extraction; confirm against the
 * full source.
 */
4791 static void bnx2x_init_internal(struct bnx2x *bp, u32 load_code)
4793 switch (load_code) {
4794 case FW_MSG_CODE_DRV_LOAD_COMMON:
4795 bnx2x_init_internal_common(bp);
4798 case FW_MSG_CODE_DRV_LOAD_PORT:
4799 bnx2x_init_internal_port(bp);
4802 case FW_MSG_CODE_DRV_LOAD_FUNCTION:
4803 bnx2x_init_internal_func(bp);
4807 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
/* Top-level NIC init: set up every fastpath queue's status block and
 * state, the default status block, coalescing, all rings (rx/tx/slowpath),
 * contexts, internal storm RAM, the RSS indirection table and statistics;
 * finally release the interrupt semaphore and enable interrupts.
 */
4812 static void bnx2x_nic_init(struct bnx2x *bp, u32 load_code)
4816 for_each_queue(bp, i) {
4817 struct bnx2x_fastpath *fp = &bp->fp[i];
4820 fp->state = BNX2X_FP_STATE_CLOSED;
/* Client id and status-block id are derived from the leading id + index. */
4822 fp->cl_id = BP_L_ID(bp) + i;
4823 fp->sb_id = fp->cl_id;
4825 "bnx2x_init_sb(%p,%p) index %d cl_id %d sb %d\n",
4826 bp, fp->status_blk, i, FP_CL_ID(fp), FP_SB_ID(fp));
4827 bnx2x_init_sb(bp, fp->status_blk, fp->status_blk_mapping,
4829 bnx2x_update_fpsb_idx(fp);
4832 bnx2x_init_def_sb(bp, bp->def_status_blk, bp->def_status_blk_mapping,
4834 bnx2x_update_dsb_idx(bp);
4835 bnx2x_update_coalesce(bp);
4836 bnx2x_init_rx_rings(bp);
4837 bnx2x_init_tx_ring(bp);
4838 bnx2x_init_sp_ring(bp);
4839 bnx2x_init_context(bp);
4840 bnx2x_init_internal(bp, load_code);
4841 bnx2x_init_ind_table(bp);
4842 bnx2x_stats_init(bp);
4844 /* At this point, we are ready for interrupts */
4845 atomic_set(&bp->intr_sem, 0);
4847 /* flush all before enabling interrupts */
4851 bnx2x_int_enable(bp);
4854 /* end of nic init */
4857 * gzip service functions
/* Allocate the resources needed to decompress firmware images: a DMA
 * coherent output buffer (FW_BUF_SIZE), a zlib stream object and its
 * inflate workspace. On failure the already-acquired resources are
 * released (unwind labels are elided from this extraction).
 */
4860 static int bnx2x_gunzip_init(struct bnx2x *bp)
4862 bp->gunzip_buf = pci_alloc_consistent(bp->pdev, FW_BUF_SIZE,
4863 &bp->gunzip_mapping);
4864 if (bp->gunzip_buf == NULL)
4867 bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL);
4868 if (bp->strm == NULL)
4871 bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(),
4873 if (bp->strm->workspace == NULL)
/* error unwind: free the DMA buffer acquired above */
4883 pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
4884 bp->gunzip_mapping);
/* NULL so bnx2x_gunzip_end() can tell the buffer is already gone */
4885 bp->gunzip_buf = NULL;
4888 printk(KERN_ERR PFX "%s: Cannot allocate firmware buffer for"
4889 " un-compression\n", bp->dev->name);
/* Release the decompression resources allocated by bnx2x_gunzip_init():
 * inflate workspace, stream object (free elided here) and, if still
 * present, the DMA output buffer.
 */
4893 static void bnx2x_gunzip_end(struct bnx2x *bp)
4895 kfree(bp->strm->workspace);
/* gunzip_buf is NULLed on the init error path, so guard before freeing */
4900 if (bp->gunzip_buf) {
4901 pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
4902 bp->gunzip_mapping);
4903 bp->gunzip_buf = NULL;
/* Decompress a gzip-wrapped firmware blob of @len bytes at @zbuf into
 * bp->gunzip_buf. Validates the gzip magic, skips the optional FNAME
 * header field, then inflates the raw deflate payload. The dword count
 * of the output is left in bp->gunzip_outlen.
 */
4907 static int bnx2x_gunzip(struct bnx2x *bp, u8 *zbuf, int len)
4911 /* check gzip header */
/* 0x1f 0x8b is the gzip magic; byte 2 must be the deflate method */
4912 if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED))
/* FLG.FNAME set: skip the NUL-terminated original file name field */
4919 if (zbuf[3] & FNAME)
4920 while ((zbuf[n++] != 0) && (n < len));
4922 bp->strm->next_in = zbuf + n;
4923 bp->strm->avail_in = len - n;
4924 bp->strm->next_out = bp->gunzip_buf;
4925 bp->strm->avail_out = FW_BUF_SIZE;
/* negative windowBits = raw inflate, no zlib header/trailer expected */
4927 rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
4931 rc = zlib_inflate(bp->strm, Z_FINISH);
4932 if ((rc != Z_OK) && (rc != Z_STREAM_END))
4933 printk(KERN_ERR PFX "%s: Firmware decompression error: %s\n",
4934 bp->dev->name, bp->strm->msg);
4936 bp->gunzip_outlen = (FW_BUF_SIZE - bp->strm->avail_out);
/* output must be dword aligned — callers consume it as u32 words */
4937 if (bp->gunzip_outlen & 0x3)
4938 printk(KERN_ERR PFX "%s: Firmware decompression error:"
4939 " gunzip_outlen (%d) not aligned\n",
4940 bp->dev->name, bp->gunzip_outlen);
4941 bp->gunzip_outlen >>= 2;
4943 zlib_inflateEnd(bp->strm);
4945 if (rc == Z_STREAM_END)
4951 /* nic load/unload */
4954 * General service functions
4957 /* send a NIG loopback debug packet */
/* Inject one minimal debug packet into the NIG loopback interface, used
 * by the internal memory self test. The frame is written as two 3-dword
 * bursts: SOP (addresses) then EOP (non-IP payload).
 */
4958 static void bnx2x_lb_pckt(struct bnx2x *bp)
4962 /* Ethernet source and destination addresses */
4963 wb_write[0] = 0x55555555;
4964 wb_write[1] = 0x55555555;
4965 wb_write[2] = 0x20; /* SOP */
4966 REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
4968 /* NON-IP protocol */
4969 wb_write[0] = 0x09000000;
4970 wb_write[1] = 0x55555555;
4971 wb_write[2] = 0x10; /* EOP, eop_bvalid = 0 */
4972 REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
4975 /* some of the internal memories
4976 * are not directly readable from the driver
4977 * to test them we send debug packets
/* Self test for internal memories that the driver cannot read directly:
 * loopback debug packets are sent through the BRB/PRS path and the NIG
 * and PRS packet counters are polled to verify they arrived. The blocks
 * are reset and re-initialized between the two phases and again at the
 * end. 'factor' scales the poll budgets for slow FPGA/emulation targets.
 * Returns non-zero on timeout (exact return paths are elided here).
 */
4979 static int bnx2x_int_mem_test(struct bnx2x *bp)
4985 if (CHIP_REV_IS_FPGA(bp))
4987 else if (CHIP_REV_IS_EMUL(bp))
4992 DP(NETIF_MSG_HW, "start part1\n");
4994 /* Disable inputs of parser neighbor blocks */
4995 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
4996 REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
4997 REG_WR(bp, CFC_REG_DEBUG0, 0x1);
4998 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
5000 /* Write 0 to parser credits for CFC search request */
5001 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
5003 /* send Ethernet packet */
5006 /* TODO do i reset NIG statistic? */
5007 /* Wait until NIG register shows 1 packet of size 0x10 */
5008 count = 1000 * factor;
5011 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5012 val = *bnx2x_sp(bp, wb_data[0]);
5020 BNX2X_ERR("NIG timeout val = 0x%x\n", val);
5024 /* Wait until PRS register shows 1 packet */
5025 count = 1000 * factor;
5027 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5035 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
5039 /* Reset and init BRB, PRS */
5040 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
5042 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
5044 bnx2x_init_block(bp, BRB1_COMMON_START, BRB1_COMMON_END);
5045 bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END);
5047 DP(NETIF_MSG_HW, "part2\n");
5049 /* Disable inputs of parser neighbor blocks */
5050 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
5051 REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
5052 REG_WR(bp, CFC_REG_DEBUG0, 0x1);
5053 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
5055 /* Write 0 to parser credits for CFC search request */
5056 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
5058 /* send 10 Ethernet packets */
5059 for (i = 0; i < 10; i++)
5062 /* Wait until NIG register shows 10 + 1
5063 packets of size 11*0x10 = 0xb0 */
5064 count = 1000 * factor;
5067 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5068 val = *bnx2x_sp(bp, wb_data[0]);
5076 BNX2X_ERR("NIG timeout val = 0x%x\n", val);
5080 /* Wait until PRS register shows 2 packets */
5081 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5083 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
5085 /* Write 1 to parser credits for CFC search request */
5086 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x1);
5088 /* Wait until PRS register shows 3 packets */
5089 msleep(10 * factor);
5090 /* Wait until NIG register shows 1 packet of size 0x10 */
5091 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5093 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
5095 /* clear NIG EOP FIFO */
5096 for (i = 0; i < 11; i++)
5097 REG_RD(bp, NIG_REG_INGRESS_EOP_LB_FIFO);
5098 val = REG_RD(bp, NIG_REG_INGRESS_EOP_LB_EMPTY);
5100 BNX2X_ERR("clear of NIG failed\n");
5104 /* Reset and init BRB, PRS, NIG */
5105 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
5107 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
5109 bnx2x_init_block(bp, BRB1_COMMON_START, BRB1_COMMON_END);
5110 bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END);
5113 REG_WR(bp, PRS_REG_NIC_MODE, 1);
5116 /* Enable inputs of parser neighbor blocks */
5117 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x7fffffff);
5118 REG_WR(bp, TCM_REG_PRS_IFEN, 0x1);
5119 REG_WR(bp, CFC_REG_DEBUG0, 0x0);
5120 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x1);
5122 DP(NETIF_MSG_HW, "done\n");
/* Unmask attention interrupts from all hardware blocks by clearing their
 * INT_MASK registers (0 = no bits masked). Masks left commented out are
 * deliberately kept masked. PXP2 and PBF get non-zero masks to suppress
 * known-noisy bits (PBF bits 3-4; PXP2 mask differs on FPGA).
 */
5127 static void enable_blocks_attention(struct bnx2x *bp)
5129 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
5130 REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0);
5131 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
5132 REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
5133 REG_WR(bp, QM_REG_QM_INT_MASK, 0);
5134 REG_WR(bp, TM_REG_TM_INT_MASK, 0);
5135 REG_WR(bp, XSDM_REG_XSDM_INT_MASK_0, 0);
5136 REG_WR(bp, XSDM_REG_XSDM_INT_MASK_1, 0);
5137 REG_WR(bp, XCM_REG_XCM_INT_MASK, 0);
5138 /* REG_WR(bp, XSEM_REG_XSEM_INT_MASK_0, 0); */
5139 /* REG_WR(bp, XSEM_REG_XSEM_INT_MASK_1, 0); */
5140 REG_WR(bp, USDM_REG_USDM_INT_MASK_0, 0);
5141 REG_WR(bp, USDM_REG_USDM_INT_MASK_1, 0);
5142 REG_WR(bp, UCM_REG_UCM_INT_MASK, 0);
5143 /* REG_WR(bp, USEM_REG_USEM_INT_MASK_0, 0); */
5144 /* REG_WR(bp, USEM_REG_USEM_INT_MASK_1, 0); */
5145 REG_WR(bp, GRCBASE_UPB + PB_REG_PB_INT_MASK, 0);
5146 REG_WR(bp, CSDM_REG_CSDM_INT_MASK_0, 0);
5147 REG_WR(bp, CSDM_REG_CSDM_INT_MASK_1, 0);
5148 REG_WR(bp, CCM_REG_CCM_INT_MASK, 0);
5149 /* REG_WR(bp, CSEM_REG_CSEM_INT_MASK_0, 0); */
5150 /* REG_WR(bp, CSEM_REG_CSEM_INT_MASK_1, 0); */
/* FPGA targets mask an extra PXP2 bit (0x580000 vs 0x480000) */
5151 if (CHIP_REV_IS_FPGA(bp))
5152 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x580000);
5154 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x480000);
5155 REG_WR(bp, TSDM_REG_TSDM_INT_MASK_0, 0);
5156 REG_WR(bp, TSDM_REG_TSDM_INT_MASK_1, 0);
5157 REG_WR(bp, TCM_REG_TCM_INT_MASK, 0);
5158 /* REG_WR(bp, TSEM_REG_TSEM_INT_MASK_0, 0); */
5159 /* REG_WR(bp, TSEM_REG_TSEM_INT_MASK_1, 0); */
5160 REG_WR(bp, CDU_REG_CDU_INT_MASK, 0);
5161 REG_WR(bp, DMAE_REG_DMAE_INT_MASK, 0);
5162 /* REG_WR(bp, MISC_REG_MISC_INT_MASK, 0); */
5163 REG_WR(bp, PBF_REG_PBF_INT_MASK, 0X18); /* bit 3,4 masked */
/* Put the common hardware blocks into reset by writing the two MISC
 * reset-register CLEAR banks (the mask for bank 1 is elided here).
 */
5167 static void bnx2x_reset_common(struct bnx2x *bp)
5170 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
5172 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, 0x1403);
/* One-time chip-wide (COMMON) hardware init, run only by the function
 * that the MCP grants FW_MSG_CODE_DRV_LOAD_COMMON: takes all blocks out
 * of reset, initializes every hardware block in dependency order (PXP,
 * DMAE, storms, QM, DQ, BRB, PRS, SDMs, SEMs, PBF, SRC, CDU, CFC, HC,
 * NIG), runs the internal memory self test on E1 first-power-up, sets up
 * fan-failure detection on boards that use SPIO 5, unmasks block
 * attentions and performs common PHY init via the bootcode.
 * NOTE(review): many lines are elided from this extraction; error paths
 * and several register values are not visible.
 */
5175 static int bnx2x_init_common(struct bnx2x *bp)
5179 DP(BNX2X_MSG_MCP, "starting common init func %d\n", BP_FUNC(bp));
5181 bnx2x_reset_common(bp);
/* release all blocks from reset */
5182 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0xffffffff);
5183 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, 0xfffc);
5185 bnx2x_init_block(bp, MISC_COMMON_START, MISC_COMMON_END);
5186 if (CHIP_IS_E1H(bp))
5187 REG_WR(bp, MISC_REG_E1HMF_MODE, IS_E1HMF(bp));
/* LCPLL control pulse (condition guarding the two writes is elided) */
5189 REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x100);
5191 REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x0);
5193 bnx2x_init_block(bp, PXP_COMMON_START, PXP_COMMON_END);
5194 if (CHIP_IS_E1(bp)) {
5195 /* enable HW interrupt from PXP on USDM overflow
5196 bit 16 on INT_MASK_0 */
5197 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
5200 bnx2x_init_block(bp, PXP2_COMMON_START, PXP2_COMMON_END);
/* endianness configuration for the PXP2 request/read clients —
 * presumably inside a big-endian conditional (elided); confirm */
5204 REG_WR(bp, PXP2_REG_RQ_QM_ENDIAN_M, 1);
5205 REG_WR(bp, PXP2_REG_RQ_TM_ENDIAN_M, 1);
5206 REG_WR(bp, PXP2_REG_RQ_SRC_ENDIAN_M, 1);
5207 REG_WR(bp, PXP2_REG_RQ_CDU_ENDIAN_M, 1);
5208 REG_WR(bp, PXP2_REG_RQ_DBG_ENDIAN_M, 1);
5209 /* make sure this value is 0 */
5210 REG_WR(bp, PXP2_REG_RQ_HC_ENDIAN_M, 0);
5212 /* REG_WR(bp, PXP2_REG_RD_PBF_SWAP_MODE, 1); */
5213 REG_WR(bp, PXP2_REG_RD_QM_SWAP_MODE, 1);
5214 REG_WR(bp, PXP2_REG_RD_TM_SWAP_MODE, 1);
5215 REG_WR(bp, PXP2_REG_RD_SRC_SWAP_MODE, 1);
5216 REG_WR(bp, PXP2_REG_RD_CDURD_SWAP_MODE, 1);
/* PXP2 request page sizes (encoded, not bytes) */
5219 REG_WR(bp, PXP2_REG_RQ_CDU_P_SIZE, 2);
5221 REG_WR(bp, PXP2_REG_RQ_TM_P_SIZE, 5);
5222 REG_WR(bp, PXP2_REG_RQ_QM_P_SIZE, 5);
5223 REG_WR(bp, PXP2_REG_RQ_SRC_P_SIZE, 5);
5226 if (CHIP_REV_IS_FPGA(bp) && CHIP_IS_E1H(bp))
5227 REG_WR(bp, PXP2_REG_PGL_TAGS_LIMIT, 0x1);
5229 /* let the HW do it's magic ... */
5231 /* finish PXP init */
5232 val = REG_RD(bp, PXP2_REG_RQ_CFG_DONE);
5234 BNX2X_ERR("PXP2 CFG failed\n");
5237 val = REG_RD(bp, PXP2_REG_RD_INIT_DONE);
5239 BNX2X_ERR("PXP2 RD_INIT failed\n");
5243 REG_WR(bp, PXP2_REG_RQ_DISABLE_INPUTS, 0);
5244 REG_WR(bp, PXP2_REG_RD_DISABLE_INPUTS, 0);
5246 bnx2x_init_block(bp, DMAE_COMMON_START, DMAE_COMMON_END);
5248 /* clean the DMAE memory */
5250 bnx2x_init_fill(bp, TSEM_REG_PRAM, 0, 8);
5252 bnx2x_init_block(bp, TCM_COMMON_START, TCM_COMMON_END);
5253 bnx2x_init_block(bp, UCM_COMMON_START, UCM_COMMON_END);
5254 bnx2x_init_block(bp, CCM_COMMON_START, CCM_COMMON_END);
5255 bnx2x_init_block(bp, XCM_COMMON_START, XCM_COMMON_END);
/* dummy reads to flush the SEM passive buffers */
5257 bnx2x_read_dmae(bp, XSEM_REG_PASSIVE_BUFFER, 3);
5258 bnx2x_read_dmae(bp, CSEM_REG_PASSIVE_BUFFER, 3);
5259 bnx2x_read_dmae(bp, TSEM_REG_PASSIVE_BUFFER, 3);
5260 bnx2x_read_dmae(bp, USEM_REG_PASSIVE_BUFFER, 3);
5262 bnx2x_init_block(bp, QM_COMMON_START, QM_COMMON_END);
5263 /* soft reset pulse */
5264 REG_WR(bp, QM_REG_SOFT_RESET, 1);
5265 REG_WR(bp, QM_REG_SOFT_RESET, 0);
5268 bnx2x_init_block(bp, TIMERS_COMMON_START, TIMERS_COMMON_END);
5271 bnx2x_init_block(bp, DQ_COMMON_START, DQ_COMMON_END);
5272 REG_WR(bp, DORQ_REG_DPM_CID_OFST, BCM_PAGE_SHIFT);
5273 if (!CHIP_REV_IS_SLOW(bp)) {
5274 /* enable hw interrupt from doorbell Q */
5275 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
5278 bnx2x_init_block(bp, BRB1_COMMON_START, BRB1_COMMON_END);
5279 if (CHIP_REV_IS_SLOW(bp)) {
5280 /* fix for emulation and FPGA for no pause */
5281 REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_0, 513);
5282 REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_1, 513);
5283 REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_0, 0);
5284 REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_1, 0);
5287 bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END);
5288 REG_WR(bp, PRS_REG_A_PRSU_20, 0xf);
5290 REG_WR(bp, PRS_REG_NIC_MODE, 1);
5291 if (CHIP_IS_E1H(bp))
5292 REG_WR(bp, PRS_REG_E1HOV_MODE, IS_E1HMF(bp));
5294 bnx2x_init_block(bp, TSDM_COMMON_START, TSDM_COMMON_END);
5295 bnx2x_init_block(bp, CSDM_COMMON_START, CSDM_COMMON_END);
5296 bnx2x_init_block(bp, USDM_COMMON_START, USDM_COMMON_END);
5297 bnx2x_init_block(bp, XSDM_COMMON_START, XSDM_COMMON_END);
/* zero the storm internal memories; E1H fills in two halves per storm */
5299 if (CHIP_IS_E1H(bp)) {
5300 bnx2x_init_fill(bp, TSTORM_INTMEM_ADDR, 0,
5301 STORM_INTMEM_SIZE_E1H/2);
5303 TSTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
5304 0, STORM_INTMEM_SIZE_E1H/2);
5305 bnx2x_init_fill(bp, CSTORM_INTMEM_ADDR, 0,
5306 STORM_INTMEM_SIZE_E1H/2);
5308 CSTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
5309 0, STORM_INTMEM_SIZE_E1H/2);
5310 bnx2x_init_fill(bp, XSTORM_INTMEM_ADDR, 0,
5311 STORM_INTMEM_SIZE_E1H/2);
5313 XSTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
5314 0, STORM_INTMEM_SIZE_E1H/2);
5315 bnx2x_init_fill(bp, USTORM_INTMEM_ADDR, 0,
5316 STORM_INTMEM_SIZE_E1H/2);
5318 USTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
5319 0, STORM_INTMEM_SIZE_E1H/2);
5321 bnx2x_init_fill(bp, TSTORM_INTMEM_ADDR, 0,
5322 STORM_INTMEM_SIZE_E1);
5323 bnx2x_init_fill(bp, CSTORM_INTMEM_ADDR, 0,
5324 STORM_INTMEM_SIZE_E1);
5325 bnx2x_init_fill(bp, XSTORM_INTMEM_ADDR, 0,
5326 STORM_INTMEM_SIZE_E1);
5327 bnx2x_init_fill(bp, USTORM_INTMEM_ADDR, 0,
5328 STORM_INTMEM_SIZE_E1);
5331 bnx2x_init_block(bp, TSEM_COMMON_START, TSEM_COMMON_END);
5332 bnx2x_init_block(bp, USEM_COMMON_START, USEM_COMMON_END);
5333 bnx2x_init_block(bp, CSEM_COMMON_START, CSEM_COMMON_END);
5334 bnx2x_init_block(bp, XSEM_COMMON_START, XSEM_COMMON_END);
/* pulse a reset (clear then set) — target mask is elided here */
5337 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
5339 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
5342 bnx2x_init_block(bp, UPB_COMMON_START, UPB_COMMON_END);
5343 bnx2x_init_block(bp, XPB_COMMON_START, XPB_COMMON_END);
5344 bnx2x_init_block(bp, PBF_COMMON_START, PBF_COMMON_END);
/* searcher (SRC) init: program RSS hash keys under soft reset */
5346 REG_WR(bp, SRC_REG_SOFT_RST, 1);
5347 for (i = SRC_REG_KEYRSS0_0; i <= SRC_REG_KEYRSS1_9; i += 4) {
5348 REG_WR(bp, i, 0xc0cac01a);
5349 /* TODO: replace with something meaningful */
5351 bnx2x_init_block(bp, SRCH_COMMON_START, SRCH_COMMON_END);
5352 REG_WR(bp, SRC_REG_SOFT_RST, 0);
/* CDU assumes 1024-byte contexts; warn loudly if the struct drifted */
5354 if (sizeof(union cdu_context) != 1024)
5355 /* we currently assume that a context is 1024 bytes */
5356 printk(KERN_ALERT PFX "please adjust the size of"
5357 " cdu_context(%ld)\n", (long)sizeof(union cdu_context));
5359 bnx2x_init_block(bp, CDU_COMMON_START, CDU_COMMON_END);
5360 val = (4 << 24) + (0 << 12) + 1024;
5361 REG_WR(bp, CDU_REG_CDU_GLOBAL_PARAMS, val);
5362 if (CHIP_IS_E1(bp)) {
5363 /* !!! fix pxp client crdit until excel update */
5364 REG_WR(bp, CDU_REG_CDU_DEBUG, 0x264);
5365 REG_WR(bp, CDU_REG_CDU_DEBUG, 0);
5368 bnx2x_init_block(bp, CFC_COMMON_START, CFC_COMMON_END);
5369 REG_WR(bp, CFC_REG_INIT_REG, 0x7FF);
5370 /* enable context validation interrupt from CFC */
5371 REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
5373 /* set the thresholds to prevent CFC/CDU race */
5374 REG_WR(bp, CFC_REG_DEBUG0, 0x20020000);
5376 bnx2x_init_block(bp, HC_COMMON_START, HC_COMMON_END);
5377 bnx2x_init_block(bp, MISC_AEU_COMMON_START, MISC_AEU_COMMON_END);
5379 /* PXPCS COMMON comes here */
5380 /* Reset PCIE errors for debug */
5381 REG_WR(bp, 0x2814, 0xffffffff);
5382 REG_WR(bp, 0x3820, 0xffffffff);
5384 /* EMAC0 COMMON comes here */
5385 /* EMAC1 COMMON comes here */
5386 /* DBU COMMON comes here */
5387 /* DBG COMMON comes here */
5389 bnx2x_init_block(bp, NIG_COMMON_START, NIG_COMMON_END);
5390 if (CHIP_IS_E1H(bp)) {
5391 REG_WR(bp, NIG_REG_LLH_MF_MODE, IS_E1HMF(bp));
5392 REG_WR(bp, NIG_REG_LLH_E1HOV_MODE, IS_E1HMF(bp));
5395 if (CHIP_REV_IS_SLOW(bp))
5398 /* finish CFC init */
5399 val = reg_poll(bp, CFC_REG_LL_INIT_DONE, 1, 100, 10);
5401 BNX2X_ERR("CFC LL_INIT failed\n");
5404 val = reg_poll(bp, CFC_REG_AC_INIT_DONE, 1, 100, 10);
5406 BNX2X_ERR("CFC AC_INIT failed\n");
5409 val = reg_poll(bp, CFC_REG_CAM_INIT_DONE, 1, 100, 10);
5411 BNX2X_ERR("CFC CAM_INIT failed\n");
5414 REG_WR(bp, CFC_REG_DEBUG0, 0);
5416 /* read NIG statistic
5417 to see if this is our first up since powerup */
5418 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5419 val = *bnx2x_sp(bp, wb_data[0]);
5421 /* do internal memory self test */
5422 if ((CHIP_IS_E1(bp)) && (val == 0) && bnx2x_int_mem_test(bp)) {
5423 BNX2X_ERR("internal mem self test failed\n");
/* board-specific setup: these boards report fan failure on SPIO 5 */
5427 switch (bp->common.board & SHARED_HW_CFG_BOARD_TYPE_MASK) {
5428 case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1021G:
5429 case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1022G:
5430 /* Fan failure is indicated by SPIO 5 */
5431 bnx2x_set_spio(bp, MISC_REGISTERS_SPIO_5,
5432 MISC_REGISTERS_SPIO_INPUT_HI_Z);
5434 /* set to active low mode */
5435 val = REG_RD(bp, MISC_REG_SPIO_INT);
5436 val |= ((1 << MISC_REGISTERS_SPIO_5) <<
5437 MISC_REGISTERS_SPIO_INT_OLD_SET_POS);
5438 REG_WR(bp, MISC_REG_SPIO_INT, val);
5440 /* enable interrupt to signal the IGU */
5441 val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN);
5442 val |= (1 << MISC_REGISTERS_SPIO_5);
5443 REG_WR(bp, MISC_REG_SPIO_EVENT_EN, val);
5450 /* clear PXP2 attentions */
5451 REG_RD(bp, PXP2_REG_PXP2_INT_STS_CLR_0);
5453 enable_blocks_attention(bp);
/* common PHY init must go through the bootcode */
5455 if (!BP_NOMCP(bp)) {
5456 bnx2x_acquire_phy_lock(bp);
5457 bnx2x_common_init_phy(bp, bp->common.shmem_base);
5458 bnx2x_release_phy_lock(bp);
5460 BNX2X_ERR("Bootcode is missing - can not initialize link\n");
/* Per-port hardware init, run by the first function loaded on each of
 * the two ports: ILT entries for the timers/QM/searcher memories, the
 * per-port halves of the hardware blocks, PBF no-pause configuration,
 * searcher T2 table location, AEU attention masks, NIG port setup and
 * (on E1H) rate-shaping/fairness contexts. Board-specific fan-failure
 * attention routing is added at the end.
 * NOTE(review): lines are elided from this extraction; several values
 * and the ILT index computations are not visible.
 */
5465 static int bnx2x_init_port(struct bnx2x *bp)
5467 int port = BP_PORT(bp);
5470 DP(BNX2X_MSG_MCP, "starting port init port %x\n", port);
5472 REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
5474 /* Port PXP comes here */
5475 /* Port PXP2 comes here */
/* one ILT line each for timers, QM and searcher T1 memories */
5480 wb_write[0] = ONCHIP_ADDR1(bp->timers_mapping);
5481 wb_write[1] = ONCHIP_ADDR2(bp->timers_mapping);
5482 REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
5483 REG_WR(bp, PXP2_REG_PSWRQ_TM0_L2P + func*4, PXP_ONE_ILT(i));
5488 wb_write[0] = ONCHIP_ADDR1(bp->qm_mapping);
5489 wb_write[1] = ONCHIP_ADDR2(bp->qm_mapping);
5490 REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
5491 REG_WR(bp, PXP2_REG_PSWRQ_QM0_L2P + func*4, PXP_ONE_ILT(i));
5496 wb_write[0] = ONCHIP_ADDR1(bp->t1_mapping);
5497 wb_write[1] = ONCHIP_ADDR2(bp->t1_mapping);
5498 REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
5499 REG_WR(bp, PXP2_REG_PSWRQ_SRC0_L2P + func*4, PXP_ONE_ILT(i));
5501 /* Port CMs come here */
5502 bnx2x_init_block(bp, (port ? XCM_PORT1_START : XCM_PORT0_START),
5503 (port ? XCM_PORT1_END : XCM_PORT0_END));
5505 /* Port QM comes here */
5507 REG_WR(bp, TM_REG_LIN0_SCAN_TIME + func*4, 1024/64*20);
5508 REG_WR(bp, TM_REG_LIN0_MAX_ACTIVE_CID + func*4, 31);
5510 bnx2x_init_block(bp, func ? TIMERS_PORT1_START : TIMERS_PORT0_START,
5511 func ? TIMERS_PORT1_END : TIMERS_PORT0_END);
5513 /* Port DQ comes here */
5514 /* Port BRB1 comes here */
5515 /* Port PRS comes here */
5516 /* Port TSDM comes here */
5517 /* Port CSDM comes here */
5518 /* Port USDM comes here */
5519 /* Port XSDM comes here */
5520 bnx2x_init_block(bp, port ? TSEM_PORT1_START : TSEM_PORT0_START,
5521 port ? TSEM_PORT1_END : TSEM_PORT0_END);
5522 bnx2x_init_block(bp, port ? USEM_PORT1_START : USEM_PORT0_START,
5523 port ? USEM_PORT1_END : USEM_PORT0_END);
5524 bnx2x_init_block(bp, port ? CSEM_PORT1_START : CSEM_PORT0_START,
5525 port ? CSEM_PORT1_END : CSEM_PORT0_END);
5526 bnx2x_init_block(bp, port ? XSEM_PORT1_START : XSEM_PORT0_START,
5527 port ? XSEM_PORT1_END : XSEM_PORT0_END);
5528 /* Port UPB comes here */
5529 /* Port XPB comes here */
5531 bnx2x_init_block(bp, port ? PBF_PORT1_START : PBF_PORT0_START,
5532 port ? PBF_PORT1_END : PBF_PORT0_END);
5534 /* configure PBF to work without PAUSE mtu 9000 */
5535 REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 0);
5537 /* update threshold */
/* 9040 = 9000-byte MTU + margin; PBF counts in 16-byte units */
5538 REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, (9040/16));
5539 /* update init credit */
5540 REG_WR(bp, PBF_REG_P0_INIT_CRD + port*4, (9040/16) + 553 - 22);
/* pulse INIT to latch the new credit */
5543 REG_WR(bp, PBF_REG_INIT_P0 + port*4, 1);
5545 REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0);
5548 /* tell the searcher where the T2 table is */
5549 REG_WR(bp, SRC_REG_COUNTFREE0 + func*4, 16*1024/64);
5551 wb_write[0] = U64_LO(bp->t2_mapping);
5552 wb_write[1] = U64_HI(bp->t2_mapping);
5553 REG_WR_DMAE(bp, SRC_REG_FIRSTFREE0 + func*4, wb_write, 2);
5554 wb_write[0] = U64_LO((u64)bp->t2_mapping + 16*1024 - 64);
5555 wb_write[1] = U64_HI((u64)bp->t2_mapping + 16*1024 - 64);
5556 REG_WR_DMAE(bp, SRC_REG_LASTFREE0 + func*4, wb_write, 2);
5558 REG_WR(bp, SRC_REG_NUMBER_HASH_BITS0 + func*4, 10);
5559 /* Port SRCH comes here */
5561 /* Port CDU comes here */
5562 /* Port CFC comes here */
5564 if (CHIP_IS_E1(bp)) {
5565 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
5566 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
5568 bnx2x_init_block(bp, port ? HC_PORT1_START : HC_PORT0_START,
5569 port ? HC_PORT1_END : HC_PORT0_END);
5571 bnx2x_init_block(bp, port ? MISC_AEU_PORT1_START :
5572 MISC_AEU_PORT0_START,
5573 port ? MISC_AEU_PORT1_END : MISC_AEU_PORT0_END);
5574 /* init aeu_mask_attn_func_0/1:
5575 * - SF mode: bits 3-7 are masked. only bits 0-2 are in use
5576 * - MF mode: bit 3 is masked. bits 0-2 are in use as in SF
5577 * bits 4-7 are used for "per vn group attention" */
5578 REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4,
5579 (IS_E1HMF(bp) ? 0xF7 : 0x7));
5581 /* Port PXPCS comes here */
5582 /* Port EMAC0 comes here */
5583 /* Port EMAC1 comes here */
5584 /* Port DBU comes here */
5585 /* Port DBG comes here */
5586 bnx2x_init_block(bp, port ? NIG_PORT1_START : NIG_PORT0_START,
5587 port ? NIG_PORT1_END : NIG_PORT0_END);
5589 REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1);
5591 if (CHIP_IS_E1H(bp)) {
5593 struct cmng_struct_per_port m_cmng_port;
5596 /* 0x2 disable e1hov, 0x1 enable */
5597 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK_MF + port*4,
5598 (IS_E1HMF(bp) ? 0x1 : 0x2));
5600 /* Init RATE SHAPING and FAIRNESS contexts.
5601 Initialize as if there is 10G link. */
5602 wsum = bnx2x_calc_vn_wsum(bp);
5603 bnx2x_init_port_minmax(bp, (int)wsum, 10000, &m_cmng_port);
5605 for (vn = VN_0; vn < E1HVN_MAX; vn++)
5606 bnx2x_init_vn_minmax(bp, 2*vn + port,
5607 wsum, 10000, &m_cmng_port);
5610 /* Port MCP comes here */
5611 /* Port DMAE comes here */
/* same fan-failure boards as in bnx2x_init_common(): route SPIO 5
 * into AEU attention group 0 */
5613 switch (bp->common.board & SHARED_HW_CFG_BOARD_TYPE_MASK) {
5614 case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1021G:
5615 case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1022G:
5616 /* add SPIO 5 to group 0 */
5617 val = REG_RD(bp, MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
5618 val |= AEU_INPUTS_ATTN_BITS_SPIO5;
5619 REG_WR(bp, MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0, val);
5626 bnx2x__link_reset(bp);
/* ILT (Internal Lookup Table) layout helpers: 768 lines split evenly
 * between the two functions; each entry maps an on-chip address as a
 * 64-bit value written in two 32-bit halves.
 * NOTE(review): 'x' is not parenthesized in ONCHIP_ADDR1/2 and
 * PXP_ONE_ILT/PXP_ILT_RANGE — safe for the simple-identifier arguments
 * used in this file, but fragile if ever passed an expression.
 */
5631 #define ILT_PER_FUNC (768/2)
5632 #define FUNC_ILT_BASE(func) (func * ILT_PER_FUNC)
5633 /* the phys address is shifted right 12 bits and has an added
5634 1=valid bit added to the 53rd bit
5635 then since this is a wide register(TM)
5636 we split it into two 32 bit writes
5638 #define ONCHIP_ADDR1(x) ((u32)(((u64)x >> 12) & 0xFFFFFFFF))
5639 #define ONCHIP_ADDR2(x) ((u32)((1 << 20) | ((u64)x >> 44)))
5640 #define PXP_ONE_ILT(x) (((x) << 10) | x)
5641 #define PXP_ILT_RANGE(f, l) (((l) << 10) | f)
5643 #define CNIC_ILT_LINES 0
/* Write one ILT entry: map ILT line @index to DMA address @addr. E1H
 * uses the B0 register block; E1 the original one. The 64-bit entry is
 * written as the two ONCHIP_ADDR halves via a wide-bus write.
 */
5645 static void bnx2x_ilt_wr(struct bnx2x *bp, u32 index, dma_addr_t addr)
5649 if (CHIP_IS_E1H(bp))
5650 reg = PXP2_REG_RQ_ONCHIP_AT_B0 + index*8;
5652 reg = PXP2_REG_RQ_ONCHIP_AT + index*8;
5654 bnx2x_wb_wr(bp, reg, ONCHIP_ADDR1(addr), ONCHIP_ADDR2(addr));
/* Per-function hardware init: MSI reconfigure capability, the CDU ILT
 * line for this function's context, E1H per-function CM and NIG LLH
 * setup, the HC block, and a PCIe error-register reset.
 * NOTE(review): lines are elided from this extraction.
 */
5657 static int bnx2x_init_func(struct bnx2x *bp)
5659 int port = BP_PORT(bp);
5660 int func = BP_FUNC(bp);
5664 DP(BNX2X_MSG_MCP, "starting func init func %x\n", func);
5666 /* set MSI reconfigure capability */
5667 addr = (port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0);
5668 val = REG_RD(bp, addr);
5669 val |= HC_CONFIG_0_REG_MSI_ATTN_EN_0;
5670 REG_WR(bp, addr, val);
/* one ILT line (plus CNIC_ILT_LINES, currently 0) for the CDU context */
5672 i = FUNC_ILT_BASE(func);
5674 bnx2x_ilt_wr(bp, i, bnx2x_sp_mapping(bp, context));
5675 if (CHIP_IS_E1H(bp)) {
5676 REG_WR(bp, PXP2_REG_RQ_CDU_FIRST_ILT, i);
5677 REG_WR(bp, PXP2_REG_RQ_CDU_LAST_ILT, i + CNIC_ILT_LINES);
5679 REG_WR(bp, PXP2_REG_PSWRQ_CDU0_L2P + func*4,
5680 PXP_ILT_RANGE(i, i + CNIC_ILT_LINES));
5683 if (CHIP_IS_E1H(bp)) {
/* per-function CM block ranges come from the cm_start/cm_end tables */
5684 for (i = 0; i < 9; i++)
5685 bnx2x_init_block(bp,
5686 cm_start[func][i], cm_end[func][i]);
5688 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
5689 REG_WR(bp, NIG_REG_LLH0_FUNC_VLAN_ID + port*8, bp->e1hov);
5692 /* HC init per function */
5693 if (CHIP_IS_E1H(bp)) {
5694 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
5696 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
5697 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
5699 bnx2x_init_block(bp, hc_limits[func][0], hc_limits[func][1]);
5701 /* Reset PCIE errors for debug */
5702 REG_WR(bp, 0x2114, 0xffffffff);
5703 REG_WR(bp, 0x2120, 0xffffffff);
/* Run the hardware init phases appropriate to the load level granted by
 * the MCP (COMMON implies PORT implies FUNCTION), then read the firmware
 * pulse sequence and statistics mailbox from shared memory and zero the
 * status blocks. Decompression buffers are set up for the duration and
 * torn down at the end.
 * NOTE(review): the case cascade relies on fallthrough with early-exit
 * error checks; the intermediate lines are elided from this extraction.
 */
5708 static int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
5712 DP(BNX2X_MSG_MCP, "function %d load_code %x\n",
5713 BP_FUNC(bp), load_code);
5716 mutex_init(&bp->dmae_mutex);
5717 bnx2x_gunzip_init(bp);
5719 switch (load_code) {
5720 case FW_MSG_CODE_DRV_LOAD_COMMON:
5721 rc = bnx2x_init_common(bp);
5726 case FW_MSG_CODE_DRV_LOAD_PORT:
5728 rc = bnx2x_init_port(bp);
5733 case FW_MSG_CODE_DRV_LOAD_FUNCTION:
5735 rc = bnx2x_init_func(bp);
5741 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
5745 if (!BP_NOMCP(bp)) {
5746 int func = BP_FUNC(bp);
/* remember the bootcode pulse sequence so the heartbeat stays in sync */
5748 bp->fw_drv_pulse_wr_seq =
5749 (SHMEM_RD(bp, func_mb[func].drv_pulse_mb) &
5750 DRV_PULSE_SEQ_MASK);
5751 bp->func_stx = SHMEM_RD(bp, func_mb[func].fw_mb_param);
5752 DP(BNX2X_MSG_MCP, "drv_pulse 0x%x func_stx 0x%x\n",
5753 bp->fw_drv_pulse_wr_seq, bp->func_stx);
5757 /* this needs to be done before gunzip end */
5758 bnx2x_zero_def_sb(bp);
5759 for_each_queue(bp, i)
5760 bnx2x_zero_sb(bp, BP_L_ID(bp) + i);
5763 bnx2x_gunzip_end(bp);
5768 /* send the MCP a request, block until there is a reply */
/* Writes (command | seq) to this function's driver mailbox and polls the
 * firmware mailbox until the echoed sequence number matches, up to ~2s.
 * Returns the firmware reply masked to FW_MSG_CODE_MASK; on timeout the
 * (elided) error path logs and returns 0.
 */
5769 static u32 bnx2x_fw_command(struct bnx2x *bp, u32 command)
5771 int func = BP_FUNC(bp);
/* each request carries a fresh sequence number to pair with the reply */
5772 u32 seq = ++bp->fw_seq;
/* slower poll interval on emulation/FPGA targets */
5775 u8 delay = CHIP_REV_IS_SLOW(bp) ? 100 : 10;
5777 SHMEM_WR(bp, func_mb[func].drv_mb_header, (command | seq));
5778 DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB\n", (command | seq));
5781 /* let the FW do it's magic ... */
5784 rc = SHMEM_RD(bp, func_mb[func].fw_mb_header);
5786 /* Give the FW up to 2 second (200*10ms) */
5787 } while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 200));
5789 DP(BNX2X_MSG_MCP, "[after %d ms] read (%x) seq is (%x) from FW MB\n",
5790 cnt*delay, rc, seq);
5792 /* is this a reply to our command? */
5793 if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK)) {
5794 rc &= FW_MSG_CODE_MASK;
5798 BNX2X_ERR("FW failed to respond!\n");
/* Release all driver memory: per-queue status blocks, rx/tx rings and
 * SGE pages, the default status block, the slowpath buffer, the
 * searcher/timers/QM tables and the slowpath queue. The helper macros
 * free-and-NULL each pointer (DMA coherent vs vmalloc'd respectively).
 */
5806 static void bnx2x_free_mem(struct bnx2x *bp)
/* free a pci_alloc_consistent() buffer and NULL the pointer */
5809 #define BNX2X_PCI_FREE(x, y, size) \
5812 pci_free_consistent(bp->pdev, size, x, y); \
/* vfree() counterpart for vmalloc'd rings */
5818 #define BNX2X_FREE(x) \
5830 for_each_queue(bp, i) {
/* status block carries the tx producers page right behind it,
 * matching the combined allocation in bnx2x_alloc_mem() */
5833 BNX2X_PCI_FREE(bnx2x_fp(bp, i, status_blk),
5834 bnx2x_fp(bp, i, status_blk_mapping),
5835 sizeof(struct host_status_block) +
5836 sizeof(struct eth_tx_db_data));
5839 for_each_rx_queue(bp, i) {
5841 /* fastpath rx rings: rx_buf rx_desc rx_comp */
5842 BNX2X_FREE(bnx2x_fp(bp, i, rx_buf_ring));
5843 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_desc_ring),
5844 bnx2x_fp(bp, i, rx_desc_mapping),
5845 sizeof(struct eth_rx_bd) * NUM_RX_BD);
5847 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_comp_ring),
5848 bnx2x_fp(bp, i, rx_comp_mapping),
5849 sizeof(struct eth_fast_path_rx_cqe) *
5853 BNX2X_FREE(bnx2x_fp(bp, i, rx_page_ring));
5854 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_sge_ring),
5855 bnx2x_fp(bp, i, rx_sge_mapping),
5856 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
5859 for_each_tx_queue(bp, i) {
5861 /* fastpath tx rings: tx_buf tx_desc */
5862 BNX2X_FREE(bnx2x_fp(bp, i, tx_buf_ring));
5863 BNX2X_PCI_FREE(bnx2x_fp(bp, i, tx_desc_ring),
5864 bnx2x_fp(bp, i, tx_desc_mapping),
5865 sizeof(struct eth_tx_bd) * NUM_TX_BD);
5867 /* end of fastpath */
5869 BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping,
5870 sizeof(struct host_def_status_block));
5872 BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping,
5873 sizeof(struct bnx2x_slowpath));
/* searcher T1/T2, timers and QM tables (sizes match bnx2x_alloc_mem) */
5876 BNX2X_PCI_FREE(bp->t1, bp->t1_mapping, 64*1024);
5877 BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, 16*1024);
5878 BNX2X_PCI_FREE(bp->timers, bp->timers_mapping, 8*1024);
5879 BNX2X_PCI_FREE(bp->qm, bp->qm_mapping, 128*1024);
5881 BNX2X_PCI_FREE(bp->spq, bp->spq_mapping, BCM_PAGE_SIZE);
5883 #undef BNX2X_PCI_FREE
/*
 * bnx2x_alloc_mem - allocate all driver memory: per-queue status blocks,
 * rx/tx descriptor rings (DMA-coherent) plus their vmalloc'ed software
 * shadows, default status block, slowpath area, searcher T1/T2 tables,
 * timers and QM context, and the SPQ ring.  On any failure the macros
 * jump to alloc_mem_err (elided from this listing), which presumably
 * frees what was already allocated — TODO confirm against full source.
 * Returns 0 on success, negative errno on allocation failure.
 */
5887 static int bnx2x_alloc_mem(struct bnx2x *bp)
/* DMA-coherent alloc; zeroes the buffer; bails to alloc_mem_err on NULL */
5890 #define BNX2X_PCI_ALLOC(x, y, size) \
5892 		x = pci_alloc_consistent(bp->pdev, size, y); \
5894 			goto alloc_mem_err; \
5895 		memset(x, 0, size); \
/* vmalloc'ed (host-only) shadow ring, also zeroed */
5898 #define BNX2X_ALLOC(x, size) \
5900 		x = vmalloc(size); \
5902 			goto alloc_mem_err; \
5903 		memset(x, 0, size); \
/* fastpath: back-pointer and status block (+ tx doorbell data) per queue */
5910 	for_each_queue(bp, i) {
5911 		bnx2x_fp(bp, i, bp) = bp;
5914 		BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, status_blk),
5915 				&bnx2x_fp(bp, i, status_blk_mapping),
5916 				sizeof(struct host_status_block) +
5917 				sizeof(struct eth_tx_db_data));
5920 	for_each_rx_queue(bp, i) {
5922 		/* fastpath rx rings: rx_buf rx_desc rx_comp */
5923 		BNX2X_ALLOC(bnx2x_fp(bp, i, rx_buf_ring),
5924 				sizeof(struct sw_rx_bd) * NUM_RX_BD);
5925 		BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_desc_ring),
5926 				&bnx2x_fp(bp, i, rx_desc_mapping),
5927 				sizeof(struct eth_rx_bd) * NUM_RX_BD);
5929 		BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_comp_ring),
5930 				&bnx2x_fp(bp, i, rx_comp_mapping),
5931 				sizeof(struct eth_fast_path_rx_cqe) *
/* SGE page rings for TPA/jumbo receive */
5935 		BNX2X_ALLOC(bnx2x_fp(bp, i, rx_page_ring),
5936 				sizeof(struct sw_rx_page) * NUM_RX_SGE);
5937 		BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_sge_ring),
5938 				&bnx2x_fp(bp, i, rx_sge_mapping),
5939 				BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
5942 	for_each_tx_queue(bp, i) {
/* HW tx producers live right after the status block in the same
 * DMA allocation (see the combined size above) */
5944 		bnx2x_fp(bp, i, hw_tx_prods) =
5945 				(void *)(bnx2x_fp(bp, i, status_blk) + 1);
5947 		bnx2x_fp(bp, i, tx_prods_mapping) =
5948 				bnx2x_fp(bp, i, status_blk_mapping) +
5949 				sizeof(struct host_status_block);
5951 		/* fastpath tx rings: tx_buf tx_desc */
5952 		BNX2X_ALLOC(bnx2x_fp(bp, i, tx_buf_ring),
5953 				sizeof(struct sw_tx_bd) * NUM_TX_BD);
5954 		BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, tx_desc_ring),
5955 				&bnx2x_fp(bp, i, tx_desc_mapping),
5956 				sizeof(struct eth_tx_bd) * NUM_TX_BD);
5958 	/* end of fastpath */
5960 	BNX2X_PCI_ALLOC(bp->def_status_blk, &bp->def_status_blk_mapping,
5961 			sizeof(struct host_def_status_block));
5963 	BNX2X_PCI_ALLOC(bp->slowpath, &bp->slowpath_mapping,
5964 			sizeof(struct bnx2x_slowpath));
/* searcher T1 table */
5967 	BNX2X_PCI_ALLOC(bp->t1, &bp->t1_mapping, 64*1024);
/* NOTE(review): offset '+ 3' below is not 8-byte aligned, unlike the
 * '+ 56' write — looks suspicious, but kept as-is; verify vs upstream */
5970 	for (i = 0; i < 64*1024; i += 64) {
5971 		*(u64 *)((char *)bp->t1 + i + 56) = 0x0UL;
5972 		*(u64 *)((char *)bp->t1 + i + 3) = 0x0UL;
5975 	/* allocate searcher T2 table
5976 	   we allocate 1/4 of alloc num for T2
5977 	   (which is not entered into the ILT) */
5978 	BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, 16*1024);
/* link each 64-byte T2 line to the physical address of the next one */
5981 	for (i = 0; i < 16*1024; i += 64)
5982 		* (u64 *)((char *)bp->t2 + i + 56) = bp->t2_mapping + i + 64;
5984 	/* now fixup the last line in the block to point to the next block */
5985 	*(u64 *)((char *)bp->t2 + 1024*16-8) = bp->t2_mapping;
5987 	/* Timer block array (MAX_CONN*8) phys uncached for now 1024 conns */
5988 	BNX2X_PCI_ALLOC(bp->timers, &bp->timers_mapping, 8*1024);
5990 	/* QM queues (128*MAX_CONN) */
5991 	BNX2X_PCI_ALLOC(bp->qm, &bp->qm_mapping, 128*1024);
5994 	/* Slow path ring */
5995 	BNX2X_PCI_ALLOC(bp->spq, &bp->spq_mapping, BCM_PAGE_SIZE);
6003 #undef BNX2X_PCI_ALLOC
/*
 * bnx2x_free_tx_skbs - walk every tx queue and release all still-pending
 * tx packets (unmapping and freeing their skbs via bnx2x_free_tx_pkt)
 * until the software consumer catches up with the producer.
 */
6007 static void bnx2x_free_tx_skbs(struct bnx2x *bp)
6011 	for_each_tx_queue(bp, i) {
6012 		struct bnx2x_fastpath *fp = &bp->fp[i];
6014 		u16 bd_cons = fp->tx_bd_cons;
6015 		u16 sw_prod = fp->tx_pkt_prod;
6016 		u16 sw_cons = fp->tx_pkt_cons;
/* drain packet-by-packet; sw_cons increment is elided in this listing */
6018 		while (sw_cons != sw_prod) {
6019 			bd_cons = bnx2x_free_tx_pkt(bp, fp, TX_BD(sw_cons));
/*
 * bnx2x_free_rx_skbs - for every rx queue, unmap and free each posted
 * rx buffer skb, then (unless TPA is disabled for the queue) release the
 * TPA aggregation pool sized per chip family (E1 vs E1H).
 */
6025 static void bnx2x_free_rx_skbs(struct bnx2x *bp)
6029 	for_each_rx_queue(bp, j) {
6030 		struct bnx2x_fastpath *fp = &bp->fp[j];
6032 		for (i = 0; i < NUM_RX_BD; i++) {
6033 			struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
6034 			struct sk_buff *skb = rx_buf->skb;
/* a NULL-skb check is elided in this listing; the unmap below uses the
 * DMA address stashed by pci_unmap_addr_set() at rx-fill time */
6039 			pci_unmap_single(bp->pdev,
6040 					 pci_unmap_addr(rx_buf, mapping),
6042 					 PCI_DMA_FROMDEVICE);
6047 		if (!fp->disable_tpa)
6048 			bnx2x_free_tpa_pool(bp, fp, CHIP_IS_E1(bp) ?
6049 					    ETH_MAX_AGGREGATION_QUEUES_E1 :
6050 					    ETH_MAX_AGGREGATION_QUEUES_E1H);
/* bnx2x_free_skbs - convenience wrapper: drop all tx then all rx skbs */
6054 static void bnx2x_free_skbs(struct bnx2x *bp)
6056 	bnx2x_free_tx_skbs(bp);
6057 	bnx2x_free_rx_skbs(bp);
/*
 * bnx2x_free_msix_irqs - release the slowpath MSI-X vector (entry 0,
 * bound to bp->dev) and then each fastpath vector (entries offset..,
 * bound to the corresponding &bp->fp[i]).
 */
6060 static void bnx2x_free_msix_irqs(struct bnx2x *bp)
6064 	free_irq(bp->msix_table[0].vector, bp->dev);
6065 	DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
6066 	   bp->msix_table[0].vector);
/* 'offset' (presumably 1, matching the request side) is declared in an
 * elided line — TODO confirm */
6068 	for_each_queue(bp, i) {
6069 		DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq  "
6070 		   "state %x\n", i, bp->msix_table[i + offset].vector,
6071 		   bnx2x_fp(bp, i, state));
6073 		free_irq(bp->msix_table[i + offset].vector, &bp->fp[i]);
/*
 * bnx2x_free_irq - tear down whichever interrupt mode is active:
 * MSI-X (free all vectors, disable MSI-X), MSI (free + disable),
 * or legacy INTx (plain free_irq on the PCI irq).
 */
6077 static void bnx2x_free_irq(struct bnx2x *bp)
6079 	if (bp->flags & USING_MSIX_FLAG) {
6080 		bnx2x_free_msix_irqs(bp);
6081 		pci_disable_msix(bp->pdev);
6082 		bp->flags &= ~USING_MSIX_FLAG;
6084 	} else if (bp->flags & USING_MSI_FLAG) {
6085 		free_irq(bp->pdev->irq, bp->dev);
6086 		pci_disable_msi(bp->pdev);
6087 		bp->flags &= ~USING_MSI_FLAG;
/* fall-through branch (INTx); the 'else' line is elided in this listing */
6090 		free_irq(bp->pdev->irq, bp->dev);
/*
 * bnx2x_enable_msix - build the MSI-X entry table (entry 0 = slowpath,
 * entries 1..n = fastpath queues, IGU vectors derived from BP_L_ID) and
 * try to enable MSI-X for queues+1 vectors.  Sets USING_MSIX_FLAG on
 * success; returns non-zero (rc from pci_enable_msix) on failure.
 */
6093 static int bnx2x_enable_msix(struct bnx2x *bp)
6095 	int i, rc, offset = 1;
/* igu_vec initialization for the slowpath entry is elided here */
6098 	bp->msix_table[0].entry = igu_vec;
6099 	DP(NETIF_MSG_IFUP, "msix_table[0].entry = %d (slowpath)\n", igu_vec);
6101 	for_each_queue(bp, i) {
6102 		igu_vec = BP_L_ID(bp) + offset + i;
6103 		bp->msix_table[i + offset].entry = igu_vec;
6104 		DP(NETIF_MSG_IFUP, "msix_table[%d].entry = %d "
6105 		   "(fastpath #%u)\n", i + offset, igu_vec, i);
6108 	rc = pci_enable_msix(bp->pdev, &bp->msix_table[0],
6109 			     BNX2X_NUM_QUEUES(bp) + offset);
6111 		DP(NETIF_MSG_IFUP, "MSI-X is not attainable  rc %d\n", rc);
6115 	bp->flags |= USING_MSIX_FLAG;
/*
 * bnx2x_req_msix_irqs - request the slowpath MSI-X vector and then one
 * vector per fastpath queue (named "<dev>.fp<i>").  On any fastpath
 * request failure, previously requested vectors are released via
 * bnx2x_free_msix_irqs().  Marks each fp BNX2X_FP_STATE_IRQ on success
 * and logs the vector range in use.
 */
6120 static int bnx2x_req_msix_irqs(struct bnx2x *bp)
6122 	int i, rc, offset = 1;
6124 	rc = request_irq(bp->msix_table[0].vector, bnx2x_msix_sp_int, 0,
6125 			 bp->dev->name, bp->dev);
6127 		BNX2X_ERR("request sp irq failed\n");
6131 	for_each_queue(bp, i) {
6132 		struct bnx2x_fastpath *fp = &bp->fp[i];
6134 		sprintf(fp->name, "%s.fp%d", bp->dev->name, i);
6135 		rc = request_irq(bp->msix_table[i + offset].vector,
6136 				 bnx2x_msix_fp_int, 0, fp->name, fp);
6138 			BNX2X_ERR("request fp #%d irq failed  rc %d\n", i, rc);
6139 			bnx2x_free_msix_irqs(bp);
6143 		fp->state = BNX2X_FP_STATE_IRQ;
6146 	i = BNX2X_NUM_QUEUES(bp);
/* multi-queue banner: show first and last fastpath vectors */
6148 		printk(KERN_INFO PFX
6149 		       "%s: using MSI-X  IRQs: sp %d  fp %d - %d\n",
6150 		       bp->dev->name, bp->msix_table[0].vector,
6151 		       bp->msix_table[offset].vector,
6152 		       bp->msix_table[offset + i - 1].vector);
/* single-queue banner (else branch; condition line elided in listing) */
6154 		printk(KERN_INFO PFX "%s: using MSI-X  IRQs: sp %d  fp %d\n",
6155 		       bp->dev->name, bp->msix_table[0].vector,
6156 		       bp->msix_table[offset + i - 1].vector);
/*
 * bnx2x_enable_msi - enable single-message MSI on the PCI device and
 * record it in bp->flags; logs and returns an error indication if MSI
 * is not attainable.
 */
6161 static int bnx2x_enable_msi(struct bnx2x *bp)
6165 	rc = pci_enable_msi(bp->pdev);
6167 		DP(NETIF_MSG_IFUP, "MSI is not attainable\n");
6170 	bp->flags |= USING_MSI_FLAG;
/*
 * bnx2x_req_irq - request the legacy/MSI interrupt line.  The line is
 * shared (IRQF_SHARED) only for INTx; with MSI the flags stay 0 since
 * an MSI vector is never shared.  Marks queue 0 BNX2X_FP_STATE_IRQ.
 */
6175 static int bnx2x_req_irq(struct bnx2x *bp)
6177 	unsigned long flags;
6180 	if (bp->flags & USING_MSI_FLAG)
/* else branch — condition/assignment split is elided in this listing */
6183 		flags = IRQF_SHARED;
6185 	rc = request_irq(bp->pdev->irq, bnx2x_interrupt, flags,
6186 			 bp->dev->name, bp->dev);
6188 		bnx2x_fp(bp, 0, state) = BNX2X_FP_STATE_IRQ;
/* bnx2x_napi_enable - enable the NAPI context of every rx queue */
6193 static void bnx2x_napi_enable(struct bnx2x *bp)
6197 	for_each_rx_queue(bp, i)
6198 		napi_enable(&bnx2x_fp(bp, i, napi));
/* bnx2x_napi_disable - disable (and wait out) every rx queue's NAPI */
6201 static void bnx2x_napi_disable(struct bnx2x *bp)
6205 	for_each_rx_queue(bp, i)
6206 		napi_disable(&bnx2x_fp(bp, i, napi));
/*
 * bnx2x_netif_start - counterpart of bnx2x_netif_stop: when the last
 * holder of intr_sem drops it (dec-and-test reaches 0), re-enable NAPI
 * and HW interrupts and, if fully OPEN, wake all tx queues.
 */
6209 static void bnx2x_netif_start(struct bnx2x *bp)
6211 	if (atomic_dec_and_test(&bp->intr_sem)) {
6212 		if (netif_running(bp->dev)) {
6213 			bnx2x_napi_enable(bp);
6214 			bnx2x_int_enable(bp);
6215 			if (bp->state == BNX2X_STATE_OPEN)
6216 				netif_tx_wake_all_queues(bp->dev);
/*
 * bnx2x_netif_stop - quiesce the datapath: synchronously disable
 * interrupts (optionally also at the HW level), stop NAPI, and halt tx,
 * refreshing trans_start so the watchdog does not fire a tx timeout.
 */
6221 static void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
6223 	bnx2x_int_disable_sync(bp, disable_hw);
6224 	bnx2x_napi_disable(bp);
6225 	if (netif_running(bp->dev)) {
6226 		netif_tx_disable(bp->dev);
6227 		bp->dev->trans_start = jiffies;	/* prevent tx timeout */
6232 * Init service functions
/*
 * bnx2x_set_mac_addr_e1 - program (set != 0) or invalidate (set == 0)
 * the E1 CAM via a SET_MAC ramrod: entry 0 carries the device unicast
 * MAC, entry 1 the broadcast address.  The command is posted through
 * the slowpath queue; completion is handled asynchronously.
 */
6235 static void bnx2x_set_mac_addr_e1(struct bnx2x *bp, int set)
6237 	struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
6238 	int port = BP_PORT(bp);
/* CAM layout (per the original comment):
 * unicasts 0-31:port0, 32-63:port1; multicast 64-127:port0, 128-191:port1 */
6241 	 * unicasts 0-31:port0 32-63:port1
6242 	 * multicast 64-127:port0 128-191:port1
6244 	config->hdr.length = 2;
6245 	config->hdr.offset = port ? 32 : 0;
6246 	config->hdr.client_id = BP_CL_ID(bp);
6247 	config->hdr.reserved1 = 0;
/* entry 0: primary MAC, byte-swapped 16 bits at a time for the HW CAM */
6250 	config->config_table[0].cam_entry.msb_mac_addr =
6251 					swab16(*(u16 *)&bp->dev->dev_addr[0]);
6252 	config->config_table[0].cam_entry.middle_mac_addr =
6253 					swab16(*(u16 *)&bp->dev->dev_addr[2]);
6254 	config->config_table[0].cam_entry.lsb_mac_addr =
6255 					swab16(*(u16 *)&bp->dev->dev_addr[4]);
6256 	config->config_table[0].cam_entry.flags = cpu_to_le16(port);
/* set vs clear: the branch lines are elided in this listing */
6258 		config->config_table[0].target_table_entry.flags = 0;
6260 		CAM_INVALIDATE(config->config_table[0]);
6261 	config->config_table[0].target_table_entry.client_id = 0;
6262 	config->config_table[0].target_table_entry.vlan_id = 0;
6264 	DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x)\n",
6265 	   (set ? "setting" : "clearing"),
6266 	   config->config_table[0].cam_entry.msb_mac_addr,
6267 	   config->config_table[0].cam_entry.middle_mac_addr,
6268 	   config->config_table[0].cam_entry.lsb_mac_addr);
/* entry 1: broadcast ff:ff:ff:ff:ff:ff */
6271 	config->config_table[1].cam_entry.msb_mac_addr = 0xffff;
6272 	config->config_table[1].cam_entry.middle_mac_addr = 0xffff;
6273 	config->config_table[1].cam_entry.lsb_mac_addr = 0xffff;
6274 	config->config_table[1].cam_entry.flags = cpu_to_le16(port);
6276 		config->config_table[1].target_table_entry.flags =
6277 				TSTORM_CAM_TARGET_TABLE_ENTRY_BROADCAST;
6279 		CAM_INVALIDATE(config->config_table[1]);
6280 	config->config_table[1].target_table_entry.client_id = 0;
6281 	config->config_table[1].target_table_entry.vlan_id = 0;
/* post the ramrod with the DMA address of the config buffer */
6283 	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
6284 		      U64_HI(bnx2x_sp_mapping(bp, mac_config)),
6285 		      U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
/*
 * bnx2x_set_mac_addr_e1h - E1H variant of the CAM programming: a single
 * entry addressed by function number, carrying MAC + E1H outer VLAN id.
 * Refuses to *set* while the device is not fully OPEN (clearing is
 * always allowed).  Posts a SET_MAC ramrod like the E1 version.
 */
6288 static void bnx2x_set_mac_addr_e1h(struct bnx2x *bp, int set)
6290 	struct mac_configuration_cmd_e1h *config =
6291 		(struct mac_configuration_cmd_e1h *)bnx2x_sp(bp, mac_config);
6293 	if (set && (bp->state != BNX2X_STATE_OPEN)) {
6294 		DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
6298 	/* CAM allocation for E1H
6299 	 * unicasts: by func number
6300 	 * multicast: 20+FUNC*20, 20 each
6302 	config->hdr.length = 1;
6303 	config->hdr.offset = BP_FUNC(bp);
6304 	config->hdr.client_id = BP_CL_ID(bp);
6305 	config->hdr.reserved1 = 0;
/* MAC is written 16 bits at a time, byte-swapped for the HW */
6308 	config->config_table[0].msb_mac_addr =
6309 					swab16(*(u16 *)&bp->dev->dev_addr[0]);
6310 	config->config_table[0].middle_mac_addr =
6311 					swab16(*(u16 *)&bp->dev->dev_addr[2]);
6312 	config->config_table[0].lsb_mac_addr =
6313 					swab16(*(u16 *)&bp->dev->dev_addr[4]);
6314 	config->config_table[0].client_id = BP_L_ID(bp);
6315 	config->config_table[0].vlan_id = 0;
6316 	config->config_table[0].e1hov_id = cpu_to_le16(bp->e1hov);
/* set vs invalidate: the if/else lines are elided in this listing */
6318 		config->config_table[0].flags = BP_PORT(bp);
6320 		config->config_table[0].flags =
6321 				MAC_CONFIGURATION_ENTRY_E1H_ACTION_TYPE;
6323 	DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x)  E1HOV %d  CLID %d\n",
6324 	   (set ? "setting" : "clearing"),
6325 	   config->config_table[0].msb_mac_addr,
6326 	   config->config_table[0].middle_mac_addr,
6327 	   config->config_table[0].lsb_mac_addr, bp->e1hov, BP_L_ID(bp));
6329 	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
6330 		      U64_HI(bnx2x_sp_mapping(bp, mac_config)),
6331 		      U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
/*
 * bnx2x_wait_ramrod - wait for a slowpath ramrod completion to flip
 * *state_p to 'state'.  In poll mode the caller's context drives the rx
 * completion processing itself (interrupts may be off); otherwise we
 * just sleep-poll the state variable.  Returns 0 on success, -EBUSY on
 * timeout (and panics when BNX2X_STOP_ON_ERROR is set).
 */
6334 static int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
6335 			     int *state_p, int poll)
6337 	/* can take a while if any port is running */
6340 	DP(NETIF_MSG_IFUP, "%s for state to become %x on IDX [%d]\n",
6341 	   poll ? "polling" : "waiting", state, idx);
/* poll branch: service the default queue's completions by hand */
6346 			bnx2x_rx_int(bp->fp, 10);
6347 			/* if index is different from 0
6348 			 * the reply for some commands will
6349 			 * be on the non default queue
6352 				bnx2x_rx_int(&bp->fp[idx], 10);
/* state is updated by bnx2x_sp_event(); the barrier keeps this read
 * from being hoisted out of the retry loop */
6355 		mb(); /* state is changed by bnx2x_sp_event() */
6356 		if (*state_p == state)
6363 	BNX2X_ERR("timeout %s for state %x on IDX [%d]\n",
6364 		  poll ? "polling" : "waiting", state, idx);
6365 #ifdef BNX2X_STOP_ON_ERROR
/*
 * bnx2x_setup_leading - bring up the leading (default) connection:
 * re-arm the IGU for queue 0, post the PORT_SETUP ramrod, and wait for
 * bp->state to become BNX2X_STATE_OPEN.  Returns bnx2x_wait_ramrod rc.
 */
6372 static int bnx2x_setup_leading(struct bnx2x *bp)
6376 	/* reset IGU state */
6377 	bnx2x_ack_sb(bp, bp->fp[0].sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
6380 	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_SETUP, 0, 0, 0, 0);
6382 	/* Wait for completion */
6383 	rc = bnx2x_wait_ramrod(bp, BNX2X_STATE_OPEN, 0, &(bp->state), 0);
/*
 * bnx2x_setup_multi - bring up a non-default connection 'index':
 * re-arm its IGU, mark it OPENING, post a CLIENT_SETUP ramrod, and wait
 * until the fastpath state reaches BNX2X_FP_STATE_OPEN.
 */
6388 static int bnx2x_setup_multi(struct bnx2x *bp, int index)
6390 	struct bnx2x_fastpath *fp = &bp->fp[index];
6392 	/* reset IGU state */
6393 	bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
6396 	fp->state = BNX2X_FP_STATE_OPENING;
6397 	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CLIENT_SETUP, index, 0,
6400 	/* Wait for completion */
6401 	return bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_OPEN, index,
6405 static int bnx2x_poll(struct napi_struct *napi, int budget);
/*
 * bnx2x_set_int_mode - decide the interrupt mode and queue counts.
 * For RSS multi-mode, request min(online CPUs, HW max) queues and try
 * MSI-X; if MSI-X cannot be enabled, fall back to a single rx/tx queue
 * (MSI or INTx).  Finally publish the tx queue count to the netdev.
 * NOTE(review): the switch/case skeleton around these branches is
 * elided in this listing.
 */
6407 static void bnx2x_set_int_mode(struct bnx2x *bp)
/* forced single-queue branch (INTx/MSI case labels elided) */
6415 		bp->num_rx_queues = num_queues;
6416 		bp->num_tx_queues = num_queues;
6418 		   "set number of queues to %d\n", num_queues);
6423 		if (bp->multi_mode == ETH_RSS_MODE_REGULAR)
6424 			num_queues = min_t(u32, num_online_cpus(),
6425 					   BNX2X_MAX_QUEUES(bp));
6428 		bp->num_rx_queues = num_queues;
6429 		bp->num_tx_queues = num_queues;
6430 		DP(NETIF_MSG_IFUP, "set number of rx queues to %d"
6431 		   " number of tx queues to %d\n",
6432 		   bp->num_rx_queues, bp->num_tx_queues);
6433 		/* if we can't use MSI-X we only need one fp,
6434 		 * so try to enable MSI-X with the requested number of fp's
6435 		 * and fallback to MSI or legacy INTx with one fp
6437 		if (bnx2x_enable_msix(bp)) {
6438 			/* failed to enable MSI-X */
6440 			bp->num_rx_queues = num_queues;
6441 			bp->num_tx_queues = num_queues;
6443 				BNX2X_ERR("Multi requested but failed to "
6444 					  "enable MSI-X  set number of "
6445 					  "queues to %d\n", num_queues);
6449 	bp->dev->real_num_tx_queues = bp->num_tx_queues;
6452 static void bnx2x_set_rx_mode(struct net_device *dev);
6454 /* must be called with rtnl_lock */
/*
 * bnx2x_nic_load - full NIC bring-up (caller holds rtnl_lock):
 *   1. pick interrupt mode / queue counts and allocate all memory
 *   2. register + enable NAPI, request MSI-X / MSI / INTx interrupts
 *   3. negotiate LOAD_REQ with the MCP firmware (or emulate the load
 *      counting when there is no MCP) to learn whether common/port/
 *      function init is needed
 *   4. init HW and NIC internals, send LOAD_DONE, set up the leading
 *      and non-default connections, program the MAC and start the PHY
 *   5. start the datapath per load_mode (OPEN / DIAG) and the timer
 * Error paths unwind in reverse order (labels elided in this listing).
 * Returns 0 on success, negative errno otherwise.
 */
6455 static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
6459 #ifdef BNX2X_STOP_ON_ERROR
6460 	DP(NETIF_MSG_IFUP, "enter  load_mode %d\n", load_mode);
6461 	if (unlikely(bp->panic))
6465 	bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;
6467 	bnx2x_set_int_mode(bp);
6469 	if (bnx2x_alloc_mem(bp))
/* TPA is per-rx-queue; disabled wholesale when the flag is off */
6472 	for_each_rx_queue(bp, i)
6473 		bnx2x_fp(bp, i, disable_tpa) =
6474 					((bp->flags & TPA_ENABLE_FLAG) == 0);
6476 	for_each_rx_queue(bp, i)
6477 		netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
6480 #ifdef BNX2X_STOP_ON_ERROR
6481 	for_each_rx_queue(bp, i) {
6482 		struct bnx2x_fastpath *fp = &bp->fp[i];
6484 		fp->poll_no_work = 0;
6486 		fp->poll_max_calls = 0;
6487 		fp->poll_complete = 0;
6491 	bnx2x_napi_enable(bp);
6493 	if (bp->flags & USING_MSIX_FLAG) {
6494 		rc = bnx2x_req_msix_irqs(bp);
6496 			pci_disable_msix(bp->pdev);
/* MSI-X not in use: try MSI unless INTx was forced, then request
 * the (possibly shared) line */
6500 		if ((rc != -ENOMEM) && (int_mode != INT_MODE_INTx))
6501 			bnx2x_enable_msi(bp);
6503 		rc = bnx2x_req_irq(bp);
6505 			BNX2X_ERR("IRQ request failed  rc %d, aborting\n", rc);
6506 			if (bp->flags & USING_MSI_FLAG)
6507 				pci_disable_msi(bp->pdev);
6510 		if (bp->flags & USING_MSI_FLAG) {
6511 			bp->dev->irq = bp->pdev->irq;
6512 			printk(KERN_INFO PFX "%s: using MSI  IRQ %d\n",
6513 			       bp->dev->name, bp->pdev->irq);
6517 	/* Send LOAD_REQUEST command to MCP
6518 	   Returns the type of LOAD command:
6519 	   if it is the first port to be initialized
6520 	   common blocks should be initialized, otherwise - not
6522 	if (!BP_NOMCP(bp)) {
6523 		load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ);
6525 			BNX2X_ERR("MCP response failure, aborting\n");
6529 		if (load_code == FW_MSG_CODE_DRV_LOAD_REFUSED) {
6530 			rc = -EBUSY; /* other port in diagnostic mode */
/* no-MCP path: emulate the firmware's global/per-port load counting
 * with the driver-global load_count[] array */
6535 		int port = BP_PORT(bp);
6537 		DP(NETIF_MSG_IFUP, "NO MCP load counts before us %d, %d, %d\n",
6538 		   load_count[0], load_count[1], load_count[2]);
6540 		load_count[1 + port]++;
6541 		DP(NETIF_MSG_IFUP, "NO MCP new load counts %d, %d, %d\n",
6542 		   load_count[0], load_count[1], load_count[2]);
6543 		if (load_count[0] == 1)
6544 			load_code = FW_MSG_CODE_DRV_LOAD_COMMON;
6545 		else if (load_count[1 + port] == 1)
6546 			load_code = FW_MSG_CODE_DRV_LOAD_PORT;
6548 			load_code = FW_MSG_CODE_DRV_LOAD_FUNCTION;
/* first loader on the port becomes the port-management function */
6551 	if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
6552 	    (load_code == FW_MSG_CODE_DRV_LOAD_PORT))
6556 	DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
6559 	rc = bnx2x_init_hw(bp, load_code);
6561 		BNX2X_ERR("HW init failed, aborting\n");
6565 	/* Setup NIC internals and enable interrupts */
6566 	bnx2x_nic_init(bp, load_code);
6568 	/* Send LOAD_DONE command to MCP */
6569 	if (!BP_NOMCP(bp)) {
6570 		load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE);
6572 			BNX2X_ERR("MCP response failure, aborting\n");
6578 	bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;
6580 	rc = bnx2x_setup_leading(bp);
6582 		BNX2X_ERR("Setup leading failed!\n");
/* E1H function can be administratively disabled via the MF config */
6586 	if (CHIP_IS_E1H(bp))
6587 		if (bp->mf_config & FUNC_MF_CFG_FUNC_DISABLED) {
6588 			BNX2X_ERR("!!!  mf_cfg function disabled\n");
6589 			bp->state = BNX2X_STATE_DISABLED;
6592 	if (bp->state == BNX2X_STATE_OPEN)
6593 		for_each_nondefault_queue(bp, i) {
6594 			rc = bnx2x_setup_multi(bp, i);
/* program unicast MAC via the chip-specific CAM routine */
6600 		bnx2x_set_mac_addr_e1(bp, 1);
6602 		bnx2x_set_mac_addr_e1h(bp, 1);
6605 		bnx2x_initial_phy_init(bp);
6607 	/* Start fast path */
6608 	switch (load_mode) {
6610 		/* Tx queue should be only reenabled */
6611 		netif_tx_wake_all_queues(bp->dev);
6612 		/* Initialize the receive filter. */
6613 		bnx2x_set_rx_mode(bp->dev);
6617 		netif_tx_start_all_queues(bp->dev);
6618 		/* Initialize the receive filter. */
6619 		bnx2x_set_rx_mode(bp->dev);
/* LOAD_DIAG: rx filter only, no tx — device left in DIAG state */
6623 		/* Initialize the receive filter. */
6624 		bnx2x_set_rx_mode(bp->dev);
6625 		bp->state = BNX2X_STATE_DIAG;
6633 		bnx2x__link_status_update(bp);
6635 	/* start the timer */
6636 	mod_timer(&bp->timer, jiffies + bp->current_interval);
/* ---- error unwind (labels elided): tell MCP we are unloading,
 * then free skbs, SGEs, memory, NAPI contexts ---- */
6642 	bnx2x_int_disable_sync(bp, 1);
6643 	if (!BP_NOMCP(bp)) {
6644 		bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP);
6645 		bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
6648 	/* Free SKBs, SGEs, TPA pool and driver internals */
6649 	bnx2x_free_skbs(bp);
6650 	for_each_rx_queue(bp, i)
6651 		bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
6656 	bnx2x_napi_disable(bp);
6657 	for_each_rx_queue(bp, i)
6658 		netif_napi_del(&bnx2x_fp(bp, i, napi));
6661 	/* TBD we really need to reset the chip
6662 	   if we want to recover from this */
/*
 * bnx2x_stop_multi - tear down non-default connection 'index':
 * HALT ramrod -> wait for HALTED, then CFC_DEL ramrod -> wait for
 * CLOSED.  Returns non-zero on ramrod timeout.
 */
6666 static int bnx2x_stop_multi(struct bnx2x *bp, int index)
6668 	struct bnx2x_fastpath *fp = &bp->fp[index];
6671 	/* halt the connection */
6672 	fp->state = BNX2X_FP_STATE_HALTING;
6673 	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, index, 0, fp->cl_id, 0);
6675 	/* Wait for completion */
6676 	rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, index,
6678 	if (rc) /* timeout */
6681 	/* delete cfc entry */
6682 	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CFC_DEL, index, 0, 0, 1);
6684 	/* Wait for completion */
6685 	rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_CLOSED, index,
/*
 * bnx2x_stop_leading - tear down the leading connection: HALT ramrod
 * (polled completion), then PORT_DELETE.  PORT_DELETE completion is
 * detected by watching the default status block's slowpath producer
 * advance rather than via bnx2x_wait_ramrod, since the chip will be
 * reset anyway if this times out.
 */
6690 static int bnx2x_stop_leading(struct bnx2x *bp)
6692 	u16 dsb_sp_prod_idx;
6693 	/* if the other port is handling traffic,
6694 	   this can take a lot of time */
6700 	/* Send HALT ramrod */
6701 	bp->fp[0].state = BNX2X_FP_STATE_HALTING;
6702 	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, 0, 0, BP_CL_ID(bp), 0);
6704 	/* Wait for completion */
6705 	rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, 0,
6706 			       &(bp->fp[0].state), 1);
6707 	if (rc) /* timeout */
/* snapshot the producer so we can detect the PORT_DEL completion */
6710 	dsb_sp_prod_idx = *bp->dsb_sp_prod;
6712 	/* Send PORT_DELETE ramrod */
6713 	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_DEL, 0, 0, 0, 1);
6715 	/* Wait for completion to arrive on default status block
6716 	   we are going to reset the chip anyway
6717 	   so there is not much to do if this times out
6719 	while (dsb_sp_prod_idx == *bp->dsb_sp_prod) {
6721 			DP(NETIF_MSG_IFDOWN, "timeout waiting for port del "
6722 			   "dsb_sp_prod 0x%x != dsb_sp_prod_idx 0x%x\n",
6723 			   *bp->dsb_sp_prod, dsb_sp_prod_idx);
6724 #ifdef BNX2X_STOP_ON_ERROR
6733 		rmb(); /* Refresh the dsb_sp_prod */
6735 	bp->state = BNX2X_STATE_CLOSING_WAIT4_UNLOAD;
6736 	bp->fp[0].state = BNX2X_FP_STATE_CLOSED;
/*
 * bnx2x_reset_func - function-level reset: mask the HC leading/trailing
 * edge registers for this port and clear this function's ILT entries.
 */
6741 static void bnx2x_reset_func(struct bnx2x *bp)
6743 	int port = BP_PORT(bp);
6744 	int func = BP_FUNC(bp);
/* disable host-coalescing attention edges for this port */
6748 	REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
6749 	REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
/* wipe this function's slice of the internal lookup table */
6752 	base = FUNC_ILT_BASE(func);
6753 	for (i = base; i < base + ILT_PER_FUNC; i++)
6754 		bnx2x_ilt_wr(bp, i, 0);
/*
 * bnx2x_reset_port - port-level reset: mask NIG interrupts, block all
 * rx traffic into the BRB (including non-MCP traffic), mask AEU
 * attentions, and warn if BRB blocks are still occupied.
 */
6757 static void bnx2x_reset_port(struct bnx2x *bp)
6759 	int port = BP_PORT(bp);
6762 	REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
6764 	/* Do not rcv packets to BRB */
6765 	REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK + port*4, 0x0);
6766 	/* Do not direct rcv packets that are not for MCP to the BRB */
6767 	REG_WR(bp, (port ? NIG_REG_LLH1_BRB1_NOT_MCP :
6768 			   NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
/* mask all attention events routed to this port's AEU */
6771 	REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, 0);
6774 	/* Check for BRB port occupancy */
6775 	val = REG_RD(bp, BRB1_REG_PORT_NUM_OCC_BLOCKS_0 + port*4);
6777 		DP(NETIF_MSG_IFDOWN,
6778 		   "BRB1 is not empty  %d blocks are occupied\n", val);
6780 	/* TODO: Close Doorbell port? */
/*
 * bnx2x_reset_chip - apply the reset scope the MCP granted: COMMON
 * resets port+function+common blocks, PORT resets port+function,
 * FUNCTION resets only the function.  Unknown codes are logged.
 */
6783 static void bnx2x_reset_chip(struct bnx2x *bp, u32 reset_code)
6785 	DP(BNX2X_MSG_MCP, "function %d  reset_code %x\n",
6786 	   BP_FUNC(bp), reset_code);
6788 	switch (reset_code) {
6789 	case FW_MSG_CODE_DRV_UNLOAD_COMMON:
6790 		bnx2x_reset_port(bp);
6791 		bnx2x_reset_func(bp);
6792 		bnx2x_reset_common(bp);
6795 	case FW_MSG_CODE_DRV_UNLOAD_PORT:
6796 		bnx2x_reset_port(bp);
6797 		bnx2x_reset_func(bp);
6800 	case FW_MSG_CODE_DRV_UNLOAD_FUNCTION:
6801 		bnx2x_reset_func(bp);
6805 		BNX2X_ERR("Unknown reset_code (0x%x) from MCP\n", reset_code);
6810 /* must be called with rtnl_lock */
/*
 * bnx2x_nic_unload - full NIC tear-down (caller holds rtnl_lock):
 *   1. stop rx filtering, netif/NAPI and the periodic timer; drain tx
 *   2. invalidate the CAM (chip-specific) and pick a WOL-dependent
 *      UNLOAD reset_code per unload_mode / NO_WOL / bp->wol
 *   3. close all non-default connections, then the leading one
 *   4. negotiate the reset scope with the MCP (or emulate via
 *      load_count[] when no MCP), reset link and chip, report
 *      UNLOAD_DONE
 *   5. free skbs, SGE ranges, NAPI contexts; mark state CLOSED
 */
6811 static int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
6813 	int port = BP_PORT(bp);
6817 	bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
6819 	bp->rx_mode = BNX2X_RX_MODE_NONE;
6820 	bnx2x_set_storm_rx_mode(bp);
6822 	bnx2x_netif_stop(bp, 1);
6824 	del_timer_sync(&bp->timer);
/* keep the MCP pulse alive while unloading so it does not declare
 * the driver dead */
6825 	SHMEM_WR(bp, func_mb[BP_FUNC(bp)].drv_pulse_mb,
6826 		 (DRV_PULSE_ALWAYS_ALIVE | bp->fw_drv_pulse_wr_seq));
6827 	bnx2x_stats_handle(bp, STATS_EVENT_STOP);
6832 	/* Wait until tx fastpath tasks complete */
6833 	for_each_tx_queue(bp, i) {
6834 		struct bnx2x_fastpath *fp = &bp->fp[i];
/* poll tx completions by hand; counter/timeout lines elided */
6838 		while (bnx2x_has_tx_work_unload(fp)) {
6840 			bnx2x_tx_int(fp, 1000);
6842 				BNX2X_ERR("timeout waiting for queue[%d]\n",
6844 #ifdef BNX2X_STOP_ON_ERROR
6856 	/* Give HW time to discard old tx messages */
6859 	if (CHIP_IS_E1(bp)) {
6860 		struct mac_configuration_cmd *config =
6861 						bnx2x_sp(bp, mcast_config);
6863 		bnx2x_set_mac_addr_e1(bp, 0);
/* invalidate every multicast CAM entry previously programmed */
6865 		for (i = 0; i < config->hdr.length; i++)
6866 			CAM_INVALIDATE(config->config_table[i]);
6868 		config->hdr.length = i;
6869 		if (CHIP_REV_IS_SLOW(bp))
6870 			config->hdr.offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
6872 			config->hdr.offset = BNX2X_MAX_MULTICAST*(1 + port);
6873 		config->hdr.client_id = BP_CL_ID(bp);
6874 		config->hdr.reserved1 = 0;
6876 		bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
6877 			      U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
6878 			      U64_LO(bnx2x_sp_mapping(bp, mcast_config)), 0);
/* E1H path (else branch; line elided): disable LLH and clear MC hash */
6881 		REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
6883 		bnx2x_set_mac_addr_e1h(bp, 0);
6885 		for (i = 0; i < MC_HASH_SIZE; i++)
6886 			REG_WR(bp, MC_HASH_OFFSET(bp, i), 0);
6889 	if (unload_mode == UNLOAD_NORMAL)
6890 		reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
6892 	else if (bp->flags & NO_WOL_FLAG) {
6893 		reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP;
6894 		if (CHIP_IS_E1H(bp))
6895 			REG_WR(bp, MISC_REG_E1HMF_MODE, 0);
6897 	} else if (bp->wol) {
6898 		u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
6899 		u8 *mac_addr = bp->dev->dev_addr;
6901 		/* The mac address is written to entries 1-4 to
6902 		   preserve entry 0 which is used by the PMF */
6903 		u8 entry = (BP_E1HVN(bp) + 1)*8;
6905 		val = (mac_addr[0] << 8) | mac_addr[1];
6906 		EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry, val);
6908 		val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
6909 		      (mac_addr[4] << 8) | mac_addr[5];
6910 		EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry + 4, val);
6912 		reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN;
6915 		reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
6917 	/* Close multi and leading connections
6918 	   Completions for ramrods are collected in a synchronous way */
6919 	for_each_nondefault_queue(bp, i)
6920 		if (bnx2x_stop_multi(bp, i))
6923 	rc = bnx2x_stop_leading(bp);
6925 		BNX2X_ERR("Stop leading failed!\n");
6926 #ifdef BNX2X_STOP_ON_ERROR
/* ask the MCP what reset scope to use; without MCP, mirror the
 * load_count[] bookkeeping from bnx2x_nic_load() in reverse */
6935 		reset_code = bnx2x_fw_command(bp, reset_code);
6937 		DP(NETIF_MSG_IFDOWN, "NO MCP load counts %d, %d, %d\n",
6938 		   load_count[0], load_count[1], load_count[2]);
6940 		load_count[1 + port]--;
6941 		DP(NETIF_MSG_IFDOWN, "NO MCP new load counts %d, %d, %d\n",
6942 		   load_count[0], load_count[1], load_count[2]);
6943 		if (load_count[0] == 0)
6944 			reset_code = FW_MSG_CODE_DRV_UNLOAD_COMMON;
6945 		else if (load_count[1 + port] == 0)
6946 			reset_code = FW_MSG_CODE_DRV_UNLOAD_PORT;
6948 			reset_code = FW_MSG_CODE_DRV_UNLOAD_FUNCTION;
6951 	if ((reset_code == FW_MSG_CODE_DRV_UNLOAD_COMMON) ||
6952 	    (reset_code == FW_MSG_CODE_DRV_UNLOAD_PORT))
6953 		bnx2x__link_reset(bp);
6955 	/* Reset the chip */
6956 	bnx2x_reset_chip(bp, reset_code);
6958 	/* Report UNLOAD_DONE to MCP */
6960 		bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
6963 	/* Free SKBs, SGEs, TPA pool and driver internals */
6964 	bnx2x_free_skbs(bp);
6965 	for_each_rx_queue(bp, i)
6966 		bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
6967 	for_each_rx_queue(bp, i)
6968 		netif_napi_del(&bnx2x_fp(bp, i, napi));
6971 	bp->state = BNX2X_STATE_CLOSED;
6973 	netif_carrier_off(bp->dev);
/*
 * bnx2x_reset_task - workqueue handler recovering from a fatal error
 * (e.g. tx timeout) by doing a full unload + reload.  When
 * BNX2X_STOP_ON_ERROR is defined the reset is skipped so the failure
 * state stays intact for a debug dump.
 */
6978 static void bnx2x_reset_task(struct work_struct *work)
6980 	struct bnx2x *bp = container_of(work, struct bnx2x, reset_task);
6982 #ifdef BNX2X_STOP_ON_ERROR
6983 	BNX2X_ERR("reset task called but STOP_ON_ERROR defined"
6984 		  " so reset not done to allow debug dump,\n"
6985 	 KERN_ERR " you will need to reboot when done\n");
6991 	if (!netif_running(bp->dev))
6992 		goto reset_task_exit;
6994 	bnx2x_nic_unload(bp, UNLOAD_NORMAL);
6995 	bnx2x_nic_load(bp, LOAD_NORMAL);
7001 /* end of nic load/unload */
7006 * Init service functions
/*
 * bnx2x_undi_unload - probe-time cleanup for a pre-boot UNDI driver
 * left active on the device (detected by DORQ normal-bell CID offset
 * == 0x7, check elided in this listing).  Sends UNLOAD to the MCP for
 * each port the UNDI driver held, closes rx traffic into the BRB,
 * resets the device (preserving the NIG port-swap straps), reports
 * UNLOAD_DONE and restores this function's fw sequence number.
 * Serialized against other functions via the UNDI HW lock.
 */
7009 static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
7013 	/* Check if there is any driver already loaded */
7014 	val = REG_RD(bp, MISC_REG_UNPREPARED);
7016 		/* Check if it is the UNDI driver
7017 		 * UNDI driver initializes CID offset for normal bell to 0x7
7019 		bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
7020 		val = REG_RD(bp, DORQ_REG_NORM_CID_OFST);
7022 			u32 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
7024 			int func = BP_FUNC(bp);
7028 			/* clear the UNDI indication */
7029 			REG_WR(bp, DORQ_REG_NORM_CID_OFST, 0);
7031 			BNX2X_DEV_INFO("UNDI is active! reset device\n");
7033 			/* try unload UNDI on port 0 */
/* fw_seq is (re)derived from the shmem mailbox header sequence */
7036 				(SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
7037 				 DRV_MSG_SEQ_NUMBER_MASK);
7038 			reset_code = bnx2x_fw_command(bp, reset_code);
7040 			/* if UNDI is loaded on the other port */
7041 			if (reset_code != FW_MSG_CODE_DRV_UNLOAD_COMMON) {
7043 				/* send "DONE" for previous unload */
7044 				bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
7046 				/* unload UNDI on port 1 */
7049 					(SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
7050 					 DRV_MSG_SEQ_NUMBER_MASK);
7051 				reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
7053 				bnx2x_fw_command(bp, reset_code);
7056 			/* now it's safe to release the lock */
7057 			bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
7059 			REG_WR(bp, (BP_PORT(bp) ? HC_REG_CONFIG_1 :
7060 				    HC_REG_CONFIG_0), 0x1000);
7062 			/* close input traffic and wait for it */
7063 			/* Do not rcv packets to BRB */
7065 			       (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_DRV_MASK :
7066 				NIG_REG_LLH0_BRB1_DRV_MASK), 0x0);
7067 			/* Do not direct rcv packets that are not for MCP to
7070 			       (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_NOT_MCP :
7071 				NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
/* mask AEU attentions for this port while resetting */
7074 			       (BP_PORT(bp) ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
7075 				MISC_REG_AEU_MASK_ATTN_FUNC_0), 0);
7078 			/* save NIG port swap info */
7079 			swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
7080 			swap_en = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
/* assert block resets (register values elided in this listing) */
7083 			       GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
7086 			       GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
7088 			/* take the NIG out of reset and restore swap values */
7090 			       GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
7091 			       MISC_REGISTERS_RESET_REG_1_RST_NIG);
7092 			REG_WR(bp, NIG_REG_PORT_SWAP, swap_val);
7093 			REG_WR(bp, NIG_REG_STRAP_OVERRIDE, swap_en);
7095 			/* send unload done to the MCP */
7096 			bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
7098 			/* restore our func and fw_seq */
7101 				(SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
7102 				 DRV_MSG_SEQ_NUMBER_MASK);
7105 			bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
/*
 * bnx2x_get_common_hwinfo - probe-time discovery of chip-wide info:
 * composes the chip id from NUM/REV/METAL/BOND registers, reads flash
 * size and the shmem base (flagging NO_MCP when shmem is absent or out
 * of the expected 0xA0000-0xC0000 window), validates the MCP shmem
 * signature, caches hw_config/board/led-mode/bootcode version, derives
 * WoL capability (E1HVN 0 only, from the PCI PM capability), and prints
 * the board part number.
 */
7109 static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
7111 	u32 val, val2, val3, val4, id;
7114 	/* Get the chip revision id and number. */
7115 	/* chip num:16-31 rev:12-15 metal:4-11 bond_id:0-3 */
7116 	val = REG_RD(bp, MISC_REG_CHIP_NUM);
7117 	id = ((val & 0xffff) << 16);
7118 	val = REG_RD(bp, MISC_REG_CHIP_REV);
7119 	id |= ((val & 0xf) << 12);
7120 	val = REG_RD(bp, MISC_REG_CHIP_METAL);
7121 	id |= ((val & 0xff) << 4);
7122 	val = REG_RD(bp, MISC_REG_BOND_ID);
7124 	bp->common.chip_id = id;
7125 	bp->link_params.chip_id = bp->common.chip_id;
7126 	BNX2X_DEV_INFO("chip ID is 0x%x\n", id);
7128 	val = REG_RD(bp, MCP_REG_MCPR_NVM_CFG4);
7129 	bp->common.flash_size = (NVRAM_1MB_SIZE <<
7130 				 (val & MCPR_NVM_CFG4_FLASH_SIZE));
7131 	BNX2X_DEV_INFO("flash_size 0x%x (%d)\n",
7132 		       bp->common.flash_size, bp->common.flash_size);
7134 	bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
7135 	bp->link_params.shmem_base = bp->common.shmem_base;
7136 	BNX2X_DEV_INFO("shmem offset is 0x%x\n", bp->common.shmem_base);
7138 	if (!bp->common.shmem_base ||
7139 	    (bp->common.shmem_base < 0xA0000) ||
7140 	    (bp->common.shmem_base >= 0xC0000)) {
7141 		BNX2X_DEV_INFO("MCP not active\n");
7142 		bp->flags |= NO_MCP_FLAG;
/* both DEV_INFO and MB validity bits must be set in shmem */
7146 	val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
7147 	if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
7148 		!= (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
7149 		BNX2X_ERR("BAD MCP validity signature\n");
7151 	bp->common.hw_config = SHMEM_RD(bp, dev_info.shared_hw_config.config);
7152 	bp->common.board = SHMEM_RD(bp, dev_info.shared_hw_config.board);
7154 	BNX2X_DEV_INFO("hw_config 0x%08x  board 0x%08x\n",
7155 		       bp->common.hw_config, bp->common.board);
7157 	bp->link_params.hw_led_mode = ((bp->common.hw_config &
7158 					SHARED_HW_CFG_LED_MODE_MASK) >>
7159 				       SHARED_HW_CFG_LED_MODE_SHIFT);
7161 	val = SHMEM_RD(bp, dev_info.bc_rev) >> 8;
7162 	bp->common.bc_ver = val;
7163 	BNX2X_DEV_INFO("bc_ver %X\n", val);
7164 	if (val < BNX2X_BC_VER) {
7165 		/* for now only warn
7166 		 * later we might need to enforce this */
7167 		BNX2X_ERR("This driver needs bc_ver %X but found %X,"
7168 			  " please upgrade BC\n", BNX2X_BC_VER, val);
/* WoL is only possible for virtual-network function 0 on E1H */
7171 	if (BP_E1HVN(bp) == 0) {
7172 		pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_PMC, &pmc);
7173 		bp->flags |= (pmc & PCI_PM_CAP_PME_D3cold) ? 0 : NO_WOL_FLAG;
7175 		/* no WOL capability for E1HVN != 0 */
7176 		bp->flags |= NO_WOL_FLAG;
7178 	BNX2X_DEV_INFO("%sWoL capable\n",
7179 		       (bp->flags & NO_WOL_FLAG) ? "Not " : "");
/* part number: four consecutive 32-bit words from shmem */
7181 	val = SHMEM_RD(bp, dev_info.shared_hw_config.part_num);
7182 	val2 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[4]);
7183 	val3 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[8]);
7184 	val4 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[12]);
7186 	printk(KERN_INFO PFX "part number %X-%X-%X-%X\n",
7187 	       val, val2, val3, val4);
/* Populate bp->port.supported with the SUPPORTED_* capability flags implied
 * by the NVRAM switch configuration (1G SerDes vs 10G XGXS) and the external
 * PHY type, read the PHY MDIO address from the NIG, then mask the result
 * down by link_params.speed_cap_mask.  Bad NVRAM values are reported with
 * BNX2X_ERR but do not abort probing.
 * NOTE(review): this extract is missing interleaved lines (braces, break
 * statements, some SUPPORTED_* terms); comments describe only what the
 * visible lines establish.
 */
7190 static void __devinit bnx2x_link_settings_supported(struct bnx2x *bp,
7193 int port = BP_PORT(bp);
7196 switch (switch_cfg) {
/* 1G SerDes attachment: decode the SerDes external PHY type */
7198 BNX2X_DEV_INFO("switch_cfg 0x%x (1G)\n", switch_cfg);
7201 SERDES_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
7202 switch (ext_phy_type) {
7203 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT:
7204 BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
7207 bp->port.supported |= (SUPPORTED_10baseT_Half |
7208 SUPPORTED_10baseT_Full |
7209 SUPPORTED_100baseT_Half |
7210 SUPPORTED_100baseT_Full |
7211 SUPPORTED_1000baseT_Full |
7212 SUPPORTED_2500baseX_Full |
7217 SUPPORTED_Asym_Pause);
7220 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_BCM5482:
7221 BNX2X_DEV_INFO("ext_phy_type 0x%x (5482)\n",
7224 bp->port.supported |= (SUPPORTED_10baseT_Half |
7225 SUPPORTED_10baseT_Full |
7226 SUPPORTED_100baseT_Half |
7227 SUPPORTED_100baseT_Full |
7228 SUPPORTED_1000baseT_Full |
7233 SUPPORTED_Asym_Pause);
/* unrecognized SerDes PHY type in NVRAM */
7237 BNX2X_ERR("NVRAM config error. "
7238 "BAD SerDes ext_phy_config 0x%x\n",
7239 bp->link_params.ext_phy_config);
/* MDIO address of the SerDes PHY comes from the NIG block */
7243 bp->port.phy_addr = REG_RD(bp, NIG_REG_SERDES0_CTRL_PHY_ADDR +
7245 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
7248 case SWITCH_CFG_10G:
/* 10G XGXS attachment: decode the XGXS external PHY type */
7249 BNX2X_DEV_INFO("switch_cfg 0x%x (10G)\n", switch_cfg);
7252 XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
7253 switch (ext_phy_type) {
7254 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
7255 BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
7258 bp->port.supported |= (SUPPORTED_10baseT_Half |
7259 SUPPORTED_10baseT_Full |
7260 SUPPORTED_100baseT_Half |
7261 SUPPORTED_100baseT_Full |
7262 SUPPORTED_1000baseT_Full |
7263 SUPPORTED_2500baseX_Full |
7264 SUPPORTED_10000baseT_Full |
7269 SUPPORTED_Asym_Pause);
7272 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
7273 BNX2X_DEV_INFO("ext_phy_type 0x%x (8705)\n",
7276 bp->port.supported |= (SUPPORTED_10000baseT_Full |
7279 SUPPORTED_Asym_Pause);
7282 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
7283 BNX2X_DEV_INFO("ext_phy_type 0x%x (8706)\n",
7286 bp->port.supported |= (SUPPORTED_10000baseT_Full |
7287 SUPPORTED_1000baseT_Full |
7290 SUPPORTED_Asym_Pause);
7293 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
7294 BNX2X_DEV_INFO("ext_phy_type 0x%x (8072)\n",
7297 bp->port.supported |= (SUPPORTED_10000baseT_Full |
7298 SUPPORTED_1000baseT_Full |
7302 SUPPORTED_Asym_Pause);
7305 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
7306 BNX2X_DEV_INFO("ext_phy_type 0x%x (8073)\n",
7309 bp->port.supported |= (SUPPORTED_10000baseT_Full |
7310 SUPPORTED_2500baseX_Full |
7311 SUPPORTED_1000baseT_Full |
7315 SUPPORTED_Asym_Pause);
7318 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
7319 BNX2X_DEV_INFO("ext_phy_type 0x%x (SFX7101)\n",
7322 bp->port.supported |= (SUPPORTED_10000baseT_Full |
7326 SUPPORTED_Asym_Pause);
7329 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
/* NVRAM explicitly marks the XGXS PHY as failed */
7330 BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
7331 bp->link_params.ext_phy_config);
7335 BNX2X_ERR("NVRAM config error. "
7336 "BAD XGXS ext_phy_config 0x%x\n",
7337 bp->link_params.ext_phy_config);
/* MDIO address of the XGXS PHY comes from the NIG block */
7341 bp->port.phy_addr = REG_RD(bp, NIG_REG_XGXS0_CTRL_PHY_ADDR +
7343 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
7348 BNX2X_ERR("BAD switch_cfg link_config 0x%x\n",
7349 bp->port.link_config);
7352 bp->link_params.phy_addr = bp->port.phy_addr;
7354 /* mask what we support according to speed_cap_mask */
7355 if (!(bp->link_params.speed_cap_mask &
7356 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF))
7357 bp->port.supported &= ~SUPPORTED_10baseT_Half;
7359 if (!(bp->link_params.speed_cap_mask &
7360 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL))
7361 bp->port.supported &= ~SUPPORTED_10baseT_Full;
7363 if (!(bp->link_params.speed_cap_mask &
7364 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF))
7365 bp->port.supported &= ~SUPPORTED_100baseT_Half;
7367 if (!(bp->link_params.speed_cap_mask &
7368 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL))
7369 bp->port.supported &= ~SUPPORTED_100baseT_Full;
7371 if (!(bp->link_params.speed_cap_mask &
7372 PORT_HW_CFG_SPEED_CAPABILITY_D0_1G))
7373 bp->port.supported &= ~(SUPPORTED_1000baseT_Half |
7374 SUPPORTED_1000baseT_Full);
7376 if (!(bp->link_params.speed_cap_mask &
7377 PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G))
7378 bp->port.supported &= ~SUPPORTED_2500baseX_Full;
7380 if (!(bp->link_params.speed_cap_mask &
7381 PORT_HW_CFG_SPEED_CAPABILITY_D0_10G))
7382 bp->port.supported &= ~SUPPORTED_10000baseT_Full;
7384 BNX2X_DEV_INFO("supported 0x%x\n", bp->port.supported);
/* Translate the NVRAM-requested link speed/duplex (bp->port.link_config)
 * into link_params.req_line_speed / req_duplex and bp->port.advertising,
 * validating each requested mode against bp->port.supported.  An invalid
 * configuration logs BNX2X_ERR and (in the default case) falls back to
 * autoneg with the full supported mask.  Finally derives req_flow_ctrl
 * from the link_config flow-control field.
 * NOTE(review): braces/break statements are missing from this extract.
 */
7387 static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
7389 bp->link_params.req_duplex = DUPLEX_FULL;
7391 switch (bp->port.link_config & PORT_FEATURE_LINK_SPEED_MASK) {
7392 case PORT_FEATURE_LINK_SPEED_AUTO:
7393 if (bp->port.supported & SUPPORTED_Autoneg) {
7394 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
7395 bp->port.advertising = bp->port.supported;
7398 XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
/* 8705/8706 PHYs cannot autonegotiate: force 10G instead */
7400 if ((ext_phy_type ==
7401 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705) ||
7403 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706)) {
7404 /* force 10G, no AN */
7405 bp->link_params.req_line_speed = SPEED_10000;
7406 bp->port.advertising =
7407 (ADVERTISED_10000baseT_Full |
7411 BNX2X_ERR("NVRAM config error. "
7412 "Invalid link_config 0x%x"
7413 " Autoneg not supported\n",
7414 bp->port.link_config);
7419 case PORT_FEATURE_LINK_SPEED_10M_FULL:
7420 if (bp->port.supported & SUPPORTED_10baseT_Full) {
7421 bp->link_params.req_line_speed = SPEED_10;
7422 bp->port.advertising = (ADVERTISED_10baseT_Full |
7425 BNX2X_ERR("NVRAM config error. "
7426 "Invalid link_config 0x%x"
7427 " speed_cap_mask 0x%x\n",
7428 bp->port.link_config,
7429 bp->link_params.speed_cap_mask);
7434 case PORT_FEATURE_LINK_SPEED_10M_HALF:
7435 if (bp->port.supported & SUPPORTED_10baseT_Half) {
7436 bp->link_params.req_line_speed = SPEED_10;
7437 bp->link_params.req_duplex = DUPLEX_HALF;
7438 bp->port.advertising = (ADVERTISED_10baseT_Half |
7441 BNX2X_ERR("NVRAM config error. "
7442 "Invalid link_config 0x%x"
7443 " speed_cap_mask 0x%x\n",
7444 bp->port.link_config,
7445 bp->link_params.speed_cap_mask);
7450 case PORT_FEATURE_LINK_SPEED_100M_FULL:
7451 if (bp->port.supported & SUPPORTED_100baseT_Full) {
7452 bp->link_params.req_line_speed = SPEED_100;
7453 bp->port.advertising = (ADVERTISED_100baseT_Full |
7456 BNX2X_ERR("NVRAM config error. "
7457 "Invalid link_config 0x%x"
7458 " speed_cap_mask 0x%x\n",
7459 bp->port.link_config,
7460 bp->link_params.speed_cap_mask);
7465 case PORT_FEATURE_LINK_SPEED_100M_HALF:
7466 if (bp->port.supported & SUPPORTED_100baseT_Half) {
7467 bp->link_params.req_line_speed = SPEED_100;
7468 bp->link_params.req_duplex = DUPLEX_HALF;
7469 bp->port.advertising = (ADVERTISED_100baseT_Half |
7472 BNX2X_ERR("NVRAM config error. "
7473 "Invalid link_config 0x%x"
7474 " speed_cap_mask 0x%x\n",
7475 bp->port.link_config,
7476 bp->link_params.speed_cap_mask);
7481 case PORT_FEATURE_LINK_SPEED_1G:
7482 if (bp->port.supported & SUPPORTED_1000baseT_Full) {
7483 bp->link_params.req_line_speed = SPEED_1000;
7484 bp->port.advertising = (ADVERTISED_1000baseT_Full |
7487 BNX2X_ERR("NVRAM config error. "
7488 "Invalid link_config 0x%x"
7489 " speed_cap_mask 0x%x\n",
7490 bp->port.link_config,
7491 bp->link_params.speed_cap_mask);
7496 case PORT_FEATURE_LINK_SPEED_2_5G:
7497 if (bp->port.supported & SUPPORTED_2500baseX_Full) {
7498 bp->link_params.req_line_speed = SPEED_2500;
7499 bp->port.advertising = (ADVERTISED_2500baseX_Full |
7502 BNX2X_ERR("NVRAM config error. "
7503 "Invalid link_config 0x%x"
7504 " speed_cap_mask 0x%x\n",
7505 bp->port.link_config,
7506 bp->link_params.speed_cap_mask);
/* all three 10G media variants map to the same requested speed */
7511 case PORT_FEATURE_LINK_SPEED_10G_CX4:
7512 case PORT_FEATURE_LINK_SPEED_10G_KX4:
7513 case PORT_FEATURE_LINK_SPEED_10G_KR:
7514 if (bp->port.supported & SUPPORTED_10000baseT_Full) {
7515 bp->link_params.req_line_speed = SPEED_10000;
7516 bp->port.advertising = (ADVERTISED_10000baseT_Full |
7519 BNX2X_ERR("NVRAM config error. "
7520 "Invalid link_config 0x%x"
7521 " speed_cap_mask 0x%x\n",
7522 bp->port.link_config,
7523 bp->link_params.speed_cap_mask);
/* unknown speed selector: fall back to autoneg over everything we support */
7529 BNX2X_ERR("NVRAM config error. "
7530 "BAD link speed link_config 0x%x\n",
7531 bp->port.link_config);
7532 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
7533 bp->port.advertising = bp->port.supported;
/* flow control request from NVRAM; AUTO is meaningless without autoneg */
7537 bp->link_params.req_flow_ctrl = (bp->port.link_config &
7538 PORT_FEATURE_FLOW_CONTROL_MASK);
7539 if ((bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO) &&
7540 !(bp->port.supported & SUPPORTED_Autoneg))
7541 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_NONE;
7543 BNX2X_DEV_INFO("req_line_speed %d req_duplex %d req_flow_ctrl 0x%x"
7544 " advertising 0x%x\n",
7545 bp->link_params.req_line_speed,
7546 bp->link_params.req_duplex,
7547 bp->link_params.req_flow_ctrl, bp->port.advertising);
/* Read the per-port hardware configuration from shared memory (SerDes/lane/
 * external-PHY config, speed capability mask, link config), derive the
 * supported and requested link settings, and extract the port MAC address
 * from the mac_upper/mac_lower shmem words into dev_addr/perm_addr.
 */
7550 static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp)
7552 int port = BP_PORT(bp);
7555 bp->link_params.bp = bp;
7556 bp->link_params.port = port;
7558 bp->link_params.serdes_config =
7559 SHMEM_RD(bp, dev_info.port_hw_config[port].serdes_config);
7560 bp->link_params.lane_config =
7561 SHMEM_RD(bp, dev_info.port_hw_config[port].lane_config);
7562 bp->link_params.ext_phy_config =
7564 dev_info.port_hw_config[port].external_phy_config);
7565 bp->link_params.speed_cap_mask =
7567 dev_info.port_hw_config[port].speed_capability_mask);
7569 bp->port.link_config =
7570 SHMEM_RD(bp, dev_info.port_feature_config[port].link_config);
7572 BNX2X_DEV_INFO("serdes_config 0x%08x lane_config 0x%08x\n"
7573 KERN_INFO " ext_phy_config 0x%08x speed_cap_mask 0x%08x"
7574 " link_config 0x%08x\n",
7575 bp->link_params.serdes_config,
7576 bp->link_params.lane_config,
7577 bp->link_params.ext_phy_config,
7578 bp->link_params.speed_cap_mask, bp->port.link_config);
7580 bp->link_params.switch_cfg = (bp->port.link_config &
7581 PORT_FEATURE_CONNECTED_SWITCH_MASK);
7582 bnx2x_link_settings_supported(bp, bp->link_params.switch_cfg);
7584 bnx2x_link_settings_requested(bp);
/* MAC: upper 2 bytes in val2, lower 4 bytes in val, big-endian order */
7586 val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper);
7587 val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower);
7588 bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
7589 bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
7590 bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
7591 bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
7592 bp->dev->dev_addr[4] = (u8)(val >> 8 & 0xff);
7593 bp->dev->dev_addr[5] = (u8)(val & 0xff);
7594 memcpy(bp->link_params.mac_addr, bp->dev->dev_addr, ETH_ALEN);
7595 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
/* Gather function-level hardware info: common chip info, E1H multi-function
 * (E1HOV tag) configuration, port info + firmware sequence when the MCP is
 * present, and the per-function MF MAC address (with a random-MAC fallback
 * used only on emulation/FPGA).
 */
7598 static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
7600 int func = BP_FUNC(bp);
7604 bnx2x_get_common_hwinfo(bp);
/* E1H: read multi-function config and the outer-VLAN (E1HOV) tag */
7608 if (CHIP_IS_E1H(bp)) {
7610 SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
7612 val = (SHMEM_RD(bp, mf_cfg.func_mf_config[func].e1hov_tag) &
7613 FUNC_MF_CFG_E1HOV_TAG_MASK);
7614 if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
7618 BNX2X_DEV_INFO("MF mode E1HOV for func %d is %d "
7620 func, bp->e1hov, bp->e1hov);
7622 BNX2X_DEV_INFO("Single function mode\n");
/* MF mode without a valid E1HOV tag is a fatal configuration error */
7624 BNX2X_ERR("!!! No valid E1HOV for func %d,"
7625 " aborting\n", func);
7631 if (!BP_NOMCP(bp)) {
7632 bnx2x_get_port_hwinfo(bp);
/* resume the driver<->MCP mailbox sequence where the bootcode left it */
7634 bp->fw_seq = (SHMEM_RD(bp, func_mb[func].drv_mb_header) &
7635 DRV_MSG_SEQ_NUMBER_MASK);
7636 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
/* per-function MAC from MF config, if one is programmed */
7640 val2 = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_upper);
7641 val = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_lower);
7642 if ((val2 != FUNC_MF_CFG_UPPERMAC_DEFAULT) &&
7643 (val != FUNC_MF_CFG_LOWERMAC_DEFAULT)) {
7644 bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
7645 bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
7646 bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
7647 bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
7648 bp->dev->dev_addr[4] = (u8)(val >> 8 & 0xff);
7649 bp->dev->dev_addr[5] = (u8)(val & 0xff);
7650 memcpy(bp->link_params.mac_addr, bp->dev->dev_addr,
7652 memcpy(bp->dev->perm_addr, bp->dev->dev_addr,
7660 /* only supposed to happen on emulation/FPGA */
7661 BNX2X_ERR("warning random MAC workaround active\n");
7662 random_ether_addr(bp->dev->dev_addr);
7663 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
/* One-time software init of the bnx2x private struct at probe: locks and
 * work items, hardware info discovery, UNDI cleanup, module-parameter
 * driven mode selection (multi-queue, TPA/LRO), default ring sizes, and
 * the periodic timer.  Interrupts stay masked (intr_sem = 1) until the HW
 * is initialized.
 */
7669 static int __devinit bnx2x_init_bp(struct bnx2x *bp)
7671 int func = BP_FUNC(bp);
7674 /* Disable interrupt handling until HW is initialized */
7675 atomic_set(&bp->intr_sem, 1);
7677 mutex_init(&bp->port.phy_mutex);
7679 INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task);
7680 INIT_WORK(&bp->reset_task, bnx2x_reset_task);
7682 rc = bnx2x_get_hwinfo(bp);
7684 /* need to reset chip if undi was active */
7686 bnx2x_undi_unload(bp);
7688 if (CHIP_REV_IS_FPGA(bp))
7689 printk(KERN_ERR PFX "FPGA detected\n");
7691 if (BP_NOMCP(bp) && (func == 0))
7693 "MCP disabled, must load devices in order!\n");
7695 /* Set multi queue mode */
/* RSS multi-queue requires MSI-X; downgrade if INTx/MSI was requested */
7696 if ((multi_mode != ETH_RSS_MODE_DISABLED) &&
7697 ((int_mode == INT_MODE_INTx) || (int_mode == INT_MODE_MSI))) {
7699 "Multi disabled since int_mode requested is not MSI-X\n");
7700 multi_mode = ETH_RSS_MODE_DISABLED;
7702 bp->multi_mode = multi_mode;
/* TPA (HW LRO) flag and NETIF_F_LRO are kept in sync */
7707 bp->flags &= ~TPA_ENABLE_FLAG;
7708 bp->dev->features &= ~NETIF_F_LRO;
7710 bp->flags |= TPA_ENABLE_FLAG;
7711 bp->dev->features |= NETIF_F_LRO;
7715 bp->tx_ring_size = MAX_TX_AVAIL;
7716 bp->rx_ring_size = MAX_RX_AVAIL;
/* slow down the periodic timer on emulation/FPGA */
7724 bp->timer_interval = (CHIP_REV_IS_SLOW(bp) ? 5*HZ : HZ);
7725 bp->current_interval = (poll ? poll : bp->timer_interval);
7727 init_timer(&bp->timer);
7728 bp->timer.expires = jiffies + bp->current_interval;
7729 bp->timer.data = (unsigned long) bp;
7730 bp->timer.function = bnx2x_timer;
7736 * ethtool service functions
7739 /* All ethtool functions called with rtnl_lock */
/* ethtool get_settings: report supported/advertised masks, current speed
 * and duplex (live values when carrier is up, requested values otherwise,
 * clamped by the MF max-bandwidth config), port type derived from the
 * external PHY, PHY address, and autoneg state.
 */
7741 static int bnx2x_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
7743 struct bnx2x *bp = netdev_priv(dev);
7745 cmd->supported = bp->port.supported;
7746 cmd->advertising = bp->port.advertising;
7748 if (netif_carrier_ok(dev)) {
7749 cmd->speed = bp->link_vars.line_speed;
7750 cmd->duplex = bp->link_vars.duplex;
7752 cmd->speed = bp->link_params.req_line_speed;
7753 cmd->duplex = bp->link_params.req_duplex;
/* in MF mode the reported speed is capped by the configured max BW,
 * which is stored in units of 100 Mbps */
7758 vn_max_rate = ((bp->mf_config & FUNC_MF_CFG_MAX_BW_MASK) >>
7759 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
7760 if (vn_max_rate < cmd->speed)
7761 cmd->speed = vn_max_rate;
/* map the external PHY type to an ethtool port type */
7764 if (bp->link_params.switch_cfg == SWITCH_CFG_10G) {
7766 XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
7768 switch (ext_phy_type) {
7769 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
7770 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
7771 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
7772 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
7773 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
7774 cmd->port = PORT_FIBRE;
7777 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
7778 cmd->port = PORT_TP;
7781 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
7782 BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
7783 bp->link_params.ext_phy_config);
7787 DP(NETIF_MSG_LINK, "BAD XGXS ext_phy_config 0x%x\n",
7788 bp->link_params.ext_phy_config);
7792 cmd->port = PORT_TP;
7794 cmd->phy_address = bp->port.phy_addr;
7795 cmd->transceiver = XCVR_INTERNAL;
7797 if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
7798 cmd->autoneg = AUTONEG_ENABLE;
7800 cmd->autoneg = AUTONEG_DISABLE;
7805 DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
7806 DP_LEVEL " supported 0x%x advertising 0x%x speed %d\n"
7807 DP_LEVEL " duplex %d port %d phy_address %d transceiver %d\n"
7808 DP_LEVEL " autoneg %d maxtxpkt %d maxrxpkt %d\n",
7809 cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
7810 cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
7811 cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);
/* ethtool set_settings: accept either autoneg (advertising masked by what
 * the port supports) or a forced speed/duplex, rejecting any combination
 * the port does not support.  On success stores the request into
 * link_params/port.advertising and, if the interface is running, restarts
 * statistics so the new link settings take effect.
 */
7816 static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
7818 struct bnx2x *bp = netdev_priv(dev);
7824 DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
7825 DP_LEVEL " supported 0x%x advertising 0x%x speed %d\n"
7826 DP_LEVEL " duplex %d port %d phy_address %d transceiver %d\n"
7827 DP_LEVEL " autoneg %d maxtxpkt %d maxrxpkt %d\n",
7828 cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
7829 cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
7830 cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);
7832 if (cmd->autoneg == AUTONEG_ENABLE) {
7833 if (!(bp->port.supported & SUPPORTED_Autoneg)) {
7834 DP(NETIF_MSG_LINK, "Autoneg not supported\n");
7838 /* advertise the requested speed and duplex if supported */
7839 cmd->advertising &= bp->port.supported;
7841 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
7842 bp->link_params.req_duplex = DUPLEX_FULL;
7843 bp->port.advertising |= (ADVERTISED_Autoneg |
7846 } else { /* forced speed */
7847 /* advertise the requested speed and duplex if supported */
7848 switch (cmd->speed) {
7850 if (cmd->duplex == DUPLEX_FULL) {
7851 if (!(bp->port.supported &
7852 SUPPORTED_10baseT_Full)) {
7854 "10M full not supported\n");
7858 advertising = (ADVERTISED_10baseT_Full |
7861 if (!(bp->port.supported &
7862 SUPPORTED_10baseT_Half)) {
7864 "10M half not supported\n");
7868 advertising = (ADVERTISED_10baseT_Half |
7874 if (cmd->duplex == DUPLEX_FULL) {
7875 if (!(bp->port.supported &
7876 SUPPORTED_100baseT_Full)) {
7878 "100M full not supported\n");
7882 advertising = (ADVERTISED_100baseT_Full |
7885 if (!(bp->port.supported &
7886 SUPPORTED_100baseT_Half)) {
7888 "100M half not supported\n");
7892 advertising = (ADVERTISED_100baseT_Half |
/* 1G and above are full-duplex only on this hardware */
7898 if (cmd->duplex != DUPLEX_FULL) {
7899 DP(NETIF_MSG_LINK, "1G half not supported\n");
7903 if (!(bp->port.supported & SUPPORTED_1000baseT_Full)) {
7904 DP(NETIF_MSG_LINK, "1G full not supported\n");
7908 advertising = (ADVERTISED_1000baseT_Full |
7913 if (cmd->duplex != DUPLEX_FULL) {
7915 "2.5G half not supported\n");
7919 if (!(bp->port.supported & SUPPORTED_2500baseX_Full)) {
7921 "2.5G full not supported\n");
7925 advertising = (ADVERTISED_2500baseX_Full |
7930 if (cmd->duplex != DUPLEX_FULL) {
7931 DP(NETIF_MSG_LINK, "10G half not supported\n");
7935 if (!(bp->port.supported & SUPPORTED_10000baseT_Full)) {
7936 DP(NETIF_MSG_LINK, "10G full not supported\n");
7940 advertising = (ADVERTISED_10000baseT_Full |
7945 DP(NETIF_MSG_LINK, "Unsupported speed\n");
7949 bp->link_params.req_line_speed = cmd->speed;
7950 bp->link_params.req_duplex = cmd->duplex;
7951 bp->port.advertising = advertising;
7954 DP(NETIF_MSG_LINK, "req_line_speed %d\n"
7955 DP_LEVEL " req_duplex %d advertising 0x%x\n",
7956 bp->link_params.req_line_speed, bp->link_params.req_duplex,
7957 bp->port.advertising);
/* apply immediately on a running interface */
7959 if (netif_running(dev)) {
7960 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
7967 #define PHY_FW_VER_LEN 10
/* ethtool get_drvinfo: driver name/version, bootcode version (plus the
 * external PHY firmware version, fetched under the PHY lock), PCI bus id,
 * and the stats/test/eeprom sizes.
 */
7969 static void bnx2x_get_drvinfo(struct net_device *dev,
7970 struct ethtool_drvinfo *info)
7972 struct bnx2x *bp = netdev_priv(dev);
7973 u8 phy_fw_ver[PHY_FW_VER_LEN];
7975 strcpy(info->driver, DRV_MODULE_NAME);
7976 strcpy(info->version, DRV_MODULE_VERSION);
7978 phy_fw_ver[0] = '\0';
/* PHY firmware version query requires exclusive PHY access */
7980 bnx2x_acquire_phy_lock(bp);
7981 bnx2x_get_ext_phy_fw_version(&bp->link_params,
7982 (bp->state != BNX2X_STATE_CLOSED),
7983 phy_fw_ver, PHY_FW_VER_LEN);
7984 bnx2x_release_phy_lock(bp);
/* bc_ver is packed as 0xMMmmpp (major.minor.patch) */
7987 snprintf(info->fw_version, 32, "BC:%d.%d.%d%s%s",
7988 (bp->common.bc_ver & 0xff0000) >> 16,
7989 (bp->common.bc_ver & 0xff00) >> 8,
7990 (bp->common.bc_ver & 0xff),
7991 ((phy_fw_ver[0] != '\0') ? " PHY:" : ""), phy_fw_ver);
7992 strcpy(info->bus_info, pci_name(bp->pdev));
7993 info->n_stats = BNX2X_NUM_STATS;
7994 info->testinfo_len = BNX2X_NUM_TESTS;
7995 info->eedump_len = bp->common.flash_size;
7996 info->regdump_len = 0;
/* ethtool get_wol: magic-packet WoL only, and only when NO_WOL_FLAG is
 * clear (i.e. the function has PME-from-D3cold capability).
 */
7999 static void bnx2x_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
8001 struct bnx2x *bp = netdev_priv(dev);
8003 if (bp->flags & NO_WOL_FLAG) {
8007 wol->supported = WAKE_MAGIC;
8009 wol->wolopts = WAKE_MAGIC;
8013 memset(&wol->sopass, 0, sizeof(wol->sopass));
/* ethtool set_wol: accept only WAKE_MAGIC, and reject even that when the
 * hardware reported no WoL capability (NO_WOL_FLAG).
 */
8016 static int bnx2x_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
8018 struct bnx2x *bp = netdev_priv(dev);
8020 if (wol->wolopts & ~WAKE_MAGIC)
8023 if (wol->wolopts & WAKE_MAGIC) {
8024 if (bp->flags & NO_WOL_FLAG)
/* ethtool get_msglevel: return the driver's current debug message mask. */
8034 static u32 bnx2x_get_msglevel(struct net_device *dev)
8036 struct bnx2x *bp = netdev_priv(dev);
8038 return bp->msglevel;
/* ethtool set_msglevel: update the debug message mask; silently ignored
 * for callers without CAP_NET_ADMIN.
 */
8041 static void bnx2x_set_msglevel(struct net_device *dev, u32 level)
8043 struct bnx2x *bp = netdev_priv(dev);
8045 if (capable(CAP_NET_ADMIN))
8046 bp->msglevel = level;
/* ethtool nway_reset: restart link negotiation on a running interface by
 * stopping statistics (link re-init follows in the elided lines).
 */
8049 static int bnx2x_nway_reset(struct net_device *dev)
8051 struct bnx2x *bp = netdev_priv(dev);
8056 if (netif_running(dev)) {
8057 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
/* ethtool get_eeprom_len: the NVRAM (flash) size discovered at probe. */
8064 static int bnx2x_get_eeprom_len(struct net_device *dev)
8066 struct bnx2x *bp = netdev_priv(dev);
8068 return bp->common.flash_size;
/* Request the per-port NVRAM software arbitration lock from the MCP and
 * poll (with a timeout stretched on emulation/FPGA) until the arbiter
 * grants it; logs and fails if the grant bit never appears.
 */
8071 static int bnx2x_acquire_nvram_lock(struct bnx2x *bp)
8073 int port = BP_PORT(bp);
8077 /* adjust timeout for emulation/FPGA */
8078 count = NVRAM_TIMEOUT_COUNT;
8079 if (CHIP_REV_IS_SLOW(bp))
8082 /* request access to nvram interface */
8083 REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
8084 (MCPR_NVM_SW_ARB_ARB_REQ_SET1 << port));
8086 for (i = 0; i < count*10; i++) {
8087 val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
8088 if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))
8094 if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))) {
8095 DP(BNX2X_MSG_NVM, "cannot get access to nvram interface\n");
/* Release the per-port NVRAM software arbitration lock and poll until the
 * arbiter confirms the grant bit has cleared; mirror image of
 * bnx2x_acquire_nvram_lock().
 */
8102 static int bnx2x_release_nvram_lock(struct bnx2x *bp)
8104 int port = BP_PORT(bp);
8108 /* adjust timeout for emulation/FPGA */
8109 count = NVRAM_TIMEOUT_COUNT;
8110 if (CHIP_REV_IS_SLOW(bp))
8113 /* relinquish nvram interface */
8114 REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
8115 (MCPR_NVM_SW_ARB_ARB_REQ_CLR1 << port));
8117 for (i = 0; i < count*10; i++) {
8118 val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
8119 if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)))
8125 if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)) {
8126 DP(BNX2X_MSG_NVM, "cannot free access to nvram interface\n");
/* Set both NVRAM access-enable bits (read and write) in the MCP register;
 * write-enable is set even for reads.
 */
8133 static void bnx2x_enable_nvram_access(struct bnx2x *bp)
8137 val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
8139 /* enable both bits, even on read */
8140 REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
8141 (val | MCPR_NVM_ACCESS_ENABLE_EN |
8142 MCPR_NVM_ACCESS_ENABLE_WR_EN));
/* Clear both NVRAM access-enable bits; inverse of
 * bnx2x_enable_nvram_access().
 */
8145 static void bnx2x_disable_nvram_access(struct bnx2x *bp)
8149 val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
8151 /* disable both bits, even after read */
8152 REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
8153 (val & ~(MCPR_NVM_ACCESS_ENABLE_EN |
8154 MCPR_NVM_ACCESS_ENABLE_WR_EN)));
/* Issue a single-dword NVRAM read: program the address, start the command
 * with the caller-supplied FIRST/LAST flags, poll for DONE, and return the
 * data in *ret_val converted to big-endian (ethtool sees NVRAM as a byte
 * array).
 */
8157 static int bnx2x_nvram_read_dword(struct bnx2x *bp, u32 offset, u32 *ret_val,
8163 /* build the command word */
8164 cmd_flags |= MCPR_NVM_COMMAND_DOIT;
8166 /* need to clear DONE bit separately */
8167 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);
8169 /* address of the NVRAM to read from */
8170 REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
8171 (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));
8173 /* issue a read command */
8174 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);
8176 /* adjust timeout for emulation/FPGA */
8177 count = NVRAM_TIMEOUT_COUNT;
8178 if (CHIP_REV_IS_SLOW(bp))
8181 /* wait for completion */
8184 for (i = 0; i < count; i++) {
8186 val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
8188 if (val & MCPR_NVM_COMMAND_DONE) {
8189 val = REG_RD(bp, MCP_REG_MCPR_NVM_READ);
8190 /* we read nvram data in cpu order
8191 * but ethtool sees it as an array of bytes
8192 * converting to big-endian will do the work */
8193 val = cpu_to_be32(val);
/* Read buf_size bytes from NVRAM at offset into ret_buf.  Requires dword
 * alignment of both offset and size, checks against flash_size, then takes
 * the NVRAM lock, enables access, streams dwords (FIRST flag on the first,
 * LAST on the final one), and releases access/lock on the way out.
 */
8203 static int bnx2x_nvram_read(struct bnx2x *bp, u32 offset, u8 *ret_buf,
/* validate alignment and size before touching the hardware */
8210 if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
8212 "Invalid parameter: offset 0x%x buf_size 0x%x\n",
8217 if (offset + buf_size > bp->common.flash_size) {
8218 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
8219 " buf_size (0x%x) > flash_size (0x%x)\n",
8220 offset, buf_size, bp->common.flash_size);
8224 /* request access to nvram interface */
8225 rc = bnx2x_acquire_nvram_lock(bp);
8229 /* enable access to nvram interface */
8230 bnx2x_enable_nvram_access(bp);
8232 /* read the first word(s) */
8233 cmd_flags = MCPR_NVM_COMMAND_FIRST;
8234 while ((buf_size > sizeof(u32)) && (rc == 0)) {
8235 rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
8236 memcpy(ret_buf, &val, 4);
8238 /* advance to the next dword */
8239 offset += sizeof(u32);
8240 ret_buf += sizeof(u32);
8241 buf_size -= sizeof(u32);
/* final dword carries the LAST flag to close the NVRAM transaction */
8246 cmd_flags |= MCPR_NVM_COMMAND_LAST;
8247 rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
8248 memcpy(ret_buf, &val, 4);
8251 /* disable access to nvram interface */
8252 bnx2x_disable_nvram_access(bp);
8253 bnx2x_release_nvram_lock(bp);
/* ethtool get_eeprom: thin wrapper over bnx2x_nvram_read(); requires the
 * interface to be running.  Offset/length were already validated by the
 * ethtool core.
 */
8258 static int bnx2x_get_eeprom(struct net_device *dev,
8259 struct ethtool_eeprom *eeprom, u8 *eebuf)
8261 struct bnx2x *bp = netdev_priv(dev);
8264 if (!netif_running(dev))
8267 DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
8268 DP_LEVEL " magic 0x%x offset 0x%x (%d) len 0x%x (%d)\n",
8269 eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
8270 eeprom->len, eeprom->len);
8272 /* parameters already validated in ethtool_get_eeprom */
8274 rc = bnx2x_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
/* Issue a single-dword NVRAM write: clear DONE, program data and address,
 * start the write command with the caller's FIRST/LAST flags, then poll
 * for completion.  Counterpart of bnx2x_nvram_read_dword().
 */
8279 static int bnx2x_nvram_write_dword(struct bnx2x *bp, u32 offset, u32 val,
8284 /* build the command word */
8285 cmd_flags |= MCPR_NVM_COMMAND_DOIT | MCPR_NVM_COMMAND_WR;
8287 /* need to clear DONE bit separately */
8288 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);
8290 /* write the data */
8291 REG_WR(bp, MCP_REG_MCPR_NVM_WRITE, val);
8293 /* address of the NVRAM to write to */
8294 REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
8295 (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));
8297 /* issue the write command */
8298 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);
8300 /* adjust timeout for emulation/FPGA */
8301 count = NVRAM_TIMEOUT_COUNT;
8302 if (CHIP_REV_IS_SLOW(bp))
8305 /* wait for completion */
8307 for (i = 0; i < count; i++) {
8309 val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
8310 if (val & MCPR_NVM_COMMAND_DONE) {
8319 #define BYTE_OFFSET(offset) (8 * (offset & 0x03))
/* Write a single byte to NVRAM via read-modify-write of the containing
 * dword: read the aligned dword, splice the byte in at BYTE_OFFSET(offset)
 * (in the big-endian byte-array view), convert back to cpu order, and
 * write it out, all under the NVRAM lock.
 */
8321 static int bnx2x_nvram_write1(struct bnx2x *bp, u32 offset, u8 *data_buf,
8329 if (offset + buf_size > bp->common.flash_size) {
8330 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
8331 " buf_size (0x%x) > flash_size (0x%x)\n",
8332 offset, buf_size, bp->common.flash_size);
8336 /* request access to nvram interface */
8337 rc = bnx2x_acquire_nvram_lock(bp);
8341 /* enable access to nvram interface */
8342 bnx2x_enable_nvram_access(bp);
/* single-dword transaction: FIRST and LAST in one command */
8344 cmd_flags = (MCPR_NVM_COMMAND_FIRST | MCPR_NVM_COMMAND_LAST);
8345 align_offset = (offset & ~0x03);
8346 rc = bnx2x_nvram_read_dword(bp, align_offset, &val, cmd_flags);
/* replace just the target byte within the dword */
8349 val &= ~(0xff << BYTE_OFFSET(offset));
8350 val |= (*data_buf << BYTE_OFFSET(offset));
8352 /* nvram data is returned as an array of bytes
8353 * convert it back to cpu order */
8354 val = be32_to_cpu(val);
8356 rc = bnx2x_nvram_write_dword(bp, align_offset, val,
8360 /* disable access to nvram interface */
8361 bnx2x_disable_nvram_access(bp);
8362 bnx2x_release_nvram_lock(bp);
/* Write buf_size bytes to NVRAM at offset.  A 1-byte request (ethtool) is
 * delegated to bnx2x_nvram_write1(); otherwise offset/size must be dword
 * aligned and in range.  Streams dwords under the NVRAM lock, setting the
 * FIRST flag at each NVRAM page boundary and LAST on the final dword of
 * the buffer or of a page.
 */
8367 static int bnx2x_nvram_write(struct bnx2x *bp, u32 offset, u8 *data_buf,
8375 if (buf_size == 1) /* ethtool */
8376 return bnx2x_nvram_write1(bp, offset, data_buf, buf_size);
8378 if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
8380 "Invalid parameter: offset 0x%x buf_size 0x%x\n",
8385 if (offset + buf_size > bp->common.flash_size) {
8386 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
8387 " buf_size (0x%x) > flash_size (0x%x)\n",
8388 offset, buf_size, bp->common.flash_size);
8392 /* request access to nvram interface */
8393 rc = bnx2x_acquire_nvram_lock(bp);
8397 /* enable access to nvram interface */
8398 bnx2x_enable_nvram_access(bp);
8401 cmd_flags = MCPR_NVM_COMMAND_FIRST;
8402 while ((written_so_far < buf_size) && (rc == 0)) {
/* mark transaction boundaries: end of buffer or NVRAM page edge */
8403 if (written_so_far == (buf_size - sizeof(u32)))
8404 cmd_flags |= MCPR_NVM_COMMAND_LAST;
8405 else if (((offset + 4) % NVRAM_PAGE_SIZE) == 0)
8406 cmd_flags |= MCPR_NVM_COMMAND_LAST;
8407 else if ((offset % NVRAM_PAGE_SIZE) == 0)
8408 cmd_flags |= MCPR_NVM_COMMAND_FIRST;
8410 memcpy(&val, data_buf, 4);
8412 rc = bnx2x_nvram_write_dword(bp, offset, val, cmd_flags);
8414 /* advance to the next dword */
8415 offset += sizeof(u32);
8416 data_buf += sizeof(u32);
8417 written_so_far += sizeof(u32);
8421 /* disable access to nvram interface */
8422 bnx2x_disable_nvram_access(bp);
8423 bnx2x_release_nvram_lock(bp);
/* ethtool set_eeprom: magic 0x00504859 ("PHY") routes the buffer to a PHY
 * firmware download (PMF only, under the PHY lock, with a link reset +
 * re-init when the device is up); anything else is a plain NVRAM write.
 */
8428 static int bnx2x_set_eeprom(struct net_device *dev,
8429 struct ethtool_eeprom *eeprom, u8 *eebuf)
8431 struct bnx2x *bp = netdev_priv(dev);
8434 if (!netif_running(dev))
8437 DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
8438 DP_LEVEL " magic 0x%x offset 0x%x (%d) len 0x%x (%d)\n",
8439 eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
8440 eeprom->len, eeprom->len);
8442 /* parameters already validated in ethtool_set_eeprom */
8444 /* If the magic number is PHY (0x00504859) upgrade the PHY FW */
8445 if (eeprom->magic == 0x00504859)
8448 bnx2x_acquire_phy_lock(bp);
8449 rc = bnx2x_flash_download(bp, BP_PORT(bp),
8450 bp->link_params.ext_phy_config,
8451 (bp->state != BNX2X_STATE_CLOSED),
8452 eebuf, eeprom->len);
/* restart the link so the freshly downloaded PHY FW takes effect */
8453 if ((bp->state == BNX2X_STATE_OPEN) ||
8454 (bp->state == BNX2X_STATE_DISABLED)) {
8455 rc |= bnx2x_link_reset(&bp->link_params,
8457 rc |= bnx2x_phy_init(&bp->link_params,
8460 bnx2x_release_phy_lock(bp);
8462 } else /* Only the PMF can access the PHY */
8465 rc = bnx2x_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
/* ethtool get_coalesce: report the current Rx/Tx interrupt coalescing
 * tick values; all other coalesce fields are zeroed.
 */
8470 static int bnx2x_get_coalesce(struct net_device *dev,
8471 struct ethtool_coalesce *coal)
8473 struct bnx2x *bp = netdev_priv(dev);
8475 memset(coal, 0, sizeof(struct ethtool_coalesce));
8477 coal->rx_coalesce_usecs = bp->rx_ticks;
8478 coal->tx_coalesce_usecs = bp->tx_ticks;
/* ethtool set_coalesce: clamp and store the Rx/Tx tick values, then push
 * them to hardware if the interface is up.
 * NOTE(review): rx is clamped to decimal 3000 but tx to hex 0x3000 —
 * looks inconsistent; confirm against the HC coalescing limits.
 */
8483 static int bnx2x_set_coalesce(struct net_device *dev,
8484 struct ethtool_coalesce *coal)
8486 struct bnx2x *bp = netdev_priv(dev);
8488 bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
8489 if (bp->rx_ticks > 3000)
8490 bp->rx_ticks = 3000;
8492 bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
8493 if (bp->tx_ticks > 0x3000)
8494 bp->tx_ticks = 0x3000;
8496 if (netif_running(dev))
8497 bnx2x_update_coalesce(bp);
/* ethtool get_ringparam: report max and current Rx/Tx ring sizes; mini
 * and jumbo rings are not implemented.
 */
8502 static void bnx2x_get_ringparam(struct net_device *dev,
8503 struct ethtool_ringparam *ering)
8505 struct bnx2x *bp = netdev_priv(dev);
8507 ering->rx_max_pending = MAX_RX_AVAIL;
8508 ering->rx_mini_max_pending = 0;
8509 ering->rx_jumbo_max_pending = 0;
8511 ering->rx_pending = bp->rx_ring_size;
8512 ering->rx_mini_pending = 0;
8513 ering->rx_jumbo_pending = 0;
8515 ering->tx_max_pending = MAX_TX_AVAIL;
8516 ering->tx_pending = bp->tx_ring_size;
/* ethtool set_ringparam: validate the requested sizes (tx must leave room
 * for a maximally-fragmented skb), store them, and reload the NIC if it is
 * running so the new rings are allocated.
 */
8519 static int bnx2x_set_ringparam(struct net_device *dev,
8520 struct ethtool_ringparam *ering)
8522 struct bnx2x *bp = netdev_priv(dev);
8525 if ((ering->rx_pending > MAX_RX_AVAIL) ||
8526 (ering->tx_pending > MAX_TX_AVAIL) ||
8527 (ering->tx_pending <= MAX_SKB_FRAGS + 4))
8530 bp->rx_ring_size = ering->rx_pending;
8531 bp->tx_ring_size = ering->tx_pending;
8533 if (netif_running(dev)) {
8534 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
8535 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
/* ethtool get_pauseparam: autoneg is reported only when flow control is
 * AUTO and line speed is autonegotiated; rx/tx pause reflect the currently
 * resolved flow-control bits.
 */
8541 static void bnx2x_get_pauseparam(struct net_device *dev,
8542 struct ethtool_pauseparam *epause)
8544 struct bnx2x *bp = netdev_priv(dev);
8546 epause->autoneg = (bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO) &&
8547 (bp->link_params.req_line_speed == SPEED_AUTO_NEG);
8549 epause->rx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) ==
8550 BNX2X_FLOW_CTRL_RX);
8551 epause->tx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX) ==
8552 BNX2X_FLOW_CTRL_TX);
8554 DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
8555 DP_LEVEL " autoneg %d rx_pause %d tx_pause %d\n",
8556 epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
/* ethtool set_pauseparam: build req_flow_ctrl from the rx/tx pause bits
 * (NONE when neither is set), optionally switch to AUTO when autoneg is
 * requested and supported, and restart stats on a running interface.
 */
8559 static int bnx2x_set_pauseparam(struct net_device *dev,
8560 struct ethtool_pauseparam *epause)
8562 struct bnx2x *bp = netdev_priv(dev);
8567 DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
8568 DP_LEVEL " autoneg %d rx_pause %d tx_pause %d\n",
8569 epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
/* start from AUTO (all bits set), then narrow to the requested bits */
8571 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO;
8573 if (epause->rx_pause)
8574 bp->link_params.req_flow_ctrl |= BNX2X_FLOW_CTRL_RX;
8576 if (epause->tx_pause)
8577 bp->link_params.req_flow_ctrl |= BNX2X_FLOW_CTRL_TX;
8579 if (bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO)
8580 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_NONE;
8582 if (epause->autoneg) {
8583 if (!(bp->port.supported & SUPPORTED_Autoneg)) {
8584 DP(NETIF_MSG_LINK, "autoneg not supported\n");
8588 if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
8589 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO;
8593 "req_flow_ctrl 0x%x\n", bp->link_params.req_flow_ctrl);
8595 if (netif_running(dev)) {
8596 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
/* ethtool set_flags: toggle LRO (hardware TPA).  Enabling LRO requires Rx
 * checksum offload to be on; the NETIF_F_LRO feature bit and the driver's
 * TPA_ENABLE_FLAG are kept in sync, and a running NIC is reloaded when the
 * setting actually changed.
 */
8603 static int bnx2x_set_flags(struct net_device *dev, u32 data)
8605 struct bnx2x *bp = netdev_priv(dev);
8609 /* TPA requires Rx CSUM offloading */
8610 if ((data & ETH_FLAG_LRO) && bp->rx_csum) {
8611 if (!(dev->features & NETIF_F_LRO)) {
8612 dev->features |= NETIF_F_LRO;
8613 bp->flags |= TPA_ENABLE_FLAG;
8617 } else if (dev->features & NETIF_F_LRO) {
8618 dev->features &= ~NETIF_F_LRO;
8619 bp->flags &= ~TPA_ENABLE_FLAG;
8623 if (changed && netif_running(dev)) {
8624 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
8625 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
/* ethtool .get_rx_csum handler: report the current RX checksum-offload state
 * (presumably returns bp->rx_csum — the return line is missing from this
 * extraction). */
8631 static u32 bnx2x_get_rx_csum(struct net_device *dev)
8633 struct bnx2x *bp = netdev_priv(dev);
/* ethtool .set_rx_csum handler: set RX checksum offload on/off. Disabling
 * RX CSUM also forces LRO/TPA off via bnx2x_set_flags(), because TPA'ed
 * packets would otherwise be discarded with bad TCP checksums.
 * NOTE(review): the assignment to bp->rx_csum and the return are missing
 * from this line-sampled extraction. */
8638 static int bnx2x_set_rx_csum(struct net_device *dev, u32 data)
8640 struct bnx2x *bp = netdev_priv(dev);
8645 /* Disable TPA, when Rx CSUM is disabled. Otherwise all
8646 TPA'ed packets will be discarded due to wrong TCP CSUM */
8648 u32 flags = ethtool_op_get_flags(dev);
8650 rc = bnx2x_set_flags(dev, (flags & ~ETH_FLAG_LRO));
/* ethtool .set_tso handler: enable or disable TSO (v4 + ECN + v6) feature
 * bits on the net_device based on 'data'. */
8656 static int bnx2x_set_tso(struct net_device *dev, u32 data)
8659 dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
8660 dev->features |= NETIF_F_TSO6;
8662 dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO_ECN);
8663 dev->features &= ~NETIF_F_TSO6;
/* Names reported to ethtool for each self-test slot; order must match the
 * buf[] indices filled in by bnx2x_self_test(). */
8669 static const struct {
8670 char string[ETH_GSTRING_LEN];
8671 } bnx2x_tests_str_arr[BNX2X_NUM_TESTS] = {
8672 { "register_test (offline)" },
8673 { "memory_test (offline)" },
8674 { "loopback_test (offline)" },
8675 { "nvram_test (online)" },
8676 { "interrupt_test (online)" },
8677 { "link_test (online)" },
8678 { "idle check (online)" },
8679 { "MC errors (online)" }
/* ethtool .self_test_count handler: number of self-test result slots. */
8682 static int bnx2x_self_test_count(struct net_device *dev)
8684 return BNX2X_NUM_TESTS;
/* Offline register self-test: for each entry in reg_tbl, save the register,
 * write a test pattern (0x00000000 then 0xffffffff), read it back, restore
 * the original value, and verify the masked read matches the masked write.
 * Table fields are { base offset, per-port stride (offset1), writable-bit
 * mask }. Returns -ENODEV on failure or when the NIC is down.
 * NOTE(review): the success-path rc assignment and final return are missing
 * from this line-sampled extraction. */
8687 static int bnx2x_test_registers(struct bnx2x *bp)
8689 int idx, i, rc = -ENODEV;
8691 int port = BP_PORT(bp);
8692 static const struct {
8697 /* 0 */ { BRB1_REG_PAUSE_LOW_THRESHOLD_0, 4, 0x000003ff },
8698 { DORQ_REG_DB_ADDR0, 4, 0xffffffff },
8699 { HC_REG_AGG_INT_0, 4, 0x000003ff },
8700 { PBF_REG_MAC_IF0_ENABLE, 4, 0x00000001 },
8701 { PBF_REG_P0_INIT_CRD, 4, 0x000007ff },
8702 { PRS_REG_CID_PORT_0, 4, 0x00ffffff },
8703 { PXP2_REG_PSWRQ_CDU0_L2P, 4, 0x000fffff },
8704 { PXP2_REG_RQ_CDU0_EFIRST_MEM_ADDR, 8, 0x0003ffff },
8705 { PXP2_REG_PSWRQ_TM0_L2P, 4, 0x000fffff },
8706 { PXP2_REG_RQ_USDM0_EFIRST_MEM_ADDR, 8, 0x0003ffff },
8707 /* 10 */ { PXP2_REG_PSWRQ_TSDM0_L2P, 4, 0x000fffff },
8708 { QM_REG_CONNNUM_0, 4, 0x000fffff },
8709 { TM_REG_LIN0_MAX_ACTIVE_CID, 4, 0x0003ffff },
8710 { SRC_REG_KEYRSS0_0, 40, 0xffffffff },
8711 { SRC_REG_KEYRSS0_7, 40, 0xffffffff },
8712 { XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD00, 4, 0x00000001 },
8713 { XCM_REG_WU_DA_CNT_CMD00, 4, 0x00000003 },
8714 { XCM_REG_GLB_DEL_ACK_MAX_CNT_0, 4, 0x000000ff },
8715 { NIG_REG_EGRESS_MNG0_FIFO, 20, 0xffffffff },
8716 { NIG_REG_LLH0_T_BIT, 4, 0x00000001 },
8717 /* 20 */ { NIG_REG_EMAC0_IN_EN, 4, 0x00000001 },
8718 { NIG_REG_BMAC0_IN_EN, 4, 0x00000001 },
8719 { NIG_REG_XCM0_OUT_EN, 4, 0x00000001 },
8720 { NIG_REG_BRB0_OUT_EN, 4, 0x00000001 },
8721 { NIG_REG_LLH0_XCM_MASK, 4, 0x00000007 },
8722 { NIG_REG_LLH0_ACPI_PAT_6_LEN, 68, 0x000000ff },
8723 { NIG_REG_LLH0_ACPI_PAT_0_CRC, 68, 0xffffffff },
8724 { NIG_REG_LLH0_DEST_MAC_0_0, 160, 0xffffffff },
8725 { NIG_REG_LLH0_DEST_IP_0_1, 160, 0xffffffff },
8726 { NIG_REG_LLH0_IPV4_IPV6_0, 160, 0x00000001 },
8727 /* 30 */ { NIG_REG_LLH0_DEST_UDP_0, 160, 0x0000ffff },
8728 { NIG_REG_LLH0_DEST_TCP_0, 160, 0x0000ffff },
8729 { NIG_REG_LLH0_VLAN_ID_0, 160, 0x00000fff },
8730 { NIG_REG_XGXS_SERDES0_MODE_SEL, 4, 0x00000001 },
8731 { NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0, 4, 0x00000001 },
8732 { NIG_REG_STATUS_INTERRUPT_PORT0, 4, 0x07ffffff },
8733 { NIG_REG_XGXS0_CTRL_EXTREMOTEMDIOST, 24, 0x00000001 },
8734 { NIG_REG_SERDES0_CTRL_PHY_ADDR, 16, 0x0000001f },
/* Sentinel terminating the table. */
8736 { 0xffffffff, 0, 0x00000000 }
/* Register access requires a loaded NIC. */
8739 if (!netif_running(bp->dev))
8742 /* Repeat the test twice:
8743 First by writing 0x00000000, second by writing 0xffffffff */
8744 for (idx = 0; idx < 2; idx++) {
8751 wr_val = 0xffffffff;
8755 for (i = 0; reg_tbl[i].offset0 != 0xffffffff; i++) {
8756 u32 offset, mask, save_val, val;
/* offset1 is the per-port stride for dual-port registers. */
8758 offset = reg_tbl[i].offset0 + port*reg_tbl[i].offset1;
8759 mask = reg_tbl[i].mask;
8761 save_val = REG_RD(bp, offset);
8763 REG_WR(bp, offset, wr_val);
8764 val = REG_RD(bp, offset);
8766 /* Restore the original register's value */
8767 REG_WR(bp, offset, save_val);
8769 /* verify that value is as expected value */
8770 if ((val & mask) != (wr_val & mask))
/* Offline memory self-test: read through each internal memory region in
 * mem_tbl to trigger parity checking, then read the parity status registers
 * in prty_tbl and fail if any unmasked parity bit is set (masks differ for
 * E1 vs E1H silicon). Returns -ENODEV on failure or when the NIC is down.
 * NOTE(review): the success-path rc assignment and return are missing from
 * this line-sampled extraction. */
8781 static int bnx2x_test_memory(struct bnx2x *bp)
8783 int i, j, rc = -ENODEV;
8785 static const struct {
8789 { CCM_REG_XX_DESCR_TABLE, CCM_REG_XX_DESCR_TABLE_SIZE },
8790 { CFC_REG_ACTIVITY_COUNTER, CFC_REG_ACTIVITY_COUNTER_SIZE },
8791 { CFC_REG_LINK_LIST, CFC_REG_LINK_LIST_SIZE },
8792 { DMAE_REG_CMD_MEM, DMAE_REG_CMD_MEM_SIZE },
8793 { TCM_REG_XX_DESCR_TABLE, TCM_REG_XX_DESCR_TABLE_SIZE },
8794 { UCM_REG_XX_DESCR_TABLE, UCM_REG_XX_DESCR_TABLE_SIZE },
8795 { XCM_REG_XX_DESCR_TABLE, XCM_REG_XX_DESCR_TABLE_SIZE },
/* Parity-status registers with chip-specific don't-care masks. */
8799 static const struct {
8805 { "CCM_PRTY_STS", CCM_REG_CCM_PRTY_STS, 0x3ffc0, 0 },
8806 { "CFC_PRTY_STS", CFC_REG_CFC_PRTY_STS, 0x2, 0x2 },
8807 { "DMAE_PRTY_STS", DMAE_REG_DMAE_PRTY_STS, 0, 0 },
8808 { "TCM_PRTY_STS", TCM_REG_TCM_PRTY_STS, 0x3ffc0, 0 },
8809 { "UCM_PRTY_STS", UCM_REG_UCM_PRTY_STS, 0x3ffc0, 0 },
8810 { "XCM_PRTY_STS", XCM_REG_XCM_PRTY_STS, 0x3ffc1, 0 },
8812 { NULL, 0xffffffff, 0, 0 }
8815 if (!netif_running(bp->dev))
8818 /* Go through all the memories */
8819 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++)
8820 for (j = 0; j < mem_tbl[i].size; j++)
8821 REG_RD(bp, mem_tbl[i].offset + j*4);
8823 /* Check the parity status */
8824 for (i = 0; prty_tbl[i].offset != 0xffffffff; i++) {
8825 val = REG_RD(bp, prty_tbl[i].offset);
8826 if ((CHIP_IS_E1(bp) && (val & ~(prty_tbl[i].e1_mask))) ||
8827 (CHIP_IS_E1H(bp) && (val & ~(prty_tbl[i].e1h_mask)))) {
8829 "%s is 0x%x\n", prty_tbl[i].name, val);
/* Poll bnx2x_link_test() until the link reaches the expected state or the
 * retry count is exhausted (body partially missing from this extraction). */
8840 static void bnx2x_wait_for_link(struct bnx2x *bp, u8 link_up)
8845 while (bnx2x_link_test(bp) && cnt--)
/* Run one loopback test (BMAC or XGXS PHY loopback) on fastpath ring 0:
 * build a single test frame with a known byte pattern, post it on the TX
 * ring with a doorbell, then verify exactly one packet came back on the RX
 * ring with matching length and payload. Restores normal (non-loopback)
 * link mode and the RX ring producers before returning.
 * NOTE(review): many intermediate lines (pkt_size/num_pkts setup, rc
 * assignments, the test_loopback_exit label and returns) are missing from
 * this line-sampled extraction. */
8849 static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode, u8 link_up)
8851 unsigned int pkt_size, num_pkts, i;
8852 struct sk_buff *skb;
8853 unsigned char *packet;
8854 struct bnx2x_fastpath *fp = &bp->fp[0];
8855 u16 tx_start_idx, tx_idx;
8856 u16 rx_start_idx, rx_idx;
8858 struct sw_tx_bd *tx_buf;
8859 struct eth_tx_bd *tx_bd;
8861 union eth_rx_cqe *cqe;
8863 struct sw_rx_bd *rx_buf;
/* Select and program the requested loopback mode in the link params. */
8867 if (loopback_mode == BNX2X_MAC_LOOPBACK) {
8868 bp->link_params.loopback_mode = LOOPBACK_BMAC;
8869 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
8871 } else if (loopback_mode == BNX2X_PHY_LOOPBACK) {
8873 bp->link_params.loopback_mode = LOOPBACK_XGXS_10;
8874 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
8875 /* wait until link state is restored */
8877 while (cnt-- && bnx2x_test_link(&bp->link_params,
/* Build the test frame: our MAC as DA, zeroed rest of header, then an
 * incrementing byte pattern used to verify the payload on RX. */
8884 skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
8887 goto test_loopback_exit;
8889 packet = skb_put(skb, pkt_size);
8890 memcpy(packet, bp->dev->dev_addr, ETH_ALEN);
8891 memset(packet + ETH_ALEN, 0, (ETH_HLEN - ETH_ALEN));
8892 for (i = ETH_HLEN; i < pkt_size; i++)
8893 packet[i] = (unsigned char) (i & 0xff);
/* Snapshot consumer indices so we can detect exactly one completion. */
8896 tx_start_idx = le16_to_cpu(*fp->tx_cons_sb);
8897 rx_start_idx = le16_to_cpu(*fp->rx_cons_sb);
8899 pkt_prod = fp->tx_pkt_prod++;
8900 tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)];
8901 tx_buf->first_bd = fp->tx_bd_prod;
/* Fill a single start+end BD describing the whole frame. */
8904 tx_bd = &fp->tx_desc_ring[TX_BD(fp->tx_bd_prod)];
8905 mapping = pci_map_single(bp->pdev, skb->data,
8906 skb_headlen(skb), PCI_DMA_TODEVICE);
8907 tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
8908 tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
8909 tx_bd->nbd = cpu_to_le16(1);
8910 tx_bd->nbytes = cpu_to_le16(skb_headlen(skb));
8911 tx_bd->vlan = cpu_to_le16(pkt_prod);
8912 tx_bd->bd_flags.as_bitfield = (ETH_TX_BD_FLAGS_START_BD |
8913 ETH_TX_BD_FLAGS_END_BD);
8914 tx_bd->general_data = ((UNICAST_ADDRESS <<
8915 ETH_TX_BD_ETH_ADDR_TYPE_SHIFT) | 1);
8919 fp->hw_tx_prods->bds_prod =
8920 cpu_to_le16(le16_to_cpu(fp->hw_tx_prods->bds_prod) + 1);
8921 mb(); /* FW restriction: must not reorder writing nbd and packets */
8922 fp->hw_tx_prods->packets_prod =
8923 cpu_to_le32(le32_to_cpu(fp->hw_tx_prods->packets_prod) + 1);
8924 DOORBELL(bp, FP_IDX(fp), 0);
8930 bp->dev->trans_start = jiffies;
/* Exactly one TX completion and one RX completion are expected. */
8934 tx_idx = le16_to_cpu(*fp->tx_cons_sb);
8935 if (tx_idx != tx_start_idx + num_pkts)
8936 goto test_loopback_exit;
8938 rx_idx = le16_to_cpu(*fp->rx_cons_sb);
8939 if (rx_idx != rx_start_idx + num_pkts)
8940 goto test_loopback_exit;
/* Validate the completion: fast-path CQE, no error flags, right length. */
8942 cqe = &fp->rx_comp_ring[RCQ_BD(fp->rx_comp_cons)];
8943 cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
8944 if (CQE_TYPE(cqe_fp_flags) || (cqe_fp_flags & ETH_RX_ERROR_FALGS))
8945 goto test_loopback_rx_exit;
8947 len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
8948 if (len != pkt_size)
8949 goto test_loopback_rx_exit;
8951 rx_buf = &fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)];
8953 skb_reserve(skb, cqe->fast_path_cqe.placement_offset);
/* Verify the payload pattern byte-for-byte. */
8954 for (i = ETH_HLEN; i < pkt_size; i++)
8955 if (*(skb->data + i) != (unsigned char) (i & 0xff))
8956 goto test_loopback_rx_exit;
8960 test_loopback_rx_exit:
/* Consume the RX BD/CQE we inspected and republish the producers. */
8962 fp->rx_bd_cons = NEXT_RX_IDX(fp->rx_bd_cons);
8963 fp->rx_bd_prod = NEXT_RX_IDX(fp->rx_bd_prod);
8964 fp->rx_comp_cons = NEXT_RCQ_IDX(fp->rx_comp_cons);
8965 fp->rx_comp_prod = NEXT_RCQ_IDX(fp->rx_comp_prod);
8967 /* Update producers */
8968 bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
8972 bp->link_params.loopback_mode = LOOPBACK_NONE;
/* Run both loopback sub-tests (MAC then PHY) with the netif stopped and the
 * PHY lock held; returns a bitmask of failed sub-tests (0 on full success),
 * or BNX2X_LOOPBACK_FAILED when the NIC is not running. */
8977 static int bnx2x_test_loopback(struct bnx2x *bp, u8 link_up)
8981 if (!netif_running(bp->dev))
8982 return BNX2X_LOOPBACK_FAILED;
/* Quiesce the data path and serialize PHY access for the duration. */
8984 bnx2x_netif_stop(bp, 1);
8985 bnx2x_acquire_phy_lock(bp);
8987 if (bnx2x_run_loopback(bp, BNX2X_MAC_LOOPBACK, link_up)) {
8988 DP(NETIF_MSG_PROBE, "MAC loopback failed\n");
8989 rc |= BNX2X_MAC_LOOPBACK_FAILED;
8992 if (bnx2x_run_loopback(bp, BNX2X_PHY_LOOPBACK, link_up)) {
8993 DP(NETIF_MSG_PROBE, "PHY loopback failed\n");
8994 rc |= BNX2X_PHY_LOOPBACK_FAILED;
8997 bnx2x_release_phy_lock(bp);
8998 bnx2x_netif_start(bp);
/* Expected CRC32 residual when a region's data plus its stored CRC are run
 * through the CRC again — a standard CRC-32 self-check constant. */
9003 #define CRC32_RESIDUAL 0xdebb20e3
/* NVRAM self-test: verify the magic word at offset 0, then CRC-check each
 * region listed in nvram_tbl ({offset, size} pairs). A region passes when
 * ether_crc_le() over it yields CRC32_RESIDUAL.
 * NOTE(review): declarations of buf/csum/magic and the exit label/return are
 * missing from this line-sampled extraction. */
9005 static int bnx2x_test_nvram(struct bnx2x *bp)
9007 static const struct {
9011 { 0, 0x14 }, /* bootstrap */
9012 { 0x14, 0xec }, /* dir */
9013 { 0x100, 0x350 }, /* manuf_info */
9014 { 0x450, 0xf0 }, /* feature_info */
9015 { 0x640, 0x64 }, /* upgrade_key_info */
9017 { 0x708, 0x70 }, /* manuf_key_info */
9022 u8 *data = (u8 *)buf;
/* First sanity check: the NVRAM magic word. */
9026 rc = bnx2x_nvram_read(bp, 0, data, 4);
9028 DP(NETIF_MSG_PROBE, "magic value read (rc -%d)\n", -rc);
9029 goto test_nvram_exit;
9032 magic = be32_to_cpu(buf[0]);
9033 if (magic != 0x669955aa) {
9034 DP(NETIF_MSG_PROBE, "magic value (0x%08x)\n", magic);
9036 goto test_nvram_exit;
/* CRC-check every region in the table (size==0 terminates the loop). */
9039 for (i = 0; nvram_tbl[i].size; i++) {
9041 rc = bnx2x_nvram_read(bp, nvram_tbl[i].offset, data,
9045 "nvram_tbl[%d] read data (rc -%d)\n", i, -rc);
9046 goto test_nvram_exit;
9049 csum = ether_crc_le(nvram_tbl[i].size, data);
9050 if (csum != CRC32_RESIDUAL) {
9052 "nvram_tbl[%d] csum value (0x%08x)\n", i, csum);
9054 goto test_nvram_exit;
/* Interrupt self-test: post a harmless SET_MAC ramrod on the slowpath and
 * poll (up to ~100ms) for the completion interrupt to clear
 * bp->set_mac_pending. Exercises the slowpath interrupt delivery end-to-end.
 * NOTE(review): rc/return handling at the end is missing from this
 * line-sampled extraction. */
9062 static int bnx2x_test_intr(struct bnx2x *bp)
9064 struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
9067 if (!netif_running(bp->dev))
/* Zero-length command header: no MAC entries are actually changed. */
9070 config->hdr.length = 0;
9072 config->hdr.offset = (BP_PORT(bp) ? 32 : 0);
9074 config->hdr.offset = BP_FUNC(bp);
9075 config->hdr.client_id = BP_CL_ID(bp);
9076 config->hdr.reserved1 = 0;
9078 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
9079 U64_HI(bnx2x_sp_mapping(bp, mac_config)),
9080 U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
9082 bp->set_mac_pending++;
/* Wait for the ramrod completion interrupt to clear the pending flag. */
9083 for (i = 0; i < 10; i++) {
9084 if (!bp->set_mac_pending)
9086 msleep_interruptible(10);
/* ethtool .self_test handler: fill buf[0..7] with sub-test results in the
 * order of bnx2x_tests_str_arr and set ETH_TEST_FL_FAILED on any failure.
 * Offline tests (registers/memory/loopback) require a full unload and a
 * reload in diagnostic mode, then a reload back to normal; they are skipped
 * in multi-function (MF) mode.
 * NOTE(review): several buf[] index assignments and closing braces are
 * missing from this line-sampled extraction. */
9095 static void bnx2x_self_test(struct net_device *dev,
9096 struct ethtool_test *etest, u64 *buf)
9098 struct bnx2x *bp = netdev_priv(dev);
9100 memset(buf, 0, sizeof(u64) * BNX2X_NUM_TESTS);
9102 if (!netif_running(dev))
9105 /* offline tests are not supported in MF mode */
9107 etest->flags &= ~ETH_TEST_FL_OFFLINE;
9109 if (etest->flags & ETH_TEST_FL_OFFLINE) {
/* Remember link state, reload NIC in diagnostic mode. */
9112 link_up = bp->link_vars.link_up;
9113 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
9114 bnx2x_nic_load(bp, LOAD_DIAG);
9115 /* wait until link state is restored */
9116 bnx2x_wait_for_link(bp, link_up);
9118 if (bnx2x_test_registers(bp) != 0) {
9120 etest->flags |= ETH_TEST_FL_FAILED;
9122 if (bnx2x_test_memory(bp) != 0) {
9124 etest->flags |= ETH_TEST_FL_FAILED;
9126 buf[2] = bnx2x_test_loopback(bp, link_up);
9128 etest->flags |= ETH_TEST_FL_FAILED;
/* Back to normal operating mode after the offline tests. */
9130 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
9131 bnx2x_nic_load(bp, LOAD_NORMAL);
9132 /* wait until link state is restored */
9133 bnx2x_wait_for_link(bp, link_up);
9135 if (bnx2x_test_nvram(bp) != 0) {
9137 etest->flags |= ETH_TEST_FL_FAILED;
9139 if (bnx2x_test_intr(bp) != 0) {
9141 etest->flags |= ETH_TEST_FL_FAILED;
9144 if (bnx2x_link_test(bp) != 0) {
9146 etest->flags |= ETH_TEST_FL_FAILED;
9148 buf[7] = bnx2x_mc_assert(bp);
9150 etest->flags |= ETH_TEST_FL_FAILED;
9152 #ifdef BNX2X_EXTRA_DEBUG
9153 bnx2x_panic_dump(bp);
/* ethtool statistics descriptor table: each entry gives the 32-bit-word
 * offset into bp->eth_stats, the counter width in bytes (4 or 8; 8-byte
 * counters are stored as hi/lo word pairs), a PORT/FUNC scope flag, and the
 * string shown to the user. PORT-scoped entries are hidden in E1H
 * multi-function mode (see IS_NOT_E1HMF_STAT). */
9157 static const struct {
9161 #define STATS_FLAGS_PORT 1
9162 #define STATS_FLAGS_FUNC 2
9163 u8 string[ETH_GSTRING_LEN];
9164 } bnx2x_stats_arr[BNX2X_NUM_STATS] = {
9165 /* 1 */ { STATS_OFFSET32(valid_bytes_received_hi),
9166 8, STATS_FLAGS_FUNC, "rx_bytes" },
9167 { STATS_OFFSET32(error_bytes_received_hi),
9168 8, STATS_FLAGS_FUNC, "rx_error_bytes" },
9169 { STATS_OFFSET32(total_bytes_transmitted_hi),
9170 8, STATS_FLAGS_FUNC, "tx_bytes" },
9171 { STATS_OFFSET32(tx_stat_ifhcoutbadoctets_hi),
9172 8, STATS_FLAGS_PORT, "tx_error_bytes" },
9173 { STATS_OFFSET32(total_unicast_packets_received_hi),
9174 8, STATS_FLAGS_FUNC, "rx_ucast_packets" },
9175 { STATS_OFFSET32(total_multicast_packets_received_hi),
9176 8, STATS_FLAGS_FUNC, "rx_mcast_packets" },
9177 { STATS_OFFSET32(total_broadcast_packets_received_hi),
9178 8, STATS_FLAGS_FUNC, "rx_bcast_packets" },
9179 { STATS_OFFSET32(total_unicast_packets_transmitted_hi),
9180 8, STATS_FLAGS_FUNC, "tx_packets" },
9181 { STATS_OFFSET32(tx_stat_dot3statsinternalmactransmiterrors_hi),
9182 8, STATS_FLAGS_PORT, "tx_mac_errors" },
9183 /* 10 */{ STATS_OFFSET32(rx_stat_dot3statscarriersenseerrors_hi),
9184 8, STATS_FLAGS_PORT, "tx_carrier_errors" },
9185 { STATS_OFFSET32(rx_stat_dot3statsfcserrors_hi),
9186 8, STATS_FLAGS_PORT, "rx_crc_errors" },
9187 { STATS_OFFSET32(rx_stat_dot3statsalignmenterrors_hi),
9188 8, STATS_FLAGS_PORT, "rx_align_errors" },
9189 { STATS_OFFSET32(tx_stat_dot3statssinglecollisionframes_hi),
9190 8, STATS_FLAGS_PORT, "tx_single_collisions" },
9191 { STATS_OFFSET32(tx_stat_dot3statsmultiplecollisionframes_hi),
9192 8, STATS_FLAGS_PORT, "tx_multi_collisions" },
9193 { STATS_OFFSET32(tx_stat_dot3statsdeferredtransmissions_hi),
9194 8, STATS_FLAGS_PORT, "tx_deferred" },
9195 { STATS_OFFSET32(tx_stat_dot3statsexcessivecollisions_hi),
9196 8, STATS_FLAGS_PORT, "tx_excess_collisions" },
9197 { STATS_OFFSET32(tx_stat_dot3statslatecollisions_hi),
9198 8, STATS_FLAGS_PORT, "tx_late_collisions" },
9199 { STATS_OFFSET32(tx_stat_etherstatscollisions_hi),
9200 8, STATS_FLAGS_PORT, "tx_total_collisions" },
9201 { STATS_OFFSET32(rx_stat_etherstatsfragments_hi),
9202 8, STATS_FLAGS_PORT, "rx_fragments" },
9203 /* 20 */{ STATS_OFFSET32(rx_stat_etherstatsjabbers_hi),
9204 8, STATS_FLAGS_PORT, "rx_jabbers" },
9205 { STATS_OFFSET32(rx_stat_etherstatsundersizepkts_hi),
9206 8, STATS_FLAGS_PORT, "rx_undersize_packets" },
9207 { STATS_OFFSET32(jabber_packets_received),
9208 4, STATS_FLAGS_FUNC, "rx_oversize_packets" },
9209 { STATS_OFFSET32(tx_stat_etherstatspkts64octets_hi),
9210 8, STATS_FLAGS_PORT, "tx_64_byte_packets" },
9211 { STATS_OFFSET32(tx_stat_etherstatspkts65octetsto127octets_hi),
9212 8, STATS_FLAGS_PORT, "tx_65_to_127_byte_packets" },
9213 { STATS_OFFSET32(tx_stat_etherstatspkts128octetsto255octets_hi),
9214 8, STATS_FLAGS_PORT, "tx_128_to_255_byte_packets" },
9215 { STATS_OFFSET32(tx_stat_etherstatspkts256octetsto511octets_hi),
9216 8, STATS_FLAGS_PORT, "tx_256_to_511_byte_packets" },
9217 { STATS_OFFSET32(tx_stat_etherstatspkts512octetsto1023octets_hi),
9218 8, STATS_FLAGS_PORT, "tx_512_to_1023_byte_packets" },
9219 { STATS_OFFSET32(etherstatspkts1024octetsto1522octets_hi),
9220 8, STATS_FLAGS_PORT, "tx_1024_to_1522_byte_packets" },
9221 { STATS_OFFSET32(etherstatspktsover1522octets_hi),
9222 8, STATS_FLAGS_PORT, "tx_1523_to_9022_byte_packets" },
9223 /* 30 */{ STATS_OFFSET32(rx_stat_xonpauseframesreceived_hi),
9224 8, STATS_FLAGS_PORT, "rx_xon_frames" },
9225 { STATS_OFFSET32(rx_stat_xoffpauseframesreceived_hi),
9226 8, STATS_FLAGS_PORT, "rx_xoff_frames" },
9227 { STATS_OFFSET32(tx_stat_outxonsent_hi),
9228 8, STATS_FLAGS_PORT, "tx_xon_frames" },
9229 { STATS_OFFSET32(tx_stat_outxoffsent_hi),
9230 8, STATS_FLAGS_PORT, "tx_xoff_frames" },
9231 { STATS_OFFSET32(rx_stat_maccontrolframesreceived_hi),
9232 8, STATS_FLAGS_PORT, "rx_mac_ctrl_frames" },
9233 { STATS_OFFSET32(mac_filter_discard),
9234 4, STATS_FLAGS_PORT, "rx_filtered_packets" },
9235 { STATS_OFFSET32(no_buff_discard),
9236 4, STATS_FLAGS_FUNC, "rx_discards" },
9237 { STATS_OFFSET32(xxoverflow_discard),
9238 4, STATS_FLAGS_PORT, "rx_fw_discards" },
9239 { STATS_OFFSET32(brb_drop_hi),
9240 8, STATS_FLAGS_PORT, "brb_discard" },
9241 { STATS_OFFSET32(brb_truncate_hi),
9242 8, STATS_FLAGS_PORT, "brb_truncate" },
9243 /* 40 */{ STATS_OFFSET32(rx_err_discard_pkt),
9244 4, STATS_FLAGS_FUNC, "rx_phy_ip_err_discards"},
9245 { STATS_OFFSET32(rx_skb_alloc_failed),
9246 4, STATS_FLAGS_FUNC, "rx_skb_alloc_discard" },
9247 /* 42 */{ STATS_OFFSET32(hw_csum_err),
9248 4, STATS_FLAGS_FUNC, "rx_csum_offload_errors" }
/* True when stat entry i must be hidden: port-scoped counters are not
 * meaningful per-function in E1H multi-function mode. */
9251 #define IS_NOT_E1HMF_STAT(bp, i) \
9252 (IS_E1HMF(bp) && (bnx2x_stats_arr[i].flags & STATS_FLAGS_PORT))
/* ethtool .get_strings handler: copy statistics names (filtered for E1H-MF
 * mode) or self-test names into buf, depending on 'stringset'.
 * NOTE(review): the switch-case labels are missing from this line-sampled
 * extraction. */
9254 static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
9256 struct bnx2x *bp = netdev_priv(dev);
9259 switch (stringset) {
9261 for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
9262 if (IS_NOT_E1HMF_STAT(bp, i))
9264 strcpy(buf + j*ETH_GSTRING_LEN,
9265 bnx2x_stats_arr[i].string)
9271 memcpy(buf, bnx2x_tests_str_arr, sizeof(bnx2x_tests_str_arr));
/* ethtool .get_stats_count handler: count entries of bnx2x_stats_arr that
 * are visible in the current mode (port-scoped stats are hidden in E1H-MF;
 * the accumulation and return lines are missing from this extraction). */
9276 static int bnx2x_get_stats_count(struct net_device *dev)
9278 struct bnx2x *bp = netdev_priv(dev);
9279 int i, num_stats = 0;
9281 for (i = 0; i < BNX2X_NUM_STATS; i++) {
9282 if (IS_NOT_E1HMF_STAT(bp, i))
/* ethtool .get_ethtool_stats handler: walk bnx2x_stats_arr and copy each
 * visible counter out of bp->eth_stats into buf[]. 4-byte counters are
 * widened to u64; 8-byte counters are assembled from their hi/lo 32-bit
 * words with HILO_U64. */
9289 static void bnx2x_get_ethtool_stats(struct net_device *dev,
9290 struct ethtool_stats *stats, u64 *buf)
9292 struct bnx2x *bp = netdev_priv(dev);
9293 u32 *hw_stats = (u32 *)&bp->eth_stats;
9296 for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
9297 if (IS_NOT_E1HMF_STAT(bp, i))
9300 if (bnx2x_stats_arr[i].size == 0) {
9301 /* skip this counter */
9306 if (bnx2x_stats_arr[i].size == 4) {
9307 /* 4-byte counter */
9308 buf[j] = (u64) *(hw_stats + bnx2x_stats_arr[i].offset);
9312 /* 8-byte counter */
9313 buf[j] = HILO_U64(*(hw_stats + bnx2x_stats_arr[i].offset),
9314 *(hw_stats + bnx2x_stats_arr[i].offset + 1));
/* ethtool .phys_id handler: blink the port LED to identify the adapter.
 * Alternates LED on (1G operational mode) / off every 500ms for 'data'
 * seconds, aborting early on a pending signal, then restores the LED to
 * reflect the actual link state. */
9319 static int bnx2x_phys_id(struct net_device *dev, u32 data)
9321 struct bnx2x *bp = netdev_priv(dev);
9322 int port = BP_PORT(bp);
9325 if (!netif_running(dev))
/* data seconds total; each iteration is half a second (on or off). */
9334 for (i = 0; i < (data * 2); i++) {
9336 bnx2x_set_led(bp, port, LED_MODE_OPER, SPEED_1000,
9337 bp->link_params.hw_led_mode,
9338 bp->link_params.chip_id);
9340 bnx2x_set_led(bp, port, LED_MODE_OFF, 0,
9341 bp->link_params.hw_led_mode,
9342 bp->link_params.chip_id);
9344 msleep_interruptible(500);
9345 if (signal_pending(current))
/* Leave the LED showing the real link state. */
9349 if (bp->link_vars.link_up)
9350 bnx2x_set_led(bp, port, LED_MODE_OPER,
9351 bp->link_vars.line_speed,
9352 bp->link_params.hw_led_mode,
9353 bp->link_params.chip_id);
/* ethtool operations table wiring the handlers above (plus generic
 * ethtool_op_* helpers) into the net_device. */
9358 static struct ethtool_ops bnx2x_ethtool_ops = {
9359 .get_settings = bnx2x_get_settings,
9360 .set_settings = bnx2x_set_settings,
9361 .get_drvinfo = bnx2x_get_drvinfo,
9362 .get_wol = bnx2x_get_wol,
9363 .set_wol = bnx2x_set_wol,
9364 .get_msglevel = bnx2x_get_msglevel,
9365 .set_msglevel = bnx2x_set_msglevel,
9366 .nway_reset = bnx2x_nway_reset,
9367 .get_link = ethtool_op_get_link,
9368 .get_eeprom_len = bnx2x_get_eeprom_len,
9369 .get_eeprom = bnx2x_get_eeprom,
9370 .set_eeprom = bnx2x_set_eeprom,
9371 .get_coalesce = bnx2x_get_coalesce,
9372 .set_coalesce = bnx2x_set_coalesce,
9373 .get_ringparam = bnx2x_get_ringparam,
9374 .set_ringparam = bnx2x_set_ringparam,
9375 .get_pauseparam = bnx2x_get_pauseparam,
9376 .set_pauseparam = bnx2x_set_pauseparam,
9377 .get_rx_csum = bnx2x_get_rx_csum,
9378 .set_rx_csum = bnx2x_set_rx_csum,
9379 .get_tx_csum = ethtool_op_get_tx_csum,
9380 .set_tx_csum = ethtool_op_set_tx_hw_csum,
9381 .set_flags = bnx2x_set_flags,
9382 .get_flags = ethtool_op_get_flags,
9383 .get_sg = ethtool_op_get_sg,
9384 .set_sg = ethtool_op_set_sg,
9385 .get_tso = ethtool_op_get_tso,
9386 .set_tso = bnx2x_set_tso,
9387 .self_test_count = bnx2x_self_test_count,
9388 .self_test = bnx2x_self_test,
9389 .get_strings = bnx2x_get_strings,
9390 .phys_id = bnx2x_phys_id,
9391 .get_stats_count = bnx2x_get_stats_count,
9392 .get_ethtool_stats = bnx2x_get_ethtool_stats,
9397 /****************************************************************************
9398 * General service functions
9399 ****************************************************************************/
/* Program the PCI PM control register (PCI_PM_CTRL) to move the device
 * between D0 and D3hot. Entering D0 clears the power-state bits and waits
 * out the mandated delay when coming from D3hot; entering D3hot sets the
 * state and (conditionally, for WoL) the PME enable bit.
 * NOTE(review): the switch on 'state' and the return statements are missing
 * from this line-sampled extraction. */
9401 static int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
9405 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);
/* D0 entry: clear power state, write back with PME status cleared. */
9409 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
9410 ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
9411 PCI_PM_CTRL_PME_STATUS));
9413 if (pmcsr & PCI_PM_CTRL_STATE_MASK)
9414 /* delay required during transition out of D3hot */
9419 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
/* Arm PME (wake event) generation — presumably only when WoL is enabled. */
9423 pmcsr |= PCI_PM_CTRL_PME_ENABLE;
9425 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
9428 /* No more memory access after this point until
9429 * device is brought back to D0.
/* True when the status block shows RX completions not yet consumed by the
 * driver. The MAX_RCQ_DESC_CNT adjustment skips the "next page" entry at a
 * ring-page boundary so the comparison with rx_comp_cons stays valid. */
9439 static inline int bnx2x_has_rx_work(struct bnx2x_fastpath *fp)
9443 /* Tell compiler that status block fields can change */
9445 rx_cons_sb = le16_to_cpu(*fp->rx_cons_sb);
9446 if ((rx_cons_sb & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
9448 return (fp->rx_comp_cons != rx_cons_sb);
9452 * net_device service functions
/* NAPI poll handler for one fastpath ring: process TX completions, then RX
 * up to 'budget'; complete NAPI and re-enable the IGU interrupt only when
 * the budget was not exhausted and the status block shows no further work.
 * NOTE(review): the 'work_done' declaration and the early-exit branches are
 * missing from this line-sampled extraction. */
9455 static int bnx2x_poll(struct napi_struct *napi, int budget)
9457 struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
9459 struct bnx2x *bp = fp->bp;
9462 #ifdef BNX2X_STOP_ON_ERROR
9463 if (unlikely(bp->panic))
/* Warm the cache lines we are about to touch in the hot path. */
9467 prefetch(fp->tx_buf_ring[TX_BD(fp->tx_pkt_cons)].skb);
9468 prefetch(fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)].skb);
9469 prefetch((char *)(fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)].skb) + 256);
9471 bnx2x_update_fpsb_idx(fp);
9473 if (bnx2x_has_tx_work(fp))
9474 bnx2x_tx_int(fp, budget);
9476 if (bnx2x_has_rx_work(fp))
9477 work_done = bnx2x_rx_int(fp, budget);
9478 rmb(); /* BNX2X_HAS_WORK() reads the status block */
9480 /* must not complete if we consumed full budget */
9481 if ((work_done < budget) && !BNX2X_HAS_WORK(fp)) {
9483 #ifdef BNX2X_STOP_ON_ERROR
9486 napi_complete(napi);
/* Ack USTORM without enabling, then ack CSTORM re-enabling interrupts. */
9488 bnx2x_ack_sb(bp, FP_SB_ID(fp), USTORM_ID,
9489 le16_to_cpu(fp->fp_u_idx), IGU_INT_NOP, 1);
9490 bnx2x_ack_sb(bp, FP_SB_ID(fp), CSTORM_ID,
9491 le16_to_cpu(fp->fp_c_idx), IGU_INT_ENABLE, 1);
9497 /* we split the first BD into headers and data BDs
9498 * to ease the pain of our fellow microcode engineers
9499 * we use one mapping for both BDs
9500 * So far this has only been observed to happen
9501 * in Other Operating Systems(TM)
/* Split the first TX BD (headers + data) into a header-only BD of 'hlen'
 * bytes and a new data BD covering the remainder, reusing the single DMA
 * mapping (data BD address = header address + hlen). Updates *tx_bd to the
 * new last BD so the caller can set the END flag on it; returns the
 * advanced bd_prod. */
9503 static noinline u16 bnx2x_tx_split(struct bnx2x *bp,
9504 struct bnx2x_fastpath *fp,
9505 struct eth_tx_bd **tx_bd, u16 hlen,
9506 u16 bd_prod, int nbd)
9508 struct eth_tx_bd *h_tx_bd = *tx_bd;
9509 struct eth_tx_bd *d_tx_bd;
9511 int old_len = le16_to_cpu(h_tx_bd->nbytes);
9513 /* first fix first BD */
9514 h_tx_bd->nbd = cpu_to_le16(nbd);
9515 h_tx_bd->nbytes = cpu_to_le16(hlen);
9517 DP(NETIF_MSG_TX_QUEUED, "TSO split header size is %d "
9518 "(%x:%x) nbd %d\n", h_tx_bd->nbytes, h_tx_bd->addr_hi,
9519 h_tx_bd->addr_lo, h_tx_bd->nbd);
9521 /* now get a new data BD
9522 * (after the pbd) and fill it */
9523 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
9524 d_tx_bd = &fp->tx_desc_ring[bd_prod];
/* Data BD points into the same mapping, offset past the headers. */
9526 mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi),
9527 le32_to_cpu(h_tx_bd->addr_lo)) + hlen;
9529 d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
9530 d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
9531 d_tx_bd->nbytes = cpu_to_le16(old_len - hlen);
9533 /* this marks the BD as one that has no individual mapping
9534 * the FW ignores this flag in a BD not marked start
9536 d_tx_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_SW_LSO;
9537 DP(NETIF_MSG_TX_QUEUED,
9538 "TSO split data size is %d (%x:%x)\n",
9539 d_tx_bd->nbytes, d_tx_bd->addr_hi, d_tx_bd->addr_lo);
9541 /* update tx_bd for marking the last BD flag */
/* HW-bug workaround: adjust a partial checksum by 'fix' bytes relative to
 * the transport header (subtract the bytes before it when fix > 0, add the
 * bytes after when fix < 0), then fold and byte-swap for the parsing BD. */
9547 static inline u16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
9550 csum = (u16) ~csum_fold(csum_sub(csum,
9551 csum_partial(t_header - fix, fix, 0)));
9554 csum = (u16) ~csum_fold(csum_add(csum,
9555 csum_partial(t_header, -fix, 0)));
9557 return swab16(csum);
/* Classify an outgoing skb into XMIT_* flags: checksum offload needed
 * (v4/v6, TCP or not) and GSO type (TCPv4/TCPv6). Returns 0-ish (no CSUM
 * flags) when the skb does not request CHECKSUM_PARTIAL.
 * NOTE(review): the rc initialization and V4/V6 flag assignments are missing
 * from this line-sampled extraction. */
9560 static inline u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
9564 if (skb->ip_summed != CHECKSUM_PARTIAL)
9568 if (skb->protocol == ntohs(ETH_P_IPV6)) {
9570 if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
9571 rc |= XMIT_CSUM_TCP;
9575 if (ip_hdr(skb)->protocol == IPPROTO_TCP)
9576 rc |= XMIT_CSUM_TCP;
9580 if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
9583 else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
9589 #if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
9590 /* check if packet requires linearization (packet is too fragmented) */
/* FW can fetch at most MAX_FETCH_BD BDs per packet; for LSO it additionally
 * requires every window of wnd_size consecutive frags to cover at least one
 * MSS. Slide a window over the frag sizes (seeded with the linear part) and
 * request linearization if any window sums below lso_mss; non-LSO packets
 * with too many frags always need it. Returns non-zero when skb_linearize()
 * is required. */
9591 static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
9596 int first_bd_sz = 0;
9598 /* 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */
9599 if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - 3)) {
9601 if (xmit_type & XMIT_GSO) {
9602 unsigned short lso_mss = skb_shinfo(skb)->gso_size;
9603 /* Check if LSO packet needs to be copied:
9604 3 = 1 (for headers BD) + 2 (for PBD and last BD) */
9605 int wnd_size = MAX_FETCH_BD - 3;
9606 /* Number of windows to check */
9607 int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size;
9612 /* Headers length */
9613 hlen = (int)(skb_transport_header(skb) - skb->data) +
9616 /* Amount of data (w/o headers) on linear part of SKB*/
9617 first_bd_sz = skb_headlen(skb) - hlen;
9619 wnd_sum = first_bd_sz;
9621 /* Calculate the first sum - it's special */
9622 for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++)
9624 skb_shinfo(skb)->frags[frag_idx].size;
9626 /* If there was data on linear skb data - check it */
9627 if (first_bd_sz > 0) {
9628 if (unlikely(wnd_sum < lso_mss)) {
9633 wnd_sum -= first_bd_sz;
9636 /* Others are easier: run through the frag list and
9637 check all windows */
9638 for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) {
9640 skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1].size;
9642 if (unlikely(wnd_sum < lso_mss)) {
/* Slide the window: drop the frag leaving it. */
9647 skb_shinfo(skb)->frags[wnd_idx].size;
9651 /* in non-LSO too fragmented packet should always
9658 if (unlikely(to_copy))
9659 DP(NETIF_MSG_TX_QUEUED,
9660 "Linearization IS REQUIRED for %s packet. "
9661 "num_frags %d hlen %d first_bd_sz %d\n",
9662 (xmit_type & XMIT_GSO) ? "LSO" : "non-LSO",
9663 skb_shinfo(skb)->nr_frags, hlen, first_bd_sz);
9669 /* called with netif_tx_lock
9670 * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
9671 * netif_wake_queue()
9673 static int bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
9675 struct bnx2x *bp = netdev_priv(dev);
9676 struct bnx2x_fastpath *fp;
9677 struct netdev_queue *txq;
9678 struct sw_tx_bd *tx_buf;
9679 struct eth_tx_bd *tx_bd;
9680 struct eth_tx_parse_bd *pbd = NULL;
9681 u16 pkt_prod, bd_prod;
9684 u32 xmit_type = bnx2x_xmit_type(bp, skb);
9685 int vlan_off = (bp->e1hov ? 4 : 0);
9689 #ifdef BNX2X_STOP_ON_ERROR
9690 if (unlikely(bp->panic))
9691 return NETDEV_TX_BUSY;
9694 fp_index = skb_get_queue_mapping(skb);
9695 txq = netdev_get_tx_queue(dev, fp_index);
9697 fp = &bp->fp[fp_index];
9699 if (unlikely(bnx2x_tx_avail(fp) < (skb_shinfo(skb)->nr_frags + 3))) {
9700 bp->eth_stats.driver_xoff++,
9701 netif_tx_stop_queue(txq);
9702 BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
9703 return NETDEV_TX_BUSY;
9706 DP(NETIF_MSG_TX_QUEUED, "SKB: summed %x protocol %x protocol(%x,%x)"
9707 " gso type %x xmit_type %x\n",
9708 skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
9709 ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type);
9711 #if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
9712 /* First, check if we need to linearize the skb
9713 (due to FW restrictions) */
9714 if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) {
9715 /* Statistics of linearization */
9717 if (skb_linearize(skb) != 0) {
9718 DP(NETIF_MSG_TX_QUEUED, "SKB linearization failed - "
9719 "silently dropping this SKB\n");
9720 dev_kfree_skb_any(skb);
9721 return NETDEV_TX_OK;
9727 Please read carefully. First we use one BD which we mark as start,
9728 then for TSO or xsum we have a parsing info BD,
9729 and only then we have the rest of the TSO BDs.
9730 (don't forget to mark the last one as last,
9731 and to unmap only AFTER you write to the BD ...)
9732 And above all, all pdb sizes are in words - NOT DWORDS!
9735 pkt_prod = fp->tx_pkt_prod++;
9736 bd_prod = TX_BD(fp->tx_bd_prod);
9738 /* get a tx_buf and first BD */
9739 tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)];
9740 tx_bd = &fp->tx_desc_ring[bd_prod];
9742 tx_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
9743 tx_bd->general_data = (UNICAST_ADDRESS <<
9744 ETH_TX_BD_ETH_ADDR_TYPE_SHIFT);
9746 tx_bd->general_data |= (1 << ETH_TX_BD_HDR_NBDS_SHIFT);
9748 /* remember the first BD of the packet */
9749 tx_buf->first_bd = fp->tx_bd_prod;
9752 DP(NETIF_MSG_TX_QUEUED,
9753 "sending pkt %u @%p next_idx %u bd %u @%p\n",
9754 pkt_prod, tx_buf, fp->tx_pkt_prod, bd_prod, tx_bd);
9757 if ((bp->vlgrp != NULL) && vlan_tx_tag_present(skb) &&
9758 (bp->flags & HW_VLAN_TX_FLAG)) {
9759 tx_bd->vlan = cpu_to_le16(vlan_tx_tag_get(skb));
9760 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_VLAN_TAG;
9764 tx_bd->vlan = cpu_to_le16(pkt_prod);
9767 /* turn on parsing and get a BD */
9768 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
9769 pbd = (void *)&fp->tx_desc_ring[bd_prod];
9771 memset(pbd, 0, sizeof(struct eth_tx_parse_bd));
9774 if (xmit_type & XMIT_CSUM) {
9775 hlen = (skb_network_header(skb) - skb->data + vlan_off) / 2;
9777 /* for now NS flag is not used in Linux */
9778 pbd->global_data = (hlen |
9779 ((skb->protocol == ntohs(ETH_P_8021Q)) <<
9780 ETH_TX_PARSE_BD_LLC_SNAP_EN_SHIFT));
9782 pbd->ip_hlen = (skb_transport_header(skb) -
9783 skb_network_header(skb)) / 2;
9785 hlen += pbd->ip_hlen + tcp_hdrlen(skb) / 2;
9787 pbd->total_hlen = cpu_to_le16(hlen);
9788 hlen = hlen*2 - vlan_off;
9790 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_TCP_CSUM;
9792 if (xmit_type & XMIT_CSUM_V4)
9793 tx_bd->bd_flags.as_bitfield |=
9794 ETH_TX_BD_FLAGS_IP_CSUM;
9796 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IPV6;
9798 if (xmit_type & XMIT_CSUM_TCP) {
9799 pbd->tcp_pseudo_csum = swab16(tcp_hdr(skb)->check);
9802 s8 fix = SKB_CS_OFF(skb); /* signed! */
9804 pbd->global_data |= ETH_TX_PARSE_BD_CS_ANY_FLG;
9805 pbd->cs_offset = fix / 2;
9807 DP(NETIF_MSG_TX_QUEUED,
9808 "hlen %d offset %d fix %d csum before fix %x\n",
9809 le16_to_cpu(pbd->total_hlen), pbd->cs_offset, fix,
9812 /* HW bug: fixup the CSUM */
9813 pbd->tcp_pseudo_csum =
9814 bnx2x_csum_fix(skb_transport_header(skb),
9817 DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n",
9818 pbd->tcp_pseudo_csum);
9822 mapping = pci_map_single(bp->pdev, skb->data,
9823 skb_headlen(skb), PCI_DMA_TODEVICE);
9825 tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
9826 tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
9827 nbd = skb_shinfo(skb)->nr_frags + ((pbd == NULL) ? 1 : 2);
9828 tx_bd->nbd = cpu_to_le16(nbd);
9829 tx_bd->nbytes = cpu_to_le16(skb_headlen(skb));
9831 DP(NETIF_MSG_TX_QUEUED, "first bd @%p addr (%x:%x) nbd %d"
9832 " nbytes %d flags %x vlan %x\n",
9833 tx_bd, tx_bd->addr_hi, tx_bd->addr_lo, le16_to_cpu(tx_bd->nbd),
9834 le16_to_cpu(tx_bd->nbytes), tx_bd->bd_flags.as_bitfield,
9835 le16_to_cpu(tx_bd->vlan));
9837 if (xmit_type & XMIT_GSO) {
9839 DP(NETIF_MSG_TX_QUEUED,
9840 "TSO packet len %d hlen %d total len %d tso size %d\n",
9841 skb->len, hlen, skb_headlen(skb),
9842 skb_shinfo(skb)->gso_size);
9844 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;
9846 if (unlikely(skb_headlen(skb) > hlen))
9847 bd_prod = bnx2x_tx_split(bp, fp, &tx_bd, hlen,
9850 pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
9851 pbd->tcp_send_seq = swab32(tcp_hdr(skb)->seq);
9852 pbd->tcp_flags = pbd_tcp_flags(skb);
9854 if (xmit_type & XMIT_GSO_V4) {
9855 pbd->ip_id = swab16(ip_hdr(skb)->id);
9856 pbd->tcp_pseudo_csum =
9857 swab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
9859 0, IPPROTO_TCP, 0));
9862 pbd->tcp_pseudo_csum =
9863 swab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
9864 &ipv6_hdr(skb)->daddr,
9865 0, IPPROTO_TCP, 0));
9867 pbd->global_data |= ETH_TX_PARSE_BD_PSEUDO_CS_WITHOUT_LEN;
9870 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
9871 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
9873 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
9874 tx_bd = &fp->tx_desc_ring[bd_prod];
9876 mapping = pci_map_page(bp->pdev, frag->page, frag->page_offset,
9877 frag->size, PCI_DMA_TODEVICE);
9879 tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
9880 tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
9881 tx_bd->nbytes = cpu_to_le16(frag->size);
9882 tx_bd->vlan = cpu_to_le16(pkt_prod);
9883 tx_bd->bd_flags.as_bitfield = 0;
9885 DP(NETIF_MSG_TX_QUEUED,
9886 "frag %d bd @%p addr (%x:%x) nbytes %d flags %x\n",
9887 i, tx_bd, tx_bd->addr_hi, tx_bd->addr_lo,
9888 le16_to_cpu(tx_bd->nbytes), tx_bd->bd_flags.as_bitfield);
9891 /* now at last mark the BD as the last BD */
9892 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_END_BD;
9894 DP(NETIF_MSG_TX_QUEUED, "last bd @%p flags %x\n",
9895 tx_bd, tx_bd->bd_flags.as_bitfield);
9897 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
9899 /* now send a tx doorbell, counting the next BD
9900 * if the packet contains or ends with it
9902 if (TX_BD_POFF(bd_prod) < nbd)
9906 DP(NETIF_MSG_TX_QUEUED,
9907 "PBD @%p ip_data %x ip_hlen %u ip_id %u lso_mss %u"
9908 " tcp_flags %x xsum %x seq %u hlen %u\n",
9909 pbd, pbd->global_data, pbd->ip_hlen, pbd->ip_id,
9910 pbd->lso_mss, pbd->tcp_flags, pbd->tcp_pseudo_csum,
9911 pbd->tcp_send_seq, le16_to_cpu(pbd->total_hlen));
9913 DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d bd %u\n", nbd, bd_prod);
9916 * Make sure that the BD data is updated before updating the producer
9917 * since FW might read the BD right after the producer is updated.
9918 * This is only applicable for weak-ordered memory model archs such
9919 * as IA-64. The following barrier is also mandatory since FW will
9920 * assumes packets must have BDs.
9924 fp->hw_tx_prods->bds_prod =
9925 cpu_to_le16(le16_to_cpu(fp->hw_tx_prods->bds_prod) + nbd);
9926 mb(); /* FW restriction: must not reorder writing nbd and packets */
9927 fp->hw_tx_prods->packets_prod =
9928 cpu_to_le32(le32_to_cpu(fp->hw_tx_prods->packets_prod) + 1);
9929 DOORBELL(bp, FP_IDX(fp), 0);
9933 fp->tx_bd_prod += nbd;
9934 dev->trans_start = jiffies;
9936 if (unlikely(bnx2x_tx_avail(fp) < MAX_SKB_FRAGS + 3)) {
9937 /* We want bnx2x_tx_int to "see" the updated tx_bd_prod
9938 if we put Tx into XOFF state. */
9940 netif_tx_stop_queue(txq);
9941 bp->eth_stats.driver_xoff++;
9942 if (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3)
9943 netif_tx_wake_queue(txq);
9947 return NETDEV_TX_OK;
9950 /* called with rtnl_lock */
9951 static int bnx2x_open(struct net_device *dev)
9953 struct bnx2x *bp = netdev_priv(dev);
9955 netif_carrier_off(dev);
9957 bnx2x_set_power_state(bp, PCI_D0);
9959 return bnx2x_nic_load(bp, LOAD_OPEN);
9962 /* called with rtnl_lock */
9963 static int bnx2x_close(struct net_device *dev)
9965 struct bnx2x *bp = netdev_priv(dev);
9967 /* Unload the driver, release IRQs */
9968 bnx2x_nic_unload(bp, UNLOAD_CLOSE);
9969 if (atomic_read(&bp->pdev->enable_cnt) == 1)
9970 if (!CHIP_REV_IS_SLOW(bp))
9971 bnx2x_set_power_state(bp, PCI_D3hot);
9976 /* called with netif_tx_lock from set_multicast */
9977 static void bnx2x_set_rx_mode(struct net_device *dev)
/* NOTE(review): this span is a mangled listing — stray line numbers and
 * dropped lines (braces, locals such as i/old/offset, the CAM flags value,
 * the early return) — it does not compile as shown.  Comments below
 * describe what the visible code demonstrates.
 *
 * Purpose: translate dev->flags and the device multicast list into the
 * chip's rx filtering mode: NORMAL, PROMISC or ALLMULTI, programming
 * either the E1 CAM table or the E1H multicast hash, then handing the
 * chosen mode to the storm firmware.
 */
9979 struct bnx2x *bp = netdev_priv(dev);
9980 u32 rx_mode = BNX2X_RX_MODE_NORMAL;
9981 int port = BP_PORT(bp);
/* Bail out unless the interface is fully up; presumably an early return
 * follows the DP() below — TODO confirm against the full source. */
9983 if (bp->state != BNX2X_STATE_OPEN) {
9984 DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
9988 DP(NETIF_MSG_IFUP, "dev->flags = %x\n", dev->flags);
/* Promiscuous beats everything else. */
9990 if (dev->flags & IFF_PROMISC)
9991 rx_mode = BNX2X_RX_MODE_PROMISC;
/* ALLMULTI also covers E1 chips whose CAM cannot hold the whole list. */
9993 else if ((dev->flags & IFF_ALLMULTI) ||
9994 ((dev->mc_count > BNX2X_MAX_MULTICAST) && CHIP_IS_E1(bp)))
9995 rx_mode = BNX2X_RX_MODE_ALLMULTI;
9997 else { /* some multicasts */
/* E1: program each multicast MAC into a CAM entry of the slow-path
 * mac_configuration_cmd, byte-swapped 16 bits at a time. */
9998 if (CHIP_IS_E1(bp)) {
10000 struct dev_mc_list *mclist;
10001 struct mac_configuration_cmd *config =
10002 bnx2x_sp(bp, mcast_config);
10004 for (i = 0, mclist = dev->mc_list;
10005 mclist && (i < dev->mc_count);
10006 i++, mclist = mclist->next) {
10008 config->config_table[i].
10009 cam_entry.msb_mac_addr =
10010 swab16(*(u16 *)&mclist->dmi_addr[0]);
10011 config->config_table[i].
10012 cam_entry.middle_mac_addr =
10013 swab16(*(u16 *)&mclist->dmi_addr[2]);
10014 config->config_table[i].
10015 cam_entry.lsb_mac_addr =
10016 swab16(*(u16 *)&mclist->dmi_addr[4]);
/* The flags value assigned here was dropped by the extraction —
 * presumably the port number; verify against the full file. */
10017 config->config_table[i].cam_entry.flags =
10019 config->config_table[i].
10020 target_table_entry.flags = 0;
10021 config->config_table[i].
10022 target_table_entry.client_id = 0;
10023 config->config_table[i].
10024 target_table_entry.vlan_id = 0;
10027 "setting MCAST[%d] (%04x:%04x:%04x)\n", i,
10028 config->config_table[i].
10029 cam_entry.msb_mac_addr,
10030 config->config_table[i].
10031 cam_entry.middle_mac_addr,
10032 config->config_table[i].
10033 cam_entry.lsb_mac_addr);
/* Invalidate CAM entries left over from a previously longer list. */
10035 old = config->hdr.length;
10037 for (; i < old; i++) {
10038 if (CAM_IS_INVALID(config->
10039 config_table[i])) {
10040 /* already invalidated */
10044 CAM_INVALIDATE(config->
/* Emulation parts get a smaller per-port CAM window. */
10049 if (CHIP_REV_IS_SLOW(bp))
10050 offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
10052 offset = BNX2X_MAX_MULTICAST*(1 + port);
10054 config->hdr.length = i;
10055 config->hdr.offset = offset;
10056 config->hdr.client_id = bp->fp->cl_id;
10057 config->hdr.reserved1 = 0;
/* Post the SET_MAC ramrod pointing at the slow-path mapping. */
10059 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
10060 U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
10061 U64_LO(bnx2x_sp_mapping(bp, mcast_config)),
/* E1H path: build a 256-bit hash filter from the top CRC32c byte of
 * each multicast address and write it to the MC_HASH registers. */
10064 /* Accept one or more multicasts */
10065 struct dev_mc_list *mclist;
10066 u32 mc_filter[MC_HASH_SIZE];
10067 u32 crc, bit, regidx;
10070 memset(mc_filter, 0, 4 * MC_HASH_SIZE);
10072 for (i = 0, mclist = dev->mc_list;
10073 mclist && (i < dev->mc_count);
10074 i++, mclist = mclist->next) {
10076 DP(NETIF_MSG_IFUP, "Adding mcast MAC: %pM\n",
10079 crc = crc32c_le(0, mclist->dmi_addr, ETH_ALEN);
10080 bit = (crc >> 24) & 0xff;
/* regidx is presumably bit >> 5 with bit masked to 0x1f — the
 * assignment lines were dropped; confirm against full source. */
10083 mc_filter[regidx] |= (1 << bit);
10086 for (i = 0; i < MC_HASH_SIZE; i++)
10087 REG_WR(bp, MC_HASH_OFFSET(bp, i),
/* Finally record the mode and push it to the storm firmware. */
10092 bp->rx_mode = rx_mode;
10093 bnx2x_set_storm_rx_mode(bp);
10096 /* called with rtnl_lock */
10097 static int bnx2x_change_mac_addr(struct net_device *dev, void *p)
10099 struct sockaddr *addr = p;
10100 struct bnx2x *bp = netdev_priv(dev);
10102 if (!is_valid_ether_addr((u8 *)(addr->sa_data)))
10105 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
10106 if (netif_running(dev)) {
10107 if (CHIP_IS_E1(bp))
10108 bnx2x_set_mac_addr_e1(bp, 1);
10110 bnx2x_set_mac_addr_e1h(bp, 1);
10116 /* called with rtnl_lock */
10117 static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
10119 struct mii_ioctl_data *data = if_mii(ifr);
10120 struct bnx2x *bp = netdev_priv(dev);
10121 int port = BP_PORT(bp);
10126 data->phy_id = bp->port.phy_addr;
10130 case SIOCGMIIREG: {
10133 if (!netif_running(dev))
10136 mutex_lock(&bp->port.phy_mutex);
10137 err = bnx2x_cl45_read(bp, port, 0, bp->port.phy_addr,
10138 DEFAULT_PHY_DEV_ADDR,
10139 (data->reg_num & 0x1f), &mii_regval);
10140 data->val_out = mii_regval;
10141 mutex_unlock(&bp->port.phy_mutex);
10146 if (!capable(CAP_NET_ADMIN))
10149 if (!netif_running(dev))
10152 mutex_lock(&bp->port.phy_mutex);
10153 err = bnx2x_cl45_write(bp, port, 0, bp->port.phy_addr,
10154 DEFAULT_PHY_DEV_ADDR,
10155 (data->reg_num & 0x1f), data->val_in);
10156 mutex_unlock(&bp->port.phy_mutex);
10164 return -EOPNOTSUPP;
10167 /* called with rtnl_lock */
10168 static int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
10170 struct bnx2x *bp = netdev_priv(dev);
10173 if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
10174 ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE))
10177 /* This does not race with packet allocation
10178 * because the actual alloc size is
10179 * only updated as part of load
10181 dev->mtu = new_mtu;
10183 if (netif_running(dev)) {
10184 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
10185 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
10191 static void bnx2x_tx_timeout(struct net_device *dev)
10193 struct bnx2x *bp = netdev_priv(dev);
10195 #ifdef BNX2X_STOP_ON_ERROR
10199 /* This allows the netif to be shutdown gracefully before resetting */
10200 schedule_work(&bp->reset_task);
10204 /* called with rtnl_lock */
10205 static void bnx2x_vlan_rx_register(struct net_device *dev,
10206 struct vlan_group *vlgrp)
10208 struct bnx2x *bp = netdev_priv(dev);
10212 /* Set flags according to the required capabilities */
10213 bp->flags &= ~(HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG);
10215 if (dev->features & NETIF_F_HW_VLAN_TX)
10216 bp->flags |= HW_VLAN_TX_FLAG;
10218 if (dev->features & NETIF_F_HW_VLAN_RX)
10219 bp->flags |= HW_VLAN_RX_FLAG;
10221 if (netif_running(dev))
10222 bnx2x_set_client_config(bp);
#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
/*
 * poll_bnx2x() - netpoll controller callback.
 *
 * Runs the interrupt handler with the device IRQ disabled so netpoll
 * (netconsole, kgdboe) can drive the NIC without interrupt delivery.
 */
static void poll_bnx2x(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	disable_irq(bp->pdev->irq);
	bnx2x_interrupt(bp->pdev->irq, dev);
	enable_irq(bp->pdev->irq);
}
#endif
10238 static const struct net_device_ops bnx2x_netdev_ops = {
10239 .ndo_open = bnx2x_open,
10240 .ndo_stop = bnx2x_close,
10241 .ndo_start_xmit = bnx2x_start_xmit,
10242 .ndo_set_multicast_list = bnx2x_set_rx_mode,
10243 .ndo_set_mac_address = bnx2x_change_mac_addr,
10244 .ndo_validate_addr = eth_validate_addr,
10245 .ndo_do_ioctl = bnx2x_ioctl,
10246 .ndo_change_mtu = bnx2x_change_mtu,
10247 .ndo_tx_timeout = bnx2x_tx_timeout,
10249 .ndo_vlan_rx_register = bnx2x_vlan_rx_register,
10251 #if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
10252 .ndo_poll_controller = poll_bnx2x,
10257 static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
10258 struct net_device *dev)
/* NOTE(review): mangled listing — stray line numbers and missing lines
 * (locals, rc error-code assignments, goto targets and the
 * err_out_unmap/err_out_release/err_out_disable/err_out labels, the
 * final return) — not compilable as shown.
 *
 * Purpose: one-time PCI/netdev setup: enable the device, claim BARs,
 * verify PM and PCIe capabilities, configure DMA masks, map the
 * register and doorbell BARs, clear PXP2 indirect-address registers,
 * and install the netdev ops/features.  Errors unwind through the
 * (elided) goto ladder releasing everything acquired so far.
 */
10263 SET_NETDEV_DEV(dev, &pdev->dev);
10264 bp = netdev_priv(dev);
10269 bp->func = PCI_FUNC(pdev->devfn);
10271 rc = pci_enable_device(pdev);
10273 printk(KERN_ERR PFX "Cannot enable PCI device, aborting\n");
/* BAR0 must be MMIO (register space)... */
10277 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
10278 printk(KERN_ERR PFX "Cannot find PCI device base address,"
10281 goto err_out_disable;
/* ...and BAR2 must be MMIO (doorbell space). */
10284 if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
10285 printk(KERN_ERR PFX "Cannot find second PCI device"
10286 " base address, aborting\n");
10288 goto err_out_disable;
/* Only the first enabler of a multi-function device claims regions. */
10291 if (atomic_read(&pdev->enable_cnt) == 1) {
10292 rc = pci_request_regions(pdev, DRV_MODULE_NAME);
10294 printk(KERN_ERR PFX "Cannot obtain PCI resources,"
10296 goto err_out_disable;
10299 pci_set_master(pdev);
10300 pci_save_state(pdev);
/* Power-management capability is required for D-state transitions. */
10303 bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
10304 if (bp->pm_cap == 0) {
10305 printk(KERN_ERR PFX "Cannot find power management"
10306 " capability, aborting\n");
10308 goto err_out_release;
10311 bp->pcie_cap = pci_find_capability(pdev, PCI_CAP_ID_EXP);
10312 if (bp->pcie_cap == 0) {
10313 printk(KERN_ERR PFX "Cannot find PCI Express capability,"
10316 goto err_out_release;
/* Prefer 64-bit DMA (sets USING_DAC_FLAG), fall back to 32-bit. */
10319 if (pci_set_dma_mask(pdev, DMA_64BIT_MASK) == 0) {
10320 bp->flags |= USING_DAC_FLAG;
10321 if (pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK) != 0) {
10322 printk(KERN_ERR PFX "pci_set_consistent_dma_mask"
10323 " failed, aborting\n");
10325 goto err_out_release;
10328 } else if (pci_set_dma_mask(pdev, DMA_32BIT_MASK) != 0) {
10329 printk(KERN_ERR PFX "System does not support DMA,"
10332 goto err_out_release;
10335 dev->mem_start = pci_resource_start(pdev, 0);
10336 dev->base_addr = dev->mem_start;
10337 dev->mem_end = pci_resource_end(pdev, 0);
10339 dev->irq = pdev->irq;
/* Map BAR0 (registers) and BAR2 (doorbells, capped at BNX2X_DB_SIZE). */
10341 bp->regview = pci_ioremap_bar(pdev, 0);
10342 if (!bp->regview) {
10343 printk(KERN_ERR PFX "Cannot map register space, aborting\n");
10345 goto err_out_release;
10348 bp->doorbells = ioremap_nocache(pci_resource_start(pdev, 2),
10349 min_t(u64, BNX2X_DB_SIZE,
10350 pci_resource_len(pdev, 2)));
10351 if (!bp->doorbells) {
10352 printk(KERN_ERR PFX "Cannot map doorbell space, aborting\n");
10354 goto err_out_unmap;
10357 bnx2x_set_power_state(bp, PCI_D0);
/* Reset the PXP2 indirect addressing registers for this port. */
10359 /* clean indirect addresses */
10360 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
10361 PCICFG_VENDOR_ID_OFFSET);
10362 REG_WR(bp, PXP2_REG_PGL_ADDR_88_F0 + BP_PORT(bp)*16, 0);
10363 REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F0 + BP_PORT(bp)*16, 0);
10364 REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0 + BP_PORT(bp)*16, 0);
10365 REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0 + BP_PORT(bp)*16, 0);
/* Install ops and advertise SG/checksum/VLAN/TSO features. */
10367 dev->watchdog_timeo = TX_TIMEOUT;
10369 dev->netdev_ops = &bnx2x_netdev_ops;
10370 dev->ethtool_ops = &bnx2x_ethtool_ops;
10371 dev->features |= NETIF_F_SG;
10372 dev->features |= NETIF_F_HW_CSUM;
10373 if (bp->flags & USING_DAC_FLAG)
10374 dev->features |= NETIF_F_HIGHDMA;
10376 dev->features |= (NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX);
10377 bp->flags |= (HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG);
10379 dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
10380 dev->features |= NETIF_F_TSO6;
/* Error unwind (labels elided by the extraction): unmap regview, unmap
 * doorbells, release regions (last enabler only), disable device and
 * clear drvdata, then return rc. */
10386 iounmap(bp->regview);
10387 bp->regview = NULL;
10389 if (bp->doorbells) {
10390 iounmap(bp->doorbells);
10391 bp->doorbells = NULL;
10395 if (atomic_read(&pdev->enable_cnt) == 1)
10396 pci_release_regions(pdev);
10399 pci_disable_device(pdev);
10400 pci_set_drvdata(pdev, NULL);
10406 static int __devinit bnx2x_get_pcie_width(struct bnx2x *bp)
10408 u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);
10410 val = (val & PCICFG_LINK_WIDTH) >> PCICFG_LINK_WIDTH_SHIFT;
10414 /* return value of 1=2.5GHz 2=5GHz */
10415 static int __devinit bnx2x_get_pcie_speed(struct bnx2x *bp)
10417 u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);
10419 val = (val & PCICFG_LINK_SPEED) >> PCICFG_LINK_SPEED_SHIFT;
10423 static int __devinit bnx2x_init_one(struct pci_dev *pdev,
10424 const struct pci_device_id *ent)
/* NOTE(review): mangled listing — stray line numbers and missing lines
 * ("int rc;", NULL checks, "goto init_one_exit;" pairs, the
 * init_one_exit cleanup label, free_netdev, returns) — not compilable
 * as shown.
 *
 * Purpose: PCI probe entry point.  Allocates a multiqueue etherdev,
 * runs bnx2x_init_dev()/bnx2x_init_bp(), registers the netdev and
 * prints a one-line banner; the (elided) init_one_exit path unwinds
 * the mappings, regions and netdev on failure.
 */
10426 static int version_printed;
10427 struct net_device *dev = NULL;
/* Print the driver banner exactly once across all probed devices. */
10431 if (version_printed++ == 0)
10432 printk(KERN_INFO "%s", version);
10434 /* dev zeroed in init_etherdev */
10435 dev = alloc_etherdev_mq(sizeof(*bp), MAX_CONTEXT);
10437 printk(KERN_ERR PFX "Cannot allocate net device\n");
10441 bp = netdev_priv(dev);
10442 bp->msglevel = debug;
10444 rc = bnx2x_init_dev(pdev, dev);
10450 pci_set_drvdata(pdev, dev);
10452 rc = bnx2x_init_bp(bp);
10454 goto init_one_exit;
10456 rc = register_netdev(dev);
10458 dev_err(&pdev->dev, "Cannot register net device\n");
10459 goto init_one_exit;
/* Success banner: board name, silicon rev, PCIe width/speed, MMIO
 * base, IRQ and MAC address. */
10462 bp->common.name = board_info[ent->driver_data].name;
10463 printk(KERN_INFO "%s: %s (%c%d) PCI-E x%d %s found at mem %lx,"
10464 " IRQ %d, ", dev->name, bp->common.name,
10465 (CHIP_REV(bp) >> 12) + 'A', (CHIP_METAL(bp) >> 4),
10466 bnx2x_get_pcie_width(bp),
10467 (bnx2x_get_pcie_speed(bp) == 2) ? "5GHz (Gen2)" : "2.5GHz",
10468 dev->base_addr, bp->pdev->irq);
10469 printk(KERN_CONT "node addr %pM\n", dev->dev_addr);
/* init_one_exit (label elided): unmap BARs, release regions for the
 * last enabler, disable the device, clear drvdata, return rc. */
10474 iounmap(bp->regview);
10477 iounmap(bp->doorbells);
10481 if (atomic_read(&pdev->enable_cnt) == 1)
10482 pci_release_regions(pdev);
10484 pci_disable_device(pdev);
10485 pci_set_drvdata(pdev, NULL);
10490 static void __devexit bnx2x_remove_one(struct pci_dev *pdev)
10492 struct net_device *dev = pci_get_drvdata(pdev);
10496 printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
10499 bp = netdev_priv(dev);
10501 unregister_netdev(dev);
10504 iounmap(bp->regview);
10507 iounmap(bp->doorbells);
10511 if (atomic_read(&pdev->enable_cnt) == 1)
10512 pci_release_regions(pdev);
10514 pci_disable_device(pdev);
10515 pci_set_drvdata(pdev, NULL);
10518 static int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
10520 struct net_device *dev = pci_get_drvdata(pdev);
10524 printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
10527 bp = netdev_priv(dev);
10531 pci_save_state(pdev);
10533 if (!netif_running(dev)) {
10538 netif_device_detach(dev);
10540 bnx2x_nic_unload(bp, UNLOAD_CLOSE);
10542 bnx2x_set_power_state(bp, pci_choose_state(pdev, state));
10549 static int bnx2x_resume(struct pci_dev *pdev)
10551 struct net_device *dev = pci_get_drvdata(pdev);
10556 printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
10559 bp = netdev_priv(dev);
10563 pci_restore_state(pdev);
10565 if (!netif_running(dev)) {
10570 bnx2x_set_power_state(bp, PCI_D0);
10571 netif_device_attach(dev);
10573 rc = bnx2x_nic_load(bp, LOAD_OPEN);
10580 static int bnx2x_eeh_nic_unload(struct bnx2x *bp)
10584 bp->state = BNX2X_STATE_ERROR;
10586 bp->rx_mode = BNX2X_RX_MODE_NONE;
10588 bnx2x_netif_stop(bp, 0);
10590 del_timer_sync(&bp->timer);
10591 bp->stats_state = STATS_STATE_DISABLED;
10592 DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");
10595 bnx2x_free_irq(bp);
10597 if (CHIP_IS_E1(bp)) {
10598 struct mac_configuration_cmd *config =
10599 bnx2x_sp(bp, mcast_config);
10601 for (i = 0; i < config->hdr.length; i++)
10602 CAM_INVALIDATE(config->config_table[i]);
10605 /* Free SKBs, SGEs, TPA pool and driver internals */
10606 bnx2x_free_skbs(bp);
10607 for_each_rx_queue(bp, i)
10608 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
10609 for_each_rx_queue(bp, i)
10610 netif_napi_del(&bnx2x_fp(bp, i, napi));
10611 bnx2x_free_mem(bp);
10613 bp->state = BNX2X_STATE_CLOSED;
10615 netif_carrier_off(bp->dev);
10620 static void bnx2x_eeh_recover(struct bnx2x *bp)
10624 mutex_init(&bp->port.phy_mutex);
10626 bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
10627 bp->link_params.shmem_base = bp->common.shmem_base;
10628 BNX2X_DEV_INFO("shmem offset is 0x%x\n", bp->common.shmem_base);
10630 if (!bp->common.shmem_base ||
10631 (bp->common.shmem_base < 0xA0000) ||
10632 (bp->common.shmem_base >= 0xC0000)) {
10633 BNX2X_DEV_INFO("MCP not active\n");
10634 bp->flags |= NO_MCP_FLAG;
10638 val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
10639 if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
10640 != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
10641 BNX2X_ERR("BAD MCP validity signature\n");
10643 if (!BP_NOMCP(bp)) {
10644 bp->fw_seq = (SHMEM_RD(bp, func_mb[BP_FUNC(bp)].drv_mb_header)
10645 & DRV_MSG_SEQ_NUMBER_MASK);
10646 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
10651 * bnx2x_io_error_detected - called when PCI error is detected
10652 * @pdev: Pointer to PCI device
10653 * @state: The current pci connection state
10655 * This function is called after a PCI bus error affecting
10656 * this device has been detected.
10658 static pci_ers_result_t bnx2x_io_error_detected(struct pci_dev *pdev,
10659 pci_channel_state_t state)
10661 struct net_device *dev = pci_get_drvdata(pdev);
10662 struct bnx2x *bp = netdev_priv(dev);
10666 netif_device_detach(dev);
10668 if (netif_running(dev))
10669 bnx2x_eeh_nic_unload(bp);
10671 pci_disable_device(pdev);
10675 /* Request a slot reset */
10676 return PCI_ERS_RESULT_NEED_RESET;
10680 * bnx2x_io_slot_reset - called after the PCI bus has been reset
10681 * @pdev: Pointer to PCI device
10683 * Restart the card from scratch, as if from a cold-boot.
10685 static pci_ers_result_t bnx2x_io_slot_reset(struct pci_dev *pdev)
10687 struct net_device *dev = pci_get_drvdata(pdev);
10688 struct bnx2x *bp = netdev_priv(dev);
10692 if (pci_enable_device(pdev)) {
10693 dev_err(&pdev->dev,
10694 "Cannot re-enable PCI device after reset\n");
10696 return PCI_ERS_RESULT_DISCONNECT;
10699 pci_set_master(pdev);
10700 pci_restore_state(pdev);
10702 if (netif_running(dev))
10703 bnx2x_set_power_state(bp, PCI_D0);
10707 return PCI_ERS_RESULT_RECOVERED;
10711 * bnx2x_io_resume - called when traffic can start flowing again
10712 * @pdev: Pointer to PCI device
10714 * This callback is called when the error recovery driver tells us that
10715 * its OK to resume normal operation.
10717 static void bnx2x_io_resume(struct pci_dev *pdev)
10719 struct net_device *dev = pci_get_drvdata(pdev);
10720 struct bnx2x *bp = netdev_priv(dev);
10724 bnx2x_eeh_recover(bp);
10726 if (netif_running(dev))
10727 bnx2x_nic_load(bp, LOAD_NORMAL);
10729 netif_device_attach(dev);
10734 static struct pci_error_handlers bnx2x_err_handler = {
10735 .error_detected = bnx2x_io_error_detected,
10736 .slot_reset = bnx2x_io_slot_reset,
10737 .resume = bnx2x_io_resume,
10740 static struct pci_driver bnx2x_pci_driver = {
10741 .name = DRV_MODULE_NAME,
10742 .id_table = bnx2x_pci_tbl,
10743 .probe = bnx2x_init_one,
10744 .remove = __devexit_p(bnx2x_remove_one),
10745 .suspend = bnx2x_suspend,
10746 .resume = bnx2x_resume,
10747 .err_handler = &bnx2x_err_handler,
10750 static int __init bnx2x_init(void)
10752 bnx2x_wq = create_singlethread_workqueue("bnx2x");
10753 if (bnx2x_wq == NULL) {
10754 printk(KERN_ERR PFX "Cannot create workqueue\n");
10758 return pci_register_driver(&bnx2x_pci_driver);
10761 static void __exit bnx2x_cleanup(void)
10763 pci_unregister_driver(&bnx2x_pci_driver);
10765 destroy_workqueue(bnx2x_wq);
10768 module_init(bnx2x_init);
10769 module_exit(bnx2x_cleanup);