1 /* bnx2x_main.c: Broadcom Everest network driver.
3 * Copyright (c) 2007-2009 Broadcom Corporation
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation.
9 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
10 * Written by: Eliezer Tamir
11 * Based on code from Michael Chan's bnx2 driver
12 * UDP CSUM errata workaround by Arik Gendelman
13 * Slowpath rework by Vladislav Zolotarov
14 * Statistics and Link management by Yitchak Gertner
18 #include <linux/module.h>
19 #include <linux/moduleparam.h>
20 #include <linux/kernel.h>
21 #include <linux/device.h> /* for dev_info() */
22 #include <linux/timer.h>
23 #include <linux/errno.h>
24 #include <linux/ioport.h>
25 #include <linux/slab.h>
26 #include <linux/vmalloc.h>
27 #include <linux/interrupt.h>
28 #include <linux/pci.h>
29 #include <linux/init.h>
30 #include <linux/netdevice.h>
31 #include <linux/etherdevice.h>
32 #include <linux/skbuff.h>
33 #include <linux/dma-mapping.h>
34 #include <linux/bitops.h>
35 #include <linux/irq.h>
36 #include <linux/delay.h>
37 #include <asm/byteorder.h>
38 #include <linux/time.h>
39 #include <linux/ethtool.h>
40 #include <linux/mii.h>
41 #include <linux/if_vlan.h>
44 #include <net/checksum.h>
45 #include <net/ip6_checksum.h>
46 #include <linux/workqueue.h>
47 #include <linux/crc32.h>
48 #include <linux/crc32c.h>
49 #include <linux/prefetch.h>
50 #include <linux/zlib.h>
53 #include "bnx2x_reg.h"
54 #include "bnx2x_fw_defs.h"
55 #include "bnx2x_hsi.h"
56 #include "bnx2x_link.h"
58 #include "bnx2x_init.h"
60 #define DRV_MODULE_VERSION "1.45.26"
61 #define DRV_MODULE_RELDATE "2009/01/26"
62 #define BNX2X_BC_VER 0x040200
64 /* Time in jiffies before concluding the transmitter is hung */
65 #define TX_TIMEOUT (5*HZ)
67 static char version[] __devinitdata =
68 "Broadcom NetXtreme II 5771x 10Gigabit Ethernet Driver "
69 DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
71 MODULE_AUTHOR("Eliezer Tamir");
72 MODULE_DESCRIPTION("Broadcom NetXtreme II BCM57710/57711/57711E Driver");
73 MODULE_LICENSE("GPL");
74 MODULE_VERSION(DRV_MODULE_VERSION);
76 static int multi_mode = 1;
77 module_param(multi_mode, int, 0);
79 static int disable_tpa;
82 static int load_count[3]; /* 0-common, 1-port0, 2-port1 */
84 module_param(disable_tpa, int, 0);
87 module_param(int_mode, int, 0);
88 MODULE_PARM_DESC(int_mode, " Force interrupt mode (1 INT#x; 2 MSI)");
90 module_param(poll, int, 0);
93 module_param(mrrs, int, 0);
94 MODULE_PARM_DESC(mrrs, " Force Max Read Req Size (0..3) (for debug)");
96 module_param(debug, int, 0);
97 MODULE_PARM_DESC(disable_tpa, "disable the TPA (LRO) feature");
98 MODULE_PARM_DESC(poll, "use polling (for debug)");
99 MODULE_PARM_DESC(debug, "default debug msglevel");
101 static struct workqueue_struct *bnx2x_wq;
103 enum bnx2x_board_type {
109 /* indexed by board_type, above */
112 } board_info[] __devinitdata = {
113 { "Broadcom NetXtreme II BCM57710 XGb" },
114 { "Broadcom NetXtreme II BCM57711 XGb" },
115 { "Broadcom NetXtreme II BCM57711E XGb" }
119 static const struct pci_device_id bnx2x_pci_tbl[] = {
120 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57710,
121 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57710 },
122 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57711,
123 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57711 },
124 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57711E,
125 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57711E },
129 MODULE_DEVICE_TABLE(pci, bnx2x_pci_tbl);
131 /****************************************************************************
132 * General service functions
133 ****************************************************************************/
136 * locking is done by mcp
138 static void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val)
140 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
141 pci_write_config_dword(bp->pdev, PCICFG_GRC_DATA, val);
142 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
143 PCICFG_VENDOR_ID_OFFSET);
146 static u32 bnx2x_reg_rd_ind(struct bnx2x *bp, u32 addr)
150 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
151 pci_read_config_dword(bp->pdev, PCICFG_GRC_DATA, &val);
152 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
153 PCICFG_VENDOR_ID_OFFSET);
158 static const u32 dmae_reg_go_c[] = {
159 DMAE_REG_GO_C0, DMAE_REG_GO_C1, DMAE_REG_GO_C2, DMAE_REG_GO_C3,
160 DMAE_REG_GO_C4, DMAE_REG_GO_C5, DMAE_REG_GO_C6, DMAE_REG_GO_C7,
161 DMAE_REG_GO_C8, DMAE_REG_GO_C9, DMAE_REG_GO_C10, DMAE_REG_GO_C11,
162 DMAE_REG_GO_C12, DMAE_REG_GO_C13, DMAE_REG_GO_C14, DMAE_REG_GO_C15
165 /* copy command into DMAE command memory and set DMAE command go */
166 static void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae,
172 cmd_offset = (DMAE_REG_CMD_MEM + sizeof(struct dmae_command) * idx);
173 for (i = 0; i < (sizeof(struct dmae_command)/4); i++) {
174 REG_WR(bp, cmd_offset + i*4, *(((u32 *)dmae) + i));
176 DP(BNX2X_MSG_OFF, "DMAE cmd[%d].%d (0x%08x) : 0x%08x\n",
177 idx, i, cmd_offset + i*4, *(((u32 *)dmae) + i));
179 REG_WR(bp, dmae_reg_go_c[idx], 1);
182 void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
185 struct dmae_command *dmae = &bp->init_dmae;
186 u32 *wb_comp = bnx2x_sp(bp, wb_comp);
189 if (!bp->dmae_ready) {
190 u32 *data = bnx2x_sp(bp, wb_data[0]);
192 DP(BNX2X_MSG_OFF, "DMAE is not ready (dst_addr %08x len32 %d)"
193 " using indirect\n", dst_addr, len32);
194 bnx2x_init_ind_wr(bp, dst_addr, data, len32);
198 mutex_lock(&bp->dmae_mutex);
200 memset(dmae, 0, sizeof(struct dmae_command));
202 dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
203 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
204 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
206 DMAE_CMD_ENDIANITY_B_DW_SWAP |
208 DMAE_CMD_ENDIANITY_DW_SWAP |
210 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
211 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
212 dmae->src_addr_lo = U64_LO(dma_addr);
213 dmae->src_addr_hi = U64_HI(dma_addr);
214 dmae->dst_addr_lo = dst_addr >> 2;
215 dmae->dst_addr_hi = 0;
217 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
218 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
219 dmae->comp_val = DMAE_COMP_VAL;
221 DP(BNX2X_MSG_OFF, "dmae: opcode 0x%08x\n"
222 DP_LEVEL "src_addr [%x:%08x] len [%d *4] "
223 "dst_addr [%x:%08x (%08x)]\n"
224 DP_LEVEL "comp_addr [%x:%08x] comp_val 0x%08x\n",
225 dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
226 dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo, dst_addr,
227 dmae->comp_addr_hi, dmae->comp_addr_lo, dmae->comp_val);
228 DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
229 bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
230 bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);
234 bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
238 while (*wb_comp != DMAE_COMP_VAL) {
239 DP(BNX2X_MSG_OFF, "wb_comp 0x%08x\n", *wb_comp);
242 BNX2X_ERR("dmae timeout!\n");
246 /* adjust delay for emulation/FPGA */
247 if (CHIP_REV_IS_SLOW(bp))
253 mutex_unlock(&bp->dmae_mutex);
256 void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
258 struct dmae_command *dmae = &bp->init_dmae;
259 u32 *wb_comp = bnx2x_sp(bp, wb_comp);
262 if (!bp->dmae_ready) {
263 u32 *data = bnx2x_sp(bp, wb_data[0]);
266 DP(BNX2X_MSG_OFF, "DMAE is not ready (src_addr %08x len32 %d)"
267 " using indirect\n", src_addr, len32);
268 for (i = 0; i < len32; i++)
269 data[i] = bnx2x_reg_rd_ind(bp, src_addr + i*4);
273 mutex_lock(&bp->dmae_mutex);
275 memset(bnx2x_sp(bp, wb_data[0]), 0, sizeof(u32) * 4);
276 memset(dmae, 0, sizeof(struct dmae_command));
278 dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
279 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
280 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
282 DMAE_CMD_ENDIANITY_B_DW_SWAP |
284 DMAE_CMD_ENDIANITY_DW_SWAP |
286 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
287 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
288 dmae->src_addr_lo = src_addr >> 2;
289 dmae->src_addr_hi = 0;
290 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_data));
291 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_data));
293 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
294 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
295 dmae->comp_val = DMAE_COMP_VAL;
297 DP(BNX2X_MSG_OFF, "dmae: opcode 0x%08x\n"
298 DP_LEVEL "src_addr [%x:%08x] len [%d *4] "
299 "dst_addr [%x:%08x (%08x)]\n"
300 DP_LEVEL "comp_addr [%x:%08x] comp_val 0x%08x\n",
301 dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
302 dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo, src_addr,
303 dmae->comp_addr_hi, dmae->comp_addr_lo, dmae->comp_val);
307 bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
311 while (*wb_comp != DMAE_COMP_VAL) {
314 BNX2X_ERR("dmae timeout!\n");
318 /* adjust delay for emulation/FPGA */
319 if (CHIP_REV_IS_SLOW(bp))
324 DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
325 bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
326 bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);
328 mutex_unlock(&bp->dmae_mutex);
331 /* used only for slowpath so not inlined */
332 static void bnx2x_wb_wr(struct bnx2x *bp, int reg, u32 val_hi, u32 val_lo)
336 wb_write[0] = val_hi;
337 wb_write[1] = val_lo;
338 REG_WR_DMAE(bp, reg, wb_write, 2);
342 static u64 bnx2x_wb_rd(struct bnx2x *bp, int reg)
346 REG_RD_DMAE(bp, reg, wb_data, 2);
348 return HILO_U64(wb_data[0], wb_data[1]);
352 static int bnx2x_mc_assert(struct bnx2x *bp)
356 u32 row0, row1, row2, row3;
359 last_idx = REG_RD8(bp, BAR_XSTRORM_INTMEM +
360 XSTORM_ASSERT_LIST_INDEX_OFFSET);
362 BNX2X_ERR("XSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
364 /* print the asserts */
365 for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
367 row0 = REG_RD(bp, BAR_XSTRORM_INTMEM +
368 XSTORM_ASSERT_LIST_OFFSET(i));
369 row1 = REG_RD(bp, BAR_XSTRORM_INTMEM +
370 XSTORM_ASSERT_LIST_OFFSET(i) + 4);
371 row2 = REG_RD(bp, BAR_XSTRORM_INTMEM +
372 XSTORM_ASSERT_LIST_OFFSET(i) + 8);
373 row3 = REG_RD(bp, BAR_XSTRORM_INTMEM +
374 XSTORM_ASSERT_LIST_OFFSET(i) + 12);
376 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
377 BNX2X_ERR("XSTORM_ASSERT_INDEX 0x%x = 0x%08x"
378 " 0x%08x 0x%08x 0x%08x\n",
379 i, row3, row2, row1, row0);
387 last_idx = REG_RD8(bp, BAR_TSTRORM_INTMEM +
388 TSTORM_ASSERT_LIST_INDEX_OFFSET);
390 BNX2X_ERR("TSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
392 /* print the asserts */
393 for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
395 row0 = REG_RD(bp, BAR_TSTRORM_INTMEM +
396 TSTORM_ASSERT_LIST_OFFSET(i));
397 row1 = REG_RD(bp, BAR_TSTRORM_INTMEM +
398 TSTORM_ASSERT_LIST_OFFSET(i) + 4);
399 row2 = REG_RD(bp, BAR_TSTRORM_INTMEM +
400 TSTORM_ASSERT_LIST_OFFSET(i) + 8);
401 row3 = REG_RD(bp, BAR_TSTRORM_INTMEM +
402 TSTORM_ASSERT_LIST_OFFSET(i) + 12);
404 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
405 BNX2X_ERR("TSTORM_ASSERT_INDEX 0x%x = 0x%08x"
406 " 0x%08x 0x%08x 0x%08x\n",
407 i, row3, row2, row1, row0);
415 last_idx = REG_RD8(bp, BAR_CSTRORM_INTMEM +
416 CSTORM_ASSERT_LIST_INDEX_OFFSET);
418 BNX2X_ERR("CSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
420 /* print the asserts */
421 for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
423 row0 = REG_RD(bp, BAR_CSTRORM_INTMEM +
424 CSTORM_ASSERT_LIST_OFFSET(i));
425 row1 = REG_RD(bp, BAR_CSTRORM_INTMEM +
426 CSTORM_ASSERT_LIST_OFFSET(i) + 4);
427 row2 = REG_RD(bp, BAR_CSTRORM_INTMEM +
428 CSTORM_ASSERT_LIST_OFFSET(i) + 8);
429 row3 = REG_RD(bp, BAR_CSTRORM_INTMEM +
430 CSTORM_ASSERT_LIST_OFFSET(i) + 12);
432 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
433 BNX2X_ERR("CSTORM_ASSERT_INDEX 0x%x = 0x%08x"
434 " 0x%08x 0x%08x 0x%08x\n",
435 i, row3, row2, row1, row0);
443 last_idx = REG_RD8(bp, BAR_USTRORM_INTMEM +
444 USTORM_ASSERT_LIST_INDEX_OFFSET);
446 BNX2X_ERR("USTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
448 /* print the asserts */
449 for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
451 row0 = REG_RD(bp, BAR_USTRORM_INTMEM +
452 USTORM_ASSERT_LIST_OFFSET(i));
453 row1 = REG_RD(bp, BAR_USTRORM_INTMEM +
454 USTORM_ASSERT_LIST_OFFSET(i) + 4);
455 row2 = REG_RD(bp, BAR_USTRORM_INTMEM +
456 USTORM_ASSERT_LIST_OFFSET(i) + 8);
457 row3 = REG_RD(bp, BAR_USTRORM_INTMEM +
458 USTORM_ASSERT_LIST_OFFSET(i) + 12);
460 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
461 BNX2X_ERR("USTORM_ASSERT_INDEX 0x%x = 0x%08x"
462 " 0x%08x 0x%08x 0x%08x\n",
463 i, row3, row2, row1, row0);
473 static void bnx2x_fw_dump(struct bnx2x *bp)
479 mark = REG_RD(bp, MCP_REG_MCPR_SCRATCH + 0xf104);
480 mark = ((mark + 0x3) & ~0x3);
481 printk(KERN_ERR PFX "begin fw dump (mark 0x%x)\n" KERN_ERR, mark);
483 for (offset = mark - 0x08000000; offset <= 0xF900; offset += 0x8*4) {
484 for (word = 0; word < 8; word++)
485 data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
488 printk(KERN_CONT "%s", (char *)data);
490 for (offset = 0xF108; offset <= mark - 0x08000000; offset += 0x8*4) {
491 for (word = 0; word < 8; word++)
492 data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
495 printk(KERN_CONT "%s", (char *)data);
497 printk("\n" KERN_ERR PFX "end of fw dump\n");
500 static void bnx2x_panic_dump(struct bnx2x *bp)
505 bp->stats_state = STATS_STATE_DISABLED;
506 DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");
508 BNX2X_ERR("begin crash dump -----------------\n");
510 for_each_queue(bp, i) {
511 struct bnx2x_fastpath *fp = &bp->fp[i];
512 struct eth_tx_db_data *hw_prods = fp->hw_tx_prods;
514 BNX2X_ERR("queue[%d]: tx_pkt_prod(%x) tx_pkt_cons(%x)"
515 " tx_bd_prod(%x) tx_bd_cons(%x) *tx_cons_sb(%x)\n",
516 i, fp->tx_pkt_prod, fp->tx_pkt_cons, fp->tx_bd_prod,
517 fp->tx_bd_cons, le16_to_cpu(*fp->tx_cons_sb));
518 BNX2X_ERR(" rx_bd_prod(%x) rx_bd_cons(%x)"
519 " *rx_bd_cons_sb(%x) rx_comp_prod(%x)"
520 " rx_comp_cons(%x) *rx_cons_sb(%x)\n",
521 fp->rx_bd_prod, fp->rx_bd_cons,
522 le16_to_cpu(*fp->rx_bd_cons_sb), fp->rx_comp_prod,
523 fp->rx_comp_cons, le16_to_cpu(*fp->rx_cons_sb));
524 BNX2X_ERR(" rx_sge_prod(%x) last_max_sge(%x)"
525 " fp_c_idx(%x) *sb_c_idx(%x) fp_u_idx(%x)"
526 " *sb_u_idx(%x) bd data(%x,%x)\n",
527 fp->rx_sge_prod, fp->last_max_sge, fp->fp_c_idx,
528 fp->status_blk->c_status_block.status_block_index,
530 fp->status_blk->u_status_block.status_block_index,
531 hw_prods->packets_prod, hw_prods->bds_prod);
533 start = TX_BD(le16_to_cpu(*fp->tx_cons_sb) - 10);
534 end = TX_BD(le16_to_cpu(*fp->tx_cons_sb) + 245);
535 for (j = start; j < end; j++) {
536 struct sw_tx_bd *sw_bd = &fp->tx_buf_ring[j];
538 BNX2X_ERR("packet[%x]=[%p,%x]\n", j,
539 sw_bd->skb, sw_bd->first_bd);
542 start = TX_BD(fp->tx_bd_cons - 10);
543 end = TX_BD(fp->tx_bd_cons + 254);
544 for (j = start; j < end; j++) {
545 u32 *tx_bd = (u32 *)&fp->tx_desc_ring[j];
547 BNX2X_ERR("tx_bd[%x]=[%x:%x:%x:%x]\n",
548 j, tx_bd[0], tx_bd[1], tx_bd[2], tx_bd[3]);
551 start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10);
552 end = RX_BD(le16_to_cpu(*fp->rx_cons_sb) + 503);
553 for (j = start; j < end; j++) {
554 u32 *rx_bd = (u32 *)&fp->rx_desc_ring[j];
555 struct sw_rx_bd *sw_bd = &fp->rx_buf_ring[j];
557 BNX2X_ERR("rx_bd[%x]=[%x:%x] sw_bd=[%p]\n",
558 j, rx_bd[1], rx_bd[0], sw_bd->skb);
561 start = RX_SGE(fp->rx_sge_prod);
562 end = RX_SGE(fp->last_max_sge);
563 for (j = start; j < end; j++) {
564 u32 *rx_sge = (u32 *)&fp->rx_sge_ring[j];
565 struct sw_rx_page *sw_page = &fp->rx_page_ring[j];
567 BNX2X_ERR("rx_sge[%x]=[%x:%x] sw_page=[%p]\n",
568 j, rx_sge[1], rx_sge[0], sw_page->page);
571 start = RCQ_BD(fp->rx_comp_cons - 10);
572 end = RCQ_BD(fp->rx_comp_cons + 503);
573 for (j = start; j < end; j++) {
574 u32 *cqe = (u32 *)&fp->rx_comp_ring[j];
576 BNX2X_ERR("cqe[%x]=[%x:%x:%x:%x]\n",
577 j, cqe[0], cqe[1], cqe[2], cqe[3]);
581 BNX2X_ERR("def_c_idx(%u) def_u_idx(%u) def_x_idx(%u)"
582 " def_t_idx(%u) def_att_idx(%u) attn_state(%u)"
583 " spq_prod_idx(%u)\n",
584 bp->def_c_idx, bp->def_u_idx, bp->def_x_idx, bp->def_t_idx,
585 bp->def_att_idx, bp->attn_state, bp->spq_prod_idx);
589 BNX2X_ERR("end crash dump -----------------\n");
592 static void bnx2x_int_enable(struct bnx2x *bp)
594 int port = BP_PORT(bp);
595 u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
596 u32 val = REG_RD(bp, addr);
597 int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
598 int msi = (bp->flags & USING_MSI_FLAG) ? 1 : 0;
601 val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
602 HC_CONFIG_0_REG_INT_LINE_EN_0);
603 val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
604 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
606 val &= ~HC_CONFIG_0_REG_INT_LINE_EN_0;
607 val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
608 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
609 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
611 val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
612 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
613 HC_CONFIG_0_REG_INT_LINE_EN_0 |
614 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
616 DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
619 REG_WR(bp, addr, val);
621 val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0;
624 DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x) mode %s\n",
625 val, port, addr, (msix ? "MSI-X" : (msi ? "MSI" : "INTx")));
627 REG_WR(bp, addr, val);
629 if (CHIP_IS_E1H(bp)) {
630 /* init leading/trailing edge */
632 val = (0xee0f | (1 << (BP_E1HVN(bp) + 4)));
634 /* enable nig and gpio3 attention */
639 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
640 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
644 static void bnx2x_int_disable(struct bnx2x *bp)
646 int port = BP_PORT(bp);
647 u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
648 u32 val = REG_RD(bp, addr);
650 val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
651 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
652 HC_CONFIG_0_REG_INT_LINE_EN_0 |
653 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
655 DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
658 /* flush all outstanding writes */
661 REG_WR(bp, addr, val);
662 if (REG_RD(bp, addr) != val)
663 BNX2X_ERR("BUG! proper val not read from IGU!\n");
666 static void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw)
668 int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
671 /* disable interrupt handling */
672 atomic_inc(&bp->intr_sem);
674 /* prevent the HW from sending interrupts */
675 bnx2x_int_disable(bp);
677 /* make sure all ISRs are done */
679 synchronize_irq(bp->msix_table[0].vector);
681 for_each_queue(bp, i)
682 synchronize_irq(bp->msix_table[i + offset].vector);
684 synchronize_irq(bp->pdev->irq);
686 /* make sure sp_task is not running */
687 cancel_delayed_work(&bp->sp_task);
688 flush_workqueue(bnx2x_wq);
694 * General service functions
697 static inline void bnx2x_ack_sb(struct bnx2x *bp, u8 sb_id,
698 u8 storm, u16 index, u8 op, u8 update)
700 u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
701 COMMAND_REG_INT_ACK);
702 struct igu_ack_register igu_ack;
704 igu_ack.status_block_index = index;
705 igu_ack.sb_id_and_flags =
706 ((sb_id << IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT) |
707 (storm << IGU_ACK_REGISTER_STORM_ID_SHIFT) |
708 (update << IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT) |
709 (op << IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT));
711 DP(BNX2X_MSG_OFF, "write 0x%08x to HC addr 0x%x\n",
712 (*(u32 *)&igu_ack), hc_addr);
713 REG_WR(bp, hc_addr, (*(u32 *)&igu_ack));
716 static inline u16 bnx2x_update_fpsb_idx(struct bnx2x_fastpath *fp)
718 struct host_status_block *fpsb = fp->status_blk;
721 barrier(); /* status block is written to by the chip */
722 if (fp->fp_c_idx != fpsb->c_status_block.status_block_index) {
723 fp->fp_c_idx = fpsb->c_status_block.status_block_index;
726 if (fp->fp_u_idx != fpsb->u_status_block.status_block_index) {
727 fp->fp_u_idx = fpsb->u_status_block.status_block_index;
733 static u16 bnx2x_ack_int(struct bnx2x *bp)
735 u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
736 COMMAND_REG_SIMD_MASK);
737 u32 result = REG_RD(bp, hc_addr);
739 DP(BNX2X_MSG_OFF, "read 0x%08x from HC addr 0x%x\n",
747 * fast path service functions
750 static inline int bnx2x_has_tx_work(struct bnx2x_fastpath *fp)
754 /* Tell compiler that status block fields can change */
756 tx_cons_sb = le16_to_cpu(*fp->tx_cons_sb);
757 return (fp->tx_pkt_cons != tx_cons_sb);
760 static inline int bnx2x_has_tx_work_unload(struct bnx2x_fastpath *fp)
762 /* Tell compiler that consumer and producer can change */
764 return (fp->tx_pkt_prod != fp->tx_pkt_cons);
768 /* free skb in the packet ring at pos idx
769 * return idx of last bd freed
771 static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fastpath *fp,
774 struct sw_tx_bd *tx_buf = &fp->tx_buf_ring[idx];
775 struct eth_tx_bd *tx_bd;
776 struct sk_buff *skb = tx_buf->skb;
777 u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
780 DP(BNX2X_MSG_OFF, "pkt_idx %d buff @(%p)->skb %p\n",
784 DP(BNX2X_MSG_OFF, "free bd_idx %d\n", bd_idx);
785 tx_bd = &fp->tx_desc_ring[bd_idx];
786 pci_unmap_single(bp->pdev, BD_UNMAP_ADDR(tx_bd),
787 BD_UNMAP_LEN(tx_bd), PCI_DMA_TODEVICE);
789 nbd = le16_to_cpu(tx_bd->nbd) - 1;
790 new_cons = nbd + tx_buf->first_bd;
791 #ifdef BNX2X_STOP_ON_ERROR
792 if (nbd > (MAX_SKB_FRAGS + 2)) {
793 BNX2X_ERR("BAD nbd!\n");
798 /* Skip a parse bd and the TSO split header bd
799 since they have no mapping */
801 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
803 if (tx_bd->bd_flags.as_bitfield & (ETH_TX_BD_FLAGS_IP_CSUM |
804 ETH_TX_BD_FLAGS_TCP_CSUM |
805 ETH_TX_BD_FLAGS_SW_LSO)) {
807 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
808 tx_bd = &fp->tx_desc_ring[bd_idx];
809 /* is this a TSO split header bd? */
810 if (tx_bd->bd_flags.as_bitfield & ETH_TX_BD_FLAGS_SW_LSO) {
812 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
819 DP(BNX2X_MSG_OFF, "free frag bd_idx %d\n", bd_idx);
820 tx_bd = &fp->tx_desc_ring[bd_idx];
821 pci_unmap_page(bp->pdev, BD_UNMAP_ADDR(tx_bd),
822 BD_UNMAP_LEN(tx_bd), PCI_DMA_TODEVICE);
824 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
830 tx_buf->first_bd = 0;
836 static inline u16 bnx2x_tx_avail(struct bnx2x_fastpath *fp)
842 barrier(); /* Tell compiler that prod and cons can change */
843 prod = fp->tx_bd_prod;
844 cons = fp->tx_bd_cons;
846 /* NUM_TX_RINGS = number of "next-page" entries
847 It will be used as a threshold */
848 used = SUB_S16(prod, cons) + (s16)NUM_TX_RINGS;
850 #ifdef BNX2X_STOP_ON_ERROR
852 WARN_ON(used > fp->bp->tx_ring_size);
853 WARN_ON((fp->bp->tx_ring_size - used) > MAX_TX_AVAIL);
856 return (s16)(fp->bp->tx_ring_size) - used;
859 static void bnx2x_tx_int(struct bnx2x_fastpath *fp, int work)
861 struct bnx2x *bp = fp->bp;
862 struct netdev_queue *txq;
863 u16 hw_cons, sw_cons, bd_cons = fp->tx_bd_cons;
866 #ifdef BNX2X_STOP_ON_ERROR
867 if (unlikely(bp->panic))
871 txq = netdev_get_tx_queue(bp->dev, fp->index);
872 hw_cons = le16_to_cpu(*fp->tx_cons_sb);
873 sw_cons = fp->tx_pkt_cons;
875 while (sw_cons != hw_cons) {
878 pkt_cons = TX_BD(sw_cons);
880 /* prefetch(bp->tx_buf_ring[pkt_cons].skb); */
882 DP(NETIF_MSG_TX_DONE, "hw_cons %u sw_cons %u pkt_cons %u\n",
883 hw_cons, sw_cons, pkt_cons);
885 /* if (NEXT_TX_IDX(sw_cons) != hw_cons) {
887 prefetch(fp->tx_buf_ring[NEXT_TX_IDX(sw_cons)].skb);
890 bd_cons = bnx2x_free_tx_pkt(bp, fp, pkt_cons);
898 fp->tx_pkt_cons = sw_cons;
899 fp->tx_bd_cons = bd_cons;
901 /* Need to make the tx_bd_cons update visible to start_xmit()
902 * before checking for netif_tx_queue_stopped(). Without the
903 * memory barrier, there is a small possibility that start_xmit()
904 * will miss it and cause the queue to be stopped forever.
908 /* TBD need a thresh? */
909 if (unlikely(netif_tx_queue_stopped(txq))) {
911 __netif_tx_lock(txq, smp_processor_id());
913 if ((netif_tx_queue_stopped(txq)) &&
914 (bp->state == BNX2X_STATE_OPEN) &&
915 (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3))
916 netif_tx_wake_queue(txq);
918 __netif_tx_unlock(txq);
923 static void bnx2x_sp_event(struct bnx2x_fastpath *fp,
924 union eth_rx_cqe *rr_cqe)
926 struct bnx2x *bp = fp->bp;
927 int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data);
928 int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data);
931 "fp %d cid %d got ramrod #%d state is %x type is %d\n",
932 FP_IDX(fp), cid, command, bp->state,
933 rr_cqe->ramrod_cqe.ramrod_type);
938 switch (command | fp->state) {
939 case (RAMROD_CMD_ID_ETH_CLIENT_SETUP |
940 BNX2X_FP_STATE_OPENING):
941 DP(NETIF_MSG_IFUP, "got MULTI[%d] setup ramrod\n",
943 fp->state = BNX2X_FP_STATE_OPEN;
946 case (RAMROD_CMD_ID_ETH_HALT | BNX2X_FP_STATE_HALTING):
947 DP(NETIF_MSG_IFDOWN, "got MULTI[%d] halt ramrod\n",
949 fp->state = BNX2X_FP_STATE_HALTED;
953 BNX2X_ERR("unexpected MC reply (%d) "
954 "fp->state is %x\n", command, fp->state);
957 mb(); /* force bnx2x_wait_ramrod() to see the change */
961 switch (command | bp->state) {
962 case (RAMROD_CMD_ID_ETH_PORT_SETUP | BNX2X_STATE_OPENING_WAIT4_PORT):
963 DP(NETIF_MSG_IFUP, "got setup ramrod\n");
964 bp->state = BNX2X_STATE_OPEN;
967 case (RAMROD_CMD_ID_ETH_HALT | BNX2X_STATE_CLOSING_WAIT4_HALT):
968 DP(NETIF_MSG_IFDOWN, "got halt ramrod\n");
969 bp->state = BNX2X_STATE_CLOSING_WAIT4_DELETE;
970 fp->state = BNX2X_FP_STATE_HALTED;
973 case (RAMROD_CMD_ID_ETH_CFC_DEL | BNX2X_STATE_CLOSING_WAIT4_HALT):
974 DP(NETIF_MSG_IFDOWN, "got delete ramrod for MULTI[%d]\n", cid);
975 bnx2x_fp(bp, cid, state) = BNX2X_FP_STATE_CLOSED;
979 case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_OPEN):
980 case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_DIAG):
981 DP(NETIF_MSG_IFUP, "got set mac ramrod\n");
982 bp->set_mac_pending = 0;
985 case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_CLOSING_WAIT4_HALT):
986 DP(NETIF_MSG_IFDOWN, "got (un)set mac ramrod\n");
990 BNX2X_ERR("unexpected MC reply (%d) bp->state is %x\n",
994 mb(); /* force bnx2x_wait_ramrod() to see the change */
997 static inline void bnx2x_free_rx_sge(struct bnx2x *bp,
998 struct bnx2x_fastpath *fp, u16 index)
1000 struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
1001 struct page *page = sw_buf->page;
1002 struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
1004 /* Skip "next page" elements */
1008 pci_unmap_page(bp->pdev, pci_unmap_addr(sw_buf, mapping),
1009 SGE_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);
1010 __free_pages(page, PAGES_PER_SGE_SHIFT);
1012 sw_buf->page = NULL;
1017 static inline void bnx2x_free_rx_sge_range(struct bnx2x *bp,
1018 struct bnx2x_fastpath *fp, int last)
1022 for (i = 0; i < last; i++)
1023 bnx2x_free_rx_sge(bp, fp, i);
1026 static inline int bnx2x_alloc_rx_sge(struct bnx2x *bp,
1027 struct bnx2x_fastpath *fp, u16 index)
1029 struct page *page = alloc_pages(GFP_ATOMIC, PAGES_PER_SGE_SHIFT);
1030 struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
1031 struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
1034 if (unlikely(page == NULL))
1037 mapping = pci_map_page(bp->pdev, page, 0, SGE_PAGE_SIZE*PAGES_PER_SGE,
1038 PCI_DMA_FROMDEVICE);
1039 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
1040 __free_pages(page, PAGES_PER_SGE_SHIFT);
1044 sw_buf->page = page;
1045 pci_unmap_addr_set(sw_buf, mapping, mapping);
1047 sge->addr_hi = cpu_to_le32(U64_HI(mapping));
1048 sge->addr_lo = cpu_to_le32(U64_LO(mapping));
1053 static inline int bnx2x_alloc_rx_skb(struct bnx2x *bp,
1054 struct bnx2x_fastpath *fp, u16 index)
1056 struct sk_buff *skb;
1057 struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
1058 struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
1061 skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
1062 if (unlikely(skb == NULL))
1065 mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_size,
1066 PCI_DMA_FROMDEVICE);
1067 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
1073 pci_unmap_addr_set(rx_buf, mapping, mapping);
1075 rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
1076 rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
1081 /* note that we are not allocating a new skb,
1082 * we are just moving one from cons to prod
1083 * we are not creating a new mapping,
1084 * so there is no need to check for dma_mapping_error().
1086 static void bnx2x_reuse_rx_skb(struct bnx2x_fastpath *fp,
1087 struct sk_buff *skb, u16 cons, u16 prod)
1089 struct bnx2x *bp = fp->bp;
1090 struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
1091 struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
1092 struct eth_rx_bd *cons_bd = &fp->rx_desc_ring[cons];
1093 struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
1095 pci_dma_sync_single_for_device(bp->pdev,
1096 pci_unmap_addr(cons_rx_buf, mapping),
1097 RX_COPY_THRESH, PCI_DMA_FROMDEVICE);
1099 prod_rx_buf->skb = cons_rx_buf->skb;
1100 pci_unmap_addr_set(prod_rx_buf, mapping,
1101 pci_unmap_addr(cons_rx_buf, mapping));
1102 *prod_bd = *cons_bd;
1105 static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
1108 u16 last_max = fp->last_max_sge;
1110 if (SUB_S16(idx, last_max) > 0)
1111 fp->last_max_sge = idx;
1114 static void bnx2x_clear_sge_mask_next_elems(struct bnx2x_fastpath *fp)
1118 for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
1119 int idx = RX_SGE_CNT * i - 1;
1121 for (j = 0; j < 2; j++) {
1122 SGE_MASK_CLEAR_BIT(fp, idx);
1128 static void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
1129 struct eth_fast_path_rx_cqe *fp_cqe)
1131 struct bnx2x *bp = fp->bp;
1132 u16 sge_len = SGE_PAGE_ALIGN(le16_to_cpu(fp_cqe->pkt_len) -
1133 le16_to_cpu(fp_cqe->len_on_bd)) >>
1135 u16 last_max, last_elem, first_elem;
1142 /* First mark all used pages */
1143 for (i = 0; i < sge_len; i++)
1144 SGE_MASK_CLEAR_BIT(fp, RX_SGE(le16_to_cpu(fp_cqe->sgl[i])));
1146 DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
1147 sge_len - 1, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));
1149 /* Here we assume that the last SGE index is the biggest */
1150 prefetch((void *)(fp->sge_mask));
1151 bnx2x_update_last_max_sge(fp, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));
1153 last_max = RX_SGE(fp->last_max_sge);
1154 last_elem = last_max >> RX_SGE_MASK_ELEM_SHIFT;
1155 first_elem = RX_SGE(fp->rx_sge_prod) >> RX_SGE_MASK_ELEM_SHIFT;
1157 /* If ring is not full */
1158 if (last_elem + 1 != first_elem)
1161 /* Now update the prod */
1162 for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
1163 if (likely(fp->sge_mask[i]))
1166 fp->sge_mask[i] = RX_SGE_MASK_ELEM_ONE_MASK;
1167 delta += RX_SGE_MASK_ELEM_SZ;
1171 fp->rx_sge_prod += delta;
1172 /* clear page-end entries */
1173 bnx2x_clear_sge_mask_next_elems(fp);
1176 DP(NETIF_MSG_RX_STATUS,
1177 "fp->last_max_sge = %d fp->rx_sge_prod = %d\n",
1178 fp->last_max_sge, fp->rx_sge_prod);
1181 static inline void bnx2x_init_sge_ring_bit_mask(struct bnx2x_fastpath *fp)
1183 /* Set the mask to all 1-s: it's faster to compare to 0 than to 0xf-s */
1184 memset(fp->sge_mask, 0xff,
1185 (NUM_RX_SGE >> RX_SGE_MASK_ELEM_SHIFT)*sizeof(u64));
1187 /* Clear the two last indices in the page to 1:
1188 these are the indices that correspond to the "next" element,
1189 hence will never be indicated and should be removed from
1190 the calculations. */
1191 bnx2x_clear_sge_mask_next_elems(fp);
1194 static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
1195 struct sk_buff *skb, u16 cons, u16 prod)
1197 struct bnx2x *bp = fp->bp;
1198 struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
1199 struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
1200 struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
1203 /* move empty skb from pool to prod and map it */
1204 prod_rx_buf->skb = fp->tpa_pool[queue].skb;
1205 mapping = pci_map_single(bp->pdev, fp->tpa_pool[queue].skb->data,
1206 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
1207 pci_unmap_addr_set(prod_rx_buf, mapping, mapping);
1209 /* move partial skb from cons to pool (don't unmap yet) */
1210 fp->tpa_pool[queue] = *cons_rx_buf;
1212 /* mark bin state as start - print error if current state != stop */
1213 if (fp->tpa_state[queue] != BNX2X_TPA_STOP)
1214 BNX2X_ERR("start of bin not in stop [%d]\n", queue);
1216 fp->tpa_state[queue] = BNX2X_TPA_START;
1218 /* point prod_bd to new skb */
1219 prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
1220 prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
1222 #ifdef BNX2X_STOP_ON_ERROR
1223 fp->tpa_queue_used |= (1 << queue);
1224 #ifdef __powerpc64__
1225 DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n",
1227 DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
1229 fp->tpa_queue_used);
1233 static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
1234 struct sk_buff *skb,
1235 struct eth_fast_path_rx_cqe *fp_cqe,
1238 struct sw_rx_page *rx_pg, old_rx_pg;
1239 u16 len_on_bd = le16_to_cpu(fp_cqe->len_on_bd);
1240 u32 i, frag_len, frag_size, pages;
1244 frag_size = le16_to_cpu(fp_cqe->pkt_len) - len_on_bd;
1245 pages = SGE_PAGE_ALIGN(frag_size) >> SGE_PAGE_SHIFT;
1247 /* This is needed in order to enable forwarding support */
1249 skb_shinfo(skb)->gso_size = min((u32)SGE_PAGE_SIZE,
1250 max(frag_size, (u32)len_on_bd));
1252 #ifdef BNX2X_STOP_ON_ERROR
1254 min((u32)8, (u32)MAX_SKB_FRAGS) * SGE_PAGE_SIZE * PAGES_PER_SGE) {
1255 BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
1257 BNX2X_ERR("fp_cqe->pkt_len = %d fp_cqe->len_on_bd = %d\n",
1258 fp_cqe->pkt_len, len_on_bd);
1264 /* Run through the SGL and compose the fragmented skb */
1265 for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
1266 u16 sge_idx = RX_SGE(le16_to_cpu(fp_cqe->sgl[j]));
1268 /* FW gives the indices of the SGE as if the ring is an array
1269 (meaning that "next" element will consume 2 indices) */
1270 frag_len = min(frag_size, (u32)(SGE_PAGE_SIZE*PAGES_PER_SGE));
1271 rx_pg = &fp->rx_page_ring[sge_idx];
1274 /* If we fail to allocate a substitute page, we simply stop
1275 where we are and drop the whole packet */
1276 err = bnx2x_alloc_rx_sge(bp, fp, sge_idx);
1277 if (unlikely(err)) {
1278 fp->eth_q_stats.rx_skb_alloc_failed++;
1282 /* Unmap the page as we r going to pass it to the stack */
1283 pci_unmap_page(bp->pdev, pci_unmap_addr(&old_rx_pg, mapping),
1284 SGE_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);
1286 /* Add one frag and update the appropriate fields in the skb */
1287 skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);
1289 skb->data_len += frag_len;
1290 skb->truesize += frag_len;
1291 skb->len += frag_len;
1293 frag_size -= frag_len;
1299 static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
1300 u16 queue, int pad, int len, union eth_rx_cqe *cqe,
1303 struct sw_rx_bd *rx_buf = &fp->tpa_pool[queue];
1304 struct sk_buff *skb = rx_buf->skb;
1306 struct sk_buff *new_skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
1308 /* Unmap skb in the pool anyway, as we are going to change
1309 pool entry status to BNX2X_TPA_STOP even if new skb allocation
1311 pci_unmap_single(bp->pdev, pci_unmap_addr(rx_buf, mapping),
1312 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
1314 if (likely(new_skb)) {
1315 /* fix ip xsum and give it to the stack */
1316 /* (no need to map the new skb) */
1319 (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
1320 PARSING_FLAGS_VLAN);
1321 int is_not_hwaccel_vlan_cqe =
1322 (is_vlan_cqe && (!(bp->flags & HW_VLAN_RX_FLAG)));
1326 prefetch(((char *)(skb)) + 128);
1328 #ifdef BNX2X_STOP_ON_ERROR
1329 if (pad + len > bp->rx_buf_size) {
1330 BNX2X_ERR("skb_put is about to fail... "
1331 "pad %d len %d rx_buf_size %d\n",
1332 pad, len, bp->rx_buf_size);
1338 skb_reserve(skb, pad);
1341 skb->protocol = eth_type_trans(skb, bp->dev);
1342 skb->ip_summed = CHECKSUM_UNNECESSARY;
1347 iph = (struct iphdr *)skb->data;
1349 /* If there is no Rx VLAN offloading -
1350 take VLAN tag into an account */
1351 if (unlikely(is_not_hwaccel_vlan_cqe))
1352 iph = (struct iphdr *)((u8 *)iph + VLAN_HLEN);
1355 iph->check = ip_fast_csum((u8 *)iph, iph->ihl);
1358 if (!bnx2x_fill_frag_skb(bp, fp, skb,
1359 &cqe->fast_path_cqe, cqe_idx)) {
1361 if ((bp->vlgrp != NULL) && is_vlan_cqe &&
1362 (!is_not_hwaccel_vlan_cqe))
1363 vlan_hwaccel_receive_skb(skb, bp->vlgrp,
1364 le16_to_cpu(cqe->fast_path_cqe.
1368 netif_receive_skb(skb);
1370 DP(NETIF_MSG_RX_STATUS, "Failed to allocate new pages"
1371 " - dropping packet!\n");
1376 /* put new skb in bin */
1377 fp->tpa_pool[queue].skb = new_skb;
1380 /* else drop the packet and keep the buffer in the bin */
1381 DP(NETIF_MSG_RX_STATUS,
1382 "Failed to allocate new skb - dropping packet!\n");
1383 fp->eth_q_stats.rx_skb_alloc_failed++;
1386 fp->tpa_state[queue] = BNX2X_TPA_STOP;
1389 static inline void bnx2x_update_rx_prod(struct bnx2x *bp,
1390 struct bnx2x_fastpath *fp,
1391 u16 bd_prod, u16 rx_comp_prod,
1394 struct ustorm_eth_rx_producers rx_prods = {0};
1397 /* Update producers */
1398 rx_prods.bd_prod = bd_prod;
1399 rx_prods.cqe_prod = rx_comp_prod;
1400 rx_prods.sge_prod = rx_sge_prod;
1403 * Make sure that the BD and SGE data is updated before updating the
1404 * producers since FW might read the BD/SGE right after the producer
1406 * This is only applicable for weak-ordered memory model archs such
1407 * as IA-64. The following barrier is also mandatory since FW will
1408 * assumes BDs must have buffers.
1412 for (i = 0; i < sizeof(struct ustorm_eth_rx_producers)/4; i++)
1413 REG_WR(bp, BAR_USTRORM_INTMEM +
1414 USTORM_RX_PRODS_OFFSET(BP_PORT(bp), FP_CL_ID(fp)) + i*4,
1415 ((u32 *)&rx_prods)[i]);
1417 mmiowb(); /* keep prod updates ordered */
1419 DP(NETIF_MSG_RX_STATUS,
1420 "queue[%d]: wrote bd_prod %u cqe_prod %u sge_prod %u\n",
1421 fp->index, bd_prod, rx_comp_prod, rx_sge_prod);
1424 static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
1426 struct bnx2x *bp = fp->bp;
1427 u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
1428 u16 hw_comp_cons, sw_comp_cons, sw_comp_prod;
1431 #ifdef BNX2X_STOP_ON_ERROR
1432 if (unlikely(bp->panic))
1436 /* CQ "next element" is of the size of the regular element,
1437 that's why it's ok here */
1438 hw_comp_cons = le16_to_cpu(*fp->rx_cons_sb);
1439 if ((hw_comp_cons & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
1442 bd_cons = fp->rx_bd_cons;
1443 bd_prod = fp->rx_bd_prod;
1444 bd_prod_fw = bd_prod;
1445 sw_comp_cons = fp->rx_comp_cons;
1446 sw_comp_prod = fp->rx_comp_prod;
1448 /* Memory barrier necessary as speculative reads of the rx
1449 * buffer can be ahead of the index in the status block
1453 DP(NETIF_MSG_RX_STATUS,
1454 "queue[%d]: hw_comp_cons %u sw_comp_cons %u\n",
1455 FP_IDX(fp), hw_comp_cons, sw_comp_cons);
1457 while (sw_comp_cons != hw_comp_cons) {
1458 struct sw_rx_bd *rx_buf = NULL;
1459 struct sk_buff *skb;
1460 union eth_rx_cqe *cqe;
1464 comp_ring_cons = RCQ_BD(sw_comp_cons);
1465 bd_prod = RX_BD(bd_prod);
1466 bd_cons = RX_BD(bd_cons);
1468 cqe = &fp->rx_comp_ring[comp_ring_cons];
1469 cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
1471 DP(NETIF_MSG_RX_STATUS, "CQE type %x err %x status %x"
1472 " queue %x vlan %x len %u\n", CQE_TYPE(cqe_fp_flags),
1473 cqe_fp_flags, cqe->fast_path_cqe.status_flags,
1474 le32_to_cpu(cqe->fast_path_cqe.rss_hash_result),
1475 le16_to_cpu(cqe->fast_path_cqe.vlan_tag),
1476 le16_to_cpu(cqe->fast_path_cqe.pkt_len));
1478 /* is this a slowpath msg? */
1479 if (unlikely(CQE_TYPE(cqe_fp_flags))) {
1480 bnx2x_sp_event(fp, cqe);
1483 /* this is an rx packet */
1485 rx_buf = &fp->rx_buf_ring[bd_cons];
1487 len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
1488 pad = cqe->fast_path_cqe.placement_offset;
1490 /* If CQE is marked both TPA_START and TPA_END
1491 it is a non-TPA CQE */
1492 if ((!fp->disable_tpa) &&
1493 (TPA_TYPE(cqe_fp_flags) !=
1494 (TPA_TYPE_START | TPA_TYPE_END))) {
1495 u16 queue = cqe->fast_path_cqe.queue_index;
1497 if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_START) {
1498 DP(NETIF_MSG_RX_STATUS,
1499 "calling tpa_start on queue %d\n",
1502 bnx2x_tpa_start(fp, queue, skb,
1507 if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_END) {
1508 DP(NETIF_MSG_RX_STATUS,
1509 "calling tpa_stop on queue %d\n",
1512 if (!BNX2X_RX_SUM_FIX(cqe))
1513 BNX2X_ERR("STOP on none TCP "
1516 /* This is a size of the linear data
1518 len = le16_to_cpu(cqe->fast_path_cqe.
1520 bnx2x_tpa_stop(bp, fp, queue, pad,
1521 len, cqe, comp_ring_cons);
1522 #ifdef BNX2X_STOP_ON_ERROR
1527 bnx2x_update_sge_prod(fp,
1528 &cqe->fast_path_cqe);
1533 pci_dma_sync_single_for_device(bp->pdev,
1534 pci_unmap_addr(rx_buf, mapping),
1535 pad + RX_COPY_THRESH,
1536 PCI_DMA_FROMDEVICE);
1538 prefetch(((char *)(skb)) + 128);
1540 /* is this an error packet? */
1541 if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
1542 DP(NETIF_MSG_RX_ERR,
1543 "ERROR flags %x rx packet %u\n",
1544 cqe_fp_flags, sw_comp_cons);
1545 fp->eth_q_stats.rx_err_discard_pkt++;
1549 /* Since we don't have a jumbo ring
1550 * copy small packets if mtu > 1500
1552 if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
1553 (len <= RX_COPY_THRESH)) {
1554 struct sk_buff *new_skb;
1556 new_skb = netdev_alloc_skb(bp->dev,
1558 if (new_skb == NULL) {
1559 DP(NETIF_MSG_RX_ERR,
1560 "ERROR packet dropped "
1561 "because of alloc failure\n");
1562 fp->eth_q_stats.rx_skb_alloc_failed++;
1567 skb_copy_from_linear_data_offset(skb, pad,
1568 new_skb->data + pad, len);
1569 skb_reserve(new_skb, pad);
1570 skb_put(new_skb, len);
1572 bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
1576 } else if (bnx2x_alloc_rx_skb(bp, fp, bd_prod) == 0) {
1577 pci_unmap_single(bp->pdev,
1578 pci_unmap_addr(rx_buf, mapping),
1580 PCI_DMA_FROMDEVICE);
1581 skb_reserve(skb, pad);
1585 DP(NETIF_MSG_RX_ERR,
1586 "ERROR packet dropped because "
1587 "of alloc failure\n");
1588 fp->eth_q_stats.rx_skb_alloc_failed++;
1590 bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
1594 skb->protocol = eth_type_trans(skb, bp->dev);
1596 skb->ip_summed = CHECKSUM_NONE;
1598 if (likely(BNX2X_RX_CSUM_OK(cqe)))
1599 skb->ip_summed = CHECKSUM_UNNECESSARY;
1601 fp->eth_q_stats.hw_csum_err++;
1605 skb_record_rx_queue(skb, fp->index);
1607 if ((bp->vlgrp != NULL) && (bp->flags & HW_VLAN_RX_FLAG) &&
1608 (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
1609 PARSING_FLAGS_VLAN))
1610 vlan_hwaccel_receive_skb(skb, bp->vlgrp,
1611 le16_to_cpu(cqe->fast_path_cqe.vlan_tag));
1614 netif_receive_skb(skb);
1620 bd_cons = NEXT_RX_IDX(bd_cons);
1621 bd_prod = NEXT_RX_IDX(bd_prod);
1622 bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
1625 sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
1626 sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);
1628 if (rx_pkt == budget)
1632 fp->rx_bd_cons = bd_cons;
1633 fp->rx_bd_prod = bd_prod_fw;
1634 fp->rx_comp_cons = sw_comp_cons;
1635 fp->rx_comp_prod = sw_comp_prod;
1637 /* Update producers */
1638 bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
1641 fp->rx_pkt += rx_pkt;
1647 static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
1649 struct bnx2x_fastpath *fp = fp_cookie;
1650 struct bnx2x *bp = fp->bp;
1651 int index = FP_IDX(fp);
1653 /* Return here if interrupt is disabled */
1654 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
1655 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
1659 DP(BNX2X_MSG_FP, "got an MSI-X interrupt on IDX:SB [%d:%d]\n",
1660 index, FP_SB_ID(fp));
1661 bnx2x_ack_sb(bp, FP_SB_ID(fp), USTORM_ID, 0, IGU_INT_DISABLE, 0);
1663 #ifdef BNX2X_STOP_ON_ERROR
1664 if (unlikely(bp->panic))
1668 prefetch(fp->rx_cons_sb);
1669 prefetch(fp->tx_cons_sb);
1670 prefetch(&fp->status_blk->c_status_block.status_block_index);
1671 prefetch(&fp->status_blk->u_status_block.status_block_index);
1673 napi_schedule(&bnx2x_fp(bp, index, napi));
1678 static irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
1680 struct bnx2x *bp = netdev_priv(dev_instance);
1681 u16 status = bnx2x_ack_int(bp);
1684 /* Return here if interrupt is shared and it's not for us */
1685 if (unlikely(status == 0)) {
1686 DP(NETIF_MSG_INTR, "not our interrupt!\n");
1689 DP(NETIF_MSG_INTR, "got an interrupt status %u\n", status);
1691 /* Return here if interrupt is disabled */
1692 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
1693 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
1697 #ifdef BNX2X_STOP_ON_ERROR
1698 if (unlikely(bp->panic))
1702 mask = 0x2 << bp->fp[0].sb_id;
1703 if (status & mask) {
1704 struct bnx2x_fastpath *fp = &bp->fp[0];
1706 prefetch(fp->rx_cons_sb);
1707 prefetch(fp->tx_cons_sb);
1708 prefetch(&fp->status_blk->c_status_block.status_block_index);
1709 prefetch(&fp->status_blk->u_status_block.status_block_index);
1711 napi_schedule(&bnx2x_fp(bp, 0, napi));
1717 if (unlikely(status & 0x1)) {
1718 queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
1726 DP(NETIF_MSG_INTR, "got an unknown interrupt! (status %u)\n",
1732 /* end of fast path */
1734 static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event);
1739 * General service functions
1742 static int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource)
1745 u32 resource_bit = (1 << resource);
1746 int func = BP_FUNC(bp);
1747 u32 hw_lock_control_reg;
1750 /* Validating that the resource is within range */
1751 if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1753 "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1754 resource, HW_LOCK_MAX_RESOURCE_VALUE);
1759 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1761 hw_lock_control_reg =
1762 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1765 /* Validating that the resource is not already taken */
1766 lock_status = REG_RD(bp, hw_lock_control_reg);
1767 if (lock_status & resource_bit) {
1768 DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n",
1769 lock_status, resource_bit);
1773 /* Try for 5 second every 5ms */
1774 for (cnt = 0; cnt < 1000; cnt++) {
1775 /* Try to acquire the lock */
1776 REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
1777 lock_status = REG_RD(bp, hw_lock_control_reg);
1778 if (lock_status & resource_bit)
1783 DP(NETIF_MSG_HW, "Timeout\n");
1787 static int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource)
1790 u32 resource_bit = (1 << resource);
1791 int func = BP_FUNC(bp);
1792 u32 hw_lock_control_reg;
1794 /* Validating that the resource is within range */
1795 if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1797 "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1798 resource, HW_LOCK_MAX_RESOURCE_VALUE);
1803 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1805 hw_lock_control_reg =
1806 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1809 /* Validating that the resource is currently taken */
1810 lock_status = REG_RD(bp, hw_lock_control_reg);
1811 if (!(lock_status & resource_bit)) {
1812 DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n",
1813 lock_status, resource_bit);
1817 REG_WR(bp, hw_lock_control_reg, resource_bit);
1821 /* HW Lock for shared dual port PHYs */
1822 static void bnx2x_acquire_phy_lock(struct bnx2x *bp)
1824 mutex_lock(&bp->port.phy_mutex);
1826 if (bp->port.need_hw_lock)
1827 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
1830 static void bnx2x_release_phy_lock(struct bnx2x *bp)
1832 if (bp->port.need_hw_lock)
1833 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
1835 mutex_unlock(&bp->port.phy_mutex);
1838 int bnx2x_get_gpio(struct bnx2x *bp, int gpio_num, u8 port)
1840 /* The GPIO should be swapped if swap register is set and active */
1841 int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
1842 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
1843 int gpio_shift = gpio_num +
1844 (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
1845 u32 gpio_mask = (1 << gpio_shift);
1849 if (gpio_num > MISC_REGISTERS_GPIO_3) {
1850 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
1854 /* read GPIO value */
1855 gpio_reg = REG_RD(bp, MISC_REG_GPIO);
1857 /* get the requested pin value */
1858 if ((gpio_reg & gpio_mask) == gpio_mask)
1863 DP(NETIF_MSG_LINK, "pin %d value 0x%x\n", gpio_num, value);
1868 int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
1870 /* The GPIO should be swapped if swap register is set and active */
1871 int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
1872 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
1873 int gpio_shift = gpio_num +
1874 (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
1875 u32 gpio_mask = (1 << gpio_shift);
1878 if (gpio_num > MISC_REGISTERS_GPIO_3) {
1879 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
1883 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
1884 /* read GPIO and mask except the float bits */
1885 gpio_reg = (REG_RD(bp, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT);
1888 case MISC_REGISTERS_GPIO_OUTPUT_LOW:
1889 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output low\n",
1890 gpio_num, gpio_shift);
1891 /* clear FLOAT and set CLR */
1892 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1893 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS);
1896 case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
1897 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output high\n",
1898 gpio_num, gpio_shift);
1899 /* clear FLOAT and set SET */
1900 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1901 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_SET_POS);
1904 case MISC_REGISTERS_GPIO_INPUT_HI_Z:
1905 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> input\n",
1906 gpio_num, gpio_shift);
1908 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1915 REG_WR(bp, MISC_REG_GPIO, gpio_reg);
1916 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
1921 int bnx2x_set_gpio_int(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
1923 /* The GPIO should be swapped if swap register is set and active */
1924 int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
1925 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
1926 int gpio_shift = gpio_num +
1927 (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
1928 u32 gpio_mask = (1 << gpio_shift);
1931 if (gpio_num > MISC_REGISTERS_GPIO_3) {
1932 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
1936 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
1938 gpio_reg = REG_RD(bp, MISC_REG_GPIO_INT);
1941 case MISC_REGISTERS_GPIO_INT_OUTPUT_CLR:
1942 DP(NETIF_MSG_LINK, "Clear GPIO INT %d (shift %d) -> "
1943 "output low\n", gpio_num, gpio_shift);
1944 /* clear SET and set CLR */
1945 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
1946 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
1949 case MISC_REGISTERS_GPIO_INT_OUTPUT_SET:
1950 DP(NETIF_MSG_LINK, "Set GPIO INT %d (shift %d) -> "
1951 "output high\n", gpio_num, gpio_shift);
1952 /* clear CLR and set SET */
1953 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
1954 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
1961 REG_WR(bp, MISC_REG_GPIO_INT, gpio_reg);
1962 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
1967 static int bnx2x_set_spio(struct bnx2x *bp, int spio_num, u32 mode)
1969 u32 spio_mask = (1 << spio_num);
1972 if ((spio_num < MISC_REGISTERS_SPIO_4) ||
1973 (spio_num > MISC_REGISTERS_SPIO_7)) {
1974 BNX2X_ERR("Invalid SPIO %d\n", spio_num);
1978 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
1979 /* read SPIO and mask except the float bits */
1980 spio_reg = (REG_RD(bp, MISC_REG_SPIO) & MISC_REGISTERS_SPIO_FLOAT);
1983 case MISC_REGISTERS_SPIO_OUTPUT_LOW:
1984 DP(NETIF_MSG_LINK, "Set SPIO %d -> output low\n", spio_num);
1985 /* clear FLOAT and set CLR */
1986 spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
1987 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_CLR_POS);
1990 case MISC_REGISTERS_SPIO_OUTPUT_HIGH:
1991 DP(NETIF_MSG_LINK, "Set SPIO %d -> output high\n", spio_num);
1992 /* clear FLOAT and set SET */
1993 spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
1994 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_SET_POS);
1997 case MISC_REGISTERS_SPIO_INPUT_HI_Z:
1998 DP(NETIF_MSG_LINK, "Set SPIO %d -> input\n", spio_num);
2000 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
2007 REG_WR(bp, MISC_REG_SPIO, spio_reg);
2008 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
2013 static void bnx2x_calc_fc_adv(struct bnx2x *bp)
2015 switch (bp->link_vars.ieee_fc &
2016 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK) {
2017 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE:
2018 bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
2021 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH:
2022 bp->port.advertising |= (ADVERTISED_Asym_Pause |
2025 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC:
2026 bp->port.advertising |= ADVERTISED_Asym_Pause;
2029 bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
2035 static void bnx2x_link_report(struct bnx2x *bp)
2037 if (bp->link_vars.link_up) {
2038 if (bp->state == BNX2X_STATE_OPEN)
2039 netif_carrier_on(bp->dev);
2040 printk(KERN_INFO PFX "%s NIC Link is Up, ", bp->dev->name);
2042 printk("%d Mbps ", bp->link_vars.line_speed);
2044 if (bp->link_vars.duplex == DUPLEX_FULL)
2045 printk("full duplex");
2047 printk("half duplex");
2049 if (bp->link_vars.flow_ctrl != BNX2X_FLOW_CTRL_NONE) {
2050 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) {
2051 printk(", receive ");
2052 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
2053 printk("& transmit ");
2055 printk(", transmit ");
2057 printk("flow control ON");
2061 } else { /* link_down */
2062 netif_carrier_off(bp->dev);
2063 printk(KERN_ERR PFX "%s NIC Link is Down\n", bp->dev->name);
2067 static u8 bnx2x_initial_phy_init(struct bnx2x *bp)
2069 if (!BP_NOMCP(bp)) {
2072 /* Initialize link parameters structure variables */
2073 /* It is recommended to turn off RX FC for jumbo frames
2074 for better performance */
2076 bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;
2077 else if (bp->dev->mtu > 5000)
2078 bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_TX;
2080 bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;
2082 bnx2x_acquire_phy_lock(bp);
2083 rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars);
2084 bnx2x_release_phy_lock(bp);
2086 bnx2x_calc_fc_adv(bp);
2088 if (bp->link_vars.link_up)
2089 bnx2x_link_report(bp);
2094 BNX2X_ERR("Bootcode is missing -not initializing link\n");
2098 static void bnx2x_link_set(struct bnx2x *bp)
2100 if (!BP_NOMCP(bp)) {
2101 bnx2x_acquire_phy_lock(bp);
2102 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
2103 bnx2x_release_phy_lock(bp);
2105 bnx2x_calc_fc_adv(bp);
2107 BNX2X_ERR("Bootcode is missing -not setting link\n");
2110 static void bnx2x__link_reset(struct bnx2x *bp)
2112 if (!BP_NOMCP(bp)) {
2113 bnx2x_acquire_phy_lock(bp);
2114 bnx2x_link_reset(&bp->link_params, &bp->link_vars, 1);
2115 bnx2x_release_phy_lock(bp);
2117 BNX2X_ERR("Bootcode is missing -not resetting link\n");
2120 static u8 bnx2x_link_test(struct bnx2x *bp)
2124 bnx2x_acquire_phy_lock(bp);
2125 rc = bnx2x_test_link(&bp->link_params, &bp->link_vars);
2126 bnx2x_release_phy_lock(bp);
2131 static void bnx2x_init_port_minmax(struct bnx2x *bp)
2133 u32 r_param = bp->link_vars.line_speed / 8;
2134 u32 fair_periodic_timeout_usec;
2137 memset(&(bp->cmng.rs_vars), 0,
2138 sizeof(struct rate_shaping_vars_per_port));
2139 memset(&(bp->cmng.fair_vars), 0, sizeof(struct fairness_vars_per_port));
2141 /* 100 usec in SDM ticks = 25 since each tick is 4 usec */
2142 bp->cmng.rs_vars.rs_periodic_timeout = RS_PERIODIC_TIMEOUT_USEC / 4;
2144 /* this is the threshold below which no timer arming will occur
2145 1.25 coefficient is for the threshold to be a little bigger
2146 than the real time, to compensate for timer in-accuracy */
2147 bp->cmng.rs_vars.rs_threshold =
2148 (RS_PERIODIC_TIMEOUT_USEC * r_param * 5) / 4;
2150 /* resolution of fairness timer */
2151 fair_periodic_timeout_usec = QM_ARB_BYTES / r_param;
2152 /* for 10G it is 1000usec. for 1G it is 10000usec. */
2153 t_fair = T_FAIR_COEF / bp->link_vars.line_speed;
2155 /* this is the threshold below which we won't arm the timer anymore */
2156 bp->cmng.fair_vars.fair_threshold = QM_ARB_BYTES;
2158 /* we multiply by 1e3/8 to get bytes/msec.
2159 We don't want the credits to pass a credit
2160 of the t_fair*FAIR_MEM (algorithm resolution) */
2161 bp->cmng.fair_vars.upper_bound = r_param * t_fair * FAIR_MEM;
2162 /* since each tick is 4 usec */
2163 bp->cmng.fair_vars.fairness_timeout = fair_periodic_timeout_usec / 4;
2166 static void bnx2x_init_vn_minmax(struct bnx2x *bp, int func)
2168 struct rate_shaping_vars_per_vn m_rs_vn;
2169 struct fairness_vars_per_vn m_fair_vn;
2170 u32 vn_cfg = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
2171 u16 vn_min_rate, vn_max_rate;
2174 /* If function is hidden - set min and max to zeroes */
2175 if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) {
2180 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
2181 FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
2182 /* If fairness is enabled (not all min rates are zeroes) and
2183 if current min rate is zero - set it to 1.
2184 This is a requirement of the algorithm. */
2185 if (bp->vn_weight_sum && (vn_min_rate == 0))
2186 vn_min_rate = DEF_MIN_RATE;
2187 vn_max_rate = ((vn_cfg & FUNC_MF_CFG_MAX_BW_MASK) >>
2188 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
2192 "func %d: vn_min_rate=%d vn_max_rate=%d vn_weight_sum=%d\n",
2193 func, vn_min_rate, vn_max_rate, bp->vn_weight_sum);
2195 memset(&m_rs_vn, 0, sizeof(struct rate_shaping_vars_per_vn));
2196 memset(&m_fair_vn, 0, sizeof(struct fairness_vars_per_vn));
2198 /* global vn counter - maximal Mbps for this vn */
2199 m_rs_vn.vn_counter.rate = vn_max_rate;
2201 /* quota - number of bytes transmitted in this period */
2202 m_rs_vn.vn_counter.quota =
2203 (vn_max_rate * RS_PERIODIC_TIMEOUT_USEC) / 8;
2205 if (bp->vn_weight_sum) {
2206 /* credit for each period of the fairness algorithm:
2207 number of bytes in T_FAIR (the vn share the port rate).
2208 vn_weight_sum should not be larger than 10000, thus
2209 T_FAIR_COEF / (8 * vn_weight_sum) will always be greater
2211 m_fair_vn.vn_credit_delta =
2212 max((u32)(vn_min_rate * (T_FAIR_COEF /
2213 (8 * bp->vn_weight_sum))),
2214 (u32)(bp->cmng.fair_vars.fair_threshold * 2));
2215 DP(NETIF_MSG_IFUP, "m_fair_vn.vn_credit_delta=%d\n",
2216 m_fair_vn.vn_credit_delta);
2219 /* Store it to internal memory */
2220 for (i = 0; i < sizeof(struct rate_shaping_vars_per_vn)/4; i++)
2221 REG_WR(bp, BAR_XSTRORM_INTMEM +
2222 XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func) + i * 4,
2223 ((u32 *)(&m_rs_vn))[i]);
2225 for (i = 0; i < sizeof(struct fairness_vars_per_vn)/4; i++)
2226 REG_WR(bp, BAR_XSTRORM_INTMEM +
2227 XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func) + i * 4,
2228 ((u32 *)(&m_fair_vn))[i]);
2232 /* This function is called upon link interrupt */
2233 static void bnx2x_link_attn(struct bnx2x *bp)
2235 /* Make sure that we are synced with the current statistics */
2236 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2238 bnx2x_link_update(&bp->link_params, &bp->link_vars);
2240 if (bp->link_vars.link_up) {
2242 /* dropless flow control */
2243 if (CHIP_IS_E1H(bp)) {
2244 int port = BP_PORT(bp);
2245 u32 pause_enabled = 0;
2247 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
2250 REG_WR(bp, BAR_USTRORM_INTMEM +
2251 USTORM_PAUSE_ENABLED_OFFSET(port),
2255 if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
2256 struct host_port_stats *pstats;
2258 pstats = bnx2x_sp(bp, port_stats);
2259 /* reset old bmac stats */
2260 memset(&(pstats->mac_stx[0]), 0,
2261 sizeof(struct mac_stx));
2263 if ((bp->state == BNX2X_STATE_OPEN) ||
2264 (bp->state == BNX2X_STATE_DISABLED))
2265 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2268 /* indicate link status */
2269 bnx2x_link_report(bp);
2272 int port = BP_PORT(bp);
2276 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
2277 if (vn == BP_E1HVN(bp))
2280 func = ((vn << 1) | port);
2282 /* Set the attention towards other drivers
2284 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
2285 (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
2288 if (bp->link_vars.link_up) {
2291 /* Init rate shaping and fairness contexts */
2292 bnx2x_init_port_minmax(bp);
2294 for (vn = VN_0; vn < E1HVN_MAX; vn++)
2295 bnx2x_init_vn_minmax(bp, 2*vn + port);
2297 /* Store it to internal memory */
2299 i < sizeof(struct cmng_struct_per_port) / 4; i++)
2300 REG_WR(bp, BAR_XSTRORM_INTMEM +
2301 XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i*4,
2302 ((u32 *)(&bp->cmng))[i]);
2307 static void bnx2x__link_status_update(struct bnx2x *bp)
2309 if (bp->state != BNX2X_STATE_OPEN)
2312 bnx2x_link_status_update(&bp->link_params, &bp->link_vars);
2314 if (bp->link_vars.link_up)
2315 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2317 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2319 /* indicate link status */
2320 bnx2x_link_report(bp);
2323 static void bnx2x_pmf_update(struct bnx2x *bp)
2325 int port = BP_PORT(bp);
2329 DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
2331 /* enable nig attention */
2332 val = (0xff0f | (1 << (BP_E1HVN(bp) + 4)));
2333 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
2334 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
2336 bnx2x_stats_handle(bp, STATS_EVENT_PMF);
2344 * General service functions
2347 /* the slow path queue is odd since completions arrive on the fastpath ring */
2348 static int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
2349 u32 data_hi, u32 data_lo, int common)
2351 int func = BP_FUNC(bp);
2353 DP(BNX2X_MSG_SP/*NETIF_MSG_TIMER*/,
2354 "SPQE (%x:%x) command %d hw_cid %x data (%x:%x) left %x\n",
2355 (u32)U64_HI(bp->spq_mapping), (u32)(U64_LO(bp->spq_mapping) +
2356 (void *)bp->spq_prod_bd - (void *)bp->spq), command,
2357 HW_CID(bp, cid), data_hi, data_lo, bp->spq_left);
2359 #ifdef BNX2X_STOP_ON_ERROR
2360 if (unlikely(bp->panic))
2364 spin_lock_bh(&bp->spq_lock);
2366 if (!bp->spq_left) {
2367 BNX2X_ERR("BUG! SPQ ring full!\n");
2368 spin_unlock_bh(&bp->spq_lock);
2373 /* CID needs port number to be encoded int it */
2374 bp->spq_prod_bd->hdr.conn_and_cmd_data =
2375 cpu_to_le32(((command << SPE_HDR_CMD_ID_SHIFT) |
2377 bp->spq_prod_bd->hdr.type = cpu_to_le16(ETH_CONNECTION_TYPE);
2379 bp->spq_prod_bd->hdr.type |=
2380 cpu_to_le16((1 << SPE_HDR_COMMON_RAMROD_SHIFT));
2382 bp->spq_prod_bd->data.mac_config_addr.hi = cpu_to_le32(data_hi);
2383 bp->spq_prod_bd->data.mac_config_addr.lo = cpu_to_le32(data_lo);
2387 if (bp->spq_prod_bd == bp->spq_last_bd) {
2388 bp->spq_prod_bd = bp->spq;
2389 bp->spq_prod_idx = 0;
2390 DP(NETIF_MSG_TIMER, "end of spq\n");
2397 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func),
2400 spin_unlock_bh(&bp->spq_lock);
2404 /* acquire split MCP access lock register */
2405 static int bnx2x_acquire_alr(struct bnx2x *bp)
2412 for (j = 0; j < i*10; j++) {
2414 REG_WR(bp, GRCBASE_MCP + 0x9c, val);
2415 val = REG_RD(bp, GRCBASE_MCP + 0x9c);
2416 if (val & (1L << 31))
2421 if (!(val & (1L << 31))) {
2422 BNX2X_ERR("Cannot acquire MCP access lock register\n");
2429 /* release split MCP access lock register */
2430 static void bnx2x_release_alr(struct bnx2x *bp)
2434 REG_WR(bp, GRCBASE_MCP + 0x9c, val);
2437 static inline u16 bnx2x_update_dsb_idx(struct bnx2x *bp)
2439 struct host_def_status_block *def_sb = bp->def_status_blk;
2442 barrier(); /* status block is written to by the chip */
2443 if (bp->def_att_idx != def_sb->atten_status_block.attn_bits_index) {
2444 bp->def_att_idx = def_sb->atten_status_block.attn_bits_index;
2447 if (bp->def_c_idx != def_sb->c_def_status_block.status_block_index) {
2448 bp->def_c_idx = def_sb->c_def_status_block.status_block_index;
2451 if (bp->def_u_idx != def_sb->u_def_status_block.status_block_index) {
2452 bp->def_u_idx = def_sb->u_def_status_block.status_block_index;
2455 if (bp->def_x_idx != def_sb->x_def_status_block.status_block_index) {
2456 bp->def_x_idx = def_sb->x_def_status_block.status_block_index;
2459 if (bp->def_t_idx != def_sb->t_def_status_block.status_block_index) {
2460 bp->def_t_idx = def_sb->t_def_status_block.status_block_index;
2467 * slow path service functions
2470 static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
2472 int port = BP_PORT(bp);
2473 u32 hc_addr = (HC_REG_COMMAND_REG + port*32 +
2474 COMMAND_REG_ATTN_BITS_SET);
2475 u32 aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
2476 MISC_REG_AEU_MASK_ATTN_FUNC_0;
2477 u32 nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 :
2478 NIG_REG_MASK_INTERRUPT_PORT0;
2482 if (bp->attn_state & asserted)
2483 BNX2X_ERR("IGU ERROR\n");
2485 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2486 aeu_mask = REG_RD(bp, aeu_addr);
2488 DP(NETIF_MSG_HW, "aeu_mask %x newly asserted %x\n",
2489 aeu_mask, asserted);
2490 aeu_mask &= ~(asserted & 0xff);
2491 DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
2493 REG_WR(bp, aeu_addr, aeu_mask);
2494 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2496 DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
2497 bp->attn_state |= asserted;
2498 DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
2500 if (asserted & ATTN_HARD_WIRED_MASK) {
2501 if (asserted & ATTN_NIG_FOR_FUNC) {
2503 bnx2x_acquire_phy_lock(bp);
2505 /* save nig interrupt mask */
2506 nig_mask = REG_RD(bp, nig_int_mask_addr);
2507 REG_WR(bp, nig_int_mask_addr, 0);
2509 bnx2x_link_attn(bp);
2511 /* handle unicore attn? */
2513 if (asserted & ATTN_SW_TIMER_4_FUNC)
2514 DP(NETIF_MSG_HW, "ATTN_SW_TIMER_4_FUNC!\n");
2516 if (asserted & GPIO_2_FUNC)
2517 DP(NETIF_MSG_HW, "GPIO_2_FUNC!\n");
2519 if (asserted & GPIO_3_FUNC)
2520 DP(NETIF_MSG_HW, "GPIO_3_FUNC!\n");
2522 if (asserted & GPIO_4_FUNC)
2523 DP(NETIF_MSG_HW, "GPIO_4_FUNC!\n");
2526 if (asserted & ATTN_GENERAL_ATTN_1) {
2527 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_1!\n");
2528 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_1, 0x0);
2530 if (asserted & ATTN_GENERAL_ATTN_2) {
2531 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_2!\n");
2532 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_2, 0x0);
2534 if (asserted & ATTN_GENERAL_ATTN_3) {
2535 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_3!\n");
2536 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_3, 0x0);
2539 if (asserted & ATTN_GENERAL_ATTN_4) {
2540 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_4!\n");
2541 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_4, 0x0);
2543 if (asserted & ATTN_GENERAL_ATTN_5) {
2544 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_5!\n");
2545 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_5, 0x0);
2547 if (asserted & ATTN_GENERAL_ATTN_6) {
2548 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_6!\n");
2549 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_6, 0x0);
2553 } /* if hardwired */
2555 DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
2557 REG_WR(bp, hc_addr, asserted);
2559 /* now set back the mask */
2560 if (asserted & ATTN_NIG_FOR_FUNC) {
2561 REG_WR(bp, nig_int_mask_addr, nig_mask);
2562 bnx2x_release_phy_lock(bp);
2566 static inline void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
2568 int port = BP_PORT(bp);
2572 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
2573 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
2575 if (attn & AEU_INPUTS_ATTN_BITS_SPIO5) {
2577 val = REG_RD(bp, reg_offset);
2578 val &= ~AEU_INPUTS_ATTN_BITS_SPIO5;
2579 REG_WR(bp, reg_offset, val);
2581 BNX2X_ERR("SPIO5 hw attention\n");
2583 switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
2584 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
2585 /* Fan failure attention */
2587 /* The PHY reset is controlled by GPIO 1 */
2588 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
2589 MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
2590 /* Low power mode is controlled by GPIO 2 */
2591 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
2592 MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
2593 /* mark the failure */
2594 bp->link_params.ext_phy_config &=
2595 ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
2596 bp->link_params.ext_phy_config |=
2597 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE;
2599 dev_info.port_hw_config[port].
2600 external_phy_config,
2601 bp->link_params.ext_phy_config);
2602 /* log the failure */
2603 printk(KERN_ERR PFX "Fan Failure on Network"
2604 " Controller %s has caused the driver to"
2605 " shutdown the card to prevent permanent"
2606 " damage. Please contact Dell Support for"
2607 " assistance\n", bp->dev->name);
2615 if (attn & (AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 |
2616 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1)) {
2617 bnx2x_acquire_phy_lock(bp);
2618 bnx2x_handle_module_detect_int(&bp->link_params);
2619 bnx2x_release_phy_lock(bp);
2622 if (attn & HW_INTERRUT_ASSERT_SET_0) {
2624 val = REG_RD(bp, reg_offset);
2625 val &= ~(attn & HW_INTERRUT_ASSERT_SET_0);
2626 REG_WR(bp, reg_offset, val);
2628 BNX2X_ERR("FATAL HW block attention set0 0x%x\n",
2629 (attn & HW_INTERRUT_ASSERT_SET_0));
2634 static inline void bnx2x_attn_int_deasserted1(struct bnx2x *bp, u32 attn)
2638 if (attn & BNX2X_DOORQ_ASSERT) {
2640 val = REG_RD(bp, DORQ_REG_DORQ_INT_STS_CLR);
2641 BNX2X_ERR("DB hw attention 0x%x\n", val);
2642 /* DORQ discard attention */
2644 BNX2X_ERR("FATAL error from DORQ\n");
2647 if (attn & HW_INTERRUT_ASSERT_SET_1) {
2649 int port = BP_PORT(bp);
2652 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 :
2653 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1);
2655 val = REG_RD(bp, reg_offset);
2656 val &= ~(attn & HW_INTERRUT_ASSERT_SET_1);
2657 REG_WR(bp, reg_offset, val);
2659 BNX2X_ERR("FATAL HW block attention set1 0x%x\n",
2660 (attn & HW_INTERRUT_ASSERT_SET_1));
2665 static inline void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn)
2669 if (attn & AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) {
2671 val = REG_RD(bp, CFC_REG_CFC_INT_STS_CLR);
2672 BNX2X_ERR("CFC hw attention 0x%x\n", val);
2673 /* CFC error attention */
2675 BNX2X_ERR("FATAL error from CFC\n");
2678 if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) {
2680 val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_0);
2681 BNX2X_ERR("PXP hw attention 0x%x\n", val);
2682 /* RQ_USDMDP_FIFO_OVERFLOW */
2684 BNX2X_ERR("FATAL error from PXP\n");
2687 if (attn & HW_INTERRUT_ASSERT_SET_2) {
2689 int port = BP_PORT(bp);
2692 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 :
2693 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2);
2695 val = REG_RD(bp, reg_offset);
2696 val &= ~(attn & HW_INTERRUT_ASSERT_SET_2);
2697 REG_WR(bp, reg_offset, val);
2699 BNX2X_ERR("FATAL HW block attention set2 0x%x\n",
2700 (attn & HW_INTERRUT_ASSERT_SET_2));
2705 static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
2709 if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) {
2711 if (attn & BNX2X_PMF_LINK_ASSERT) {
2712 int func = BP_FUNC(bp);
2714 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
2715 bnx2x__link_status_update(bp);
2716 if (SHMEM_RD(bp, func_mb[func].drv_status) &
2718 bnx2x_pmf_update(bp);
2720 } else if (attn & BNX2X_MC_ASSERT_BITS) {
2722 BNX2X_ERR("MC assert!\n");
2723 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_10, 0);
2724 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_9, 0);
2725 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_8, 0);
2726 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_7, 0);
2729 } else if (attn & BNX2X_MCP_ASSERT) {
2731 BNX2X_ERR("MCP assert!\n");
2732 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_11, 0);
2736 BNX2X_ERR("Unknown HW assert! (attn 0x%x)\n", attn);
2739 if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) {
2740 BNX2X_ERR("LATCHED attention 0x%08x (masked)\n", attn);
2741 if (attn & BNX2X_GRC_TIMEOUT) {
2742 val = CHIP_IS_E1H(bp) ?
2743 REG_RD(bp, MISC_REG_GRC_TIMEOUT_ATTN) : 0;
2744 BNX2X_ERR("GRC time-out 0x%08x\n", val);
2746 if (attn & BNX2X_GRC_RSV) {
2747 val = CHIP_IS_E1H(bp) ?
2748 REG_RD(bp, MISC_REG_GRC_RSV_ATTN) : 0;
2749 BNX2X_ERR("GRC reserved 0x%08x\n", val);
2751 REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff);
2755 static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
2757 struct attn_route attn;
2758 struct attn_route group_mask;
2759 int port = BP_PORT(bp);
2765 /* need to take HW lock because MCP or other port might also
2766 try to handle this event */
2767 bnx2x_acquire_alr(bp);
2769 attn.sig[0] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
2770 attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
2771 attn.sig[2] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
2772 attn.sig[3] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
2773 DP(NETIF_MSG_HW, "attn: %08x %08x %08x %08x\n",
2774 attn.sig[0], attn.sig[1], attn.sig[2], attn.sig[3]);
2776 for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
2777 if (deasserted & (1 << index)) {
2778 group_mask = bp->attn_group[index];
2780 DP(NETIF_MSG_HW, "group[%d]: %08x %08x %08x %08x\n",
2781 index, group_mask.sig[0], group_mask.sig[1],
2782 group_mask.sig[2], group_mask.sig[3]);
2784 bnx2x_attn_int_deasserted3(bp,
2785 attn.sig[3] & group_mask.sig[3]);
2786 bnx2x_attn_int_deasserted1(bp,
2787 attn.sig[1] & group_mask.sig[1]);
2788 bnx2x_attn_int_deasserted2(bp,
2789 attn.sig[2] & group_mask.sig[2]);
2790 bnx2x_attn_int_deasserted0(bp,
2791 attn.sig[0] & group_mask.sig[0]);
2793 if ((attn.sig[0] & group_mask.sig[0] &
2794 HW_PRTY_ASSERT_SET_0) ||
2795 (attn.sig[1] & group_mask.sig[1] &
2796 HW_PRTY_ASSERT_SET_1) ||
2797 (attn.sig[2] & group_mask.sig[2] &
2798 HW_PRTY_ASSERT_SET_2))
2799 BNX2X_ERR("FATAL HW block parity attention\n");
2803 bnx2x_release_alr(bp);
2805 reg_addr = (HC_REG_COMMAND_REG + port*32 + COMMAND_REG_ATTN_BITS_CLR);
2808 DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
2810 REG_WR(bp, reg_addr, val);
2812 if (~bp->attn_state & deasserted)
2813 BNX2X_ERR("IGU ERROR\n");
2815 reg_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
2816 MISC_REG_AEU_MASK_ATTN_FUNC_0;
2818 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2819 aeu_mask = REG_RD(bp, reg_addr);
2821 DP(NETIF_MSG_HW, "aeu_mask %x newly deasserted %x\n",
2822 aeu_mask, deasserted);
2823 aeu_mask |= (deasserted & 0xff);
2824 DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
2826 REG_WR(bp, reg_addr, aeu_mask);
2827 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2829 DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
2830 bp->attn_state &= ~deasserted;
2831 DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
2834 static void bnx2x_attn_int(struct bnx2x *bp)
2836 /* read local copy of bits */
2837 u32 attn_bits = le32_to_cpu(bp->def_status_blk->atten_status_block.
2839 u32 attn_ack = le32_to_cpu(bp->def_status_blk->atten_status_block.
2841 u32 attn_state = bp->attn_state;
2843 /* look for changed bits */
2844 u32 asserted = attn_bits & ~attn_ack & ~attn_state;
2845 u32 deasserted = ~attn_bits & attn_ack & attn_state;
2848 "attn_bits %x attn_ack %x asserted %x deasserted %x\n",
2849 attn_bits, attn_ack, asserted, deasserted);
2851 if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state))
2852 BNX2X_ERR("BAD attention state\n");
2854 /* handle bits that were raised */
2856 bnx2x_attn_int_asserted(bp, asserted);
2859 bnx2x_attn_int_deasserted(bp, deasserted);
2862 static void bnx2x_sp_task(struct work_struct *work)
2864 struct bnx2x *bp = container_of(work, struct bnx2x, sp_task.work);
2868 /* Return here if interrupt is disabled */
2869 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
2870 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
2874 status = bnx2x_update_dsb_idx(bp);
2875 /* if (status == 0) */
2876 /* BNX2X_ERR("spurious slowpath interrupt!\n"); */
2878 DP(NETIF_MSG_INTR, "got a slowpath interrupt (updated %x)\n", status);
2884 bnx2x_ack_sb(bp, DEF_SB_ID, ATTENTION_ID, le16_to_cpu(bp->def_att_idx),
2886 bnx2x_ack_sb(bp, DEF_SB_ID, USTORM_ID, le16_to_cpu(bp->def_u_idx),
2888 bnx2x_ack_sb(bp, DEF_SB_ID, CSTORM_ID, le16_to_cpu(bp->def_c_idx),
2890 bnx2x_ack_sb(bp, DEF_SB_ID, XSTORM_ID, le16_to_cpu(bp->def_x_idx),
2892 bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, le16_to_cpu(bp->def_t_idx),
2897 static irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
2899 struct net_device *dev = dev_instance;
2900 struct bnx2x *bp = netdev_priv(dev);
2902 /* Return here if interrupt is disabled */
2903 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
2904 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
2908 bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, 0, IGU_INT_DISABLE, 0);
2910 #ifdef BNX2X_STOP_ON_ERROR
2911 if (unlikely(bp->panic))
2915 queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
2920 /* end of slow path */
2924 /****************************************************************************
2926 ****************************************************************************/
2928 /* sum[hi:lo] += add[hi:lo] */
2929 #define ADD_64(s_hi, a_hi, s_lo, a_lo) \
2932 s_hi += a_hi + ((s_lo < a_lo) ? 1 : 0); \
2935 /* difference = minuend - subtrahend */
2936 #define DIFF_64(d_hi, m_hi, s_hi, d_lo, m_lo, s_lo) \
2938 if (m_lo < s_lo) { \
2940 d_hi = m_hi - s_hi; \
2942 /* we can 'loan' 1 */ \
2944 d_lo = m_lo + (UINT_MAX - s_lo) + 1; \
2946 /* m_hi <= s_hi */ \
2951 /* m_lo >= s_lo */ \
2952 if (m_hi < s_hi) { \
2956 /* m_hi >= s_hi */ \
2957 d_hi = m_hi - s_hi; \
2958 d_lo = m_lo - s_lo; \
2963 #define UPDATE_STAT64(s, t) \
2965 DIFF_64(diff.hi, new->s##_hi, pstats->mac_stx[0].t##_hi, \
2966 diff.lo, new->s##_lo, pstats->mac_stx[0].t##_lo); \
2967 pstats->mac_stx[0].t##_hi = new->s##_hi; \
2968 pstats->mac_stx[0].t##_lo = new->s##_lo; \
2969 ADD_64(pstats->mac_stx[1].t##_hi, diff.hi, \
2970 pstats->mac_stx[1].t##_lo, diff.lo); \
2973 #define UPDATE_STAT64_NIG(s, t) \
2975 DIFF_64(diff.hi, new->s##_hi, old->s##_hi, \
2976 diff.lo, new->s##_lo, old->s##_lo); \
2977 ADD_64(estats->t##_hi, diff.hi, \
2978 estats->t##_lo, diff.lo); \
2981 /* sum[hi:lo] += add */
2982 #define ADD_EXTEND_64(s_hi, s_lo, a) \
2985 s_hi += (s_lo < a) ? 1 : 0; \
2988 #define UPDATE_EXTEND_STAT(s) \
2990 ADD_EXTEND_64(pstats->mac_stx[1].s##_hi, \
2991 pstats->mac_stx[1].s##_lo, \
2995 #define UPDATE_EXTEND_TSTAT(s, t) \
2997 diff = le32_to_cpu(tclient->s) - old_tclient->s; \
2998 old_tclient->s = le32_to_cpu(tclient->s); \
2999 ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3002 #define UPDATE_EXTEND_USTAT(s, t) \
3004 diff = le32_to_cpu(uclient->s) - le32_to_cpu(old_uclient->s); \
3005 old_uclient->s = uclient->s; \
3006 ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3009 #define UPDATE_EXTEND_XSTAT(s, t) \
3011 diff = le32_to_cpu(xclient->s) - old_xclient->s; \
3012 old_xclient->s = le32_to_cpu(xclient->s); \
3013 ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3016 /* minuend -= subtrahend */
3017 #define SUB_64(m_hi, s_hi, m_lo, s_lo) \
3019 DIFF_64(m_hi, m_hi, s_hi, m_lo, m_lo, s_lo); \
3022 /* minuend[hi:lo] -= subtrahend */
3023 #define SUB_EXTEND_64(m_hi, m_lo, s) \
3025 SUB_64(m_hi, 0, m_lo, s); \
3028 #define SUB_EXTEND_USTAT(s, t) \
3030 diff = le32_to_cpu(uclient->s) - le32_to_cpu(old_uclient->s); \
3031 SUB_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3035 * General service functions
3038 static inline long bnx2x_hilo(u32 *hiref)
3040 u32 lo = *(hiref + 1);
3041 #if (BITS_PER_LONG == 64)
3044 return HILO_U64(hi, lo);
3051 * Init service functions
3054 static void bnx2x_storm_stats_post(struct bnx2x *bp)
3056 if (!bp->stats_pending) {
3057 struct eth_query_ramrod_data ramrod_data = {0};
3060 ramrod_data.drv_counter = bp->stats_counter++;
3061 ramrod_data.collect_port = bp->port.pmf ? 1 : 0;
3062 for_each_queue(bp, i)
3063 ramrod_data.ctr_id_vector |= (1 << bp->fp[i].cl_id);
3065 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_STAT_QUERY, 0,
3066 ((u32 *)&ramrod_data)[1],
3067 ((u32 *)&ramrod_data)[0], 0);
3069 /* stats ramrod has it's own slot on the spq */
3071 bp->stats_pending = 1;
3076 static void bnx2x_stats_init(struct bnx2x *bp)
3078 int port = BP_PORT(bp);
3081 bp->stats_pending = 0;
3082 bp->executer_idx = 0;
3083 bp->stats_counter = 0;
3087 bp->port.port_stx = SHMEM_RD(bp, port_mb[port].port_stx);
3089 bp->port.port_stx = 0;
3090 DP(BNX2X_MSG_STATS, "port_stx 0x%x\n", bp->port.port_stx);
3092 memset(&(bp->port.old_nig_stats), 0, sizeof(struct nig_stats));
3093 bp->port.old_nig_stats.brb_discard =
3094 REG_RD(bp, NIG_REG_STAT0_BRB_DISCARD + port*0x38);
3095 bp->port.old_nig_stats.brb_truncate =
3096 REG_RD(bp, NIG_REG_STAT0_BRB_TRUNCATE + port*0x38);
3097 REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT0 + port*0x50,
3098 &(bp->port.old_nig_stats.egress_mac_pkt0_lo), 2);
3099 REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT1 + port*0x50,
3100 &(bp->port.old_nig_stats.egress_mac_pkt1_lo), 2);
3102 /* function stats */
3103 for_each_queue(bp, i) {
3104 struct bnx2x_fastpath *fp = &bp->fp[i];
3106 memset(&fp->old_tclient, 0,
3107 sizeof(struct tstorm_per_client_stats));
3108 memset(&fp->old_uclient, 0,
3109 sizeof(struct ustorm_per_client_stats));
3110 memset(&fp->old_xclient, 0,
3111 sizeof(struct xstorm_per_client_stats));
3112 memset(&fp->eth_q_stats, 0, sizeof(struct bnx2x_eth_q_stats));
3115 memset(&bp->dev->stats, 0, sizeof(struct net_device_stats));
3116 memset(&bp->eth_stats, 0, sizeof(struct bnx2x_eth_stats));
3118 bp->stats_state = STATS_STATE_DISABLED;
3119 if (IS_E1HMF(bp) && bp->port.pmf && bp->port.port_stx)
3120 bnx2x_stats_handle(bp, STATS_EVENT_PMF);
3123 static void bnx2x_hw_stats_post(struct bnx2x *bp)
3125 struct dmae_command *dmae = &bp->stats_dmae;
3126 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3128 *stats_comp = DMAE_COMP_VAL;
3129 if (CHIP_REV_IS_SLOW(bp))
3133 if (bp->executer_idx) {
3134 int loader_idx = PMF_DMAE_C(bp);
3136 memset(dmae, 0, sizeof(struct dmae_command));
3138 dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3139 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3140 DMAE_CMD_DST_RESET |
3142 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3144 DMAE_CMD_ENDIANITY_DW_SWAP |
3146 (BP_PORT(bp) ? DMAE_CMD_PORT_1 :
3148 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3149 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, dmae[0]));
3150 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, dmae[0]));
3151 dmae->dst_addr_lo = (DMAE_REG_CMD_MEM +
3152 sizeof(struct dmae_command) *
3153 (loader_idx + 1)) >> 2;
3154 dmae->dst_addr_hi = 0;
3155 dmae->len = sizeof(struct dmae_command) >> 2;
3158 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx + 1] >> 2;
3159 dmae->comp_addr_hi = 0;
3163 bnx2x_post_dmae(bp, dmae, loader_idx);
3165 } else if (bp->func_stx) {
3167 bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
3171 static int bnx2x_stats_comp(struct bnx2x *bp)
3173 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3177 while (*stats_comp != DMAE_COMP_VAL) {
3179 BNX2X_ERR("timeout waiting for stats finished\n");
3189 * Statistics service functions
3192 static void bnx2x_stats_pmf_update(struct bnx2x *bp)
3194 struct dmae_command *dmae;
3196 int loader_idx = PMF_DMAE_C(bp);
3197 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3200 if (!IS_E1HMF(bp) || !bp->port.pmf || !bp->port.port_stx) {
3201 BNX2X_ERR("BUG!\n");
3205 bp->executer_idx = 0;
3207 opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3209 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3211 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3213 DMAE_CMD_ENDIANITY_DW_SWAP |
3215 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3216 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3218 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3219 dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
3220 dmae->src_addr_lo = bp->port.port_stx >> 2;
3221 dmae->src_addr_hi = 0;
3222 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3223 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3224 dmae->len = DMAE_LEN32_RD_MAX;
3225 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3226 dmae->comp_addr_hi = 0;
3229 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3230 dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
3231 dmae->src_addr_lo = (bp->port.port_stx >> 2) + DMAE_LEN32_RD_MAX;
3232 dmae->src_addr_hi = 0;
3233 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats) +
3234 DMAE_LEN32_RD_MAX * 4);
3235 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats) +
3236 DMAE_LEN32_RD_MAX * 4);
3237 dmae->len = (sizeof(struct host_port_stats) >> 2) - DMAE_LEN32_RD_MAX;
3238 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3239 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3240 dmae->comp_val = DMAE_COMP_VAL;
3243 bnx2x_hw_stats_post(bp);
3244 bnx2x_stats_comp(bp);
3247 static void bnx2x_port_stats_init(struct bnx2x *bp)
3249 struct dmae_command *dmae;
3250 int port = BP_PORT(bp);
3251 int vn = BP_E1HVN(bp);
3253 int loader_idx = PMF_DMAE_C(bp);
3255 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3258 if (!bp->link_vars.link_up || !bp->port.pmf) {
3259 BNX2X_ERR("BUG!\n");
3263 bp->executer_idx = 0;
3266 opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3267 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3268 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3270 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3272 DMAE_CMD_ENDIANITY_DW_SWAP |
3274 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3275 (vn << DMAE_CMD_E1HVN_SHIFT));
3277 if (bp->port.port_stx) {
3279 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3280 dmae->opcode = opcode;
3281 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3282 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3283 dmae->dst_addr_lo = bp->port.port_stx >> 2;
3284 dmae->dst_addr_hi = 0;
3285 dmae->len = sizeof(struct host_port_stats) >> 2;
3286 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3287 dmae->comp_addr_hi = 0;
3293 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3294 dmae->opcode = opcode;
3295 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
3296 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
3297 dmae->dst_addr_lo = bp->func_stx >> 2;
3298 dmae->dst_addr_hi = 0;
3299 dmae->len = sizeof(struct host_func_stats) >> 2;
3300 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3301 dmae->comp_addr_hi = 0;
3306 opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3307 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3308 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3310 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3312 DMAE_CMD_ENDIANITY_DW_SWAP |
3314 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3315 (vn << DMAE_CMD_E1HVN_SHIFT));
3317 if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
3319 mac_addr = (port ? NIG_REG_INGRESS_BMAC1_MEM :
3320 NIG_REG_INGRESS_BMAC0_MEM);
3322 /* BIGMAC_REGISTER_TX_STAT_GTPKT ..
3323 BIGMAC_REGISTER_TX_STAT_GTBYT */
3324 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3325 dmae->opcode = opcode;
3326 dmae->src_addr_lo = (mac_addr +
3327 BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
3328 dmae->src_addr_hi = 0;
3329 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
3330 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
3331 dmae->len = (8 + BIGMAC_REGISTER_TX_STAT_GTBYT -
3332 BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
3333 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3334 dmae->comp_addr_hi = 0;
3337 /* BIGMAC_REGISTER_RX_STAT_GR64 ..
3338 BIGMAC_REGISTER_RX_STAT_GRIPJ */
3339 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3340 dmae->opcode = opcode;
3341 dmae->src_addr_lo = (mac_addr +
3342 BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
3343 dmae->src_addr_hi = 0;
3344 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
3345 offsetof(struct bmac_stats, rx_stat_gr64_lo));
3346 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
3347 offsetof(struct bmac_stats, rx_stat_gr64_lo));
3348 dmae->len = (8 + BIGMAC_REGISTER_RX_STAT_GRIPJ -
3349 BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
3350 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3351 dmae->comp_addr_hi = 0;
3354 } else if (bp->link_vars.mac_type == MAC_TYPE_EMAC) {
3356 mac_addr = (port ? GRCBASE_EMAC1 : GRCBASE_EMAC0);
3358 /* EMAC_REG_EMAC_RX_STAT_AC (EMAC_REG_EMAC_RX_STAT_AC_COUNT)*/
3359 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3360 dmae->opcode = opcode;
3361 dmae->src_addr_lo = (mac_addr +
3362 EMAC_REG_EMAC_RX_STAT_AC) >> 2;
3363 dmae->src_addr_hi = 0;
3364 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
3365 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
3366 dmae->len = EMAC_REG_EMAC_RX_STAT_AC_COUNT;
3367 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3368 dmae->comp_addr_hi = 0;
3371 /* EMAC_REG_EMAC_RX_STAT_AC_28 */
3372 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3373 dmae->opcode = opcode;
3374 dmae->src_addr_lo = (mac_addr +
3375 EMAC_REG_EMAC_RX_STAT_AC_28) >> 2;
3376 dmae->src_addr_hi = 0;
3377 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
3378 offsetof(struct emac_stats, rx_stat_falsecarriererrors));
3379 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
3380 offsetof(struct emac_stats, rx_stat_falsecarriererrors));
3382 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3383 dmae->comp_addr_hi = 0;
3386 /* EMAC_REG_EMAC_TX_STAT_AC (EMAC_REG_EMAC_TX_STAT_AC_COUNT)*/
3387 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3388 dmae->opcode = opcode;
3389 dmae->src_addr_lo = (mac_addr +
3390 EMAC_REG_EMAC_TX_STAT_AC) >> 2;
3391 dmae->src_addr_hi = 0;
3392 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
3393 offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
3394 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
3395 offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
3396 dmae->len = EMAC_REG_EMAC_TX_STAT_AC_COUNT;
3397 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3398 dmae->comp_addr_hi = 0;
3403 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3404 dmae->opcode = opcode;
3405 dmae->src_addr_lo = (port ? NIG_REG_STAT1_BRB_DISCARD :
3406 NIG_REG_STAT0_BRB_DISCARD) >> 2;
3407 dmae->src_addr_hi = 0;
3408 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats));
3409 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats));
3410 dmae->len = (sizeof(struct nig_stats) - 4*sizeof(u32)) >> 2;
3411 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3412 dmae->comp_addr_hi = 0;
3415 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3416 dmae->opcode = opcode;
3417 dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT0 :
3418 NIG_REG_STAT0_EGRESS_MAC_PKT0) >> 2;
3419 dmae->src_addr_hi = 0;
3420 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
3421 offsetof(struct nig_stats, egress_mac_pkt0_lo));
3422 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
3423 offsetof(struct nig_stats, egress_mac_pkt0_lo));
3424 dmae->len = (2*sizeof(u32)) >> 2;
3425 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3426 dmae->comp_addr_hi = 0;
3429 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3430 dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3431 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
3432 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3434 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3436 DMAE_CMD_ENDIANITY_DW_SWAP |
3438 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3439 (vn << DMAE_CMD_E1HVN_SHIFT));
3440 dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT1 :
3441 NIG_REG_STAT0_EGRESS_MAC_PKT1) >> 2;
3442 dmae->src_addr_hi = 0;
3443 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
3444 offsetof(struct nig_stats, egress_mac_pkt1_lo));
3445 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
3446 offsetof(struct nig_stats, egress_mac_pkt1_lo));
3447 dmae->len = (2*sizeof(u32)) >> 2;
3448 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3449 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3450 dmae->comp_val = DMAE_COMP_VAL;
3455 static void bnx2x_func_stats_init(struct bnx2x *bp)
3457 struct dmae_command *dmae = &bp->stats_dmae;
3458 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3461 if (!bp->func_stx) {
3462 BNX2X_ERR("BUG!\n");
3466 bp->executer_idx = 0;
3467 memset(dmae, 0, sizeof(struct dmae_command));
3469 dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3470 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
3471 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3473 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3475 DMAE_CMD_ENDIANITY_DW_SWAP |
3477 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3478 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3479 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
3480 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
3481 dmae->dst_addr_lo = bp->func_stx >> 2;
3482 dmae->dst_addr_hi = 0;
3483 dmae->len = sizeof(struct host_func_stats) >> 2;
3484 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3485 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3486 dmae->comp_val = DMAE_COMP_VAL;
3491 static void bnx2x_stats_start(struct bnx2x *bp)
3494 bnx2x_port_stats_init(bp);
3496 else if (bp->func_stx)
3497 bnx2x_func_stats_init(bp);
3499 bnx2x_hw_stats_post(bp);
3500 bnx2x_storm_stats_post(bp);
3503 static void bnx2x_stats_pmf_start(struct bnx2x *bp)
3505 bnx2x_stats_comp(bp);
3506 bnx2x_stats_pmf_update(bp);
3507 bnx2x_stats_start(bp);
3510 static void bnx2x_stats_restart(struct bnx2x *bp)
3512 bnx2x_stats_comp(bp);
3513 bnx2x_stats_start(bp);
3516 static void bnx2x_bmac_stats_update(struct bnx2x *bp)
3518 struct bmac_stats *new = bnx2x_sp(bp, mac_stats.bmac_stats);
3519 struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3520 struct bnx2x_eth_stats *estats = &bp->eth_stats;
3521 struct regpair diff;
3523 UPDATE_STAT64(rx_stat_grerb, rx_stat_ifhcinbadoctets);
3524 UPDATE_STAT64(rx_stat_grfcs, rx_stat_dot3statsfcserrors);
3525 UPDATE_STAT64(rx_stat_grund, rx_stat_etherstatsundersizepkts);
3526 UPDATE_STAT64(rx_stat_grovr, rx_stat_dot3statsframestoolong);
3527 UPDATE_STAT64(rx_stat_grfrg, rx_stat_etherstatsfragments);
3528 UPDATE_STAT64(rx_stat_grjbr, rx_stat_etherstatsjabbers);
3529 UPDATE_STAT64(rx_stat_grxcf, rx_stat_maccontrolframesreceived);
3530 UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffstateentered);
3531 UPDATE_STAT64(rx_stat_grxpf, rx_stat_bmac_xpf);
3532 UPDATE_STAT64(tx_stat_gtxpf, tx_stat_outxoffsent);
3533 UPDATE_STAT64(tx_stat_gtxpf, tx_stat_flowcontroldone);
3534 UPDATE_STAT64(tx_stat_gt64, tx_stat_etherstatspkts64octets);
3535 UPDATE_STAT64(tx_stat_gt127,
3536 tx_stat_etherstatspkts65octetsto127octets);
3537 UPDATE_STAT64(tx_stat_gt255,
3538 tx_stat_etherstatspkts128octetsto255octets);
3539 UPDATE_STAT64(tx_stat_gt511,
3540 tx_stat_etherstatspkts256octetsto511octets);
3541 UPDATE_STAT64(tx_stat_gt1023,
3542 tx_stat_etherstatspkts512octetsto1023octets);
3543 UPDATE_STAT64(tx_stat_gt1518,
3544 tx_stat_etherstatspkts1024octetsto1522octets);
3545 UPDATE_STAT64(tx_stat_gt2047, tx_stat_bmac_2047);
3546 UPDATE_STAT64(tx_stat_gt4095, tx_stat_bmac_4095);
3547 UPDATE_STAT64(tx_stat_gt9216, tx_stat_bmac_9216);
3548 UPDATE_STAT64(tx_stat_gt16383, tx_stat_bmac_16383);
3549 UPDATE_STAT64(tx_stat_gterr,
3550 tx_stat_dot3statsinternalmactransmiterrors);
3551 UPDATE_STAT64(tx_stat_gtufl, tx_stat_bmac_ufl);
3553 estats->pause_frames_received_hi =
3554 pstats->mac_stx[1].rx_stat_bmac_xpf_hi;
3555 estats->pause_frames_received_lo =
3556 pstats->mac_stx[1].rx_stat_bmac_xpf_lo;
3558 estats->pause_frames_sent_hi =
3559 pstats->mac_stx[1].tx_stat_outxoffsent_hi;
3560 estats->pause_frames_sent_lo =
3561 pstats->mac_stx[1].tx_stat_outxoffsent_lo;
3564 static void bnx2x_emac_stats_update(struct bnx2x *bp)
3566 struct emac_stats *new = bnx2x_sp(bp, mac_stats.emac_stats);
3567 struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3568 struct bnx2x_eth_stats *estats = &bp->eth_stats;
3570 UPDATE_EXTEND_STAT(rx_stat_ifhcinbadoctets);
3571 UPDATE_EXTEND_STAT(tx_stat_ifhcoutbadoctets);
3572 UPDATE_EXTEND_STAT(rx_stat_dot3statsfcserrors);
3573 UPDATE_EXTEND_STAT(rx_stat_dot3statsalignmenterrors);
3574 UPDATE_EXTEND_STAT(rx_stat_dot3statscarriersenseerrors);
3575 UPDATE_EXTEND_STAT(rx_stat_falsecarriererrors);
3576 UPDATE_EXTEND_STAT(rx_stat_etherstatsundersizepkts);
3577 UPDATE_EXTEND_STAT(rx_stat_dot3statsframestoolong);
3578 UPDATE_EXTEND_STAT(rx_stat_etherstatsfragments);
3579 UPDATE_EXTEND_STAT(rx_stat_etherstatsjabbers);
3580 UPDATE_EXTEND_STAT(rx_stat_maccontrolframesreceived);
3581 UPDATE_EXTEND_STAT(rx_stat_xoffstateentered);
3582 UPDATE_EXTEND_STAT(rx_stat_xonpauseframesreceived);
3583 UPDATE_EXTEND_STAT(rx_stat_xoffpauseframesreceived);
3584 UPDATE_EXTEND_STAT(tx_stat_outxonsent);
3585 UPDATE_EXTEND_STAT(tx_stat_outxoffsent);
3586 UPDATE_EXTEND_STAT(tx_stat_flowcontroldone);
3587 UPDATE_EXTEND_STAT(tx_stat_etherstatscollisions);
3588 UPDATE_EXTEND_STAT(tx_stat_dot3statssinglecollisionframes);
3589 UPDATE_EXTEND_STAT(tx_stat_dot3statsmultiplecollisionframes);
3590 UPDATE_EXTEND_STAT(tx_stat_dot3statsdeferredtransmissions);
3591 UPDATE_EXTEND_STAT(tx_stat_dot3statsexcessivecollisions);
3592 UPDATE_EXTEND_STAT(tx_stat_dot3statslatecollisions);
3593 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts64octets);
3594 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts65octetsto127octets);
3595 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts128octetsto255octets);
3596 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts256octetsto511octets);
3597 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts512octetsto1023octets);
3598 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts1024octetsto1522octets);
3599 UPDATE_EXTEND_STAT(tx_stat_etherstatspktsover1522octets);
3600 UPDATE_EXTEND_STAT(tx_stat_dot3statsinternalmactransmiterrors);
3602 estats->pause_frames_received_hi =
3603 pstats->mac_stx[1].rx_stat_xonpauseframesreceived_hi;
3604 estats->pause_frames_received_lo =
3605 pstats->mac_stx[1].rx_stat_xonpauseframesreceived_lo;
3606 ADD_64(estats->pause_frames_received_hi,
3607 pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_hi,
3608 estats->pause_frames_received_lo,
3609 pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_lo);
3611 estats->pause_frames_sent_hi =
3612 pstats->mac_stx[1].tx_stat_outxonsent_hi;
3613 estats->pause_frames_sent_lo =
3614 pstats->mac_stx[1].tx_stat_outxonsent_lo;
3615 ADD_64(estats->pause_frames_sent_hi,
3616 pstats->mac_stx[1].tx_stat_outxoffsent_hi,
3617 estats->pause_frames_sent_lo,
3618 pstats->mac_stx[1].tx_stat_outxoffsent_lo);
3621 static int bnx2x_hw_stats_update(struct bnx2x *bp)
3623 struct nig_stats *new = bnx2x_sp(bp, nig_stats);
3624 struct nig_stats *old = &(bp->port.old_nig_stats);
3625 struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3626 struct bnx2x_eth_stats *estats = &bp->eth_stats;
3627 struct regpair diff;
3630 if (bp->link_vars.mac_type == MAC_TYPE_BMAC)
3631 bnx2x_bmac_stats_update(bp);
3633 else if (bp->link_vars.mac_type == MAC_TYPE_EMAC)
3634 bnx2x_emac_stats_update(bp);
3636 else { /* unreached */
3637 BNX2X_ERR("stats updated by dmae but no MAC active\n");
3641 ADD_EXTEND_64(pstats->brb_drop_hi, pstats->brb_drop_lo,
3642 new->brb_discard - old->brb_discard);
3643 ADD_EXTEND_64(estats->brb_truncate_hi, estats->brb_truncate_lo,
3644 new->brb_truncate - old->brb_truncate);
3646 UPDATE_STAT64_NIG(egress_mac_pkt0,
3647 etherstatspkts1024octetsto1522octets);
3648 UPDATE_STAT64_NIG(egress_mac_pkt1, etherstatspktsover1522octets);
3650 memcpy(old, new, sizeof(struct nig_stats));
3652 memcpy(&(estats->rx_stat_ifhcinbadoctets_hi), &(pstats->mac_stx[1]),
3653 sizeof(struct mac_stx));
3654 estats->brb_drop_hi = pstats->brb_drop_hi;
3655 estats->brb_drop_lo = pstats->brb_drop_lo;
3657 pstats->host_port_stats_start = ++pstats->host_port_stats_end;
3659 nig_timer_max = SHMEM_RD(bp, port_mb[BP_PORT(bp)].stat_nig_timer);
3660 if (nig_timer_max != estats->nig_timer_max) {
3661 estats->nig_timer_max = nig_timer_max;
3662 BNX2X_ERR("NIG timer max (%u)\n", estats->nig_timer_max);
3668 static int bnx2x_storm_stats_update(struct bnx2x *bp)
3670 struct eth_stats_query *stats = bnx2x_sp(bp, fw_stats);
3671 struct tstorm_per_port_stats *tport =
3672 &stats->tstorm_common.port_statistics;
3673 struct host_func_stats *fstats = bnx2x_sp(bp, func_stats);
3674 struct bnx2x_eth_stats *estats = &bp->eth_stats;
3677 memset(&(fstats->total_bytes_received_hi), 0,
3678 sizeof(struct host_func_stats) - 2*sizeof(u32));
3679 estats->error_bytes_received_hi = 0;
3680 estats->error_bytes_received_lo = 0;
3681 estats->etherstatsoverrsizepkts_hi = 0;
3682 estats->etherstatsoverrsizepkts_lo = 0;
3683 estats->no_buff_discard_hi = 0;
3684 estats->no_buff_discard_lo = 0;
3686 for_each_queue(bp, i) {
3687 struct bnx2x_fastpath *fp = &bp->fp[i];
3688 int cl_id = fp->cl_id;
3689 struct tstorm_per_client_stats *tclient =
3690 &stats->tstorm_common.client_statistics[cl_id];
3691 struct tstorm_per_client_stats *old_tclient = &fp->old_tclient;
3692 struct ustorm_per_client_stats *uclient =
3693 &stats->ustorm_common.client_statistics[cl_id];
3694 struct ustorm_per_client_stats *old_uclient = &fp->old_uclient;
3695 struct xstorm_per_client_stats *xclient =
3696 &stats->xstorm_common.client_statistics[cl_id];
3697 struct xstorm_per_client_stats *old_xclient = &fp->old_xclient;
3698 struct bnx2x_eth_q_stats *qstats = &fp->eth_q_stats;
3701 /* are storm stats valid? */
3702 if ((u16)(le16_to_cpu(xclient->stats_counter) + 1) !=
3703 bp->stats_counter) {
3704 DP(BNX2X_MSG_STATS, "[%d] stats not updated by xstorm"
3705 " xstorm counter (%d) != stats_counter (%d)\n",
3706 i, xclient->stats_counter, bp->stats_counter);
3709 if ((u16)(le16_to_cpu(tclient->stats_counter) + 1) !=
3710 bp->stats_counter) {
3711 DP(BNX2X_MSG_STATS, "[%d] stats not updated by tstorm"
3712 " tstorm counter (%d) != stats_counter (%d)\n",
3713 i, tclient->stats_counter, bp->stats_counter);
3716 if ((u16)(le16_to_cpu(uclient->stats_counter) + 1) !=
3717 bp->stats_counter) {
3718 DP(BNX2X_MSG_STATS, "[%d] stats not updated by ustorm"
3719 " ustorm counter (%d) != stats_counter (%d)\n",
3720 i, uclient->stats_counter, bp->stats_counter);
3724 qstats->total_bytes_received_hi =
3725 qstats->valid_bytes_received_hi =
3726 le32_to_cpu(tclient->total_rcv_bytes.hi);
3727 qstats->total_bytes_received_lo =
3728 qstats->valid_bytes_received_lo =
3729 le32_to_cpu(tclient->total_rcv_bytes.lo);
3731 qstats->error_bytes_received_hi =
3732 le32_to_cpu(tclient->rcv_error_bytes.hi);
3733 qstats->error_bytes_received_lo =
3734 le32_to_cpu(tclient->rcv_error_bytes.lo);
3736 ADD_64(qstats->total_bytes_received_hi,
3737 qstats->error_bytes_received_hi,
3738 qstats->total_bytes_received_lo,
3739 qstats->error_bytes_received_lo);
3741 UPDATE_EXTEND_TSTAT(rcv_unicast_pkts,
3742 total_unicast_packets_received);
3743 UPDATE_EXTEND_TSTAT(rcv_multicast_pkts,
3744 total_multicast_packets_received);
3745 UPDATE_EXTEND_TSTAT(rcv_broadcast_pkts,
3746 total_broadcast_packets_received);
3747 UPDATE_EXTEND_TSTAT(packets_too_big_discard,
3748 etherstatsoverrsizepkts);
3749 UPDATE_EXTEND_TSTAT(no_buff_discard, no_buff_discard);
3751 SUB_EXTEND_USTAT(ucast_no_buff_pkts,
3752 total_unicast_packets_received);
3753 SUB_EXTEND_USTAT(mcast_no_buff_pkts,
3754 total_multicast_packets_received);
3755 SUB_EXTEND_USTAT(bcast_no_buff_pkts,
3756 total_broadcast_packets_received);
3757 UPDATE_EXTEND_USTAT(ucast_no_buff_pkts, no_buff_discard);
3758 UPDATE_EXTEND_USTAT(mcast_no_buff_pkts, no_buff_discard);
3759 UPDATE_EXTEND_USTAT(bcast_no_buff_pkts, no_buff_discard);
3761 qstats->total_bytes_transmitted_hi =
3762 le32_to_cpu(xclient->total_sent_bytes.hi);
3763 qstats->total_bytes_transmitted_lo =
3764 le32_to_cpu(xclient->total_sent_bytes.lo);
3766 UPDATE_EXTEND_XSTAT(unicast_pkts_sent,
3767 total_unicast_packets_transmitted);
3768 UPDATE_EXTEND_XSTAT(multicast_pkts_sent,
3769 total_multicast_packets_transmitted);
3770 UPDATE_EXTEND_XSTAT(broadcast_pkts_sent,
3771 total_broadcast_packets_transmitted);
3773 old_tclient->checksum_discard = tclient->checksum_discard;
3774 old_tclient->ttl0_discard = tclient->ttl0_discard;
3776 ADD_64(fstats->total_bytes_received_hi,
3777 qstats->total_bytes_received_hi,
3778 fstats->total_bytes_received_lo,
3779 qstats->total_bytes_received_lo);
3780 ADD_64(fstats->total_bytes_transmitted_hi,
3781 qstats->total_bytes_transmitted_hi,
3782 fstats->total_bytes_transmitted_lo,
3783 qstats->total_bytes_transmitted_lo);
3784 ADD_64(fstats->total_unicast_packets_received_hi,
3785 qstats->total_unicast_packets_received_hi,
3786 fstats->total_unicast_packets_received_lo,
3787 qstats->total_unicast_packets_received_lo);
3788 ADD_64(fstats->total_multicast_packets_received_hi,
3789 qstats->total_multicast_packets_received_hi,
3790 fstats->total_multicast_packets_received_lo,
3791 qstats->total_multicast_packets_received_lo);
3792 ADD_64(fstats->total_broadcast_packets_received_hi,
3793 qstats->total_broadcast_packets_received_hi,
3794 fstats->total_broadcast_packets_received_lo,
3795 qstats->total_broadcast_packets_received_lo);
3796 ADD_64(fstats->total_unicast_packets_transmitted_hi,
3797 qstats->total_unicast_packets_transmitted_hi,
3798 fstats->total_unicast_packets_transmitted_lo,
3799 qstats->total_unicast_packets_transmitted_lo);
3800 ADD_64(fstats->total_multicast_packets_transmitted_hi,
3801 qstats->total_multicast_packets_transmitted_hi,
3802 fstats->total_multicast_packets_transmitted_lo,
3803 qstats->total_multicast_packets_transmitted_lo);
3804 ADD_64(fstats->total_broadcast_packets_transmitted_hi,
3805 qstats->total_broadcast_packets_transmitted_hi,
3806 fstats->total_broadcast_packets_transmitted_lo,
3807 qstats->total_broadcast_packets_transmitted_lo);
3808 ADD_64(fstats->valid_bytes_received_hi,
3809 qstats->valid_bytes_received_hi,
3810 fstats->valid_bytes_received_lo,
3811 qstats->valid_bytes_received_lo);
3813 ADD_64(estats->error_bytes_received_hi,
3814 qstats->error_bytes_received_hi,
3815 estats->error_bytes_received_lo,
3816 qstats->error_bytes_received_lo);
3817 ADD_64(estats->etherstatsoverrsizepkts_hi,
3818 qstats->etherstatsoverrsizepkts_hi,
3819 estats->etherstatsoverrsizepkts_lo,
3820 qstats->etherstatsoverrsizepkts_lo);
3821 ADD_64(estats->no_buff_discard_hi, qstats->no_buff_discard_hi,
3822 estats->no_buff_discard_lo, qstats->no_buff_discard_lo);
3825 ADD_64(fstats->total_bytes_received_hi,
3826 estats->rx_stat_ifhcinbadoctets_hi,
3827 fstats->total_bytes_received_lo,
3828 estats->rx_stat_ifhcinbadoctets_lo);
3830 memcpy(estats, &(fstats->total_bytes_received_hi),
3831 sizeof(struct host_func_stats) - 2*sizeof(u32));
3833 ADD_64(estats->etherstatsoverrsizepkts_hi,
3834 estats->rx_stat_dot3statsframestoolong_hi,
3835 estats->etherstatsoverrsizepkts_lo,
3836 estats->rx_stat_dot3statsframestoolong_lo);
3837 ADD_64(estats->error_bytes_received_hi,
3838 estats->rx_stat_ifhcinbadoctets_hi,
3839 estats->error_bytes_received_lo,
3840 estats->rx_stat_ifhcinbadoctets_lo);
3843 estats->mac_filter_discard =
3844 le32_to_cpu(tport->mac_filter_discard);
3845 estats->xxoverflow_discard =
3846 le32_to_cpu(tport->xxoverflow_discard);
3847 estats->brb_truncate_discard =
3848 le32_to_cpu(tport->brb_truncate_discard);
3849 estats->mac_discard = le32_to_cpu(tport->mac_discard);
3852 fstats->host_func_stats_start = ++fstats->host_func_stats_end;
3854 bp->stats_pending = 0;
3859 static void bnx2x_net_stats_update(struct bnx2x *bp)
3861 struct bnx2x_eth_stats *estats = &bp->eth_stats;
3862 struct net_device_stats *nstats = &bp->dev->stats;
3865 nstats->rx_packets =
3866 bnx2x_hilo(&estats->total_unicast_packets_received_hi) +
3867 bnx2x_hilo(&estats->total_multicast_packets_received_hi) +
3868 bnx2x_hilo(&estats->total_broadcast_packets_received_hi);
3870 nstats->tx_packets =
3871 bnx2x_hilo(&estats->total_unicast_packets_transmitted_hi) +
3872 bnx2x_hilo(&estats->total_multicast_packets_transmitted_hi) +
3873 bnx2x_hilo(&estats->total_broadcast_packets_transmitted_hi);
3875 nstats->rx_bytes = bnx2x_hilo(&estats->total_bytes_received_hi);
3877 nstats->tx_bytes = bnx2x_hilo(&estats->total_bytes_transmitted_hi);
3879 nstats->rx_dropped = estats->mac_discard;
3880 for_each_queue(bp, i)
3881 nstats->rx_dropped +=
3882 le32_to_cpu(bp->fp[i].old_tclient.checksum_discard);
3884 nstats->tx_dropped = 0;
3887 bnx2x_hilo(&estats->total_multicast_packets_received_hi);
3889 nstats->collisions =
3890 bnx2x_hilo(&estats->tx_stat_etherstatscollisions_hi);
3892 nstats->rx_length_errors =
3893 bnx2x_hilo(&estats->rx_stat_etherstatsundersizepkts_hi) +
3894 bnx2x_hilo(&estats->etherstatsoverrsizepkts_hi);
3895 nstats->rx_over_errors = bnx2x_hilo(&estats->brb_drop_hi) +
3896 bnx2x_hilo(&estats->brb_truncate_hi);
3897 nstats->rx_crc_errors =
3898 bnx2x_hilo(&estats->rx_stat_dot3statsfcserrors_hi);
3899 nstats->rx_frame_errors =
3900 bnx2x_hilo(&estats->rx_stat_dot3statsalignmenterrors_hi);
3901 nstats->rx_fifo_errors = bnx2x_hilo(&estats->no_buff_discard_hi);
3902 nstats->rx_missed_errors = estats->xxoverflow_discard;
3904 nstats->rx_errors = nstats->rx_length_errors +
3905 nstats->rx_over_errors +
3906 nstats->rx_crc_errors +
3907 nstats->rx_frame_errors +
3908 nstats->rx_fifo_errors +
3909 nstats->rx_missed_errors;
3911 nstats->tx_aborted_errors =
3912 bnx2x_hilo(&estats->tx_stat_dot3statslatecollisions_hi) +
3913 bnx2x_hilo(&estats->tx_stat_dot3statsexcessivecollisions_hi);
3914 nstats->tx_carrier_errors =
3915 bnx2x_hilo(&estats->rx_stat_dot3statscarriersenseerrors_hi);
3916 nstats->tx_fifo_errors = 0;
3917 nstats->tx_heartbeat_errors = 0;
3918 nstats->tx_window_errors = 0;
3920 nstats->tx_errors = nstats->tx_aborted_errors +
3921 nstats->tx_carrier_errors +
3922 bnx2x_hilo(&estats->tx_stat_dot3statsinternalmactransmiterrors_hi);
3925 static void bnx2x_drv_stats_update(struct bnx2x *bp)
3927 struct bnx2x_eth_stats *estats = &bp->eth_stats;
3930 estats->driver_xoff = 0;
3931 estats->rx_err_discard_pkt = 0;
3932 estats->rx_skb_alloc_failed = 0;
3933 estats->hw_csum_err = 0;
3934 for_each_queue(bp, i) {
3935 struct bnx2x_eth_q_stats *qstats = &bp->fp[i].eth_q_stats;
3937 estats->driver_xoff += qstats->driver_xoff;
3938 estats->rx_err_discard_pkt += qstats->rx_err_discard_pkt;
3939 estats->rx_skb_alloc_failed += qstats->rx_skb_alloc_failed;
3940 estats->hw_csum_err += qstats->hw_csum_err;
3944 static void bnx2x_stats_update(struct bnx2x *bp)
3946 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3948 if (*stats_comp != DMAE_COMP_VAL)
3952 bnx2x_hw_stats_update(bp);
3954 if (bnx2x_storm_stats_update(bp) && (bp->stats_pending++ == 3)) {
3955 BNX2X_ERR("storm stats were not updated for 3 times\n");
3960 bnx2x_net_stats_update(bp);
3961 bnx2x_drv_stats_update(bp);
3963 if (bp->msglevel & NETIF_MSG_TIMER) {
3964 struct tstorm_per_client_stats *old_tclient =
3965 &bp->fp->old_tclient;
3966 struct bnx2x_eth_q_stats *qstats = &bp->fp->eth_q_stats;
3967 struct bnx2x_eth_stats *estats = &bp->eth_stats;
3968 struct net_device_stats *nstats = &bp->dev->stats;
3971 printk(KERN_DEBUG "%s:\n", bp->dev->name);
3972 printk(KERN_DEBUG " tx avail (%4x) tx hc idx (%x)"
3974 bnx2x_tx_avail(bp->fp),
3975 le16_to_cpu(*bp->fp->tx_cons_sb), nstats->tx_packets);
3976 printk(KERN_DEBUG " rx usage (%4x) rx hc idx (%x)"
3978 (u16)(le16_to_cpu(*bp->fp->rx_cons_sb) -
3979 bp->fp->rx_comp_cons),
3980 le16_to_cpu(*bp->fp->rx_cons_sb), nstats->rx_packets);
3981 printk(KERN_DEBUG " %s (Xoff events %u) brb drops %u "
3982 "brb truncate %u\n",
3983 (netif_queue_stopped(bp->dev) ? "Xoff" : "Xon"),
3984 qstats->driver_xoff,
3985 estats->brb_drop_lo, estats->brb_truncate_lo);
3986 printk(KERN_DEBUG "tstats: checksum_discard %u "
3987 "packets_too_big_discard %lu no_buff_discard %lu "
3988 "mac_discard %u mac_filter_discard %u "
3989 "xxovrflow_discard %u brb_truncate_discard %u "
3990 "ttl0_discard %u\n",
3991 old_tclient->checksum_discard,
3992 bnx2x_hilo(&qstats->etherstatsoverrsizepkts_hi),
3993 bnx2x_hilo(&qstats->no_buff_discard_hi),
3994 estats->mac_discard, estats->mac_filter_discard,
3995 estats->xxoverflow_discard, estats->brb_truncate_discard,
3996 old_tclient->ttl0_discard);
3998 for_each_queue(bp, i) {
3999 printk(KERN_DEBUG "[%d]: %lu\t%lu\t%lu\n", i,
4000 bnx2x_fp(bp, i, tx_pkt),
4001 bnx2x_fp(bp, i, rx_pkt),
4002 bnx2x_fp(bp, i, rx_calls));
4006 bnx2x_hw_stats_post(bp);
4007 bnx2x_storm_stats_post(bp);
4010 static void bnx2x_port_stats_stop(struct bnx2x *bp)
4012 struct dmae_command *dmae;
4014 int loader_idx = PMF_DMAE_C(bp);
4015 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
4017 bp->executer_idx = 0;
4019 opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
4021 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
4023 DMAE_CMD_ENDIANITY_B_DW_SWAP |
4025 DMAE_CMD_ENDIANITY_DW_SWAP |
4027 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
4028 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
4030 if (bp->port.port_stx) {
4032 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4034 dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
4036 dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
4037 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
4038 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
4039 dmae->dst_addr_lo = bp->port.port_stx >> 2;
4040 dmae->dst_addr_hi = 0;
4041 dmae->len = sizeof(struct host_port_stats) >> 2;
4043 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
4044 dmae->comp_addr_hi = 0;
4047 dmae->comp_addr_lo =
4048 U64_LO(bnx2x_sp_mapping(bp, stats_comp));
4049 dmae->comp_addr_hi =
4050 U64_HI(bnx2x_sp_mapping(bp, stats_comp));
4051 dmae->comp_val = DMAE_COMP_VAL;
4059 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4060 dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
4061 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
4062 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
4063 dmae->dst_addr_lo = bp->func_stx >> 2;
4064 dmae->dst_addr_hi = 0;
4065 dmae->len = sizeof(struct host_func_stats) >> 2;
4066 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
4067 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
4068 dmae->comp_val = DMAE_COMP_VAL;
4074 static void bnx2x_stats_stop(struct bnx2x *bp)
4078 bnx2x_stats_comp(bp);
4081 update = (bnx2x_hw_stats_update(bp) == 0);
4083 update |= (bnx2x_storm_stats_update(bp) == 0);
4086 bnx2x_net_stats_update(bp);
4089 bnx2x_port_stats_stop(bp);
4091 bnx2x_hw_stats_post(bp);
4092 bnx2x_stats_comp(bp);
4096 static void bnx2x_stats_do_nothing(struct bnx2x *bp)
4100 static const struct {
4101 void (*action)(struct bnx2x *bp);
4102 enum bnx2x_stats_state next_state;
4103 } bnx2x_stats_stm[STATS_STATE_MAX][STATS_EVENT_MAX] = {
4106 /* DISABLED PMF */ {bnx2x_stats_pmf_update, STATS_STATE_DISABLED},
4107 /* LINK_UP */ {bnx2x_stats_start, STATS_STATE_ENABLED},
4108 /* UPDATE */ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED},
4109 /* STOP */ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED}
4112 /* ENABLED PMF */ {bnx2x_stats_pmf_start, STATS_STATE_ENABLED},
4113 /* LINK_UP */ {bnx2x_stats_restart, STATS_STATE_ENABLED},
4114 /* UPDATE */ {bnx2x_stats_update, STATS_STATE_ENABLED},
4115 /* STOP */ {bnx2x_stats_stop, STATS_STATE_DISABLED}
4119 static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event)
4121 enum bnx2x_stats_state state = bp->stats_state;
4123 bnx2x_stats_stm[state][event].action(bp);
4124 bp->stats_state = bnx2x_stats_stm[state][event].next_state;
4126 if ((event != STATS_EVENT_UPDATE) || (bp->msglevel & NETIF_MSG_TIMER))
4127 DP(BNX2X_MSG_STATS, "state %d -> event %d -> state %d\n",
4128 state, event, bp->stats_state);
4131 static void bnx2x_timer(unsigned long data)
4133 struct bnx2x *bp = (struct bnx2x *) data;
4135 if (!netif_running(bp->dev))
4138 if (atomic_read(&bp->intr_sem) != 0)
4142 struct bnx2x_fastpath *fp = &bp->fp[0];
4145 bnx2x_tx_int(fp, 1000);
4146 rc = bnx2x_rx_int(fp, 1000);
4149 if (!BP_NOMCP(bp)) {
4150 int func = BP_FUNC(bp);
4154 ++bp->fw_drv_pulse_wr_seq;
4155 bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK;
4156 /* TBD - add SYSTEM_TIME */
4157 drv_pulse = bp->fw_drv_pulse_wr_seq;
4158 SHMEM_WR(bp, func_mb[func].drv_pulse_mb, drv_pulse);
4160 mcp_pulse = (SHMEM_RD(bp, func_mb[func].mcp_pulse_mb) &
4161 MCP_PULSE_SEQ_MASK);
4162 /* The delta between driver pulse and mcp response
4163 * should be 1 (before mcp response) or 0 (after mcp response)
4165 if ((drv_pulse != mcp_pulse) &&
4166 (drv_pulse != ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK))) {
4167 /* someone lost a heartbeat... */
4168 BNX2X_ERR("drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
4169 drv_pulse, mcp_pulse);
4173 if ((bp->state == BNX2X_STATE_OPEN) ||
4174 (bp->state == BNX2X_STATE_DISABLED))
4175 bnx2x_stats_handle(bp, STATS_EVENT_UPDATE);
4178 mod_timer(&bp->timer, jiffies + bp->current_interval);
4181 /* end of Statistics */
4186 * nic init service functions
4189 static void bnx2x_zero_sb(struct bnx2x *bp, int sb_id)
4191 int port = BP_PORT(bp);
4193 bnx2x_init_fill(bp, BAR_USTRORM_INTMEM +
4194 USTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), 0,
4195 sizeof(struct ustorm_status_block)/4);
4196 bnx2x_init_fill(bp, BAR_CSTRORM_INTMEM +
4197 CSTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), 0,
4198 sizeof(struct cstorm_status_block)/4);
4201 static void bnx2x_init_sb(struct bnx2x *bp, struct host_status_block *sb,
4202 dma_addr_t mapping, int sb_id)
4204 int port = BP_PORT(bp);
4205 int func = BP_FUNC(bp);
4210 section = ((u64)mapping) + offsetof(struct host_status_block,
4212 sb->u_status_block.status_block_id = sb_id;
4214 REG_WR(bp, BAR_USTRORM_INTMEM +
4215 USTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id), U64_LO(section));
4216 REG_WR(bp, BAR_USTRORM_INTMEM +
4217 ((USTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id)) + 4),
4219 REG_WR8(bp, BAR_USTRORM_INTMEM + FP_USB_FUNC_OFF +
4220 USTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), func);
4222 for (index = 0; index < HC_USTORM_SB_NUM_INDICES; index++)
4223 REG_WR16(bp, BAR_USTRORM_INTMEM +
4224 USTORM_SB_HC_DISABLE_OFFSET(port, sb_id, index), 1);
4227 section = ((u64)mapping) + offsetof(struct host_status_block,
4229 sb->c_status_block.status_block_id = sb_id;
4231 REG_WR(bp, BAR_CSTRORM_INTMEM +
4232 CSTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id), U64_LO(section));
4233 REG_WR(bp, BAR_CSTRORM_INTMEM +
4234 ((CSTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id)) + 4),
4236 REG_WR8(bp, BAR_CSTRORM_INTMEM + FP_CSB_FUNC_OFF +
4237 CSTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), func);
4239 for (index = 0; index < HC_CSTORM_SB_NUM_INDICES; index++)
4240 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4241 CSTORM_SB_HC_DISABLE_OFFSET(port, sb_id, index), 1);
4243 bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
4246 static void bnx2x_zero_def_sb(struct bnx2x *bp)
4248 int func = BP_FUNC(bp);
4250 bnx2x_init_fill(bp, BAR_USTRORM_INTMEM +
4251 USTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4252 sizeof(struct ustorm_def_status_block)/4);
4253 bnx2x_init_fill(bp, BAR_CSTRORM_INTMEM +
4254 CSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4255 sizeof(struct cstorm_def_status_block)/4);
4256 bnx2x_init_fill(bp, BAR_XSTRORM_INTMEM +
4257 XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4258 sizeof(struct xstorm_def_status_block)/4);
4259 bnx2x_init_fill(bp, BAR_TSTRORM_INTMEM +
4260 TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4261 sizeof(struct tstorm_def_status_block)/4);
4264 static void bnx2x_init_def_sb(struct bnx2x *bp,
4265 struct host_def_status_block *def_sb,
4266 dma_addr_t mapping, int sb_id)
4268 int port = BP_PORT(bp);
4269 int func = BP_FUNC(bp);
4270 int index, val, reg_offset;
4274 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4275 atten_status_block);
4276 def_sb->atten_status_block.status_block_id = sb_id;
4280 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
4281 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
4283 for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
4284 bp->attn_group[index].sig[0] = REG_RD(bp,
4285 reg_offset + 0x10*index);
4286 bp->attn_group[index].sig[1] = REG_RD(bp,
4287 reg_offset + 0x4 + 0x10*index);
4288 bp->attn_group[index].sig[2] = REG_RD(bp,
4289 reg_offset + 0x8 + 0x10*index);
4290 bp->attn_group[index].sig[3] = REG_RD(bp,
4291 reg_offset + 0xc + 0x10*index);
4294 reg_offset = (port ? HC_REG_ATTN_MSG1_ADDR_L :
4295 HC_REG_ATTN_MSG0_ADDR_L);
4297 REG_WR(bp, reg_offset, U64_LO(section));
4298 REG_WR(bp, reg_offset + 4, U64_HI(section));
4300 reg_offset = (port ? HC_REG_ATTN_NUM_P1 : HC_REG_ATTN_NUM_P0);
4302 val = REG_RD(bp, reg_offset);
4304 REG_WR(bp, reg_offset, val);
4307 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4308 u_def_status_block);
4309 def_sb->u_def_status_block.status_block_id = sb_id;
4311 REG_WR(bp, BAR_USTRORM_INTMEM +
4312 USTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
4313 REG_WR(bp, BAR_USTRORM_INTMEM +
4314 ((USTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
4316 REG_WR8(bp, BAR_USTRORM_INTMEM + DEF_USB_FUNC_OFF +
4317 USTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4319 for (index = 0; index < HC_USTORM_DEF_SB_NUM_INDICES; index++)
4320 REG_WR16(bp, BAR_USTRORM_INTMEM +
4321 USTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
4324 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4325 c_def_status_block);
4326 def_sb->c_def_status_block.status_block_id = sb_id;
4328 REG_WR(bp, BAR_CSTRORM_INTMEM +
4329 CSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
4330 REG_WR(bp, BAR_CSTRORM_INTMEM +
4331 ((CSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
4333 REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_CSB_FUNC_OFF +
4334 CSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4336 for (index = 0; index < HC_CSTORM_DEF_SB_NUM_INDICES; index++)
4337 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4338 CSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
4341 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4342 t_def_status_block);
4343 def_sb->t_def_status_block.status_block_id = sb_id;
4345 REG_WR(bp, BAR_TSTRORM_INTMEM +
4346 TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
4347 REG_WR(bp, BAR_TSTRORM_INTMEM +
4348 ((TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
4350 REG_WR8(bp, BAR_TSTRORM_INTMEM + DEF_TSB_FUNC_OFF +
4351 TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4353 for (index = 0; index < HC_TSTORM_DEF_SB_NUM_INDICES; index++)
4354 REG_WR16(bp, BAR_TSTRORM_INTMEM +
4355 TSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
4358 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4359 x_def_status_block);
4360 def_sb->x_def_status_block.status_block_id = sb_id;
4362 REG_WR(bp, BAR_XSTRORM_INTMEM +
4363 XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
4364 REG_WR(bp, BAR_XSTRORM_INTMEM +
4365 ((XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
4367 REG_WR8(bp, BAR_XSTRORM_INTMEM + DEF_XSB_FUNC_OFF +
4368 XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4370 for (index = 0; index < HC_XSTORM_DEF_SB_NUM_INDICES; index++)
4371 REG_WR16(bp, BAR_XSTRORM_INTMEM +
4372 XSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
4374 bp->stats_pending = 0;
4375 bp->set_mac_pending = 0;
4377 bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
4380 static void bnx2x_update_coalesce(struct bnx2x *bp)
4382 int port = BP_PORT(bp);
4385 for_each_queue(bp, i) {
4386 int sb_id = bp->fp[i].sb_id;
4388 /* HC_INDEX_U_ETH_RX_CQ_CONS */
4389 REG_WR8(bp, BAR_USTRORM_INTMEM +
4390 USTORM_SB_HC_TIMEOUT_OFFSET(port, sb_id,
4391 U_SB_ETH_RX_CQ_INDEX),
4393 REG_WR16(bp, BAR_USTRORM_INTMEM +
4394 USTORM_SB_HC_DISABLE_OFFSET(port, sb_id,
4395 U_SB_ETH_RX_CQ_INDEX),
4396 bp->rx_ticks ? 0 : 1);
4398 /* HC_INDEX_C_ETH_TX_CQ_CONS */
4399 REG_WR8(bp, BAR_CSTRORM_INTMEM +
4400 CSTORM_SB_HC_TIMEOUT_OFFSET(port, sb_id,
4401 C_SB_ETH_TX_CQ_INDEX),
4403 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4404 CSTORM_SB_HC_DISABLE_OFFSET(port, sb_id,
4405 C_SB_ETH_TX_CQ_INDEX),
4406 bp->tx_ticks ? 0 : 1);
4410 static inline void bnx2x_free_tpa_pool(struct bnx2x *bp,
4411 struct bnx2x_fastpath *fp, int last)
4415 for (i = 0; i < last; i++) {
4416 struct sw_rx_bd *rx_buf = &(fp->tpa_pool[i]);
4417 struct sk_buff *skb = rx_buf->skb;
4420 DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i);
4424 if (fp->tpa_state[i] == BNX2X_TPA_START)
4425 pci_unmap_single(bp->pdev,
4426 pci_unmap_addr(rx_buf, mapping),
4428 PCI_DMA_FROMDEVICE);
4435 static void bnx2x_init_rx_rings(struct bnx2x *bp)
4437 int func = BP_FUNC(bp);
4438 int max_agg_queues = CHIP_IS_E1(bp) ? ETH_MAX_AGGREGATION_QUEUES_E1 :
4439 ETH_MAX_AGGREGATION_QUEUES_E1H;
4440 u16 ring_prod, cqe_ring_prod;
4443 bp->rx_buf_size = bp->dev->mtu + ETH_OVREHEAD + BNX2X_RX_ALIGN;
4445 "mtu %d rx_buf_size %d\n", bp->dev->mtu, bp->rx_buf_size);
4447 if (bp->flags & TPA_ENABLE_FLAG) {
4449 for_each_rx_queue(bp, j) {
4450 struct bnx2x_fastpath *fp = &bp->fp[j];
4452 for (i = 0; i < max_agg_queues; i++) {
4453 fp->tpa_pool[i].skb =
4454 netdev_alloc_skb(bp->dev, bp->rx_buf_size);
4455 if (!fp->tpa_pool[i].skb) {
4456 BNX2X_ERR("Failed to allocate TPA "
4457 "skb pool for queue[%d] - "
4458 "disabling TPA on this "
4460 bnx2x_free_tpa_pool(bp, fp, i);
4461 fp->disable_tpa = 1;
4464 pci_unmap_addr_set((struct sw_rx_bd *)
4465 &bp->fp->tpa_pool[i],
4467 fp->tpa_state[i] = BNX2X_TPA_STOP;
4472 for_each_rx_queue(bp, j) {
4473 struct bnx2x_fastpath *fp = &bp->fp[j];
4476 fp->rx_cons_sb = BNX2X_RX_SB_INDEX;
4477 fp->rx_bd_cons_sb = BNX2X_RX_SB_BD_INDEX;
4479 /* "next page" elements initialization */
4481 for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
4482 struct eth_rx_sge *sge;
4484 sge = &fp->rx_sge_ring[RX_SGE_CNT * i - 2];
4486 cpu_to_le32(U64_HI(fp->rx_sge_mapping +
4487 BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
4489 cpu_to_le32(U64_LO(fp->rx_sge_mapping +
4490 BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
4493 bnx2x_init_sge_ring_bit_mask(fp);
4496 for (i = 1; i <= NUM_RX_RINGS; i++) {
4497 struct eth_rx_bd *rx_bd;
4499 rx_bd = &fp->rx_desc_ring[RX_DESC_CNT * i - 2];
4501 cpu_to_le32(U64_HI(fp->rx_desc_mapping +
4502 BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
4504 cpu_to_le32(U64_LO(fp->rx_desc_mapping +
4505 BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
4509 for (i = 1; i <= NUM_RCQ_RINGS; i++) {
4510 struct eth_rx_cqe_next_page *nextpg;
4512 nextpg = (struct eth_rx_cqe_next_page *)
4513 &fp->rx_comp_ring[RCQ_DESC_CNT * i - 1];
4515 cpu_to_le32(U64_HI(fp->rx_comp_mapping +
4516 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
4518 cpu_to_le32(U64_LO(fp->rx_comp_mapping +
4519 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
4522 /* Allocate SGEs and initialize the ring elements */
4523 for (i = 0, ring_prod = 0;
4524 i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {
4526 if (bnx2x_alloc_rx_sge(bp, fp, ring_prod) < 0) {
4527 BNX2X_ERR("was only able to allocate "
4529 BNX2X_ERR("disabling TPA for queue[%d]\n", j);
4530 /* Cleanup already allocated elements */
4531 bnx2x_free_rx_sge_range(bp, fp, ring_prod);
4532 bnx2x_free_tpa_pool(bp, fp, max_agg_queues);
4533 fp->disable_tpa = 1;
4537 ring_prod = NEXT_SGE_IDX(ring_prod);
4539 fp->rx_sge_prod = ring_prod;
4541 /* Allocate BDs and initialize BD ring */
4542 fp->rx_comp_cons = 0;
4543 cqe_ring_prod = ring_prod = 0;
4544 for (i = 0; i < bp->rx_ring_size; i++) {
4545 if (bnx2x_alloc_rx_skb(bp, fp, ring_prod) < 0) {
4546 BNX2X_ERR("was only able to allocate "
4547 "%d rx skbs on queue[%d]\n", i, j);
4548 fp->eth_q_stats.rx_skb_alloc_failed++;
4551 ring_prod = NEXT_RX_IDX(ring_prod);
4552 cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
4553 WARN_ON(ring_prod <= i);
4556 fp->rx_bd_prod = ring_prod;
4557 /* must not have more available CQEs than BDs */
4558 fp->rx_comp_prod = min((u16)(NUM_RCQ_RINGS*RCQ_DESC_CNT),
4560 fp->rx_pkt = fp->rx_calls = 0;
4563 * this will generate an interrupt (to the TSTORM)
4564 * must only be done after chip is initialized
4566 bnx2x_update_rx_prod(bp, fp, ring_prod, fp->rx_comp_prod,
4571 REG_WR(bp, BAR_USTRORM_INTMEM +
4572 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
4573 U64_LO(fp->rx_comp_mapping));
4574 REG_WR(bp, BAR_USTRORM_INTMEM +
4575 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
4576 U64_HI(fp->rx_comp_mapping));
4580 static void bnx2x_init_tx_ring(struct bnx2x *bp)
4584 for_each_tx_queue(bp, j) {
4585 struct bnx2x_fastpath *fp = &bp->fp[j];
4587 for (i = 1; i <= NUM_TX_RINGS; i++) {
4588 struct eth_tx_bd *tx_bd =
4589 &fp->tx_desc_ring[TX_DESC_CNT * i - 1];
4592 cpu_to_le32(U64_HI(fp->tx_desc_mapping +
4593 BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
4595 cpu_to_le32(U64_LO(fp->tx_desc_mapping +
4596 BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
4599 fp->tx_pkt_prod = 0;
4600 fp->tx_pkt_cons = 0;
4603 fp->tx_cons_sb = BNX2X_TX_SB_INDEX;
4608 static void bnx2x_init_sp_ring(struct bnx2x *bp)
4610 int func = BP_FUNC(bp);
4612 spin_lock_init(&bp->spq_lock);
4614 bp->spq_left = MAX_SPQ_PENDING;
4615 bp->spq_prod_idx = 0;
4616 bp->dsb_sp_prod = BNX2X_SP_DSB_INDEX;
4617 bp->spq_prod_bd = bp->spq;
4618 bp->spq_last_bd = bp->spq_prod_bd + MAX_SP_DESC_CNT;
4620 REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func),
4621 U64_LO(bp->spq_mapping));
4623 XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func) + 4,
4624 U64_HI(bp->spq_mapping));
4626 REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PROD_OFFSET(func),
4630 static void bnx2x_init_context(struct bnx2x *bp)
4634 for_each_queue(bp, i) {
4635 struct eth_context *context = bnx2x_sp(bp, context[i].eth);
4636 struct bnx2x_fastpath *fp = &bp->fp[i];
4637 u8 cl_id = fp->cl_id;
4638 u8 sb_id = FP_SB_ID(fp);
4640 context->ustorm_st_context.common.sb_index_numbers =
4641 BNX2X_RX_SB_INDEX_NUM;
4642 context->ustorm_st_context.common.clientId = FP_CL_ID(fp);
4643 context->ustorm_st_context.common.status_block_id = sb_id;
4644 context->ustorm_st_context.common.flags =
4645 (USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_MC_ALIGNMENT |
4646 USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_STATISTICS);
4647 context->ustorm_st_context.common.statistics_counter_id =
4649 context->ustorm_st_context.common.mc_alignment_log_size =
4650 BNX2X_RX_ALIGN_SHIFT;
4651 context->ustorm_st_context.common.bd_buff_size =
4653 context->ustorm_st_context.common.bd_page_base_hi =
4654 U64_HI(fp->rx_desc_mapping);
4655 context->ustorm_st_context.common.bd_page_base_lo =
4656 U64_LO(fp->rx_desc_mapping);
4657 if (!fp->disable_tpa) {
4658 context->ustorm_st_context.common.flags |=
4659 (USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_TPA |
4660 USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_SGE_RING);
4661 context->ustorm_st_context.common.sge_buff_size =
4662 (u16)min((u32)SGE_PAGE_SIZE*PAGES_PER_SGE,
4664 context->ustorm_st_context.common.sge_page_base_hi =
4665 U64_HI(fp->rx_sge_mapping);
4666 context->ustorm_st_context.common.sge_page_base_lo =
4667 U64_LO(fp->rx_sge_mapping);
4670 context->ustorm_ag_context.cdu_usage =
4671 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
4672 CDU_REGION_NUMBER_UCM_AG,
4673 ETH_CONNECTION_TYPE);
4675 context->xstorm_st_context.tx_bd_page_base_hi =
4676 U64_HI(fp->tx_desc_mapping);
4677 context->xstorm_st_context.tx_bd_page_base_lo =
4678 U64_LO(fp->tx_desc_mapping);
4679 context->xstorm_st_context.db_data_addr_hi =
4680 U64_HI(fp->tx_prods_mapping);
4681 context->xstorm_st_context.db_data_addr_lo =
4682 U64_LO(fp->tx_prods_mapping);
4683 context->xstorm_st_context.statistics_data = (fp->cl_id |
4684 XSTORM_ETH_ST_CONTEXT_STATISTICS_ENABLE);
4685 context->cstorm_st_context.sb_index_number =
4686 C_SB_ETH_TX_CQ_INDEX;
4687 context->cstorm_st_context.status_block_id = sb_id;
4689 context->xstorm_ag_context.cdu_reserved =
4690 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
4691 CDU_REGION_NUMBER_XCM_AG,
4692 ETH_CONNECTION_TYPE);
4696 static void bnx2x_init_ind_table(struct bnx2x *bp)
4698 int func = BP_FUNC(bp);
4701 if (bp->multi_mode == ETH_RSS_MODE_DISABLED)
4705 "Initializing indirection table multi_mode %d\n", bp->multi_mode);
4706 for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++)
4707 REG_WR8(bp, BAR_TSTRORM_INTMEM +
4708 TSTORM_INDIRECTION_TABLE_OFFSET(func) + i,
4709 BP_CL_ID(bp) + (i % bp->num_rx_queues));
4712 static void bnx2x_set_client_config(struct bnx2x *bp)
4714 struct tstorm_eth_client_config tstorm_client = {0};
4715 int port = BP_PORT(bp);
4718 tstorm_client.mtu = bp->dev->mtu;
4719 tstorm_client.config_flags =
4720 (TSTORM_ETH_CLIENT_CONFIG_STATSITICS_ENABLE |
4721 TSTORM_ETH_CLIENT_CONFIG_E1HOV_REM_ENABLE);
4723 if (bp->rx_mode && bp->vlgrp && (bp->flags & HW_VLAN_RX_FLAG)) {
4724 tstorm_client.config_flags |=
4725 TSTORM_ETH_CLIENT_CONFIG_VLAN_REM_ENABLE;
4726 DP(NETIF_MSG_IFUP, "vlan removal enabled\n");
4730 if (bp->flags & TPA_ENABLE_FLAG) {
4731 tstorm_client.max_sges_for_packet =
4732 SGE_PAGE_ALIGN(tstorm_client.mtu) >> SGE_PAGE_SHIFT;
4733 tstorm_client.max_sges_for_packet =
4734 ((tstorm_client.max_sges_for_packet +
4735 PAGES_PER_SGE - 1) & (~(PAGES_PER_SGE - 1))) >>
4736 PAGES_PER_SGE_SHIFT;
4738 tstorm_client.config_flags |=
4739 TSTORM_ETH_CLIENT_CONFIG_ENABLE_SGE_RING;
4742 for_each_queue(bp, i) {
4743 tstorm_client.statistics_counter_id = bp->fp[i].cl_id;
4745 REG_WR(bp, BAR_TSTRORM_INTMEM +
4746 TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id),
4747 ((u32 *)&tstorm_client)[0]);
4748 REG_WR(bp, BAR_TSTRORM_INTMEM +
4749 TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id) + 4,
4750 ((u32 *)&tstorm_client)[1]);
4753 DP(BNX2X_MSG_OFF, "tstorm_client: 0x%08x 0x%08x\n",
4754 ((u32 *)&tstorm_client)[0], ((u32 *)&tstorm_client)[1]);
4757 static void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
4759 struct tstorm_eth_mac_filter_config tstorm_mac_filter = {0};
4760 int mode = bp->rx_mode;
4761 int mask = (1 << BP_L_ID(bp));
4762 int func = BP_FUNC(bp);
4765 DP(NETIF_MSG_IFUP, "rx mode %d mask 0x%x\n", mode, mask);
4768 case BNX2X_RX_MODE_NONE: /* no Rx */
4769 tstorm_mac_filter.ucast_drop_all = mask;
4770 tstorm_mac_filter.mcast_drop_all = mask;
4771 tstorm_mac_filter.bcast_drop_all = mask;
4773 case BNX2X_RX_MODE_NORMAL:
4774 tstorm_mac_filter.bcast_accept_all = mask;
4776 case BNX2X_RX_MODE_ALLMULTI:
4777 tstorm_mac_filter.mcast_accept_all = mask;
4778 tstorm_mac_filter.bcast_accept_all = mask;
4780 case BNX2X_RX_MODE_PROMISC:
4781 tstorm_mac_filter.ucast_accept_all = mask;
4782 tstorm_mac_filter.mcast_accept_all = mask;
4783 tstorm_mac_filter.bcast_accept_all = mask;
4786 BNX2X_ERR("BAD rx mode (%d)\n", mode);
4790 for (i = 0; i < sizeof(struct tstorm_eth_mac_filter_config)/4; i++) {
4791 REG_WR(bp, BAR_TSTRORM_INTMEM +
4792 TSTORM_MAC_FILTER_CONFIG_OFFSET(func) + i * 4,
4793 ((u32 *)&tstorm_mac_filter)[i]);
4795 /* DP(NETIF_MSG_IFUP, "tstorm_mac_filter[%d]: 0x%08x\n", i,
4796 ((u32 *)&tstorm_mac_filter)[i]); */
4799 if (mode != BNX2X_RX_MODE_NONE)
4800 bnx2x_set_client_config(bp);
4803 static void bnx2x_init_internal_common(struct bnx2x *bp)
4807 if (bp->flags & TPA_ENABLE_FLAG) {
4808 struct tstorm_eth_tpa_exist tpa = {0};
4812 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_TPA_EXIST_OFFSET,
4814 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_TPA_EXIST_OFFSET + 4,
4818 /* Zero this manually as its initialization is
4819 currently missing in the initTool */
4820 for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++)
4821 REG_WR(bp, BAR_USTRORM_INTMEM +
4822 USTORM_AGG_DATA_OFFSET + i * 4, 0);
4825 static void bnx2x_init_internal_port(struct bnx2x *bp)
4827 int port = BP_PORT(bp);
4829 REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
4830 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
4831 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
4832 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
4835 /* Calculates the sum of vn_min_rates.
4836 It's needed for further normalizing of the min_rates.
4838 sum of vn_min_rates.
4840 0 - if all the min_rates are 0.
4841 In the later case fainess algorithm should be deactivated.
4842 If not all min_rates are zero then those that are zeroes will be set to 1.
4844 static void bnx2x_calc_vn_weight_sum(struct bnx2x *bp)
4847 int port = BP_PORT(bp);
4850 bp->vn_weight_sum = 0;
4851 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
4852 int func = 2*vn + port;
4854 SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
4855 u32 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
4856 FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
4858 /* Skip hidden vns */
4859 if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE)
4862 /* If min rate is zero - set it to 1 */
4864 vn_min_rate = DEF_MIN_RATE;
4868 bp->vn_weight_sum += vn_min_rate;
4871 /* ... only if all min rates are zeros - disable fairness */
4873 bp->vn_weight_sum = 0;
4876 static void bnx2x_init_internal_func(struct bnx2x *bp)
4878 struct tstorm_eth_function_common_config tstorm_config = {0};
4879 struct stats_indication_flags stats_flags = {0};
4880 int port = BP_PORT(bp);
4881 int func = BP_FUNC(bp);
4887 tstorm_config.config_flags = MULTI_FLAGS(bp);
4888 tstorm_config.rss_result_mask = MULTI_MASK;
4891 tstorm_config.config_flags |=
4892 TSTORM_ETH_FUNCTION_COMMON_CONFIG_E1HOV_IN_CAM;
4894 tstorm_config.leading_client_id = BP_L_ID(bp);
4896 REG_WR(bp, BAR_TSTRORM_INTMEM +
4897 TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(func),
4898 (*(u32 *)&tstorm_config));
4900 bp->rx_mode = BNX2X_RX_MODE_NONE; /* no rx until link is up */
4901 bnx2x_set_storm_rx_mode(bp);
4903 for_each_queue(bp, i) {
4904 u8 cl_id = bp->fp[i].cl_id;
4906 /* reset xstorm per client statistics */
4907 offset = BAR_XSTRORM_INTMEM +
4908 XSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
4910 j < sizeof(struct xstorm_per_client_stats) / 4; j++)
4911 REG_WR(bp, offset + j*4, 0);
4913 /* reset tstorm per client statistics */
4914 offset = BAR_TSTRORM_INTMEM +
4915 TSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
4917 j < sizeof(struct tstorm_per_client_stats) / 4; j++)
4918 REG_WR(bp, offset + j*4, 0);
4920 /* reset ustorm per client statistics */
4921 offset = BAR_USTRORM_INTMEM +
4922 USTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
4924 j < sizeof(struct ustorm_per_client_stats) / 4; j++)
4925 REG_WR(bp, offset + j*4, 0);
4928 /* Init statistics related context */
4929 stats_flags.collect_eth = 1;
4931 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func),
4932 ((u32 *)&stats_flags)[0]);
4933 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func) + 4,
4934 ((u32 *)&stats_flags)[1]);
4936 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func),
4937 ((u32 *)&stats_flags)[0]);
4938 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func) + 4,
4939 ((u32 *)&stats_flags)[1]);
4941 REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(func),
4942 ((u32 *)&stats_flags)[0]);
4943 REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(func) + 4,
4944 ((u32 *)&stats_flags)[1]);
4946 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func),
4947 ((u32 *)&stats_flags)[0]);
4948 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func) + 4,
4949 ((u32 *)&stats_flags)[1]);
4951 REG_WR(bp, BAR_XSTRORM_INTMEM +
4952 XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
4953 U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
4954 REG_WR(bp, BAR_XSTRORM_INTMEM +
4955 XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
4956 U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
4958 REG_WR(bp, BAR_TSTRORM_INTMEM +
4959 TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
4960 U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
4961 REG_WR(bp, BAR_TSTRORM_INTMEM +
4962 TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
4963 U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
4965 REG_WR(bp, BAR_USTRORM_INTMEM +
4966 USTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
4967 U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
4968 REG_WR(bp, BAR_USTRORM_INTMEM +
4969 USTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
4970 U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
4972 if (CHIP_IS_E1H(bp)) {
4973 REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNCTION_MODE_OFFSET,
4975 REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNCTION_MODE_OFFSET,
4977 REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNCTION_MODE_OFFSET,
4979 REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNCTION_MODE_OFFSET,
4982 REG_WR16(bp, BAR_XSTRORM_INTMEM + XSTORM_E1HOV_OFFSET(func),
4986 /* Init CQ ring mapping and aggregation size, the FW limit is 8 frags */
4988 min((u32)(min((u32)8, (u32)MAX_SKB_FRAGS) *
4989 SGE_PAGE_SIZE * PAGES_PER_SGE),
4991 for_each_rx_queue(bp, i) {
4992 struct bnx2x_fastpath *fp = &bp->fp[i];
4994 REG_WR(bp, BAR_USTRORM_INTMEM +
4995 USTORM_CQE_PAGE_BASE_OFFSET(port, FP_CL_ID(fp)),
4996 U64_LO(fp->rx_comp_mapping));
4997 REG_WR(bp, BAR_USTRORM_INTMEM +
4998 USTORM_CQE_PAGE_BASE_OFFSET(port, FP_CL_ID(fp)) + 4,
4999 U64_HI(fp->rx_comp_mapping));
5001 REG_WR16(bp, BAR_USTRORM_INTMEM +
5002 USTORM_MAX_AGG_SIZE_OFFSET(port, FP_CL_ID(fp)),
5006 /* dropless flow control */
5007 if (CHIP_IS_E1H(bp)) {
5008 struct ustorm_eth_rx_pause_data_e1h rx_pause = {0};
5010 rx_pause.bd_thr_low = 250;
5011 rx_pause.cqe_thr_low = 250;
5013 rx_pause.sge_thr_low = 0;
5014 rx_pause.bd_thr_high = 350;
5015 rx_pause.cqe_thr_high = 350;
5016 rx_pause.sge_thr_high = 0;
5018 for_each_rx_queue(bp, i) {
5019 struct bnx2x_fastpath *fp = &bp->fp[i];
5021 if (!fp->disable_tpa) {
5022 rx_pause.sge_thr_low = 150;
5023 rx_pause.sge_thr_high = 250;
5027 offset = BAR_USTRORM_INTMEM +
5028 USTORM_ETH_RING_PAUSE_DATA_OFFSET(port,
5031 j < sizeof(struct ustorm_eth_rx_pause_data_e1h)/4;
5033 REG_WR(bp, offset + j*4,
5034 ((u32 *)&rx_pause)[j]);
5038 memset(&(bp->cmng), 0, sizeof(struct cmng_struct_per_port));
5040 /* Init rate shaping and fairness contexts */
5044 /* During init there is no active link
5045 Until link is up, set link rate to 10Gbps */
5046 bp->link_vars.line_speed = SPEED_10000;
5047 bnx2x_init_port_minmax(bp);
5049 bnx2x_calc_vn_weight_sum(bp);
5051 for (vn = VN_0; vn < E1HVN_MAX; vn++)
5052 bnx2x_init_vn_minmax(bp, 2*vn + port);
5054 /* Enable rate shaping and fairness */
5055 bp->cmng.flags.cmng_enables =
5056 CMNG_FLAGS_PER_PORT_RATE_SHAPING_VN;
5057 if (bp->vn_weight_sum)
5058 bp->cmng.flags.cmng_enables |=
5059 CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
5061 DP(NETIF_MSG_IFUP, "All MIN values are zeroes"
5062 " fairness will be disabled\n");
5064 /* rate shaping and fairness are disabled */
5066 "single function mode minmax will be disabled\n");
5070 /* Store it to internal memory */
5072 for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++)
5073 REG_WR(bp, BAR_XSTRORM_INTMEM +
5074 XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i * 4,
5075 ((u32 *)(&bp->cmng))[i]);
5078 static void bnx2x_init_internal(struct bnx2x *bp, u32 load_code)
5080 switch (load_code) {
5081 case FW_MSG_CODE_DRV_LOAD_COMMON:
5082 bnx2x_init_internal_common(bp);
5085 case FW_MSG_CODE_DRV_LOAD_PORT:
5086 bnx2x_init_internal_port(bp);
5089 case FW_MSG_CODE_DRV_LOAD_FUNCTION:
5090 bnx2x_init_internal_func(bp);
5094 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
5099 static void bnx2x_nic_init(struct bnx2x *bp, u32 load_code)
5103 for_each_queue(bp, i) {
5104 struct bnx2x_fastpath *fp = &bp->fp[i];
5107 fp->state = BNX2X_FP_STATE_CLOSED;
5109 fp->cl_id = BP_L_ID(bp) + i;
5110 fp->sb_id = fp->cl_id;
5112 "bnx2x_init_sb(%p,%p) index %d cl_id %d sb %d\n",
5113 bp, fp->status_blk, i, FP_CL_ID(fp), FP_SB_ID(fp));
5114 bnx2x_init_sb(bp, fp->status_blk, fp->status_blk_mapping,
5116 bnx2x_update_fpsb_idx(fp);
5119 bnx2x_init_def_sb(bp, bp->def_status_blk, bp->def_status_blk_mapping,
5121 bnx2x_update_dsb_idx(bp);
5122 bnx2x_update_coalesce(bp);
5123 bnx2x_init_rx_rings(bp);
5124 bnx2x_init_tx_ring(bp);
5125 bnx2x_init_sp_ring(bp);
5126 bnx2x_init_context(bp);
5127 bnx2x_init_internal(bp, load_code);
5128 bnx2x_init_ind_table(bp);
5129 bnx2x_stats_init(bp);
5131 /* At this point, we are ready for interrupts */
5132 atomic_set(&bp->intr_sem, 0);
5134 /* flush all before enabling interrupts */
5138 bnx2x_int_enable(bp);
5141 /* end of nic init */
5144 * gzip service functions
5147 static int bnx2x_gunzip_init(struct bnx2x *bp)
5149 bp->gunzip_buf = pci_alloc_consistent(bp->pdev, FW_BUF_SIZE,
5150 &bp->gunzip_mapping);
5151 if (bp->gunzip_buf == NULL)
5154 bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL);
5155 if (bp->strm == NULL)
5158 bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(),
5160 if (bp->strm->workspace == NULL)
5170 pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
5171 bp->gunzip_mapping);
5172 bp->gunzip_buf = NULL;
5175 printk(KERN_ERR PFX "%s: Cannot allocate firmware buffer for"
5176 " un-compression\n", bp->dev->name);
5180 static void bnx2x_gunzip_end(struct bnx2x *bp)
5182 kfree(bp->strm->workspace);
5187 if (bp->gunzip_buf) {
5188 pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
5189 bp->gunzip_mapping);
5190 bp->gunzip_buf = NULL;
5194 static int bnx2x_gunzip(struct bnx2x *bp, u8 *zbuf, int len)
5198 /* check gzip header */
5199 if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED))
5206 if (zbuf[3] & FNAME)
5207 while ((zbuf[n++] != 0) && (n < len));
5209 bp->strm->next_in = zbuf + n;
5210 bp->strm->avail_in = len - n;
5211 bp->strm->next_out = bp->gunzip_buf;
5212 bp->strm->avail_out = FW_BUF_SIZE;
5214 rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
5218 rc = zlib_inflate(bp->strm, Z_FINISH);
5219 if ((rc != Z_OK) && (rc != Z_STREAM_END))
5220 printk(KERN_ERR PFX "%s: Firmware decompression error: %s\n",
5221 bp->dev->name, bp->strm->msg);
5223 bp->gunzip_outlen = (FW_BUF_SIZE - bp->strm->avail_out);
5224 if (bp->gunzip_outlen & 0x3)
5225 printk(KERN_ERR PFX "%s: Firmware decompression error:"
5226 " gunzip_outlen (%d) not aligned\n",
5227 bp->dev->name, bp->gunzip_outlen);
5228 bp->gunzip_outlen >>= 2;
5230 zlib_inflateEnd(bp->strm);
5232 if (rc == Z_STREAM_END)
5238 /* nic load/unload */
5241 * General service functions
5244 /* send a NIG loopback debug packet */
5245 static void bnx2x_lb_pckt(struct bnx2x *bp)
5249 /* Ethernet source and destination addresses */
5250 wb_write[0] = 0x55555555;
5251 wb_write[1] = 0x55555555;
5252 wb_write[2] = 0x20; /* SOP */
5253 REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
5255 /* NON-IP protocol */
5256 wb_write[0] = 0x09000000;
5257 wb_write[1] = 0x55555555;
5258 wb_write[2] = 0x10; /* EOP, eop_bvalid = 0 */
5259 REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
5262 /* some of the internal memories
5263 * are not directly readable from the driver
5264 * to test them we send debug packets
5266 static int bnx2x_int_mem_test(struct bnx2x *bp)
5272 if (CHIP_REV_IS_FPGA(bp))
5274 else if (CHIP_REV_IS_EMUL(bp))
5279 DP(NETIF_MSG_HW, "start part1\n");
5281 /* Disable inputs of parser neighbor blocks */
5282 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
5283 REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
5284 REG_WR(bp, CFC_REG_DEBUG0, 0x1);
5285 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
5287 /* Write 0 to parser credits for CFC search request */
5288 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
5290 /* send Ethernet packet */
5293 /* TODO do i reset NIG statistic? */
5294 /* Wait until NIG register shows 1 packet of size 0x10 */
5295 count = 1000 * factor;
5298 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5299 val = *bnx2x_sp(bp, wb_data[0]);
5307 BNX2X_ERR("NIG timeout val = 0x%x\n", val);
5311 /* Wait until PRS register shows 1 packet */
5312 count = 1000 * factor;
5314 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5322 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
5326 /* Reset and init BRB, PRS */
5327 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
5329 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
5331 bnx2x_init_block(bp, BRB1_COMMON_START, BRB1_COMMON_END);
5332 bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END);
5334 DP(NETIF_MSG_HW, "part2\n");
5336 /* Disable inputs of parser neighbor blocks */
5337 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
5338 REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
5339 REG_WR(bp, CFC_REG_DEBUG0, 0x1);
5340 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
5342 /* Write 0 to parser credits for CFC search request */
5343 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
5345 /* send 10 Ethernet packets */
5346 for (i = 0; i < 10; i++)
5349 /* Wait until NIG register shows 10 + 1
5350 packets of size 11*0x10 = 0xb0 */
5351 count = 1000 * factor;
5354 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5355 val = *bnx2x_sp(bp, wb_data[0]);
5363 BNX2X_ERR("NIG timeout val = 0x%x\n", val);
5367 /* Wait until PRS register shows 2 packets */
5368 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5370 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
5372 /* Write 1 to parser credits for CFC search request */
5373 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x1);
5375 /* Wait until PRS register shows 3 packets */
5376 msleep(10 * factor);
5377 /* Wait until NIG register shows 1 packet of size 0x10 */
5378 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5380 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
5382 /* clear NIG EOP FIFO */
5383 for (i = 0; i < 11; i++)
5384 REG_RD(bp, NIG_REG_INGRESS_EOP_LB_FIFO);
5385 val = REG_RD(bp, NIG_REG_INGRESS_EOP_LB_EMPTY);
5387 BNX2X_ERR("clear of NIG failed\n");
5391 /* Reset and init BRB, PRS, NIG */
5392 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
5394 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
5396 bnx2x_init_block(bp, BRB1_COMMON_START, BRB1_COMMON_END);
5397 bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END);
5400 REG_WR(bp, PRS_REG_NIC_MODE, 1);
5403 /* Enable inputs of parser neighbor blocks */
5404 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x7fffffff);
5405 REG_WR(bp, TCM_REG_PRS_IFEN, 0x1);
5406 REG_WR(bp, CFC_REG_DEBUG0, 0x0);
5407 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x1);
5409 DP(NETIF_MSG_HW, "done\n");
5414 static void enable_blocks_attention(struct bnx2x *bp)
5416 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
5417 REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0);
5418 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
5419 REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
5420 REG_WR(bp, QM_REG_QM_INT_MASK, 0);
5421 REG_WR(bp, TM_REG_TM_INT_MASK, 0);
5422 REG_WR(bp, XSDM_REG_XSDM_INT_MASK_0, 0);
5423 REG_WR(bp, XSDM_REG_XSDM_INT_MASK_1, 0);
5424 REG_WR(bp, XCM_REG_XCM_INT_MASK, 0);
5425 /* REG_WR(bp, XSEM_REG_XSEM_INT_MASK_0, 0); */
5426 /* REG_WR(bp, XSEM_REG_XSEM_INT_MASK_1, 0); */
5427 REG_WR(bp, USDM_REG_USDM_INT_MASK_0, 0);
5428 REG_WR(bp, USDM_REG_USDM_INT_MASK_1, 0);
5429 REG_WR(bp, UCM_REG_UCM_INT_MASK, 0);
5430 /* REG_WR(bp, USEM_REG_USEM_INT_MASK_0, 0); */
5431 /* REG_WR(bp, USEM_REG_USEM_INT_MASK_1, 0); */
5432 REG_WR(bp, GRCBASE_UPB + PB_REG_PB_INT_MASK, 0);
5433 REG_WR(bp, CSDM_REG_CSDM_INT_MASK_0, 0);
5434 REG_WR(bp, CSDM_REG_CSDM_INT_MASK_1, 0);
5435 REG_WR(bp, CCM_REG_CCM_INT_MASK, 0);
5436 /* REG_WR(bp, CSEM_REG_CSEM_INT_MASK_0, 0); */
5437 /* REG_WR(bp, CSEM_REG_CSEM_INT_MASK_1, 0); */
5438 if (CHIP_REV_IS_FPGA(bp))
5439 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x580000);
5441 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x480000);
5442 REG_WR(bp, TSDM_REG_TSDM_INT_MASK_0, 0);
5443 REG_WR(bp, TSDM_REG_TSDM_INT_MASK_1, 0);
5444 REG_WR(bp, TCM_REG_TCM_INT_MASK, 0);
5445 /* REG_WR(bp, TSEM_REG_TSEM_INT_MASK_0, 0); */
5446 /* REG_WR(bp, TSEM_REG_TSEM_INT_MASK_1, 0); */
5447 REG_WR(bp, CDU_REG_CDU_INT_MASK, 0);
5448 REG_WR(bp, DMAE_REG_DMAE_INT_MASK, 0);
5449 /* REG_WR(bp, MISC_REG_MISC_INT_MASK, 0); */
5450 REG_WR(bp, PBF_REG_PBF_INT_MASK, 0X18); /* bit 3,4 masked */
5454 static void bnx2x_reset_common(struct bnx2x *bp)
5457 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
5459 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, 0x1403);
5462 static int bnx2x_init_common(struct bnx2x *bp)
5466 DP(BNX2X_MSG_MCP, "starting common init func %d\n", BP_FUNC(bp));
5468 bnx2x_reset_common(bp);
5469 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0xffffffff);
5470 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, 0xfffc);
5472 bnx2x_init_block(bp, MISC_COMMON_START, MISC_COMMON_END);
5473 if (CHIP_IS_E1H(bp))
5474 REG_WR(bp, MISC_REG_E1HMF_MODE, IS_E1HMF(bp));
5476 REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x100);
5478 REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x0);
5480 bnx2x_init_block(bp, PXP_COMMON_START, PXP_COMMON_END);
5481 if (CHIP_IS_E1(bp)) {
5482 /* enable HW interrupt from PXP on USDM overflow
5483 bit 16 on INT_MASK_0 */
5484 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
5487 bnx2x_init_block(bp, PXP2_COMMON_START, PXP2_COMMON_END);
5491 REG_WR(bp, PXP2_REG_RQ_QM_ENDIAN_M, 1);
5492 REG_WR(bp, PXP2_REG_RQ_TM_ENDIAN_M, 1);
5493 REG_WR(bp, PXP2_REG_RQ_SRC_ENDIAN_M, 1);
5494 REG_WR(bp, PXP2_REG_RQ_CDU_ENDIAN_M, 1);
5495 REG_WR(bp, PXP2_REG_RQ_DBG_ENDIAN_M, 1);
5496 /* make sure this value is 0 */
5497 REG_WR(bp, PXP2_REG_RQ_HC_ENDIAN_M, 0);
5499 /* REG_WR(bp, PXP2_REG_RD_PBF_SWAP_MODE, 1); */
5500 REG_WR(bp, PXP2_REG_RD_QM_SWAP_MODE, 1);
5501 REG_WR(bp, PXP2_REG_RD_TM_SWAP_MODE, 1);
5502 REG_WR(bp, PXP2_REG_RD_SRC_SWAP_MODE, 1);
5503 REG_WR(bp, PXP2_REG_RD_CDURD_SWAP_MODE, 1);
5506 REG_WR(bp, PXP2_REG_RQ_CDU_P_SIZE, 2);
5508 REG_WR(bp, PXP2_REG_RQ_TM_P_SIZE, 5);
5509 REG_WR(bp, PXP2_REG_RQ_QM_P_SIZE, 5);
5510 REG_WR(bp, PXP2_REG_RQ_SRC_P_SIZE, 5);
5513 if (CHIP_REV_IS_FPGA(bp) && CHIP_IS_E1H(bp))
5514 REG_WR(bp, PXP2_REG_PGL_TAGS_LIMIT, 0x1);
5516 /* let the HW do it's magic ... */
5518 /* finish PXP init */
5519 val = REG_RD(bp, PXP2_REG_RQ_CFG_DONE);
5521 BNX2X_ERR("PXP2 CFG failed\n");
5524 val = REG_RD(bp, PXP2_REG_RD_INIT_DONE);
5526 BNX2X_ERR("PXP2 RD_INIT failed\n");
5530 REG_WR(bp, PXP2_REG_RQ_DISABLE_INPUTS, 0);
5531 REG_WR(bp, PXP2_REG_RD_DISABLE_INPUTS, 0);
5533 bnx2x_init_block(bp, DMAE_COMMON_START, DMAE_COMMON_END);
5535 /* clean the DMAE memory */
5537 bnx2x_init_fill(bp, TSEM_REG_PRAM, 0, 8);
5539 bnx2x_init_block(bp, TCM_COMMON_START, TCM_COMMON_END);
5540 bnx2x_init_block(bp, UCM_COMMON_START, UCM_COMMON_END);
5541 bnx2x_init_block(bp, CCM_COMMON_START, CCM_COMMON_END);
5542 bnx2x_init_block(bp, XCM_COMMON_START, XCM_COMMON_END);
5544 bnx2x_read_dmae(bp, XSEM_REG_PASSIVE_BUFFER, 3);
5545 bnx2x_read_dmae(bp, CSEM_REG_PASSIVE_BUFFER, 3);
5546 bnx2x_read_dmae(bp, TSEM_REG_PASSIVE_BUFFER, 3);
5547 bnx2x_read_dmae(bp, USEM_REG_PASSIVE_BUFFER, 3);
5549 bnx2x_init_block(bp, QM_COMMON_START, QM_COMMON_END);
5550 /* soft reset pulse */
5551 REG_WR(bp, QM_REG_SOFT_RESET, 1);
5552 REG_WR(bp, QM_REG_SOFT_RESET, 0);
5555 bnx2x_init_block(bp, TIMERS_COMMON_START, TIMERS_COMMON_END);
5558 bnx2x_init_block(bp, DQ_COMMON_START, DQ_COMMON_END);
5559 REG_WR(bp, DORQ_REG_DPM_CID_OFST, BCM_PAGE_SHIFT);
5560 if (!CHIP_REV_IS_SLOW(bp)) {
5561 /* enable hw interrupt from doorbell Q */
5562 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
5565 bnx2x_init_block(bp, BRB1_COMMON_START, BRB1_COMMON_END);
5566 bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END);
5567 REG_WR(bp, PRS_REG_A_PRSU_20, 0xf);
5569 REG_WR(bp, PRS_REG_NIC_MODE, 1);
5570 if (CHIP_IS_E1H(bp))
5571 REG_WR(bp, PRS_REG_E1HOV_MODE, IS_E1HMF(bp));
5573 bnx2x_init_block(bp, TSDM_COMMON_START, TSDM_COMMON_END);
5574 bnx2x_init_block(bp, CSDM_COMMON_START, CSDM_COMMON_END);
5575 bnx2x_init_block(bp, USDM_COMMON_START, USDM_COMMON_END);
5576 bnx2x_init_block(bp, XSDM_COMMON_START, XSDM_COMMON_END);
5578 if (CHIP_IS_E1H(bp)) {
5579 bnx2x_init_fill(bp, TSTORM_INTMEM_ADDR, 0,
5580 STORM_INTMEM_SIZE_E1H/2);
5582 TSTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
5583 0, STORM_INTMEM_SIZE_E1H/2);
5584 bnx2x_init_fill(bp, CSTORM_INTMEM_ADDR, 0,
5585 STORM_INTMEM_SIZE_E1H/2);
5587 CSTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
5588 0, STORM_INTMEM_SIZE_E1H/2);
5589 bnx2x_init_fill(bp, XSTORM_INTMEM_ADDR, 0,
5590 STORM_INTMEM_SIZE_E1H/2);
5592 XSTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
5593 0, STORM_INTMEM_SIZE_E1H/2);
5594 bnx2x_init_fill(bp, USTORM_INTMEM_ADDR, 0,
5595 STORM_INTMEM_SIZE_E1H/2);
5597 USTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
5598 0, STORM_INTMEM_SIZE_E1H/2);
5600 bnx2x_init_fill(bp, TSTORM_INTMEM_ADDR, 0,
5601 STORM_INTMEM_SIZE_E1);
5602 bnx2x_init_fill(bp, CSTORM_INTMEM_ADDR, 0,
5603 STORM_INTMEM_SIZE_E1);
5604 bnx2x_init_fill(bp, XSTORM_INTMEM_ADDR, 0,
5605 STORM_INTMEM_SIZE_E1);
5606 bnx2x_init_fill(bp, USTORM_INTMEM_ADDR, 0,
5607 STORM_INTMEM_SIZE_E1);
5610 bnx2x_init_block(bp, TSEM_COMMON_START, TSEM_COMMON_END);
5611 bnx2x_init_block(bp, USEM_COMMON_START, USEM_COMMON_END);
5612 bnx2x_init_block(bp, CSEM_COMMON_START, CSEM_COMMON_END);
5613 bnx2x_init_block(bp, XSEM_COMMON_START, XSEM_COMMON_END);
5616 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
5618 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
5621 bnx2x_init_block(bp, UPB_COMMON_START, UPB_COMMON_END);
5622 bnx2x_init_block(bp, XPB_COMMON_START, XPB_COMMON_END);
5623 bnx2x_init_block(bp, PBF_COMMON_START, PBF_COMMON_END);
5625 REG_WR(bp, SRC_REG_SOFT_RST, 1);
5626 for (i = SRC_REG_KEYRSS0_0; i <= SRC_REG_KEYRSS1_9; i += 4) {
5627 REG_WR(bp, i, 0xc0cac01a);
5628 /* TODO: replace with something meaningful */
5630 bnx2x_init_block(bp, SRCH_COMMON_START, SRCH_COMMON_END);
5631 REG_WR(bp, SRC_REG_SOFT_RST, 0);
5633 if (sizeof(union cdu_context) != 1024)
5634 /* we currently assume that a context is 1024 bytes */
5635 printk(KERN_ALERT PFX "please adjust the size of"
5636 " cdu_context(%ld)\n", (long)sizeof(union cdu_context));
5638 bnx2x_init_block(bp, CDU_COMMON_START, CDU_COMMON_END);
5639 val = (4 << 24) + (0 << 12) + 1024;
5640 REG_WR(bp, CDU_REG_CDU_GLOBAL_PARAMS, val);
5641 if (CHIP_IS_E1(bp)) {
5642 /* !!! fix pxp client crdit until excel update */
5643 REG_WR(bp, CDU_REG_CDU_DEBUG, 0x264);
5644 REG_WR(bp, CDU_REG_CDU_DEBUG, 0);
5647 bnx2x_init_block(bp, CFC_COMMON_START, CFC_COMMON_END);
5648 REG_WR(bp, CFC_REG_INIT_REG, 0x7FF);
5649 /* enable context validation interrupt from CFC */
5650 REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
5652 /* set the thresholds to prevent CFC/CDU race */
5653 REG_WR(bp, CFC_REG_DEBUG0, 0x20020000);
5655 bnx2x_init_block(bp, HC_COMMON_START, HC_COMMON_END);
5656 bnx2x_init_block(bp, MISC_AEU_COMMON_START, MISC_AEU_COMMON_END);
5658 /* PXPCS COMMON comes here */
5659 /* Reset PCIE errors for debug */
5660 REG_WR(bp, 0x2814, 0xffffffff);
5661 REG_WR(bp, 0x3820, 0xffffffff);
5663 /* EMAC0 COMMON comes here */
5664 /* EMAC1 COMMON comes here */
5665 /* DBU COMMON comes here */
5666 /* DBG COMMON comes here */
5668 bnx2x_init_block(bp, NIG_COMMON_START, NIG_COMMON_END);
5669 if (CHIP_IS_E1H(bp)) {
5670 REG_WR(bp, NIG_REG_LLH_MF_MODE, IS_E1HMF(bp));
5671 REG_WR(bp, NIG_REG_LLH_E1HOV_MODE, IS_E1HMF(bp));
5674 if (CHIP_REV_IS_SLOW(bp))
5677 /* finish CFC init */
5678 val = reg_poll(bp, CFC_REG_LL_INIT_DONE, 1, 100, 10);
5680 BNX2X_ERR("CFC LL_INIT failed\n");
5683 val = reg_poll(bp, CFC_REG_AC_INIT_DONE, 1, 100, 10);
5685 BNX2X_ERR("CFC AC_INIT failed\n");
5688 val = reg_poll(bp, CFC_REG_CAM_INIT_DONE, 1, 100, 10);
5690 BNX2X_ERR("CFC CAM_INIT failed\n");
5693 REG_WR(bp, CFC_REG_DEBUG0, 0);
5695 /* read NIG statistic
5696 to see if this is our first up since powerup */
5697 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5698 val = *bnx2x_sp(bp, wb_data[0]);
5700 /* do internal memory self test */
5701 if ((CHIP_IS_E1(bp)) && (val == 0) && bnx2x_int_mem_test(bp)) {
5702 BNX2X_ERR("internal mem self test failed\n");
5706 switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
5707 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
5708 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
5709 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
5710 bp->port.need_hw_lock = 1;
5713 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
5714 /* Fan failure is indicated by SPIO 5 */
5715 bnx2x_set_spio(bp, MISC_REGISTERS_SPIO_5,
5716 MISC_REGISTERS_SPIO_INPUT_HI_Z);
5718 /* set to active low mode */
5719 val = REG_RD(bp, MISC_REG_SPIO_INT);
5720 val |= ((1 << MISC_REGISTERS_SPIO_5) <<
5721 MISC_REGISTERS_SPIO_INT_OLD_SET_POS);
5722 REG_WR(bp, MISC_REG_SPIO_INT, val);
5724 /* enable interrupt to signal the IGU */
5725 val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN);
5726 val |= (1 << MISC_REGISTERS_SPIO_5);
5727 REG_WR(bp, MISC_REG_SPIO_EVENT_EN, val);
5734 /* clear PXP2 attentions */
5735 REG_RD(bp, PXP2_REG_PXP2_INT_STS_CLR_0);
5737 enable_blocks_attention(bp);
5739 if (!BP_NOMCP(bp)) {
5740 bnx2x_acquire_phy_lock(bp);
5741 bnx2x_common_init_phy(bp, bp->common.shmem_base);
5742 bnx2x_release_phy_lock(bp);
5744 BNX2X_ERR("Bootcode is missing - can not initialize link\n");
5749 static int bnx2x_init_port(struct bnx2x *bp)
5751 int port = BP_PORT(bp);
5755 DP(BNX2X_MSG_MCP, "starting port init port %x\n", port);
5757 REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
5759 /* Port PXP comes here */
5760 /* Port PXP2 comes here */
5765 wb_write[0] = ONCHIP_ADDR1(bp->timers_mapping);
5766 wb_write[1] = ONCHIP_ADDR2(bp->timers_mapping);
5767 REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
5768 REG_WR(bp, PXP2_REG_PSWRQ_TM0_L2P + func*4, PXP_ONE_ILT(i));
5773 wb_write[0] = ONCHIP_ADDR1(bp->qm_mapping);
5774 wb_write[1] = ONCHIP_ADDR2(bp->qm_mapping);
5775 REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
5776 REG_WR(bp, PXP2_REG_PSWRQ_QM0_L2P + func*4, PXP_ONE_ILT(i));
5781 wb_write[0] = ONCHIP_ADDR1(bp->t1_mapping);
5782 wb_write[1] = ONCHIP_ADDR2(bp->t1_mapping);
5783 REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
5784 REG_WR(bp, PXP2_REG_PSWRQ_SRC0_L2P + func*4, PXP_ONE_ILT(i));
5786 /* Port CMs come here */
5787 bnx2x_init_block(bp, (port ? XCM_PORT1_START : XCM_PORT0_START),
5788 (port ? XCM_PORT1_END : XCM_PORT0_END));
5790 /* Port QM comes here */
5792 REG_WR(bp, TM_REG_LIN0_SCAN_TIME + func*4, 1024/64*20);
5793 REG_WR(bp, TM_REG_LIN0_MAX_ACTIVE_CID + func*4, 31);
5795 bnx2x_init_block(bp, func ? TIMERS_PORT1_START : TIMERS_PORT0_START,
5796 func ? TIMERS_PORT1_END : TIMERS_PORT0_END);
5798 /* Port DQ comes here */
5800 bnx2x_init_block(bp, (port ? BRB1_PORT1_START : BRB1_PORT0_START),
5801 (port ? BRB1_PORT1_END : BRB1_PORT0_END));
5802 if (CHIP_REV_IS_SLOW(bp) && !CHIP_IS_E1H(bp)) {
5803 /* no pause for emulation and FPGA */
5808 low = ((bp->flags & ONE_PORT_FLAG) ? 160 : 246);
5809 else if (bp->dev->mtu > 4096) {
5810 if (bp->flags & ONE_PORT_FLAG)
5814 /* (24*1024 + val*4)/256 */
5815 low = 96 + (val/64) + ((val % 64) ? 1 : 0);
5818 low = ((bp->flags & ONE_PORT_FLAG) ? 80 : 160);
5819 high = low + 56; /* 14*1024/256 */
5821 REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_0 + port*4, low);
5822 REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_0 + port*4, high);
5825 /* Port PRS comes here */
5826 /* Port TSDM comes here */
5827 /* Port CSDM comes here */
5828 /* Port USDM comes here */
5829 /* Port XSDM comes here */
5830 bnx2x_init_block(bp, port ? TSEM_PORT1_START : TSEM_PORT0_START,
5831 port ? TSEM_PORT1_END : TSEM_PORT0_END);
5832 bnx2x_init_block(bp, port ? USEM_PORT1_START : USEM_PORT0_START,
5833 port ? USEM_PORT1_END : USEM_PORT0_END);
5834 bnx2x_init_block(bp, port ? CSEM_PORT1_START : CSEM_PORT0_START,
5835 port ? CSEM_PORT1_END : CSEM_PORT0_END);
5836 bnx2x_init_block(bp, port ? XSEM_PORT1_START : XSEM_PORT0_START,
5837 port ? XSEM_PORT1_END : XSEM_PORT0_END);
5838 /* Port UPB comes here */
5839 /* Port XPB comes here */
5841 bnx2x_init_block(bp, port ? PBF_PORT1_START : PBF_PORT0_START,
5842 port ? PBF_PORT1_END : PBF_PORT0_END);
5844 /* configure PBF to work without PAUSE mtu 9000 */
5845 REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 0);
5847 /* update threshold */
5848 REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, (9040/16));
5849 /* update init credit */
5850 REG_WR(bp, PBF_REG_P0_INIT_CRD + port*4, (9040/16) + 553 - 22);
5853 REG_WR(bp, PBF_REG_INIT_P0 + port*4, 1);
5855 REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0);
5858 /* tell the searcher where the T2 table is */
5859 REG_WR(bp, SRC_REG_COUNTFREE0 + func*4, 16*1024/64);
5861 wb_write[0] = U64_LO(bp->t2_mapping);
5862 wb_write[1] = U64_HI(bp->t2_mapping);
5863 REG_WR_DMAE(bp, SRC_REG_FIRSTFREE0 + func*4, wb_write, 2);
5864 wb_write[0] = U64_LO((u64)bp->t2_mapping + 16*1024 - 64);
5865 wb_write[1] = U64_HI((u64)bp->t2_mapping + 16*1024 - 64);
5866 REG_WR_DMAE(bp, SRC_REG_LASTFREE0 + func*4, wb_write, 2);
5868 REG_WR(bp, SRC_REG_NUMBER_HASH_BITS0 + func*4, 10);
5869 /* Port SRCH comes here */
5871 /* Port CDU comes here */
5872 /* Port CFC comes here */
5874 if (CHIP_IS_E1(bp)) {
5875 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
5876 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
5878 bnx2x_init_block(bp, port ? HC_PORT1_START : HC_PORT0_START,
5879 port ? HC_PORT1_END : HC_PORT0_END);
5881 bnx2x_init_block(bp, port ? MISC_AEU_PORT1_START :
5882 MISC_AEU_PORT0_START,
5883 port ? MISC_AEU_PORT1_END : MISC_AEU_PORT0_END);
5884 /* init aeu_mask_attn_func_0/1:
5885 * - SF mode: bits 3-7 are masked. only bits 0-2 are in use
5886 * - MF mode: bit 3 is masked. bits 0-2 are in use as in SF
5887 * bits 4-7 are used for "per vn group attention" */
5888 REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4,
5889 (IS_E1HMF(bp) ? 0xF7 : 0x7));
5891 /* Port PXPCS comes here */
5892 /* Port EMAC0 comes here */
5893 /* Port EMAC1 comes here */
5894 /* Port DBU comes here */
5895 /* Port DBG comes here */
5896 bnx2x_init_block(bp, port ? NIG_PORT1_START : NIG_PORT0_START,
5897 port ? NIG_PORT1_END : NIG_PORT0_END);
5899 REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1);
5901 if (CHIP_IS_E1H(bp)) {
5902 /* 0x2 disable e1hov, 0x1 enable */
5903 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK_MF + port*4,
5904 (IS_E1HMF(bp) ? 0x1 : 0x2));
5906 /* support pause requests from USDM, TSDM and BRB */
5907 REG_WR(bp, NIG_REG_LLFC_EGRESS_SRC_ENABLE_0 + port*4, 0x7);
5910 REG_WR(bp, NIG_REG_LLFC_ENABLE_0 + port*4, 0);
5911 REG_WR(bp, NIG_REG_LLFC_OUT_EN_0 + port*4, 0);
5912 REG_WR(bp, NIG_REG_PAUSE_ENABLE_0 + port*4, 1);
5916 /* Port MCP comes here */
5917 /* Port DMAE comes here */
5919 switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
5920 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
5922 u32 swap_val, swap_override, aeu_gpio_mask, offset;
5924 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_3,
5925 MISC_REGISTERS_GPIO_INPUT_HI_Z, port);
5927 /* The GPIO should be swapped if the swap register is
5929 swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
5930 swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
5932 /* Select function upon port-swap configuration */
5934 offset = MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0;
5935 aeu_gpio_mask = (swap_val && swap_override) ?
5936 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1 :
5937 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0;
5939 offset = MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0;
5940 aeu_gpio_mask = (swap_val && swap_override) ?
5941 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 :
5942 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1;
5944 val = REG_RD(bp, offset);
5945 /* add GPIO3 to group */
5946 val |= aeu_gpio_mask;
5947 REG_WR(bp, offset, val);
5951 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
5952 /* add SPIO 5 to group 0 */
5953 val = REG_RD(bp, MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
5954 val |= AEU_INPUTS_ATTN_BITS_SPIO5;
5955 REG_WR(bp, MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0, val);
5962 bnx2x__link_reset(bp);
5967 #define ILT_PER_FUNC (768/2)
5968 #define FUNC_ILT_BASE(func) (func * ILT_PER_FUNC)
5969 /* the phys address is shifted right 12 bits and has an added
5970 1=valid bit added to the 53rd bit
5971 then since this is a wide register(TM)
5972 we split it into two 32 bit writes
5974 #define ONCHIP_ADDR1(x) ((u32)(((u64)x >> 12) & 0xFFFFFFFF))
5975 #define ONCHIP_ADDR2(x) ((u32)((1 << 20) | ((u64)x >> 44)))
5976 #define PXP_ONE_ILT(x) (((x) << 10) | x)
5977 #define PXP_ILT_RANGE(f, l) (((l) << 10) | f)
5979 #define CNIC_ILT_LINES 0
5981 static void bnx2x_ilt_wr(struct bnx2x *bp, u32 index, dma_addr_t addr)
5985 if (CHIP_IS_E1H(bp))
5986 reg = PXP2_REG_RQ_ONCHIP_AT_B0 + index*8;
5988 reg = PXP2_REG_RQ_ONCHIP_AT + index*8;
5990 bnx2x_wb_wr(bp, reg, ONCHIP_ADDR1(addr), ONCHIP_ADDR2(addr));
5993 static int bnx2x_init_func(struct bnx2x *bp)
5995 int port = BP_PORT(bp);
5996 int func = BP_FUNC(bp);
6000 DP(BNX2X_MSG_MCP, "starting func init func %x\n", func);
6002 /* set MSI reconfigure capability */
6003 addr = (port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0);
6004 val = REG_RD(bp, addr);
6005 val |= HC_CONFIG_0_REG_MSI_ATTN_EN_0;
6006 REG_WR(bp, addr, val);
6008 i = FUNC_ILT_BASE(func);
6010 bnx2x_ilt_wr(bp, i, bnx2x_sp_mapping(bp, context));
6011 if (CHIP_IS_E1H(bp)) {
6012 REG_WR(bp, PXP2_REG_RQ_CDU_FIRST_ILT, i);
6013 REG_WR(bp, PXP2_REG_RQ_CDU_LAST_ILT, i + CNIC_ILT_LINES);
6015 REG_WR(bp, PXP2_REG_PSWRQ_CDU0_L2P + func*4,
6016 PXP_ILT_RANGE(i, i + CNIC_ILT_LINES));
6019 if (CHIP_IS_E1H(bp)) {
6020 for (i = 0; i < 9; i++)
6021 bnx2x_init_block(bp,
6022 cm_start[func][i], cm_end[func][i]);
6024 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
6025 REG_WR(bp, NIG_REG_LLH0_FUNC_VLAN_ID + port*8, bp->e1hov);
6028 /* HC init per function */
6029 if (CHIP_IS_E1H(bp)) {
6030 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
6032 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
6033 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
6035 bnx2x_init_block(bp, hc_limits[func][0], hc_limits[func][1]);
6037 /* Reset PCIE errors for debug */
6038 REG_WR(bp, 0x2114, 0xffffffff);
6039 REG_WR(bp, 0x2120, 0xffffffff);
6044 static int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
6048 DP(BNX2X_MSG_MCP, "function %d load_code %x\n",
6049 BP_FUNC(bp), load_code);
6052 mutex_init(&bp->dmae_mutex);
6053 bnx2x_gunzip_init(bp);
6055 switch (load_code) {
6056 case FW_MSG_CODE_DRV_LOAD_COMMON:
6057 rc = bnx2x_init_common(bp);
6062 case FW_MSG_CODE_DRV_LOAD_PORT:
6064 rc = bnx2x_init_port(bp);
6069 case FW_MSG_CODE_DRV_LOAD_FUNCTION:
6071 rc = bnx2x_init_func(bp);
6077 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
6081 if (!BP_NOMCP(bp)) {
6082 int func = BP_FUNC(bp);
6084 bp->fw_drv_pulse_wr_seq =
6085 (SHMEM_RD(bp, func_mb[func].drv_pulse_mb) &
6086 DRV_PULSE_SEQ_MASK);
6087 bp->func_stx = SHMEM_RD(bp, func_mb[func].fw_mb_param);
6088 DP(BNX2X_MSG_MCP, "drv_pulse 0x%x func_stx 0x%x\n",
6089 bp->fw_drv_pulse_wr_seq, bp->func_stx);
6093 /* this needs to be done before gunzip end */
6094 bnx2x_zero_def_sb(bp);
6095 for_each_queue(bp, i)
6096 bnx2x_zero_sb(bp, BP_L_ID(bp) + i);
6099 bnx2x_gunzip_end(bp);
6104 /* send the MCP a request, block until there is a reply */
6105 static u32 bnx2x_fw_command(struct bnx2x *bp, u32 command)
6107 int func = BP_FUNC(bp);
6108 u32 seq = ++bp->fw_seq;
6111 u8 delay = CHIP_REV_IS_SLOW(bp) ? 100 : 10;
6113 SHMEM_WR(bp, func_mb[func].drv_mb_header, (command | seq));
6114 DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB\n", (command | seq));
6117 /* let the FW do it's magic ... */
6120 rc = SHMEM_RD(bp, func_mb[func].fw_mb_header);
6122 /* Give the FW up to 2 second (200*10ms) */
6123 } while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 200));
6125 DP(BNX2X_MSG_MCP, "[after %d ms] read (%x) seq is (%x) from FW MB\n",
6126 cnt*delay, rc, seq);
6128 /* is this a reply to our command? */
6129 if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK)) {
6130 rc &= FW_MSG_CODE_MASK;
6134 BNX2X_ERR("FW failed to respond!\n");
6142 static void bnx2x_free_mem(struct bnx2x *bp)
6145 #define BNX2X_PCI_FREE(x, y, size) \
6148 pci_free_consistent(bp->pdev, size, x, y); \
6154 #define BNX2X_FREE(x) \
6166 for_each_queue(bp, i) {
6169 BNX2X_PCI_FREE(bnx2x_fp(bp, i, status_blk),
6170 bnx2x_fp(bp, i, status_blk_mapping),
6171 sizeof(struct host_status_block) +
6172 sizeof(struct eth_tx_db_data));
6175 for_each_rx_queue(bp, i) {
6177 /* fastpath rx rings: rx_buf rx_desc rx_comp */
6178 BNX2X_FREE(bnx2x_fp(bp, i, rx_buf_ring));
6179 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_desc_ring),
6180 bnx2x_fp(bp, i, rx_desc_mapping),
6181 sizeof(struct eth_rx_bd) * NUM_RX_BD);
6183 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_comp_ring),
6184 bnx2x_fp(bp, i, rx_comp_mapping),
6185 sizeof(struct eth_fast_path_rx_cqe) *
6189 BNX2X_FREE(bnx2x_fp(bp, i, rx_page_ring));
6190 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_sge_ring),
6191 bnx2x_fp(bp, i, rx_sge_mapping),
6192 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
6195 for_each_tx_queue(bp, i) {
6197 /* fastpath tx rings: tx_buf tx_desc */
6198 BNX2X_FREE(bnx2x_fp(bp, i, tx_buf_ring));
6199 BNX2X_PCI_FREE(bnx2x_fp(bp, i, tx_desc_ring),
6200 bnx2x_fp(bp, i, tx_desc_mapping),
6201 sizeof(struct eth_tx_bd) * NUM_TX_BD);
6203 /* end of fastpath */
6205 BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping,
6206 sizeof(struct host_def_status_block));
6208 BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping,
6209 sizeof(struct bnx2x_slowpath));
6212 BNX2X_PCI_FREE(bp->t1, bp->t1_mapping, 64*1024);
6213 BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, 16*1024);
6214 BNX2X_PCI_FREE(bp->timers, bp->timers_mapping, 8*1024);
6215 BNX2X_PCI_FREE(bp->qm, bp->qm_mapping, 128*1024);
6217 BNX2X_PCI_FREE(bp->spq, bp->spq_mapping, BCM_PAGE_SIZE);
6219 #undef BNX2X_PCI_FREE
6223 static int bnx2x_alloc_mem(struct bnx2x *bp)
6226 #define BNX2X_PCI_ALLOC(x, y, size) \
6228 x = pci_alloc_consistent(bp->pdev, size, y); \
6230 goto alloc_mem_err; \
6231 memset(x, 0, size); \
6234 #define BNX2X_ALLOC(x, size) \
6236 x = vmalloc(size); \
6238 goto alloc_mem_err; \
6239 memset(x, 0, size); \
6246 for_each_queue(bp, i) {
6247 bnx2x_fp(bp, i, bp) = bp;
6250 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, status_blk),
6251 &bnx2x_fp(bp, i, status_blk_mapping),
6252 sizeof(struct host_status_block) +
6253 sizeof(struct eth_tx_db_data));
6256 for_each_rx_queue(bp, i) {
6258 /* fastpath rx rings: rx_buf rx_desc rx_comp */
6259 BNX2X_ALLOC(bnx2x_fp(bp, i, rx_buf_ring),
6260 sizeof(struct sw_rx_bd) * NUM_RX_BD);
6261 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_desc_ring),
6262 &bnx2x_fp(bp, i, rx_desc_mapping),
6263 sizeof(struct eth_rx_bd) * NUM_RX_BD);
6265 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_comp_ring),
6266 &bnx2x_fp(bp, i, rx_comp_mapping),
6267 sizeof(struct eth_fast_path_rx_cqe) *
6271 BNX2X_ALLOC(bnx2x_fp(bp, i, rx_page_ring),
6272 sizeof(struct sw_rx_page) * NUM_RX_SGE);
6273 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_sge_ring),
6274 &bnx2x_fp(bp, i, rx_sge_mapping),
6275 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
6278 for_each_tx_queue(bp, i) {
6280 bnx2x_fp(bp, i, hw_tx_prods) =
6281 (void *)(bnx2x_fp(bp, i, status_blk) + 1);
6283 bnx2x_fp(bp, i, tx_prods_mapping) =
6284 bnx2x_fp(bp, i, status_blk_mapping) +
6285 sizeof(struct host_status_block);
6287 /* fastpath tx rings: tx_buf tx_desc */
6288 BNX2X_ALLOC(bnx2x_fp(bp, i, tx_buf_ring),
6289 sizeof(struct sw_tx_bd) * NUM_TX_BD);
6290 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, tx_desc_ring),
6291 &bnx2x_fp(bp, i, tx_desc_mapping),
6292 sizeof(struct eth_tx_bd) * NUM_TX_BD);
6294 /* end of fastpath */
6296 BNX2X_PCI_ALLOC(bp->def_status_blk, &bp->def_status_blk_mapping,
6297 sizeof(struct host_def_status_block));
6299 BNX2X_PCI_ALLOC(bp->slowpath, &bp->slowpath_mapping,
6300 sizeof(struct bnx2x_slowpath));
6303 BNX2X_PCI_ALLOC(bp->t1, &bp->t1_mapping, 64*1024);
6306 for (i = 0; i < 64*1024; i += 64) {
6307 *(u64 *)((char *)bp->t1 + i + 56) = 0x0UL;
6308 *(u64 *)((char *)bp->t1 + i + 3) = 0x0UL;
6311 /* allocate searcher T2 table
6312 we allocate 1/4 of alloc num for T2
6313 (which is not entered into the ILT) */
6314 BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, 16*1024);
6317 for (i = 0; i < 16*1024; i += 64)
6318 * (u64 *)((char *)bp->t2 + i + 56) = bp->t2_mapping + i + 64;
6320 /* now fixup the last line in the block to point to the next block */
6321 *(u64 *)((char *)bp->t2 + 1024*16-8) = bp->t2_mapping;
6323 /* Timer block array (MAX_CONN*8) phys uncached for now 1024 conns */
6324 BNX2X_PCI_ALLOC(bp->timers, &bp->timers_mapping, 8*1024);
6326 /* QM queues (128*MAX_CONN) */
6327 BNX2X_PCI_ALLOC(bp->qm, &bp->qm_mapping, 128*1024);
6330 /* Slow path ring */
6331 BNX2X_PCI_ALLOC(bp->spq, &bp->spq_mapping, BCM_PAGE_SIZE);
6339 #undef BNX2X_PCI_ALLOC
6343 static void bnx2x_free_tx_skbs(struct bnx2x *bp)
6347 for_each_tx_queue(bp, i) {
6348 struct bnx2x_fastpath *fp = &bp->fp[i];
6350 u16 bd_cons = fp->tx_bd_cons;
6351 u16 sw_prod = fp->tx_pkt_prod;
6352 u16 sw_cons = fp->tx_pkt_cons;
6354 while (sw_cons != sw_prod) {
6355 bd_cons = bnx2x_free_tx_pkt(bp, fp, TX_BD(sw_cons));
6361 static void bnx2x_free_rx_skbs(struct bnx2x *bp)
6365 for_each_rx_queue(bp, j) {
6366 struct bnx2x_fastpath *fp = &bp->fp[j];
6368 for (i = 0; i < NUM_RX_BD; i++) {
6369 struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
6370 struct sk_buff *skb = rx_buf->skb;
6375 pci_unmap_single(bp->pdev,
6376 pci_unmap_addr(rx_buf, mapping),
6378 PCI_DMA_FROMDEVICE);
6383 if (!fp->disable_tpa)
6384 bnx2x_free_tpa_pool(bp, fp, CHIP_IS_E1(bp) ?
6385 ETH_MAX_AGGREGATION_QUEUES_E1 :
6386 ETH_MAX_AGGREGATION_QUEUES_E1H);
6390 static void bnx2x_free_skbs(struct bnx2x *bp)
6392 bnx2x_free_tx_skbs(bp);
6393 bnx2x_free_rx_skbs(bp);
6396 static void bnx2x_free_msix_irqs(struct bnx2x *bp)
6400 free_irq(bp->msix_table[0].vector, bp->dev);
6401 DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
6402 bp->msix_table[0].vector);
6404 for_each_queue(bp, i) {
6405 DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq "
6406 "state %x\n", i, bp->msix_table[i + offset].vector,
6407 bnx2x_fp(bp, i, state));
6409 free_irq(bp->msix_table[i + offset].vector, &bp->fp[i]);
6413 static void bnx2x_free_irq(struct bnx2x *bp)
6415 if (bp->flags & USING_MSIX_FLAG) {
6416 bnx2x_free_msix_irqs(bp);
6417 pci_disable_msix(bp->pdev);
6418 bp->flags &= ~USING_MSIX_FLAG;
6420 } else if (bp->flags & USING_MSI_FLAG) {
6421 free_irq(bp->pdev->irq, bp->dev);
6422 pci_disable_msi(bp->pdev);
6423 bp->flags &= ~USING_MSI_FLAG;
6426 free_irq(bp->pdev->irq, bp->dev);
6429 static int bnx2x_enable_msix(struct bnx2x *bp)
6431 int i, rc, offset = 1;
6434 bp->msix_table[0].entry = igu_vec;
6435 DP(NETIF_MSG_IFUP, "msix_table[0].entry = %d (slowpath)\n", igu_vec);
6437 for_each_queue(bp, i) {
6438 igu_vec = BP_L_ID(bp) + offset + i;
6439 bp->msix_table[i + offset].entry = igu_vec;
6440 DP(NETIF_MSG_IFUP, "msix_table[%d].entry = %d "
6441 "(fastpath #%u)\n", i + offset, igu_vec, i);
6444 rc = pci_enable_msix(bp->pdev, &bp->msix_table[0],
6445 BNX2X_NUM_QUEUES(bp) + offset);
6447 DP(NETIF_MSG_IFUP, "MSI-X is not attainable rc %d\n", rc);
6451 bp->flags |= USING_MSIX_FLAG;
6456 static int bnx2x_req_msix_irqs(struct bnx2x *bp)
6458 int i, rc, offset = 1;
6460 rc = request_irq(bp->msix_table[0].vector, bnx2x_msix_sp_int, 0,
6461 bp->dev->name, bp->dev);
6463 BNX2X_ERR("request sp irq failed\n");
6467 for_each_queue(bp, i) {
6468 struct bnx2x_fastpath *fp = &bp->fp[i];
6470 sprintf(fp->name, "%s.fp%d", bp->dev->name, i);
6471 rc = request_irq(bp->msix_table[i + offset].vector,
6472 bnx2x_msix_fp_int, 0, fp->name, fp);
6474 BNX2X_ERR("request fp #%d irq failed rc %d\n", i, rc);
6475 bnx2x_free_msix_irqs(bp);
6479 fp->state = BNX2X_FP_STATE_IRQ;
6482 i = BNX2X_NUM_QUEUES(bp);
6484 printk(KERN_INFO PFX
6485 "%s: using MSI-X IRQs: sp %d fp %d - %d\n",
6486 bp->dev->name, bp->msix_table[0].vector,
6487 bp->msix_table[offset].vector,
6488 bp->msix_table[offset + i - 1].vector);
6490 printk(KERN_INFO PFX "%s: using MSI-X IRQs: sp %d fp %d\n",
6491 bp->dev->name, bp->msix_table[0].vector,
6492 bp->msix_table[offset + i - 1].vector);
6497 static int bnx2x_enable_msi(struct bnx2x *bp)
6501 rc = pci_enable_msi(bp->pdev);
6503 DP(NETIF_MSG_IFUP, "MSI is not attainable\n");
6506 bp->flags |= USING_MSI_FLAG;
6511 static int bnx2x_req_irq(struct bnx2x *bp)
6513 unsigned long flags;
6516 if (bp->flags & USING_MSI_FLAG)
6519 flags = IRQF_SHARED;
6521 rc = request_irq(bp->pdev->irq, bnx2x_interrupt, flags,
6522 bp->dev->name, bp->dev);
6524 bnx2x_fp(bp, 0, state) = BNX2X_FP_STATE_IRQ;
6529 static void bnx2x_napi_enable(struct bnx2x *bp)
6533 for_each_rx_queue(bp, i)
6534 napi_enable(&bnx2x_fp(bp, i, napi));
6537 static void bnx2x_napi_disable(struct bnx2x *bp)
6541 for_each_rx_queue(bp, i)
6542 napi_disable(&bnx2x_fp(bp, i, napi));
6545 static void bnx2x_netif_start(struct bnx2x *bp)
6547 if (atomic_dec_and_test(&bp->intr_sem)) {
6548 if (netif_running(bp->dev)) {
6549 bnx2x_napi_enable(bp);
6550 bnx2x_int_enable(bp);
6551 if (bp->state == BNX2X_STATE_OPEN)
6552 netif_tx_wake_all_queues(bp->dev);
6557 static void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
6559 bnx2x_int_disable_sync(bp, disable_hw);
6560 bnx2x_napi_disable(bp);
6561 if (netif_running(bp->dev)) {
6562 netif_tx_disable(bp->dev);
6563 bp->dev->trans_start = jiffies; /* prevent tx timeout */
6568 * Init service functions
6571 static void bnx2x_set_mac_addr_e1(struct bnx2x *bp, int set)
6573 struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
6574 int port = BP_PORT(bp);
6577 * unicasts 0-31:port0 32-63:port1
6578 * multicast 64-127:port0 128-191:port1
6580 config->hdr.length = 2;
6581 config->hdr.offset = port ? 32 : 0;
6582 config->hdr.client_id = BP_CL_ID(bp);
6583 config->hdr.reserved1 = 0;
6586 config->config_table[0].cam_entry.msb_mac_addr =
6587 swab16(*(u16 *)&bp->dev->dev_addr[0]);
6588 config->config_table[0].cam_entry.middle_mac_addr =
6589 swab16(*(u16 *)&bp->dev->dev_addr[2]);
6590 config->config_table[0].cam_entry.lsb_mac_addr =
6591 swab16(*(u16 *)&bp->dev->dev_addr[4]);
6592 config->config_table[0].cam_entry.flags = cpu_to_le16(port);
6594 config->config_table[0].target_table_entry.flags = 0;
6596 CAM_INVALIDATE(config->config_table[0]);
6597 config->config_table[0].target_table_entry.client_id = 0;
6598 config->config_table[0].target_table_entry.vlan_id = 0;
6600 DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x)\n",
6601 (set ? "setting" : "clearing"),
6602 config->config_table[0].cam_entry.msb_mac_addr,
6603 config->config_table[0].cam_entry.middle_mac_addr,
6604 config->config_table[0].cam_entry.lsb_mac_addr);
6607 config->config_table[1].cam_entry.msb_mac_addr = 0xffff;
6608 config->config_table[1].cam_entry.middle_mac_addr = 0xffff;
6609 config->config_table[1].cam_entry.lsb_mac_addr = 0xffff;
6610 config->config_table[1].cam_entry.flags = cpu_to_le16(port);
6612 config->config_table[1].target_table_entry.flags =
6613 TSTORM_CAM_TARGET_TABLE_ENTRY_BROADCAST;
6615 CAM_INVALIDATE(config->config_table[1]);
6616 config->config_table[1].target_table_entry.client_id = 0;
6617 config->config_table[1].target_table_entry.vlan_id = 0;
6619 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
6620 U64_HI(bnx2x_sp_mapping(bp, mac_config)),
6621 U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
6624 static void bnx2x_set_mac_addr_e1h(struct bnx2x *bp, int set)
6626 struct mac_configuration_cmd_e1h *config =
6627 (struct mac_configuration_cmd_e1h *)bnx2x_sp(bp, mac_config);
6629 if (set && (bp->state != BNX2X_STATE_OPEN)) {
6630 DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
6634 /* CAM allocation for E1H
6635 * unicasts: by func number
6636 * multicast: 20+FUNC*20, 20 each
6638 config->hdr.length = 1;
6639 config->hdr.offset = BP_FUNC(bp);
6640 config->hdr.client_id = BP_CL_ID(bp);
6641 config->hdr.reserved1 = 0;
6644 config->config_table[0].msb_mac_addr =
6645 swab16(*(u16 *)&bp->dev->dev_addr[0]);
6646 config->config_table[0].middle_mac_addr =
6647 swab16(*(u16 *)&bp->dev->dev_addr[2]);
6648 config->config_table[0].lsb_mac_addr =
6649 swab16(*(u16 *)&bp->dev->dev_addr[4]);
6650 config->config_table[0].client_id = BP_L_ID(bp);
6651 config->config_table[0].vlan_id = 0;
6652 config->config_table[0].e1hov_id = cpu_to_le16(bp->e1hov);
6654 config->config_table[0].flags = BP_PORT(bp);
6656 config->config_table[0].flags =
6657 MAC_CONFIGURATION_ENTRY_E1H_ACTION_TYPE;
6659 DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x) E1HOV %d CLID %d\n",
6660 (set ? "setting" : "clearing"),
6661 config->config_table[0].msb_mac_addr,
6662 config->config_table[0].middle_mac_addr,
6663 config->config_table[0].lsb_mac_addr, bp->e1hov, BP_L_ID(bp));
6665 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
6666 U64_HI(bnx2x_sp_mapping(bp, mac_config)),
6667 U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
6670 static int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
6671 int *state_p, int poll)
6673 /* can take a while if any port is running */
6676 DP(NETIF_MSG_IFUP, "%s for state to become %x on IDX [%d]\n",
6677 poll ? "polling" : "waiting", state, idx);
6682 bnx2x_rx_int(bp->fp, 10);
6683 /* if index is different from 0
6684 * the reply for some commands will
6685 * be on the non default queue
6688 bnx2x_rx_int(&bp->fp[idx], 10);
6691 mb(); /* state is changed by bnx2x_sp_event() */
6692 if (*state_p == state) {
6693 #ifdef BNX2X_STOP_ON_ERROR
6694 DP(NETIF_MSG_IFUP, "exit (cnt %d)\n", 5000 - cnt);
6703 BNX2X_ERR("timeout %s for state %x on IDX [%d]\n",
6704 poll ? "polling" : "waiting", state, idx);
6705 #ifdef BNX2X_STOP_ON_ERROR
6712 static int bnx2x_setup_leading(struct bnx2x *bp)
6716 /* reset IGU state */
6717 bnx2x_ack_sb(bp, bp->fp[0].sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
6720 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_SETUP, 0, 0, 0, 0);
6722 /* Wait for completion */
6723 rc = bnx2x_wait_ramrod(bp, BNX2X_STATE_OPEN, 0, &(bp->state), 0);
6728 static int bnx2x_setup_multi(struct bnx2x *bp, int index)
6730 struct bnx2x_fastpath *fp = &bp->fp[index];
6732 /* reset IGU state */
6733 bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
6736 fp->state = BNX2X_FP_STATE_OPENING;
6737 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CLIENT_SETUP, index, 0,
6740 /* Wait for completion */
6741 return bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_OPEN, index,
6745 static int bnx2x_poll(struct napi_struct *napi, int budget);
6747 static void bnx2x_set_int_mode(struct bnx2x *bp)
6755 bp->num_rx_queues = num_queues;
6756 bp->num_tx_queues = num_queues;
6758 "set number of queues to %d\n", num_queues);
6763 if (bp->multi_mode == ETH_RSS_MODE_REGULAR)
6764 num_queues = min_t(u32, num_online_cpus(),
6765 BNX2X_MAX_QUEUES(bp));
6768 bp->num_rx_queues = num_queues;
6769 bp->num_tx_queues = num_queues;
6770 DP(NETIF_MSG_IFUP, "set number of rx queues to %d"
6771 " number of tx queues to %d\n",
6772 bp->num_rx_queues, bp->num_tx_queues);
6773 /* if we can't use MSI-X we only need one fp,
6774 * so try to enable MSI-X with the requested number of fp's
6775 * and fallback to MSI or legacy INTx with one fp
6777 if (bnx2x_enable_msix(bp)) {
6778 /* failed to enable MSI-X */
6780 bp->num_rx_queues = num_queues;
6781 bp->num_tx_queues = num_queues;
6783 BNX2X_ERR("Multi requested but failed to "
6784 "enable MSI-X set number of "
6785 "queues to %d\n", num_queues);
6789 bp->dev->real_num_tx_queues = bp->num_tx_queues;
6792 static void bnx2x_set_rx_mode(struct net_device *dev);
6794 /* must be called with rtnl_lock */
6795 static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
6799 #ifdef BNX2X_STOP_ON_ERROR
6800 DP(NETIF_MSG_IFUP, "enter load_mode %d\n", load_mode);
6801 if (unlikely(bp->panic))
6805 bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;
6807 bnx2x_set_int_mode(bp);
6809 if (bnx2x_alloc_mem(bp))
6812 for_each_rx_queue(bp, i)
6813 bnx2x_fp(bp, i, disable_tpa) =
6814 ((bp->flags & TPA_ENABLE_FLAG) == 0);
6816 for_each_rx_queue(bp, i)
6817 netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
6820 #ifdef BNX2X_STOP_ON_ERROR
6821 for_each_rx_queue(bp, i) {
6822 struct bnx2x_fastpath *fp = &bp->fp[i];
6824 fp->poll_no_work = 0;
6826 fp->poll_max_calls = 0;
6827 fp->poll_complete = 0;
6831 bnx2x_napi_enable(bp);
6833 if (bp->flags & USING_MSIX_FLAG) {
6834 rc = bnx2x_req_msix_irqs(bp);
6836 pci_disable_msix(bp->pdev);
6840 if ((rc != -ENOMEM) && (int_mode != INT_MODE_INTx))
6841 bnx2x_enable_msi(bp);
6843 rc = bnx2x_req_irq(bp);
6845 BNX2X_ERR("IRQ request failed rc %d, aborting\n", rc);
6846 if (bp->flags & USING_MSI_FLAG)
6847 pci_disable_msi(bp->pdev);
6850 if (bp->flags & USING_MSI_FLAG) {
6851 bp->dev->irq = bp->pdev->irq;
6852 printk(KERN_INFO PFX "%s: using MSI IRQ %d\n",
6853 bp->dev->name, bp->pdev->irq);
6857 /* Send LOAD_REQUEST command to MCP
6858 Returns the type of LOAD command:
6859 if it is the first port to be initialized
6860 common blocks should be initialized, otherwise - not
6862 if (!BP_NOMCP(bp)) {
6863 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ);
6865 BNX2X_ERR("MCP response failure, aborting\n");
6869 if (load_code == FW_MSG_CODE_DRV_LOAD_REFUSED) {
6870 rc = -EBUSY; /* other port in diagnostic mode */
6875 int port = BP_PORT(bp);
6877 DP(NETIF_MSG_IFUP, "NO MCP load counts before us %d, %d, %d\n",
6878 load_count[0], load_count[1], load_count[2]);
6880 load_count[1 + port]++;
6881 DP(NETIF_MSG_IFUP, "NO MCP new load counts %d, %d, %d\n",
6882 load_count[0], load_count[1], load_count[2]);
6883 if (load_count[0] == 1)
6884 load_code = FW_MSG_CODE_DRV_LOAD_COMMON;
6885 else if (load_count[1 + port] == 1)
6886 load_code = FW_MSG_CODE_DRV_LOAD_PORT;
6888 load_code = FW_MSG_CODE_DRV_LOAD_FUNCTION;
6891 if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
6892 (load_code == FW_MSG_CODE_DRV_LOAD_PORT))
6896 DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
6899 rc = bnx2x_init_hw(bp, load_code);
6901 BNX2X_ERR("HW init failed, aborting\n");
6905 /* Setup NIC internals and enable interrupts */
6906 bnx2x_nic_init(bp, load_code);
6908 /* Send LOAD_DONE command to MCP */
6909 if (!BP_NOMCP(bp)) {
6910 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE);
6912 BNX2X_ERR("MCP response failure, aborting\n");
6918 bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;
6920 rc = bnx2x_setup_leading(bp);
6922 BNX2X_ERR("Setup leading failed!\n");
6926 if (CHIP_IS_E1H(bp))
6927 if (bp->mf_config & FUNC_MF_CFG_FUNC_DISABLED) {
6928 BNX2X_ERR("!!! mf_cfg function disabled\n");
6929 bp->state = BNX2X_STATE_DISABLED;
6932 if (bp->state == BNX2X_STATE_OPEN)
6933 for_each_nondefault_queue(bp, i) {
6934 rc = bnx2x_setup_multi(bp, i);
6940 bnx2x_set_mac_addr_e1(bp, 1);
6942 bnx2x_set_mac_addr_e1h(bp, 1);
6945 bnx2x_initial_phy_init(bp);
6947 /* Start fast path */
6948 switch (load_mode) {
6950 /* Tx queue should be only reenabled */
6951 netif_tx_wake_all_queues(bp->dev);
6952 /* Initialize the receive filter. */
6953 bnx2x_set_rx_mode(bp->dev);
6957 netif_tx_start_all_queues(bp->dev);
6958 /* Initialize the receive filter. */
6959 bnx2x_set_rx_mode(bp->dev);
6963 /* Initialize the receive filter. */
6964 bnx2x_set_rx_mode(bp->dev);
6965 bp->state = BNX2X_STATE_DIAG;
6973 bnx2x__link_status_update(bp);
6975 /* start the timer */
6976 mod_timer(&bp->timer, jiffies + bp->current_interval);
6982 bnx2x_int_disable_sync(bp, 1);
6983 if (!BP_NOMCP(bp)) {
6984 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP);
6985 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
6988 /* Free SKBs, SGEs, TPA pool and driver internals */
6989 bnx2x_free_skbs(bp);
6990 for_each_rx_queue(bp, i)
6991 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
6996 bnx2x_napi_disable(bp);
6997 for_each_rx_queue(bp, i)
6998 netif_napi_del(&bnx2x_fp(bp, i, napi));
7001 /* TBD we really need to reset the chip
7002 if we want to recover from this */
7006 static int bnx2x_stop_multi(struct bnx2x *bp, int index)
7008 struct bnx2x_fastpath *fp = &bp->fp[index];
7011 /* halt the connection */
7012 fp->state = BNX2X_FP_STATE_HALTING;
7013 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, index, 0, fp->cl_id, 0);
7015 /* Wait for completion */
7016 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, index,
7018 if (rc) /* timeout */
7021 /* delete cfc entry */
7022 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CFC_DEL, index, 0, 0, 1);
7024 /* Wait for completion */
7025 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_CLOSED, index,
7030 static int bnx2x_stop_leading(struct bnx2x *bp)
7032 u16 dsb_sp_prod_idx;
7033 /* if the other port is handling traffic,
7034 this can take a lot of time */
7040 /* Send HALT ramrod */
7041 bp->fp[0].state = BNX2X_FP_STATE_HALTING;
7042 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, 0, 0, BP_CL_ID(bp), 0);
7044 /* Wait for completion */
7045 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, 0,
7046 &(bp->fp[0].state), 1);
7047 if (rc) /* timeout */
7050 dsb_sp_prod_idx = *bp->dsb_sp_prod;
7052 /* Send PORT_DELETE ramrod */
7053 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_DEL, 0, 0, 0, 1);
7055 /* Wait for completion to arrive on default status block
7056 we are going to reset the chip anyway
7057 so there is not much to do if this times out
7059 while (dsb_sp_prod_idx == *bp->dsb_sp_prod) {
7061 DP(NETIF_MSG_IFDOWN, "timeout waiting for port del "
7062 "dsb_sp_prod 0x%x != dsb_sp_prod_idx 0x%x\n",
7063 *bp->dsb_sp_prod, dsb_sp_prod_idx);
7064 #ifdef BNX2X_STOP_ON_ERROR
7072 rmb(); /* Refresh the dsb_sp_prod */
7074 bp->state = BNX2X_STATE_CLOSING_WAIT4_UNLOAD;
7075 bp->fp[0].state = BNX2X_FP_STATE_CLOSED;
7080 static void bnx2x_reset_func(struct bnx2x *bp)
7082 int port = BP_PORT(bp);
7083 int func = BP_FUNC(bp);
7087 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
7088 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
7091 base = FUNC_ILT_BASE(func);
7092 for (i = base; i < base + ILT_PER_FUNC; i++)
7093 bnx2x_ilt_wr(bp, i, 0);
7096 static void bnx2x_reset_port(struct bnx2x *bp)
7098 int port = BP_PORT(bp);
7101 REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
7103 /* Do not rcv packets to BRB */
7104 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK + port*4, 0x0);
7105 /* Do not direct rcv packets that are not for MCP to the BRB */
7106 REG_WR(bp, (port ? NIG_REG_LLH1_BRB1_NOT_MCP :
7107 NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
7110 REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, 0);
7113 /* Check for BRB port occupancy */
7114 val = REG_RD(bp, BRB1_REG_PORT_NUM_OCC_BLOCKS_0 + port*4);
7116 DP(NETIF_MSG_IFDOWN,
7117 "BRB1 is not empty %d blocks are occupied\n", val);
7119 /* TODO: Close Doorbell port? */
7122 static void bnx2x_reset_chip(struct bnx2x *bp, u32 reset_code)
7124 DP(BNX2X_MSG_MCP, "function %d reset_code %x\n",
7125 BP_FUNC(bp), reset_code);
7127 switch (reset_code) {
7128 case FW_MSG_CODE_DRV_UNLOAD_COMMON:
7129 bnx2x_reset_port(bp);
7130 bnx2x_reset_func(bp);
7131 bnx2x_reset_common(bp);
7134 case FW_MSG_CODE_DRV_UNLOAD_PORT:
7135 bnx2x_reset_port(bp);
7136 bnx2x_reset_func(bp);
7139 case FW_MSG_CODE_DRV_UNLOAD_FUNCTION:
7140 bnx2x_reset_func(bp);
7144 BNX2X_ERR("Unknown reset_code (0x%x) from MCP\n", reset_code);
7149 /* must be called with rtnl_lock */
7150 static int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
7152 int port = BP_PORT(bp);
7156 bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
7158 bp->rx_mode = BNX2X_RX_MODE_NONE;
7159 bnx2x_set_storm_rx_mode(bp);
7161 bnx2x_netif_stop(bp, 1);
7163 del_timer_sync(&bp->timer);
7164 SHMEM_WR(bp, func_mb[BP_FUNC(bp)].drv_pulse_mb,
7165 (DRV_PULSE_ALWAYS_ALIVE | bp->fw_drv_pulse_wr_seq));
7166 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
7171 /* Wait until tx fastpath tasks complete */
7172 for_each_tx_queue(bp, i) {
7173 struct bnx2x_fastpath *fp = &bp->fp[i];
7177 while (bnx2x_has_tx_work_unload(fp)) {
7179 bnx2x_tx_int(fp, 1000);
7181 BNX2X_ERR("timeout waiting for queue[%d]\n",
7183 #ifdef BNX2X_STOP_ON_ERROR
7195 /* Give HW time to discard old tx messages */
7198 if (CHIP_IS_E1(bp)) {
7199 struct mac_configuration_cmd *config =
7200 bnx2x_sp(bp, mcast_config);
7202 bnx2x_set_mac_addr_e1(bp, 0);
7204 for (i = 0; i < config->hdr.length; i++)
7205 CAM_INVALIDATE(config->config_table[i]);
7207 config->hdr.length = i;
7208 if (CHIP_REV_IS_SLOW(bp))
7209 config->hdr.offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
7211 config->hdr.offset = BNX2X_MAX_MULTICAST*(1 + port);
7212 config->hdr.client_id = BP_CL_ID(bp);
7213 config->hdr.reserved1 = 0;
7215 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
7216 U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
7217 U64_LO(bnx2x_sp_mapping(bp, mcast_config)), 0);
7220 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
7222 bnx2x_set_mac_addr_e1h(bp, 0);
7224 for (i = 0; i < MC_HASH_SIZE; i++)
7225 REG_WR(bp, MC_HASH_OFFSET(bp, i), 0);
7228 if (unload_mode == UNLOAD_NORMAL)
7229 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
7231 else if (bp->flags & NO_WOL_FLAG) {
7232 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP;
7233 if (CHIP_IS_E1H(bp))
7234 REG_WR(bp, MISC_REG_E1HMF_MODE, 0);
7236 } else if (bp->wol) {
7237 u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
7238 u8 *mac_addr = bp->dev->dev_addr;
7240 /* The mac address is written to entries 1-4 to
7241 preserve entry 0 which is used by the PMF */
7242 u8 entry = (BP_E1HVN(bp) + 1)*8;
7244 val = (mac_addr[0] << 8) | mac_addr[1];
7245 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry, val);
7247 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
7248 (mac_addr[4] << 8) | mac_addr[5];
7249 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry + 4, val);
7251 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN;
7254 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
7256 /* Close multi and leading connections
7257 Completions for ramrods are collected in a synchronous way */
7258 for_each_nondefault_queue(bp, i)
7259 if (bnx2x_stop_multi(bp, i))
7262 rc = bnx2x_stop_leading(bp);
7264 BNX2X_ERR("Stop leading failed!\n");
7265 #ifdef BNX2X_STOP_ON_ERROR
7274 reset_code = bnx2x_fw_command(bp, reset_code);
7276 DP(NETIF_MSG_IFDOWN, "NO MCP load counts %d, %d, %d\n",
7277 load_count[0], load_count[1], load_count[2]);
7279 load_count[1 + port]--;
7280 DP(NETIF_MSG_IFDOWN, "NO MCP new load counts %d, %d, %d\n",
7281 load_count[0], load_count[1], load_count[2]);
7282 if (load_count[0] == 0)
7283 reset_code = FW_MSG_CODE_DRV_UNLOAD_COMMON;
7284 else if (load_count[1 + port] == 0)
7285 reset_code = FW_MSG_CODE_DRV_UNLOAD_PORT;
7287 reset_code = FW_MSG_CODE_DRV_UNLOAD_FUNCTION;
7290 if ((reset_code == FW_MSG_CODE_DRV_UNLOAD_COMMON) ||
7291 (reset_code == FW_MSG_CODE_DRV_UNLOAD_PORT))
7292 bnx2x__link_reset(bp);
7294 /* Reset the chip */
7295 bnx2x_reset_chip(bp, reset_code);
7297 /* Report UNLOAD_DONE to MCP */
7299 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
7302 /* Free SKBs, SGEs, TPA pool and driver internals */
7303 bnx2x_free_skbs(bp);
7304 for_each_rx_queue(bp, i)
7305 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
7306 for_each_rx_queue(bp, i)
7307 netif_napi_del(&bnx2x_fp(bp, i, napi));
7310 bp->state = BNX2X_STATE_CLOSED;
7312 netif_carrier_off(bp->dev);
7317 static void bnx2x_reset_task(struct work_struct *work)
7319 struct bnx2x *bp = container_of(work, struct bnx2x, reset_task);
7321 #ifdef BNX2X_STOP_ON_ERROR
7322 BNX2X_ERR("reset task called but STOP_ON_ERROR defined"
7323 " so reset not done to allow debug dump,\n"
7324 KERN_ERR " you will need to reboot when done\n");
7330 if (!netif_running(bp->dev))
7331 goto reset_task_exit;
7333 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
7334 bnx2x_nic_load(bp, LOAD_NORMAL);
7340 /* end of nic load/unload */
7345 * Init service functions
7348 static inline u32 bnx2x_get_pretend_reg(struct bnx2x *bp, int func)
7351 case 0: return PXP2_REG_PGL_PRETEND_FUNC_F0;
7352 case 1: return PXP2_REG_PGL_PRETEND_FUNC_F1;
7353 case 2: return PXP2_REG_PGL_PRETEND_FUNC_F2;
7354 case 3: return PXP2_REG_PGL_PRETEND_FUNC_F3;
7355 case 4: return PXP2_REG_PGL_PRETEND_FUNC_F4;
7356 case 5: return PXP2_REG_PGL_PRETEND_FUNC_F5;
7357 case 6: return PXP2_REG_PGL_PRETEND_FUNC_F6;
7358 case 7: return PXP2_REG_PGL_PRETEND_FUNC_F7;
7360 BNX2X_ERR("Unsupported function index: %d\n", func);
7365 static void bnx2x_undi_int_disable_e1h(struct bnx2x *bp, int orig_func)
7367 u32 reg = bnx2x_get_pretend_reg(bp, orig_func), new_val;
7369 /* Flush all outstanding writes */
7372 /* Pretend to be function 0 */
7374 /* Flush the GRC transaction (in the chip) */
7375 new_val = REG_RD(bp, reg);
7377 BNX2X_ERR("Hmmm... Pretend register wasn't updated: (0,%d)!\n",
7382 /* From now we are in the "like-E1" mode */
7383 bnx2x_int_disable(bp);
7385 /* Flush all outstanding writes */
7388 /* Restore the original funtion settings */
7389 REG_WR(bp, reg, orig_func);
7390 new_val = REG_RD(bp, reg);
7391 if (new_val != orig_func) {
7392 BNX2X_ERR("Hmmm... Pretend register wasn't updated: (%d,%d)!\n",
7393 orig_func, new_val);
7398 static inline void bnx2x_undi_int_disable(struct bnx2x *bp, int func)
7400 if (CHIP_IS_E1H(bp))
7401 bnx2x_undi_int_disable_e1h(bp, func);
7403 bnx2x_int_disable(bp);
7406 static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
7410 /* Check if there is any driver already loaded */
7411 val = REG_RD(bp, MISC_REG_UNPREPARED);
7413 /* Check if it is the UNDI driver
7414 * UNDI driver initializes CID offset for normal bell to 0x7
7416 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
7417 val = REG_RD(bp, DORQ_REG_NORM_CID_OFST);
7419 u32 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
7421 int func = BP_FUNC(bp);
7425 /* clear the UNDI indication */
7426 REG_WR(bp, DORQ_REG_NORM_CID_OFST, 0);
7428 BNX2X_DEV_INFO("UNDI is active! reset device\n");
7430 /* try unload UNDI on port 0 */
7433 (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
7434 DRV_MSG_SEQ_NUMBER_MASK);
7435 reset_code = bnx2x_fw_command(bp, reset_code);
7437 /* if UNDI is loaded on the other port */
7438 if (reset_code != FW_MSG_CODE_DRV_UNLOAD_COMMON) {
7440 /* send "DONE" for previous unload */
7441 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
7443 /* unload UNDI on port 1 */
7446 (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
7447 DRV_MSG_SEQ_NUMBER_MASK);
7448 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
7450 bnx2x_fw_command(bp, reset_code);
7453 /* now it's safe to release the lock */
7454 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
7456 bnx2x_undi_int_disable(bp, func);
7458 /* close input traffic and wait for it */
7459 /* Do not rcv packets to BRB */
7461 (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_DRV_MASK :
7462 NIG_REG_LLH0_BRB1_DRV_MASK), 0x0);
7463 /* Do not direct rcv packets that are not for MCP to
7466 (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_NOT_MCP :
7467 NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
7470 (BP_PORT(bp) ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
7471 MISC_REG_AEU_MASK_ATTN_FUNC_0), 0);
7474 /* save NIG port swap info */
7475 swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
7476 swap_en = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
7479 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
7482 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
7484 /* take the NIG out of reset and restore swap values */
7486 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
7487 MISC_REGISTERS_RESET_REG_1_RST_NIG);
7488 REG_WR(bp, NIG_REG_PORT_SWAP, swap_val);
7489 REG_WR(bp, NIG_REG_STRAP_OVERRIDE, swap_en);
7491 /* send unload done to the MCP */
7492 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
7494 /* restore our func and fw_seq */
7497 (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
7498 DRV_MSG_SEQ_NUMBER_MASK);
7501 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
7505 static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
7507 u32 val, val2, val3, val4, id;
7510 /* Get the chip revision id and number. */
7511 /* chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3 */
7512 val = REG_RD(bp, MISC_REG_CHIP_NUM);
7513 id = ((val & 0xffff) << 16);
7514 val = REG_RD(bp, MISC_REG_CHIP_REV);
7515 id |= ((val & 0xf) << 12);
7516 val = REG_RD(bp, MISC_REG_CHIP_METAL);
7517 id |= ((val & 0xff) << 4);
7518 val = REG_RD(bp, MISC_REG_BOND_ID);
7520 bp->common.chip_id = id;
7521 bp->link_params.chip_id = bp->common.chip_id;
7522 BNX2X_DEV_INFO("chip ID is 0x%x\n", id);
7524 val = (REG_RD(bp, 0x2874) & 0x55);
7525 if ((bp->common.chip_id & 0x1) ||
7526 (CHIP_IS_E1(bp) && val) || (CHIP_IS_E1H(bp) && (val == 0x55))) {
7527 bp->flags |= ONE_PORT_FLAG;
7528 BNX2X_DEV_INFO("single port device\n");
7531 val = REG_RD(bp, MCP_REG_MCPR_NVM_CFG4);
7532 bp->common.flash_size = (NVRAM_1MB_SIZE <<
7533 (val & MCPR_NVM_CFG4_FLASH_SIZE));
7534 BNX2X_DEV_INFO("flash_size 0x%x (%d)\n",
7535 bp->common.flash_size, bp->common.flash_size);
7537 bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
7538 bp->link_params.shmem_base = bp->common.shmem_base;
7539 BNX2X_DEV_INFO("shmem offset is 0x%x\n", bp->common.shmem_base);
7541 if (!bp->common.shmem_base ||
7542 (bp->common.shmem_base < 0xA0000) ||
7543 (bp->common.shmem_base >= 0xC0000)) {
7544 BNX2X_DEV_INFO("MCP not active\n");
7545 bp->flags |= NO_MCP_FLAG;
7549 val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
7550 if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
7551 != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
7552 BNX2X_ERR("BAD MCP validity signature\n");
7554 bp->common.hw_config = SHMEM_RD(bp, dev_info.shared_hw_config.config);
7555 BNX2X_DEV_INFO("hw_config 0x%08x\n", bp->common.hw_config);
7557 bp->link_params.hw_led_mode = ((bp->common.hw_config &
7558 SHARED_HW_CFG_LED_MODE_MASK) >>
7559 SHARED_HW_CFG_LED_MODE_SHIFT);
7561 bp->link_params.feature_config_flags = 0;
7562 val = SHMEM_RD(bp, dev_info.shared_feature_config.config);
7563 if (val & SHARED_FEAT_CFG_OVERRIDE_PREEMPHASIS_CFG_ENABLED)
7564 bp->link_params.feature_config_flags |=
7565 FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
7567 bp->link_params.feature_config_flags &=
7568 ~FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
7570 val = SHMEM_RD(bp, dev_info.bc_rev) >> 8;
7571 bp->common.bc_ver = val;
7572 BNX2X_DEV_INFO("bc_ver %X\n", val);
7573 if (val < BNX2X_BC_VER) {
7574 /* for now only warn
7575 * later we might need to enforce this */
7576 BNX2X_ERR("This driver needs bc_ver %X but found %X,"
7577 " please upgrade BC\n", BNX2X_BC_VER, val);
7580 if (BP_E1HVN(bp) == 0) {
7581 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_PMC, &pmc);
7582 bp->flags |= (pmc & PCI_PM_CAP_PME_D3cold) ? 0 : NO_WOL_FLAG;
7584 /* no WOL capability for E1HVN != 0 */
7585 bp->flags |= NO_WOL_FLAG;
7587 BNX2X_DEV_INFO("%sWoL capable\n",
7588 (bp->flags & NO_WOL_FLAG) ? "Not " : "");
7590 val = SHMEM_RD(bp, dev_info.shared_hw_config.part_num);
7591 val2 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[4]);
7592 val3 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[8]);
7593 val4 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[12]);
7595 printk(KERN_INFO PFX "part number %X-%X-%X-%X\n",
7596 val, val2, val3, val4);
7599 static void __devinit bnx2x_link_settings_supported(struct bnx2x *bp,
7602 int port = BP_PORT(bp);
7605 switch (switch_cfg) {
7607 BNX2X_DEV_INFO("switch_cfg 0x%x (1G)\n", switch_cfg);
7610 SERDES_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
7611 switch (ext_phy_type) {
7612 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT:
7613 BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
7616 bp->port.supported |= (SUPPORTED_10baseT_Half |
7617 SUPPORTED_10baseT_Full |
7618 SUPPORTED_100baseT_Half |
7619 SUPPORTED_100baseT_Full |
7620 SUPPORTED_1000baseT_Full |
7621 SUPPORTED_2500baseX_Full |
7626 SUPPORTED_Asym_Pause);
7629 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_BCM5482:
7630 BNX2X_DEV_INFO("ext_phy_type 0x%x (5482)\n",
7633 bp->port.supported |= (SUPPORTED_10baseT_Half |
7634 SUPPORTED_10baseT_Full |
7635 SUPPORTED_100baseT_Half |
7636 SUPPORTED_100baseT_Full |
7637 SUPPORTED_1000baseT_Full |
7642 SUPPORTED_Asym_Pause);
7646 BNX2X_ERR("NVRAM config error. "
7647 "BAD SerDes ext_phy_config 0x%x\n",
7648 bp->link_params.ext_phy_config);
7652 bp->port.phy_addr = REG_RD(bp, NIG_REG_SERDES0_CTRL_PHY_ADDR +
7654 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
7657 case SWITCH_CFG_10G:
7658 BNX2X_DEV_INFO("switch_cfg 0x%x (10G)\n", switch_cfg);
7661 XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
7662 switch (ext_phy_type) {
7663 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
7664 BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
7667 bp->port.supported |= (SUPPORTED_10baseT_Half |
7668 SUPPORTED_10baseT_Full |
7669 SUPPORTED_100baseT_Half |
7670 SUPPORTED_100baseT_Full |
7671 SUPPORTED_1000baseT_Full |
7672 SUPPORTED_2500baseX_Full |
7673 SUPPORTED_10000baseT_Full |
7678 SUPPORTED_Asym_Pause);
7681 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
7682 BNX2X_DEV_INFO("ext_phy_type 0x%x (8072)\n",
7685 bp->port.supported |= (SUPPORTED_10000baseT_Full |
7686 SUPPORTED_1000baseT_Full |
7690 SUPPORTED_Asym_Pause);
7693 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
7694 BNX2X_DEV_INFO("ext_phy_type 0x%x (8073)\n",
7697 bp->port.supported |= (SUPPORTED_10000baseT_Full |
7698 SUPPORTED_2500baseX_Full |
7699 SUPPORTED_1000baseT_Full |
7703 SUPPORTED_Asym_Pause);
7706 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
7707 BNX2X_DEV_INFO("ext_phy_type 0x%x (8705)\n",
7710 bp->port.supported |= (SUPPORTED_10000baseT_Full |
7713 SUPPORTED_Asym_Pause);
7716 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
7717 BNX2X_DEV_INFO("ext_phy_type 0x%x (8706)\n",
7720 bp->port.supported |= (SUPPORTED_10000baseT_Full |
7721 SUPPORTED_1000baseT_Full |
7724 SUPPORTED_Asym_Pause);
7727 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
7728 BNX2X_DEV_INFO("ext_phy_type 0x%x (8726)\n",
7731 bp->port.supported |= (SUPPORTED_10000baseT_Full |
7732 SUPPORTED_1000baseT_Full |
7736 SUPPORTED_Asym_Pause);
7739 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
7740 BNX2X_DEV_INFO("ext_phy_type 0x%x (SFX7101)\n",
7743 bp->port.supported |= (SUPPORTED_10000baseT_Full |
7747 SUPPORTED_Asym_Pause);
7750 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481:
7751 BNX2X_DEV_INFO("ext_phy_type 0x%x (BCM8481)\n",
7754 bp->port.supported |= (SUPPORTED_10baseT_Half |
7755 SUPPORTED_10baseT_Full |
7756 SUPPORTED_100baseT_Half |
7757 SUPPORTED_100baseT_Full |
7758 SUPPORTED_1000baseT_Full |
7759 SUPPORTED_10000baseT_Full |
7763 SUPPORTED_Asym_Pause);
7766 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
7767 BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
7768 bp->link_params.ext_phy_config);
7772 BNX2X_ERR("NVRAM config error. "
7773 "BAD XGXS ext_phy_config 0x%x\n",
7774 bp->link_params.ext_phy_config);
7778 bp->port.phy_addr = REG_RD(bp, NIG_REG_XGXS0_CTRL_PHY_ADDR +
7780 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
7785 BNX2X_ERR("BAD switch_cfg link_config 0x%x\n",
7786 bp->port.link_config);
7789 bp->link_params.phy_addr = bp->port.phy_addr;
7791 /* mask what we support according to speed_cap_mask */
7792 if (!(bp->link_params.speed_cap_mask &
7793 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF))
7794 bp->port.supported &= ~SUPPORTED_10baseT_Half;
7796 if (!(bp->link_params.speed_cap_mask &
7797 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL))
7798 bp->port.supported &= ~SUPPORTED_10baseT_Full;
7800 if (!(bp->link_params.speed_cap_mask &
7801 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF))
7802 bp->port.supported &= ~SUPPORTED_100baseT_Half;
7804 if (!(bp->link_params.speed_cap_mask &
7805 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL))
7806 bp->port.supported &= ~SUPPORTED_100baseT_Full;
7808 if (!(bp->link_params.speed_cap_mask &
7809 PORT_HW_CFG_SPEED_CAPABILITY_D0_1G))
7810 bp->port.supported &= ~(SUPPORTED_1000baseT_Half |
7811 SUPPORTED_1000baseT_Full);
7813 if (!(bp->link_params.speed_cap_mask &
7814 PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G))
7815 bp->port.supported &= ~SUPPORTED_2500baseX_Full;
7817 if (!(bp->link_params.speed_cap_mask &
7818 PORT_HW_CFG_SPEED_CAPABILITY_D0_10G))
7819 bp->port.supported &= ~SUPPORTED_10000baseT_Full;
7821 BNX2X_DEV_INFO("supported 0x%x\n", bp->port.supported);
7824 static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
7826 bp->link_params.req_duplex = DUPLEX_FULL;
7828 switch (bp->port.link_config & PORT_FEATURE_LINK_SPEED_MASK) {
7829 case PORT_FEATURE_LINK_SPEED_AUTO:
7830 if (bp->port.supported & SUPPORTED_Autoneg) {
7831 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
7832 bp->port.advertising = bp->port.supported;
7835 XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
7837 if ((ext_phy_type ==
7838 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705) ||
7840 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706)) {
7841 /* force 10G, no AN */
7842 bp->link_params.req_line_speed = SPEED_10000;
7843 bp->port.advertising =
7844 (ADVERTISED_10000baseT_Full |
7848 BNX2X_ERR("NVRAM config error. "
7849 "Invalid link_config 0x%x"
7850 " Autoneg not supported\n",
7851 bp->port.link_config);
7856 case PORT_FEATURE_LINK_SPEED_10M_FULL:
7857 if (bp->port.supported & SUPPORTED_10baseT_Full) {
7858 bp->link_params.req_line_speed = SPEED_10;
7859 bp->port.advertising = (ADVERTISED_10baseT_Full |
7862 BNX2X_ERR("NVRAM config error. "
7863 "Invalid link_config 0x%x"
7864 " speed_cap_mask 0x%x\n",
7865 bp->port.link_config,
7866 bp->link_params.speed_cap_mask);
7871 case PORT_FEATURE_LINK_SPEED_10M_HALF:
7872 if (bp->port.supported & SUPPORTED_10baseT_Half) {
7873 bp->link_params.req_line_speed = SPEED_10;
7874 bp->link_params.req_duplex = DUPLEX_HALF;
7875 bp->port.advertising = (ADVERTISED_10baseT_Half |
7878 BNX2X_ERR("NVRAM config error. "
7879 "Invalid link_config 0x%x"
7880 " speed_cap_mask 0x%x\n",
7881 bp->port.link_config,
7882 bp->link_params.speed_cap_mask);
7887 case PORT_FEATURE_LINK_SPEED_100M_FULL:
7888 if (bp->port.supported & SUPPORTED_100baseT_Full) {
7889 bp->link_params.req_line_speed = SPEED_100;
7890 bp->port.advertising = (ADVERTISED_100baseT_Full |
7893 BNX2X_ERR("NVRAM config error. "
7894 "Invalid link_config 0x%x"
7895 " speed_cap_mask 0x%x\n",
7896 bp->port.link_config,
7897 bp->link_params.speed_cap_mask);
7902 case PORT_FEATURE_LINK_SPEED_100M_HALF:
7903 if (bp->port.supported & SUPPORTED_100baseT_Half) {
7904 bp->link_params.req_line_speed = SPEED_100;
7905 bp->link_params.req_duplex = DUPLEX_HALF;
7906 bp->port.advertising = (ADVERTISED_100baseT_Half |
7909 BNX2X_ERR("NVRAM config error. "
7910 "Invalid link_config 0x%x"
7911 " speed_cap_mask 0x%x\n",
7912 bp->port.link_config,
7913 bp->link_params.speed_cap_mask);
7918 case PORT_FEATURE_LINK_SPEED_1G:
7919 if (bp->port.supported & SUPPORTED_1000baseT_Full) {
7920 bp->link_params.req_line_speed = SPEED_1000;
7921 bp->port.advertising = (ADVERTISED_1000baseT_Full |
7924 BNX2X_ERR("NVRAM config error. "
7925 "Invalid link_config 0x%x"
7926 " speed_cap_mask 0x%x\n",
7927 bp->port.link_config,
7928 bp->link_params.speed_cap_mask);
7933 case PORT_FEATURE_LINK_SPEED_2_5G:
7934 if (bp->port.supported & SUPPORTED_2500baseX_Full) {
7935 bp->link_params.req_line_speed = SPEED_2500;
7936 bp->port.advertising = (ADVERTISED_2500baseX_Full |
7939 BNX2X_ERR("NVRAM config error. "
7940 "Invalid link_config 0x%x"
7941 " speed_cap_mask 0x%x\n",
7942 bp->port.link_config,
7943 bp->link_params.speed_cap_mask);
7948 case PORT_FEATURE_LINK_SPEED_10G_CX4:
7949 case PORT_FEATURE_LINK_SPEED_10G_KX4:
7950 case PORT_FEATURE_LINK_SPEED_10G_KR:
7951 if (bp->port.supported & SUPPORTED_10000baseT_Full) {
7952 bp->link_params.req_line_speed = SPEED_10000;
7953 bp->port.advertising = (ADVERTISED_10000baseT_Full |
7956 BNX2X_ERR("NVRAM config error. "
7957 "Invalid link_config 0x%x"
7958 " speed_cap_mask 0x%x\n",
7959 bp->port.link_config,
7960 bp->link_params.speed_cap_mask);
7966 BNX2X_ERR("NVRAM config error. "
7967 "BAD link speed link_config 0x%x\n",
7968 bp->port.link_config);
7969 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
7970 bp->port.advertising = bp->port.supported;
7974 bp->link_params.req_flow_ctrl = (bp->port.link_config &
7975 PORT_FEATURE_FLOW_CONTROL_MASK);
7976 if ((bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO) &&
7977 !(bp->port.supported & SUPPORTED_Autoneg))
7978 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_NONE;
7980 BNX2X_DEV_INFO("req_line_speed %d req_duplex %d req_flow_ctrl 0x%x"
7981 " advertising 0x%x\n",
7982 bp->link_params.req_line_speed,
7983 bp->link_params.req_duplex,
7984 bp->link_params.req_flow_ctrl, bp->port.advertising);
7987 static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp)
7989 int port = BP_PORT(bp);
7994 bp->link_params.bp = bp;
7995 bp->link_params.port = port;
7997 bp->link_params.lane_config =
7998 SHMEM_RD(bp, dev_info.port_hw_config[port].lane_config);
7999 bp->link_params.ext_phy_config =
8001 dev_info.port_hw_config[port].external_phy_config);
8002 bp->link_params.speed_cap_mask =
8004 dev_info.port_hw_config[port].speed_capability_mask);
8006 bp->port.link_config =
8007 SHMEM_RD(bp, dev_info.port_feature_config[port].link_config);
8009 /* Get the 4 lanes xgxs config rx and tx */
8010 for (i = 0; i < 2; i++) {
8012 dev_info.port_hw_config[port].xgxs_config_rx[i<<1]);
8013 bp->link_params.xgxs_config_rx[i << 1] = ((val>>16) & 0xffff);
8014 bp->link_params.xgxs_config_rx[(i << 1) + 1] = (val & 0xffff);
8017 dev_info.port_hw_config[port].xgxs_config_tx[i<<1]);
8018 bp->link_params.xgxs_config_tx[i << 1] = ((val>>16) & 0xffff);
8019 bp->link_params.xgxs_config_tx[(i << 1) + 1] = (val & 0xffff);
8022 config = SHMEM_RD(bp, dev_info.port_feature_config[port].config);
8023 if (config & PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_ENABLED)
8024 bp->link_params.feature_config_flags |=
8025 FEATURE_CONFIG_MODULE_ENFORCMENT_ENABLED;
8027 bp->link_params.feature_config_flags &=
8028 ~FEATURE_CONFIG_MODULE_ENFORCMENT_ENABLED;
8030 /* If the device is capable of WoL, set the default state according
8033 bp->wol = (!(bp->flags & NO_WOL_FLAG) &&
8034 (config & PORT_FEATURE_WOL_ENABLED));
8036 BNX2X_DEV_INFO("lane_config 0x%08x ext_phy_config 0x%08x"
8037 " speed_cap_mask 0x%08x link_config 0x%08x\n",
8038 bp->link_params.lane_config,
8039 bp->link_params.ext_phy_config,
8040 bp->link_params.speed_cap_mask, bp->port.link_config);
8042 bp->link_params.switch_cfg = (bp->port.link_config &
8043 PORT_FEATURE_CONNECTED_SWITCH_MASK);
8044 bnx2x_link_settings_supported(bp, bp->link_params.switch_cfg);
8046 bnx2x_link_settings_requested(bp);
8048 val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper);
8049 val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower);
8050 bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
8051 bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
8052 bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
8053 bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
8054 bp->dev->dev_addr[4] = (u8)(val >> 8 & 0xff);
8055 bp->dev->dev_addr[5] = (u8)(val & 0xff);
8056 memcpy(bp->link_params.mac_addr, bp->dev->dev_addr, ETH_ALEN);
8057 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
8060 static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
8062 int func = BP_FUNC(bp);
8066 bnx2x_get_common_hwinfo(bp);
8070 if (CHIP_IS_E1H(bp)) {
8072 SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
8074 val = (SHMEM_RD(bp, mf_cfg.func_mf_config[func].e1hov_tag) &
8075 FUNC_MF_CFG_E1HOV_TAG_MASK);
8076 if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
8080 BNX2X_DEV_INFO("MF mode E1HOV for func %d is %d "
8082 func, bp->e1hov, bp->e1hov);
8084 BNX2X_DEV_INFO("Single function mode\n");
8086 BNX2X_ERR("!!! No valid E1HOV for func %d,"
8087 " aborting\n", func);
8093 if (!BP_NOMCP(bp)) {
8094 bnx2x_get_port_hwinfo(bp);
8096 bp->fw_seq = (SHMEM_RD(bp, func_mb[func].drv_mb_header) &
8097 DRV_MSG_SEQ_NUMBER_MASK);
8098 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
8102 val2 = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_upper);
8103 val = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_lower);
8104 if ((val2 != FUNC_MF_CFG_UPPERMAC_DEFAULT) &&
8105 (val != FUNC_MF_CFG_LOWERMAC_DEFAULT)) {
8106 bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
8107 bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
8108 bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
8109 bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
8110 bp->dev->dev_addr[4] = (u8)(val >> 8 & 0xff);
8111 bp->dev->dev_addr[5] = (u8)(val & 0xff);
8112 memcpy(bp->link_params.mac_addr, bp->dev->dev_addr,
8114 memcpy(bp->dev->perm_addr, bp->dev->dev_addr,
8122 /* only supposed to happen on emulation/FPGA */
8123 BNX2X_ERR("warning random MAC workaround active\n");
8124 random_ether_addr(bp->dev->dev_addr);
8125 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
8131 static int __devinit bnx2x_init_bp(struct bnx2x *bp)
8133 int func = BP_FUNC(bp);
8137 /* Disable interrupt handling until HW is initialized */
8138 atomic_set(&bp->intr_sem, 1);
8140 mutex_init(&bp->port.phy_mutex);
8142 INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task);
8143 INIT_WORK(&bp->reset_task, bnx2x_reset_task);
8145 rc = bnx2x_get_hwinfo(bp);
8147 /* need to reset chip if undi was active */
8149 bnx2x_undi_unload(bp);
8151 if (CHIP_REV_IS_FPGA(bp))
8152 printk(KERN_ERR PFX "FPGA detected\n");
8154 if (BP_NOMCP(bp) && (func == 0))
8156 "MCP disabled, must load devices in order!\n");
8158 /* Set multi queue mode */
8159 if ((multi_mode != ETH_RSS_MODE_DISABLED) &&
8160 ((int_mode == INT_MODE_INTx) || (int_mode == INT_MODE_MSI))) {
8162 "Multi disabled since int_mode requested is not MSI-X\n");
8163 multi_mode = ETH_RSS_MODE_DISABLED;
8165 bp->multi_mode = multi_mode;
8170 bp->flags &= ~TPA_ENABLE_FLAG;
8171 bp->dev->features &= ~NETIF_F_LRO;
8173 bp->flags |= TPA_ENABLE_FLAG;
8174 bp->dev->features |= NETIF_F_LRO;
8179 bp->tx_ring_size = MAX_TX_AVAIL;
8180 bp->rx_ring_size = MAX_RX_AVAIL;
8187 timer_interval = (CHIP_REV_IS_SLOW(bp) ? 5*HZ : HZ);
8188 bp->current_interval = (poll ? poll : timer_interval);
8190 init_timer(&bp->timer);
8191 bp->timer.expires = jiffies + bp->current_interval;
8192 bp->timer.data = (unsigned long) bp;
8193 bp->timer.function = bnx2x_timer;
8199 * ethtool service functions
8202 /* All ethtool functions called with rtnl_lock */
8204 static int bnx2x_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
8206 struct bnx2x *bp = netdev_priv(dev);
8208 cmd->supported = bp->port.supported;
8209 cmd->advertising = bp->port.advertising;
8211 if (netif_carrier_ok(dev)) {
8212 cmd->speed = bp->link_vars.line_speed;
8213 cmd->duplex = bp->link_vars.duplex;
8215 cmd->speed = bp->link_params.req_line_speed;
8216 cmd->duplex = bp->link_params.req_duplex;
8221 vn_max_rate = ((bp->mf_config & FUNC_MF_CFG_MAX_BW_MASK) >>
8222 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
8223 if (vn_max_rate < cmd->speed)
8224 cmd->speed = vn_max_rate;
8227 if (bp->link_params.switch_cfg == SWITCH_CFG_10G) {
8229 XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
8231 switch (ext_phy_type) {
8232 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
8233 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
8234 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
8235 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
8236 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
8237 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
8238 cmd->port = PORT_FIBRE;
8241 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
8242 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481:
8243 cmd->port = PORT_TP;
8246 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
8247 BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
8248 bp->link_params.ext_phy_config);
8252 DP(NETIF_MSG_LINK, "BAD XGXS ext_phy_config 0x%x\n",
8253 bp->link_params.ext_phy_config);
8257 cmd->port = PORT_TP;
8259 cmd->phy_address = bp->port.phy_addr;
8260 cmd->transceiver = XCVR_INTERNAL;
8262 if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
8263 cmd->autoneg = AUTONEG_ENABLE;
8265 cmd->autoneg = AUTONEG_DISABLE;
8270 DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
8271 DP_LEVEL " supported 0x%x advertising 0x%x speed %d\n"
8272 DP_LEVEL " duplex %d port %d phy_address %d transceiver %d\n"
8273 DP_LEVEL " autoneg %d maxtxpkt %d maxrxpkt %d\n",
8274 cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
8275 cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
8276 cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);
8281 static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
8283 struct bnx2x *bp = netdev_priv(dev);
8289 DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
8290 DP_LEVEL " supported 0x%x advertising 0x%x speed %d\n"
8291 DP_LEVEL " duplex %d port %d phy_address %d transceiver %d\n"
8292 DP_LEVEL " autoneg %d maxtxpkt %d maxrxpkt %d\n",
8293 cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
8294 cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
8295 cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);
8297 if (cmd->autoneg == AUTONEG_ENABLE) {
8298 if (!(bp->port.supported & SUPPORTED_Autoneg)) {
8299 DP(NETIF_MSG_LINK, "Autoneg not supported\n");
8303 /* advertise the requested speed and duplex if supported */
8304 cmd->advertising &= bp->port.supported;
8306 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
8307 bp->link_params.req_duplex = DUPLEX_FULL;
8308 bp->port.advertising |= (ADVERTISED_Autoneg |
8311 } else { /* forced speed */
8312 /* advertise the requested speed and duplex if supported */
8313 switch (cmd->speed) {
8315 if (cmd->duplex == DUPLEX_FULL) {
8316 if (!(bp->port.supported &
8317 SUPPORTED_10baseT_Full)) {
8319 "10M full not supported\n");
8323 advertising = (ADVERTISED_10baseT_Full |
8326 if (!(bp->port.supported &
8327 SUPPORTED_10baseT_Half)) {
8329 "10M half not supported\n");
8333 advertising = (ADVERTISED_10baseT_Half |
8339 if (cmd->duplex == DUPLEX_FULL) {
8340 if (!(bp->port.supported &
8341 SUPPORTED_100baseT_Full)) {
8343 "100M full not supported\n");
8347 advertising = (ADVERTISED_100baseT_Full |
8350 if (!(bp->port.supported &
8351 SUPPORTED_100baseT_Half)) {
8353 "100M half not supported\n");
8357 advertising = (ADVERTISED_100baseT_Half |
8363 if (cmd->duplex != DUPLEX_FULL) {
8364 DP(NETIF_MSG_LINK, "1G half not supported\n");
8368 if (!(bp->port.supported & SUPPORTED_1000baseT_Full)) {
8369 DP(NETIF_MSG_LINK, "1G full not supported\n");
8373 advertising = (ADVERTISED_1000baseT_Full |
8378 if (cmd->duplex != DUPLEX_FULL) {
8380 "2.5G half not supported\n");
8384 if (!(bp->port.supported & SUPPORTED_2500baseX_Full)) {
8386 "2.5G full not supported\n");
8390 advertising = (ADVERTISED_2500baseX_Full |
8395 if (cmd->duplex != DUPLEX_FULL) {
8396 DP(NETIF_MSG_LINK, "10G half not supported\n");
8400 if (!(bp->port.supported & SUPPORTED_10000baseT_Full)) {
8401 DP(NETIF_MSG_LINK, "10G full not supported\n");
8405 advertising = (ADVERTISED_10000baseT_Full |
8410 DP(NETIF_MSG_LINK, "Unsupported speed\n");
8414 bp->link_params.req_line_speed = cmd->speed;
8415 bp->link_params.req_duplex = cmd->duplex;
8416 bp->port.advertising = advertising;
8419 DP(NETIF_MSG_LINK, "req_line_speed %d\n"
8420 DP_LEVEL " req_duplex %d advertising 0x%x\n",
8421 bp->link_params.req_line_speed, bp->link_params.req_duplex,
8422 bp->port.advertising);
8424 if (netif_running(dev)) {
8425 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
8432 #define PHY_FW_VER_LEN 10
8434 static void bnx2x_get_drvinfo(struct net_device *dev,
8435 struct ethtool_drvinfo *info)
8437 struct bnx2x *bp = netdev_priv(dev);
8438 u8 phy_fw_ver[PHY_FW_VER_LEN];
8440 strcpy(info->driver, DRV_MODULE_NAME);
8441 strcpy(info->version, DRV_MODULE_VERSION);
8443 phy_fw_ver[0] = '\0';
8445 bnx2x_acquire_phy_lock(bp);
8446 bnx2x_get_ext_phy_fw_version(&bp->link_params,
8447 (bp->state != BNX2X_STATE_CLOSED),
8448 phy_fw_ver, PHY_FW_VER_LEN);
8449 bnx2x_release_phy_lock(bp);
8452 snprintf(info->fw_version, 32, "BC:%d.%d.%d%s%s",
8453 (bp->common.bc_ver & 0xff0000) >> 16,
8454 (bp->common.bc_ver & 0xff00) >> 8,
8455 (bp->common.bc_ver & 0xff),
8456 ((phy_fw_ver[0] != '\0') ? " PHY:" : ""), phy_fw_ver);
8457 strcpy(info->bus_info, pci_name(bp->pdev));
8458 info->n_stats = BNX2X_NUM_STATS;
8459 info->testinfo_len = BNX2X_NUM_TESTS;
8460 info->eedump_len = bp->common.flash_size;
8461 info->regdump_len = 0;
8464 static void bnx2x_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
8466 struct bnx2x *bp = netdev_priv(dev);
8468 if (bp->flags & NO_WOL_FLAG) {
8472 wol->supported = WAKE_MAGIC;
8474 wol->wolopts = WAKE_MAGIC;
8478 memset(&wol->sopass, 0, sizeof(wol->sopass));
8481 static int bnx2x_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
8483 struct bnx2x *bp = netdev_priv(dev);
8485 if (wol->wolopts & ~WAKE_MAGIC)
8488 if (wol->wolopts & WAKE_MAGIC) {
8489 if (bp->flags & NO_WOL_FLAG)
8499 static u32 bnx2x_get_msglevel(struct net_device *dev)
8501 struct bnx2x *bp = netdev_priv(dev);
8503 return bp->msglevel;
8506 static void bnx2x_set_msglevel(struct net_device *dev, u32 level)
8508 struct bnx2x *bp = netdev_priv(dev);
8510 if (capable(CAP_NET_ADMIN))
8511 bp->msglevel = level;
8514 static int bnx2x_nway_reset(struct net_device *dev)
8516 struct bnx2x *bp = netdev_priv(dev);
8521 if (netif_running(dev)) {
8522 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
8529 static int bnx2x_get_eeprom_len(struct net_device *dev)
8531 struct bnx2x *bp = netdev_priv(dev);
8533 return bp->common.flash_size;
8536 static int bnx2x_acquire_nvram_lock(struct bnx2x *bp)
8538 int port = BP_PORT(bp);
8542 /* adjust timeout for emulation/FPGA */
8543 count = NVRAM_TIMEOUT_COUNT;
8544 if (CHIP_REV_IS_SLOW(bp))
8547 /* request access to nvram interface */
8548 REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
8549 (MCPR_NVM_SW_ARB_ARB_REQ_SET1 << port));
8551 for (i = 0; i < count*10; i++) {
8552 val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
8553 if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))
8559 if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))) {
8560 DP(BNX2X_MSG_NVM, "cannot get access to nvram interface\n");
8567 static int bnx2x_release_nvram_lock(struct bnx2x *bp)
8569 int port = BP_PORT(bp);
8573 /* adjust timeout for emulation/FPGA */
8574 count = NVRAM_TIMEOUT_COUNT;
8575 if (CHIP_REV_IS_SLOW(bp))
8578 /* relinquish nvram interface */
8579 REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
8580 (MCPR_NVM_SW_ARB_ARB_REQ_CLR1 << port));
8582 for (i = 0; i < count*10; i++) {
8583 val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
8584 if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)))
8590 if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)) {
8591 DP(BNX2X_MSG_NVM, "cannot free access to nvram interface\n");
8598 static void bnx2x_enable_nvram_access(struct bnx2x *bp)
8602 val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
8604 /* enable both bits, even on read */
8605 REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
8606 (val | MCPR_NVM_ACCESS_ENABLE_EN |
8607 MCPR_NVM_ACCESS_ENABLE_WR_EN));
8610 static void bnx2x_disable_nvram_access(struct bnx2x *bp)
8614 val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
8616 /* disable both bits, even after read */
8617 REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
8618 (val & ~(MCPR_NVM_ACCESS_ENABLE_EN |
8619 MCPR_NVM_ACCESS_ENABLE_WR_EN)));
8622 static int bnx2x_nvram_read_dword(struct bnx2x *bp, u32 offset, u32 *ret_val,
8628 /* build the command word */
8629 cmd_flags |= MCPR_NVM_COMMAND_DOIT;
8631 /* need to clear DONE bit separately */
8632 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);
8634 /* address of the NVRAM to read from */
8635 REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
8636 (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));
8638 /* issue a read command */
8639 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);
8641 /* adjust timeout for emulation/FPGA */
8642 count = NVRAM_TIMEOUT_COUNT;
8643 if (CHIP_REV_IS_SLOW(bp))
8646 /* wait for completion */
8649 for (i = 0; i < count; i++) {
8651 val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
8653 if (val & MCPR_NVM_COMMAND_DONE) {
8654 val = REG_RD(bp, MCP_REG_MCPR_NVM_READ);
8655 /* we read nvram data in cpu order
8656 * but ethtool sees it as an array of bytes
8657 * converting to big-endian will do the work */
8658 val = cpu_to_be32(val);
8668 static int bnx2x_nvram_read(struct bnx2x *bp, u32 offset, u8 *ret_buf,
8675 if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
8677 "Invalid parameter: offset 0x%x buf_size 0x%x\n",
8682 if (offset + buf_size > bp->common.flash_size) {
8683 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
8684 " buf_size (0x%x) > flash_size (0x%x)\n",
8685 offset, buf_size, bp->common.flash_size);
8689 /* request access to nvram interface */
8690 rc = bnx2x_acquire_nvram_lock(bp);
8694 /* enable access to nvram interface */
8695 bnx2x_enable_nvram_access(bp);
8697 /* read the first word(s) */
8698 cmd_flags = MCPR_NVM_COMMAND_FIRST;
8699 while ((buf_size > sizeof(u32)) && (rc == 0)) {
8700 rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
8701 memcpy(ret_buf, &val, 4);
8703 /* advance to the next dword */
8704 offset += sizeof(u32);
8705 ret_buf += sizeof(u32);
8706 buf_size -= sizeof(u32);
8711 cmd_flags |= MCPR_NVM_COMMAND_LAST;
8712 rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
8713 memcpy(ret_buf, &val, 4);
8716 /* disable access to nvram interface */
8717 bnx2x_disable_nvram_access(bp);
8718 bnx2x_release_nvram_lock(bp);
8723 static int bnx2x_get_eeprom(struct net_device *dev,
8724 struct ethtool_eeprom *eeprom, u8 *eebuf)
8726 struct bnx2x *bp = netdev_priv(dev);
8729 if (!netif_running(dev))
8732 DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
8733 DP_LEVEL " magic 0x%x offset 0x%x (%d) len 0x%x (%d)\n",
8734 eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
8735 eeprom->len, eeprom->len);
8737 /* parameters already validated in ethtool_get_eeprom */
8739 rc = bnx2x_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
8744 static int bnx2x_nvram_write_dword(struct bnx2x *bp, u32 offset, u32 val,
8749 /* build the command word */
8750 cmd_flags |= MCPR_NVM_COMMAND_DOIT | MCPR_NVM_COMMAND_WR;
8752 /* need to clear DONE bit separately */
8753 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);
8755 /* write the data */
8756 REG_WR(bp, MCP_REG_MCPR_NVM_WRITE, val);
8758 /* address of the NVRAM to write to */
8759 REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
8760 (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));
8762 /* issue the write command */
8763 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);
8765 /* adjust timeout for emulation/FPGA */
8766 count = NVRAM_TIMEOUT_COUNT;
8767 if (CHIP_REV_IS_SLOW(bp))
8770 /* wait for completion */
8772 for (i = 0; i < count; i++) {
8774 val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
8775 if (val & MCPR_NVM_COMMAND_DONE) {
8784 #define BYTE_OFFSET(offset) (8 * (offset & 0x03))
8786 static int bnx2x_nvram_write1(struct bnx2x *bp, u32 offset, u8 *data_buf,
8794 if (offset + buf_size > bp->common.flash_size) {
8795 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
8796 " buf_size (0x%x) > flash_size (0x%x)\n",
8797 offset, buf_size, bp->common.flash_size);
8801 /* request access to nvram interface */
8802 rc = bnx2x_acquire_nvram_lock(bp);
8806 /* enable access to nvram interface */
8807 bnx2x_enable_nvram_access(bp);
8809 cmd_flags = (MCPR_NVM_COMMAND_FIRST | MCPR_NVM_COMMAND_LAST);
8810 align_offset = (offset & ~0x03);
8811 rc = bnx2x_nvram_read_dword(bp, align_offset, &val, cmd_flags);
8814 val &= ~(0xff << BYTE_OFFSET(offset));
8815 val |= (*data_buf << BYTE_OFFSET(offset));
8817 /* nvram data is returned as an array of bytes
8818 * convert it back to cpu order */
8819 val = be32_to_cpu(val);
8821 rc = bnx2x_nvram_write_dword(bp, align_offset, val,
8825 /* disable access to nvram interface */
8826 bnx2x_disable_nvram_access(bp);
8827 bnx2x_release_nvram_lock(bp);
8832 static int bnx2x_nvram_write(struct bnx2x *bp, u32 offset, u8 *data_buf,
8840 if (buf_size == 1) /* ethtool */
8841 return bnx2x_nvram_write1(bp, offset, data_buf, buf_size);
8843 if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
8845 "Invalid parameter: offset 0x%x buf_size 0x%x\n",
8850 if (offset + buf_size > bp->common.flash_size) {
8851 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
8852 " buf_size (0x%x) > flash_size (0x%x)\n",
8853 offset, buf_size, bp->common.flash_size);
8857 /* request access to nvram interface */
8858 rc = bnx2x_acquire_nvram_lock(bp);
8862 /* enable access to nvram interface */
8863 bnx2x_enable_nvram_access(bp);
8866 cmd_flags = MCPR_NVM_COMMAND_FIRST;
8867 while ((written_so_far < buf_size) && (rc == 0)) {
8868 if (written_so_far == (buf_size - sizeof(u32)))
8869 cmd_flags |= MCPR_NVM_COMMAND_LAST;
8870 else if (((offset + 4) % NVRAM_PAGE_SIZE) == 0)
8871 cmd_flags |= MCPR_NVM_COMMAND_LAST;
8872 else if ((offset % NVRAM_PAGE_SIZE) == 0)
8873 cmd_flags |= MCPR_NVM_COMMAND_FIRST;
8875 memcpy(&val, data_buf, 4);
8877 rc = bnx2x_nvram_write_dword(bp, offset, val, cmd_flags);
8879 /* advance to the next dword */
8880 offset += sizeof(u32);
8881 data_buf += sizeof(u32);
8882 written_so_far += sizeof(u32);
8886 /* disable access to nvram interface */
8887 bnx2x_disable_nvram_access(bp);
8888 bnx2x_release_nvram_lock(bp);
8893 static int bnx2x_set_eeprom(struct net_device *dev,
8894 struct ethtool_eeprom *eeprom, u8 *eebuf)
8896 struct bnx2x *bp = netdev_priv(dev);
8899 if (!netif_running(dev))
8902 DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
8903 DP_LEVEL " magic 0x%x offset 0x%x (%d) len 0x%x (%d)\n",
8904 eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
8905 eeprom->len, eeprom->len);
8907 /* parameters already validated in ethtool_set_eeprom */
8909 /* If the magic number is PHY (0x00504859) upgrade the PHY FW */
8910 if (eeprom->magic == 0x00504859)
8913 bnx2x_acquire_phy_lock(bp);
8914 rc = bnx2x_flash_download(bp, BP_PORT(bp),
8915 bp->link_params.ext_phy_config,
8916 (bp->state != BNX2X_STATE_CLOSED),
8917 eebuf, eeprom->len);
8918 if ((bp->state == BNX2X_STATE_OPEN) ||
8919 (bp->state == BNX2X_STATE_DISABLED)) {
8920 rc |= bnx2x_link_reset(&bp->link_params,
8922 rc |= bnx2x_phy_init(&bp->link_params,
8925 bnx2x_release_phy_lock(bp);
8927 } else /* Only the PMF can access the PHY */
8930 rc = bnx2x_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
8935 static int bnx2x_get_coalesce(struct net_device *dev,
8936 struct ethtool_coalesce *coal)
8938 struct bnx2x *bp = netdev_priv(dev);
8940 memset(coal, 0, sizeof(struct ethtool_coalesce));
8942 coal->rx_coalesce_usecs = bp->rx_ticks;
8943 coal->tx_coalesce_usecs = bp->tx_ticks;
8948 static int bnx2x_set_coalesce(struct net_device *dev,
8949 struct ethtool_coalesce *coal)
8951 struct bnx2x *bp = netdev_priv(dev);
8953 bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
8954 if (bp->rx_ticks > 3000)
8955 bp->rx_ticks = 3000;
8957 bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
8958 if (bp->tx_ticks > 0x3000)
8959 bp->tx_ticks = 0x3000;
8961 if (netif_running(dev))
8962 bnx2x_update_coalesce(bp);
8967 static void bnx2x_get_ringparam(struct net_device *dev,
8968 struct ethtool_ringparam *ering)
8970 struct bnx2x *bp = netdev_priv(dev);
8972 ering->rx_max_pending = MAX_RX_AVAIL;
8973 ering->rx_mini_max_pending = 0;
8974 ering->rx_jumbo_max_pending = 0;
8976 ering->rx_pending = bp->rx_ring_size;
8977 ering->rx_mini_pending = 0;
8978 ering->rx_jumbo_pending = 0;
8980 ering->tx_max_pending = MAX_TX_AVAIL;
8981 ering->tx_pending = bp->tx_ring_size;
8984 static int bnx2x_set_ringparam(struct net_device *dev,
8985 struct ethtool_ringparam *ering)
8987 struct bnx2x *bp = netdev_priv(dev);
8990 if ((ering->rx_pending > MAX_RX_AVAIL) ||
8991 (ering->tx_pending > MAX_TX_AVAIL) ||
8992 (ering->tx_pending <= MAX_SKB_FRAGS + 4))
8995 bp->rx_ring_size = ering->rx_pending;
8996 bp->tx_ring_size = ering->tx_pending;
8998 if (netif_running(dev)) {
8999 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
9000 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
9006 static void bnx2x_get_pauseparam(struct net_device *dev,
9007 struct ethtool_pauseparam *epause)
9009 struct bnx2x *bp = netdev_priv(dev);
9011 epause->autoneg = (bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO) &&
9012 (bp->link_params.req_line_speed == SPEED_AUTO_NEG);
9014 epause->rx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) ==
9015 BNX2X_FLOW_CTRL_RX);
9016 epause->tx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX) ==
9017 BNX2X_FLOW_CTRL_TX);
9019 DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
9020 DP_LEVEL " autoneg %d rx_pause %d tx_pause %d\n",
9021 epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
9024 static int bnx2x_set_pauseparam(struct net_device *dev,
9025 struct ethtool_pauseparam *epause)
9027 struct bnx2x *bp = netdev_priv(dev);
9032 DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
9033 DP_LEVEL " autoneg %d rx_pause %d tx_pause %d\n",
9034 epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
9036 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO;
9038 if (epause->rx_pause)
9039 bp->link_params.req_flow_ctrl |= BNX2X_FLOW_CTRL_RX;
9041 if (epause->tx_pause)
9042 bp->link_params.req_flow_ctrl |= BNX2X_FLOW_CTRL_TX;
9044 if (bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO)
9045 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_NONE;
9047 if (epause->autoneg) {
9048 if (!(bp->port.supported & SUPPORTED_Autoneg)) {
9049 DP(NETIF_MSG_LINK, "autoneg not supported\n");
9053 if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
9054 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO;
9058 "req_flow_ctrl 0x%x\n", bp->link_params.req_flow_ctrl);
9060 if (netif_running(dev)) {
9061 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
9068 static int bnx2x_set_flags(struct net_device *dev, u32 data)
9070 struct bnx2x *bp = netdev_priv(dev);
9074 /* TPA requires Rx CSUM offloading */
9075 if ((data & ETH_FLAG_LRO) && bp->rx_csum) {
9076 if (!(dev->features & NETIF_F_LRO)) {
9077 dev->features |= NETIF_F_LRO;
9078 bp->flags |= TPA_ENABLE_FLAG;
9082 } else if (dev->features & NETIF_F_LRO) {
9083 dev->features &= ~NETIF_F_LRO;
9084 bp->flags &= ~TPA_ENABLE_FLAG;
9088 if (changed && netif_running(dev)) {
9089 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
9090 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
9096 static u32 bnx2x_get_rx_csum(struct net_device *dev)
9098 struct bnx2x *bp = netdev_priv(dev);
9103 static int bnx2x_set_rx_csum(struct net_device *dev, u32 data)
9105 struct bnx2x *bp = netdev_priv(dev);
9110 /* Disable TPA, when Rx CSUM is disabled. Otherwise all
9111 TPA'ed packets will be discarded due to wrong TCP CSUM */
9113 u32 flags = ethtool_op_get_flags(dev);
9115 rc = bnx2x_set_flags(dev, (flags & ~ETH_FLAG_LRO));
9121 static int bnx2x_set_tso(struct net_device *dev, u32 data)
9124 dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
9125 dev->features |= NETIF_F_TSO6;
9127 dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO_ECN);
9128 dev->features &= ~NETIF_F_TSO6;
9134 static const struct {
9135 char string[ETH_GSTRING_LEN];
9136 } bnx2x_tests_str_arr[BNX2X_NUM_TESTS] = {
9137 { "register_test (offline)" },
9138 { "memory_test (offline)" },
9139 { "loopback_test (offline)" },
9140 { "nvram_test (online)" },
9141 { "interrupt_test (online)" },
9142 { "link_test (online)" },
9143 { "idle check (online)" }
9146 static int bnx2x_self_test_count(struct net_device *dev)
9148 return BNX2X_NUM_TESTS;
9151 static int bnx2x_test_registers(struct bnx2x *bp)
9153 int idx, i, rc = -ENODEV;
9155 int port = BP_PORT(bp);
9156 static const struct {
9161 /* 0 */ { BRB1_REG_PAUSE_LOW_THRESHOLD_0, 4, 0x000003ff },
9162 { DORQ_REG_DB_ADDR0, 4, 0xffffffff },
9163 { HC_REG_AGG_INT_0, 4, 0x000003ff },
9164 { PBF_REG_MAC_IF0_ENABLE, 4, 0x00000001 },
9165 { PBF_REG_P0_INIT_CRD, 4, 0x000007ff },
9166 { PRS_REG_CID_PORT_0, 4, 0x00ffffff },
9167 { PXP2_REG_PSWRQ_CDU0_L2P, 4, 0x000fffff },
9168 { PXP2_REG_RQ_CDU0_EFIRST_MEM_ADDR, 8, 0x0003ffff },
9169 { PXP2_REG_PSWRQ_TM0_L2P, 4, 0x000fffff },
9170 { PXP2_REG_RQ_USDM0_EFIRST_MEM_ADDR, 8, 0x0003ffff },
9171 /* 10 */ { PXP2_REG_PSWRQ_TSDM0_L2P, 4, 0x000fffff },
9172 { QM_REG_CONNNUM_0, 4, 0x000fffff },
9173 { TM_REG_LIN0_MAX_ACTIVE_CID, 4, 0x0003ffff },
9174 { SRC_REG_KEYRSS0_0, 40, 0xffffffff },
9175 { SRC_REG_KEYRSS0_7, 40, 0xffffffff },
9176 { XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD00, 4, 0x00000001 },
9177 { XCM_REG_WU_DA_CNT_CMD00, 4, 0x00000003 },
9178 { XCM_REG_GLB_DEL_ACK_MAX_CNT_0, 4, 0x000000ff },
9179 { NIG_REG_EGRESS_MNG0_FIFO, 20, 0xffffffff },
9180 { NIG_REG_LLH0_T_BIT, 4, 0x00000001 },
9181 /* 20 */ { NIG_REG_EMAC0_IN_EN, 4, 0x00000001 },
9182 { NIG_REG_BMAC0_IN_EN, 4, 0x00000001 },
9183 { NIG_REG_XCM0_OUT_EN, 4, 0x00000001 },
9184 { NIG_REG_BRB0_OUT_EN, 4, 0x00000001 },
9185 { NIG_REG_LLH0_XCM_MASK, 4, 0x00000007 },
9186 { NIG_REG_LLH0_ACPI_PAT_6_LEN, 68, 0x000000ff },
9187 { NIG_REG_LLH0_ACPI_PAT_0_CRC, 68, 0xffffffff },
9188 { NIG_REG_LLH0_DEST_MAC_0_0, 160, 0xffffffff },
9189 { NIG_REG_LLH0_DEST_IP_0_1, 160, 0xffffffff },
9190 { NIG_REG_LLH0_IPV4_IPV6_0, 160, 0x00000001 },
9191 /* 30 */ { NIG_REG_LLH0_DEST_UDP_0, 160, 0x0000ffff },
9192 { NIG_REG_LLH0_DEST_TCP_0, 160, 0x0000ffff },
9193 { NIG_REG_LLH0_VLAN_ID_0, 160, 0x00000fff },
9194 { NIG_REG_XGXS_SERDES0_MODE_SEL, 4, 0x00000001 },
9195 { NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0, 4, 0x00000001 },
9196 { NIG_REG_STATUS_INTERRUPT_PORT0, 4, 0x07ffffff },
9197 { NIG_REG_XGXS0_CTRL_EXTREMOTEMDIOST, 24, 0x00000001 },
9198 { NIG_REG_SERDES0_CTRL_PHY_ADDR, 16, 0x0000001f },
9200 { 0xffffffff, 0, 0x00000000 }
9203 if (!netif_running(bp->dev))
9206 /* Repeat the test twice:
9207 First by writing 0x00000000, second by writing 0xffffffff */
9208 for (idx = 0; idx < 2; idx++) {
9215 wr_val = 0xffffffff;
9219 for (i = 0; reg_tbl[i].offset0 != 0xffffffff; i++) {
9220 u32 offset, mask, save_val, val;
9222 offset = reg_tbl[i].offset0 + port*reg_tbl[i].offset1;
9223 mask = reg_tbl[i].mask;
9225 save_val = REG_RD(bp, offset);
9227 REG_WR(bp, offset, wr_val);
9228 val = REG_RD(bp, offset);
9230 /* Restore the original register's value */
9231 REG_WR(bp, offset, save_val);
9233 /* verify that value is as expected value */
9234 if ((val & mask) != (wr_val & mask))
9245 static int bnx2x_test_memory(struct bnx2x *bp)
9247 int i, j, rc = -ENODEV;
9249 static const struct {
9253 { CCM_REG_XX_DESCR_TABLE, CCM_REG_XX_DESCR_TABLE_SIZE },
9254 { CFC_REG_ACTIVITY_COUNTER, CFC_REG_ACTIVITY_COUNTER_SIZE },
9255 { CFC_REG_LINK_LIST, CFC_REG_LINK_LIST_SIZE },
9256 { DMAE_REG_CMD_MEM, DMAE_REG_CMD_MEM_SIZE },
9257 { TCM_REG_XX_DESCR_TABLE, TCM_REG_XX_DESCR_TABLE_SIZE },
9258 { UCM_REG_XX_DESCR_TABLE, UCM_REG_XX_DESCR_TABLE_SIZE },
9259 { XCM_REG_XX_DESCR_TABLE, XCM_REG_XX_DESCR_TABLE_SIZE },
9263 static const struct {
9269 { "CCM_PRTY_STS", CCM_REG_CCM_PRTY_STS, 0x3ffc0, 0 },
9270 { "CFC_PRTY_STS", CFC_REG_CFC_PRTY_STS, 0x2, 0x2 },
9271 { "DMAE_PRTY_STS", DMAE_REG_DMAE_PRTY_STS, 0, 0 },
9272 { "TCM_PRTY_STS", TCM_REG_TCM_PRTY_STS, 0x3ffc0, 0 },
9273 { "UCM_PRTY_STS", UCM_REG_UCM_PRTY_STS, 0x3ffc0, 0 },
9274 { "XCM_PRTY_STS", XCM_REG_XCM_PRTY_STS, 0x3ffc1, 0 },
9276 { NULL, 0xffffffff, 0, 0 }
9279 if (!netif_running(bp->dev))
9282 /* Go through all the memories */
9283 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++)
9284 for (j = 0; j < mem_tbl[i].size; j++)
9285 REG_RD(bp, mem_tbl[i].offset + j*4);
9287 /* Check the parity status */
9288 for (i = 0; prty_tbl[i].offset != 0xffffffff; i++) {
9289 val = REG_RD(bp, prty_tbl[i].offset);
9290 if ((CHIP_IS_E1(bp) && (val & ~(prty_tbl[i].e1_mask))) ||
9291 (CHIP_IS_E1H(bp) && (val & ~(prty_tbl[i].e1h_mask)))) {
9293 "%s is 0x%x\n", prty_tbl[i].name, val);
9304 static void bnx2x_wait_for_link(struct bnx2x *bp, u8 link_up)
9309 while (bnx2x_link_test(bp) && cnt--)
9313 static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode, u8 link_up)
9315 unsigned int pkt_size, num_pkts, i;
9316 struct sk_buff *skb;
9317 unsigned char *packet;
9318 struct bnx2x_fastpath *fp = &bp->fp[0];
9319 u16 tx_start_idx, tx_idx;
9320 u16 rx_start_idx, rx_idx;
9322 struct sw_tx_bd *tx_buf;
9323 struct eth_tx_bd *tx_bd;
9325 union eth_rx_cqe *cqe;
9327 struct sw_rx_bd *rx_buf;
9331 if (loopback_mode == BNX2X_MAC_LOOPBACK) {
9332 bp->link_params.loopback_mode = LOOPBACK_BMAC;
9333 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
9335 } else if (loopback_mode == BNX2X_PHY_LOOPBACK) {
9337 bp->link_params.loopback_mode = LOOPBACK_XGXS_10;
9338 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
9339 /* wait until link state is restored */
9341 while (cnt-- && bnx2x_test_link(&bp->link_params,
9348 skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
9351 goto test_loopback_exit;
9353 packet = skb_put(skb, pkt_size);
9354 memcpy(packet, bp->dev->dev_addr, ETH_ALEN);
9355 memset(packet + ETH_ALEN, 0, (ETH_HLEN - ETH_ALEN));
9356 for (i = ETH_HLEN; i < pkt_size; i++)
9357 packet[i] = (unsigned char) (i & 0xff);
9360 tx_start_idx = le16_to_cpu(*fp->tx_cons_sb);
9361 rx_start_idx = le16_to_cpu(*fp->rx_cons_sb);
9363 pkt_prod = fp->tx_pkt_prod++;
9364 tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)];
9365 tx_buf->first_bd = fp->tx_bd_prod;
9368 tx_bd = &fp->tx_desc_ring[TX_BD(fp->tx_bd_prod)];
9369 mapping = pci_map_single(bp->pdev, skb->data,
9370 skb_headlen(skb), PCI_DMA_TODEVICE);
9371 tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
9372 tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
9373 tx_bd->nbd = cpu_to_le16(1);
9374 tx_bd->nbytes = cpu_to_le16(skb_headlen(skb));
9375 tx_bd->vlan = cpu_to_le16(pkt_prod);
9376 tx_bd->bd_flags.as_bitfield = (ETH_TX_BD_FLAGS_START_BD |
9377 ETH_TX_BD_FLAGS_END_BD);
9378 tx_bd->general_data = ((UNICAST_ADDRESS <<
9379 ETH_TX_BD_ETH_ADDR_TYPE_SHIFT) | 1);
9383 fp->hw_tx_prods->bds_prod =
9384 cpu_to_le16(le16_to_cpu(fp->hw_tx_prods->bds_prod) + 1);
9385 mb(); /* FW restriction: must not reorder writing nbd and packets */
9386 fp->hw_tx_prods->packets_prod =
9387 cpu_to_le32(le32_to_cpu(fp->hw_tx_prods->packets_prod) + 1);
9388 DOORBELL(bp, FP_IDX(fp), 0);
9394 bp->dev->trans_start = jiffies;
9398 tx_idx = le16_to_cpu(*fp->tx_cons_sb);
9399 if (tx_idx != tx_start_idx + num_pkts)
9400 goto test_loopback_exit;
9402 rx_idx = le16_to_cpu(*fp->rx_cons_sb);
9403 if (rx_idx != rx_start_idx + num_pkts)
9404 goto test_loopback_exit;
9406 cqe = &fp->rx_comp_ring[RCQ_BD(fp->rx_comp_cons)];
9407 cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
9408 if (CQE_TYPE(cqe_fp_flags) || (cqe_fp_flags & ETH_RX_ERROR_FALGS))
9409 goto test_loopback_rx_exit;
9411 len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
9412 if (len != pkt_size)
9413 goto test_loopback_rx_exit;
9415 rx_buf = &fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)];
9417 skb_reserve(skb, cqe->fast_path_cqe.placement_offset);
9418 for (i = ETH_HLEN; i < pkt_size; i++)
9419 if (*(skb->data + i) != (unsigned char) (i & 0xff))
9420 goto test_loopback_rx_exit;
9424 test_loopback_rx_exit:
9426 fp->rx_bd_cons = NEXT_RX_IDX(fp->rx_bd_cons);
9427 fp->rx_bd_prod = NEXT_RX_IDX(fp->rx_bd_prod);
9428 fp->rx_comp_cons = NEXT_RCQ_IDX(fp->rx_comp_cons);
9429 fp->rx_comp_prod = NEXT_RCQ_IDX(fp->rx_comp_prod);
9431 /* Update producers */
9432 bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
9436 bp->link_params.loopback_mode = LOOPBACK_NONE;
9441 static int bnx2x_test_loopback(struct bnx2x *bp, u8 link_up)
9445 if (!netif_running(bp->dev))
9446 return BNX2X_LOOPBACK_FAILED;
9448 bnx2x_netif_stop(bp, 1);
9449 bnx2x_acquire_phy_lock(bp);
9451 if (bnx2x_run_loopback(bp, BNX2X_MAC_LOOPBACK, link_up)) {
9452 DP(NETIF_MSG_PROBE, "MAC loopback failed\n");
9453 rc |= BNX2X_MAC_LOOPBACK_FAILED;
9456 if (bnx2x_run_loopback(bp, BNX2X_PHY_LOOPBACK, link_up)) {
9457 DP(NETIF_MSG_PROBE, "PHY loopback failed\n");
9458 rc |= BNX2X_PHY_LOOPBACK_FAILED;
9461 bnx2x_release_phy_lock(bp);
9462 bnx2x_netif_start(bp);
9467 #define CRC32_RESIDUAL 0xdebb20e3
9469 static int bnx2x_test_nvram(struct bnx2x *bp)
9471 static const struct {
9475 { 0, 0x14 }, /* bootstrap */
9476 { 0x14, 0xec }, /* dir */
9477 { 0x100, 0x350 }, /* manuf_info */
9478 { 0x450, 0xf0 }, /* feature_info */
9479 { 0x640, 0x64 }, /* upgrade_key_info */
9481 { 0x708, 0x70 }, /* manuf_key_info */
9486 u8 *data = (u8 *)buf;
9490 rc = bnx2x_nvram_read(bp, 0, data, 4);
9492 DP(NETIF_MSG_PROBE, "magic value read (rc -%d)\n", -rc);
9493 goto test_nvram_exit;
9496 magic = be32_to_cpu(buf[0]);
9497 if (magic != 0x669955aa) {
9498 DP(NETIF_MSG_PROBE, "magic value (0x%08x)\n", magic);
9500 goto test_nvram_exit;
9503 for (i = 0; nvram_tbl[i].size; i++) {
9505 rc = bnx2x_nvram_read(bp, nvram_tbl[i].offset, data,
9509 "nvram_tbl[%d] read data (rc -%d)\n", i, -rc);
9510 goto test_nvram_exit;
9513 csum = ether_crc_le(nvram_tbl[i].size, data);
9514 if (csum != CRC32_RESIDUAL) {
9516 "nvram_tbl[%d] csum value (0x%08x)\n", i, csum);
9518 goto test_nvram_exit;
9526 static int bnx2x_test_intr(struct bnx2x *bp)
9528 struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
9531 if (!netif_running(bp->dev))
9534 config->hdr.length = 0;
9536 config->hdr.offset = (BP_PORT(bp) ? 32 : 0);
9538 config->hdr.offset = BP_FUNC(bp);
9539 config->hdr.client_id = BP_CL_ID(bp);
9540 config->hdr.reserved1 = 0;
9542 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
9543 U64_HI(bnx2x_sp_mapping(bp, mac_config)),
9544 U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
9546 bp->set_mac_pending++;
9547 for (i = 0; i < 10; i++) {
9548 if (!bp->set_mac_pending)
9550 msleep_interruptible(10);
9559 static void bnx2x_self_test(struct net_device *dev,
9560 struct ethtool_test *etest, u64 *buf)
9562 struct bnx2x *bp = netdev_priv(dev);
9564 memset(buf, 0, sizeof(u64) * BNX2X_NUM_TESTS);
9566 if (!netif_running(dev))
9569 /* offline tests are not supported in MF mode */
9571 etest->flags &= ~ETH_TEST_FL_OFFLINE;
9573 if (etest->flags & ETH_TEST_FL_OFFLINE) {
9576 link_up = bp->link_vars.link_up;
9577 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
9578 bnx2x_nic_load(bp, LOAD_DIAG);
9579 /* wait until link state is restored */
9580 bnx2x_wait_for_link(bp, link_up);
9582 if (bnx2x_test_registers(bp) != 0) {
9584 etest->flags |= ETH_TEST_FL_FAILED;
9586 if (bnx2x_test_memory(bp) != 0) {
9588 etest->flags |= ETH_TEST_FL_FAILED;
9590 buf[2] = bnx2x_test_loopback(bp, link_up);
9592 etest->flags |= ETH_TEST_FL_FAILED;
9594 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
9595 bnx2x_nic_load(bp, LOAD_NORMAL);
9596 /* wait until link state is restored */
9597 bnx2x_wait_for_link(bp, link_up);
9599 if (bnx2x_test_nvram(bp) != 0) {
9601 etest->flags |= ETH_TEST_FL_FAILED;
9603 if (bnx2x_test_intr(bp) != 0) {
9605 etest->flags |= ETH_TEST_FL_FAILED;
9608 if (bnx2x_link_test(bp) != 0) {
9610 etest->flags |= ETH_TEST_FL_FAILED;
9613 #ifdef BNX2X_EXTRA_DEBUG
9614 bnx2x_panic_dump(bp);
9618 static const struct {
9621 u8 string[ETH_GSTRING_LEN];
9622 } bnx2x_q_stats_arr[BNX2X_NUM_Q_STATS] = {
9623 /* 1 */ { Q_STATS_OFFSET32(total_bytes_received_hi), 8, "[%d]: rx_bytes" },
9624 { Q_STATS_OFFSET32(error_bytes_received_hi),
9625 8, "[%d]: rx_error_bytes" },
9626 { Q_STATS_OFFSET32(total_unicast_packets_received_hi),
9627 8, "[%d]: rx_ucast_packets" },
9628 { Q_STATS_OFFSET32(total_multicast_packets_received_hi),
9629 8, "[%d]: rx_mcast_packets" },
9630 { Q_STATS_OFFSET32(total_broadcast_packets_received_hi),
9631 8, "[%d]: rx_bcast_packets" },
9632 { Q_STATS_OFFSET32(no_buff_discard_hi), 8, "[%d]: rx_discards" },
9633 { Q_STATS_OFFSET32(rx_err_discard_pkt),
9634 4, "[%d]: rx_phy_ip_err_discards"},
9635 { Q_STATS_OFFSET32(rx_skb_alloc_failed),
9636 4, "[%d]: rx_skb_alloc_discard" },
9637 { Q_STATS_OFFSET32(hw_csum_err), 4, "[%d]: rx_csum_offload_errors" },
9639 /* 10 */{ Q_STATS_OFFSET32(total_bytes_transmitted_hi), 8, "[%d]: tx_bytes" },
9640 { Q_STATS_OFFSET32(total_unicast_packets_transmitted_hi),
9641 8, "[%d]: tx_packets" }
9644 static const struct {
9648 #define STATS_FLAGS_PORT 1
9649 #define STATS_FLAGS_FUNC 2
9650 #define STATS_FLAGS_BOTH (STATS_FLAGS_FUNC | STATS_FLAGS_PORT)
9651 u8 string[ETH_GSTRING_LEN];
9652 } bnx2x_stats_arr[BNX2X_NUM_STATS] = {
9653 /* 1 */ { STATS_OFFSET32(total_bytes_received_hi),
9654 8, STATS_FLAGS_BOTH, "rx_bytes" },
9655 { STATS_OFFSET32(error_bytes_received_hi),
9656 8, STATS_FLAGS_BOTH, "rx_error_bytes" },
9657 { STATS_OFFSET32(total_unicast_packets_received_hi),
9658 8, STATS_FLAGS_BOTH, "rx_ucast_packets" },
9659 { STATS_OFFSET32(total_multicast_packets_received_hi),
9660 8, STATS_FLAGS_BOTH, "rx_mcast_packets" },
9661 { STATS_OFFSET32(total_broadcast_packets_received_hi),
9662 8, STATS_FLAGS_BOTH, "rx_bcast_packets" },
9663 { STATS_OFFSET32(rx_stat_dot3statsfcserrors_hi),
9664 8, STATS_FLAGS_PORT, "rx_crc_errors" },
9665 { STATS_OFFSET32(rx_stat_dot3statsalignmenterrors_hi),
9666 8, STATS_FLAGS_PORT, "rx_align_errors" },
9667 { STATS_OFFSET32(rx_stat_etherstatsundersizepkts_hi),
9668 8, STATS_FLAGS_PORT, "rx_undersize_packets" },
9669 { STATS_OFFSET32(etherstatsoverrsizepkts_hi),
9670 8, STATS_FLAGS_PORT, "rx_oversize_packets" },
9671 /* 10 */{ STATS_OFFSET32(rx_stat_etherstatsfragments_hi),
9672 8, STATS_FLAGS_PORT, "rx_fragments" },
9673 { STATS_OFFSET32(rx_stat_etherstatsjabbers_hi),
9674 8, STATS_FLAGS_PORT, "rx_jabbers" },
9675 { STATS_OFFSET32(no_buff_discard_hi),
9676 8, STATS_FLAGS_BOTH, "rx_discards" },
9677 { STATS_OFFSET32(mac_filter_discard),
9678 4, STATS_FLAGS_PORT, "rx_filtered_packets" },
9679 { STATS_OFFSET32(xxoverflow_discard),
9680 4, STATS_FLAGS_PORT, "rx_fw_discards" },
9681 { STATS_OFFSET32(brb_drop_hi),
9682 8, STATS_FLAGS_PORT, "rx_brb_discard" },
9683 { STATS_OFFSET32(brb_truncate_hi),
9684 8, STATS_FLAGS_PORT, "rx_brb_truncate" },
9685 { STATS_OFFSET32(pause_frames_received_hi),
9686 8, STATS_FLAGS_PORT, "rx_pause_frames" },
9687 { STATS_OFFSET32(rx_stat_maccontrolframesreceived_hi),
9688 8, STATS_FLAGS_PORT, "rx_mac_ctrl_frames" },
9689 { STATS_OFFSET32(nig_timer_max),
9690 4, STATS_FLAGS_PORT, "rx_constant_pause_events" },
9691 /* 20 */{ STATS_OFFSET32(rx_err_discard_pkt),
9692 4, STATS_FLAGS_BOTH, "rx_phy_ip_err_discards"},
9693 { STATS_OFFSET32(rx_skb_alloc_failed),
9694 4, STATS_FLAGS_BOTH, "rx_skb_alloc_discard" },
9695 { STATS_OFFSET32(hw_csum_err),
9696 4, STATS_FLAGS_BOTH, "rx_csum_offload_errors" },
9698 { STATS_OFFSET32(total_bytes_transmitted_hi),
9699 8, STATS_FLAGS_BOTH, "tx_bytes" },
9700 { STATS_OFFSET32(tx_stat_ifhcoutbadoctets_hi),
9701 8, STATS_FLAGS_PORT, "tx_error_bytes" },
9702 { STATS_OFFSET32(total_unicast_packets_transmitted_hi),
9703 8, STATS_FLAGS_BOTH, "tx_packets" },
9704 { STATS_OFFSET32(tx_stat_dot3statsinternalmactransmiterrors_hi),
9705 8, STATS_FLAGS_PORT, "tx_mac_errors" },
9706 { STATS_OFFSET32(rx_stat_dot3statscarriersenseerrors_hi),
9707 8, STATS_FLAGS_PORT, "tx_carrier_errors" },
9708 { STATS_OFFSET32(tx_stat_dot3statssinglecollisionframes_hi),
9709 8, STATS_FLAGS_PORT, "tx_single_collisions" },
9710 { STATS_OFFSET32(tx_stat_dot3statsmultiplecollisionframes_hi),
9711 8, STATS_FLAGS_PORT, "tx_multi_collisions" },
9712 /* 30 */{ STATS_OFFSET32(tx_stat_dot3statsdeferredtransmissions_hi),
9713 8, STATS_FLAGS_PORT, "tx_deferred" },
9714 { STATS_OFFSET32(tx_stat_dot3statsexcessivecollisions_hi),
9715 8, STATS_FLAGS_PORT, "tx_excess_collisions" },
9716 { STATS_OFFSET32(tx_stat_dot3statslatecollisions_hi),
9717 8, STATS_FLAGS_PORT, "tx_late_collisions" },
9718 { STATS_OFFSET32(tx_stat_etherstatscollisions_hi),
9719 8, STATS_FLAGS_PORT, "tx_total_collisions" },
9720 { STATS_OFFSET32(tx_stat_etherstatspkts64octets_hi),
9721 8, STATS_FLAGS_PORT, "tx_64_byte_packets" },
9722 { STATS_OFFSET32(tx_stat_etherstatspkts65octetsto127octets_hi),
9723 8, STATS_FLAGS_PORT, "tx_65_to_127_byte_packets" },
9724 { STATS_OFFSET32(tx_stat_etherstatspkts128octetsto255octets_hi),
9725 8, STATS_FLAGS_PORT, "tx_128_to_255_byte_packets" },
9726 { STATS_OFFSET32(tx_stat_etherstatspkts256octetsto511octets_hi),
9727 8, STATS_FLAGS_PORT, "tx_256_to_511_byte_packets" },
9728 { STATS_OFFSET32(tx_stat_etherstatspkts512octetsto1023octets_hi),
9729 8, STATS_FLAGS_PORT, "tx_512_to_1023_byte_packets" },
9730 { STATS_OFFSET32(etherstatspkts1024octetsto1522octets_hi),
9731 8, STATS_FLAGS_PORT, "tx_1024_to_1522_byte_packets" },
9732 /* 40 */{ STATS_OFFSET32(etherstatspktsover1522octets_hi),
9733 8, STATS_FLAGS_PORT, "tx_1523_to_9022_byte_packets" },
9734 { STATS_OFFSET32(pause_frames_sent_hi),
9735 8, STATS_FLAGS_PORT, "tx_pause_frames" }
9738 #define IS_PORT_STAT(i) \
9739 ((bnx2x_stats_arr[i].flags & STATS_FLAGS_BOTH) == STATS_FLAGS_PORT)
9740 #define IS_FUNC_STAT(i) (bnx2x_stats_arr[i].flags & STATS_FLAGS_FUNC)
9741 #define IS_E1HMF_MODE_STAT(bp) \
9742 (IS_E1HMF(bp) && !(bp->msglevel & BNX2X_MSG_STATS))
9744 static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
9746 struct bnx2x *bp = netdev_priv(dev);
9749 switch (stringset) {
9753 for_each_queue(bp, i) {
9754 for (j = 0; j < BNX2X_NUM_Q_STATS; j++)
9755 sprintf(buf + (k + j)*ETH_GSTRING_LEN,
9756 bnx2x_q_stats_arr[j].string, i);
9757 k += BNX2X_NUM_Q_STATS;
9759 if (IS_E1HMF_MODE_STAT(bp))
9761 for (j = 0; j < BNX2X_NUM_STATS; j++)
9762 strcpy(buf + (k + j)*ETH_GSTRING_LEN,
9763 bnx2x_stats_arr[j].string);
9765 for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
9766 if (IS_E1HMF_MODE_STAT(bp) && IS_PORT_STAT(i))
9768 strcpy(buf + j*ETH_GSTRING_LEN,
9769 bnx2x_stats_arr[i].string);
9776 memcpy(buf, bnx2x_tests_str_arr, sizeof(bnx2x_tests_str_arr));
9781 static int bnx2x_get_stats_count(struct net_device *dev)
9783 struct bnx2x *bp = netdev_priv(dev);
9787 num_stats = BNX2X_NUM_Q_STATS * BNX2X_NUM_QUEUES(bp);
9788 if (!IS_E1HMF_MODE_STAT(bp))
9789 num_stats += BNX2X_NUM_STATS;
9791 if (IS_E1HMF_MODE_STAT(bp)) {
9793 for (i = 0; i < BNX2X_NUM_STATS; i++)
9794 if (IS_FUNC_STAT(i))
9797 num_stats = BNX2X_NUM_STATS;
9803 static void bnx2x_get_ethtool_stats(struct net_device *dev,
9804 struct ethtool_stats *stats, u64 *buf)
9806 struct bnx2x *bp = netdev_priv(dev);
9807 u32 *hw_stats, *offset;
9812 for_each_queue(bp, i) {
9813 hw_stats = (u32 *)&bp->fp[i].eth_q_stats;
9814 for (j = 0; j < BNX2X_NUM_Q_STATS; j++) {
9815 if (bnx2x_q_stats_arr[j].size == 0) {
9816 /* skip this counter */
9820 offset = (hw_stats +
9821 bnx2x_q_stats_arr[j].offset);
9822 if (bnx2x_q_stats_arr[j].size == 4) {
9823 /* 4-byte counter */
9824 buf[k + j] = (u64) *offset;
9827 /* 8-byte counter */
9828 buf[k + j] = HILO_U64(*offset, *(offset + 1));
9830 k += BNX2X_NUM_Q_STATS;
9832 if (IS_E1HMF_MODE_STAT(bp))
9834 hw_stats = (u32 *)&bp->eth_stats;
9835 for (j = 0; j < BNX2X_NUM_STATS; j++) {
9836 if (bnx2x_stats_arr[j].size == 0) {
9837 /* skip this counter */
9841 offset = (hw_stats + bnx2x_stats_arr[j].offset);
9842 if (bnx2x_stats_arr[j].size == 4) {
9843 /* 4-byte counter */
9844 buf[k + j] = (u64) *offset;
9847 /* 8-byte counter */
9848 buf[k + j] = HILO_U64(*offset, *(offset + 1));
9851 hw_stats = (u32 *)&bp->eth_stats;
9852 for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
9853 if (IS_E1HMF_MODE_STAT(bp) && IS_PORT_STAT(i))
9855 if (bnx2x_stats_arr[i].size == 0) {
9856 /* skip this counter */
9861 offset = (hw_stats + bnx2x_stats_arr[i].offset);
9862 if (bnx2x_stats_arr[i].size == 4) {
9863 /* 4-byte counter */
9864 buf[j] = (u64) *offset;
9868 /* 8-byte counter */
9869 buf[j] = HILO_U64(*offset, *(offset + 1));
9875 static int bnx2x_phys_id(struct net_device *dev, u32 data)
9877 struct bnx2x *bp = netdev_priv(dev);
9878 int port = BP_PORT(bp);
9881 if (!netif_running(dev))
9890 for (i = 0; i < (data * 2); i++) {
9892 bnx2x_set_led(bp, port, LED_MODE_OPER, SPEED_1000,
9893 bp->link_params.hw_led_mode,
9894 bp->link_params.chip_id);
9896 bnx2x_set_led(bp, port, LED_MODE_OFF, 0,
9897 bp->link_params.hw_led_mode,
9898 bp->link_params.chip_id);
9900 msleep_interruptible(500);
9901 if (signal_pending(current))
9905 if (bp->link_vars.link_up)
9906 bnx2x_set_led(bp, port, LED_MODE_OPER,
9907 bp->link_vars.line_speed,
9908 bp->link_params.hw_led_mode,
9909 bp->link_params.chip_id);
9914 static struct ethtool_ops bnx2x_ethtool_ops = {
9915 .get_settings = bnx2x_get_settings,
9916 .set_settings = bnx2x_set_settings,
9917 .get_drvinfo = bnx2x_get_drvinfo,
9918 .get_wol = bnx2x_get_wol,
9919 .set_wol = bnx2x_set_wol,
9920 .get_msglevel = bnx2x_get_msglevel,
9921 .set_msglevel = bnx2x_set_msglevel,
9922 .nway_reset = bnx2x_nway_reset,
9923 .get_link = ethtool_op_get_link,
9924 .get_eeprom_len = bnx2x_get_eeprom_len,
9925 .get_eeprom = bnx2x_get_eeprom,
9926 .set_eeprom = bnx2x_set_eeprom,
9927 .get_coalesce = bnx2x_get_coalesce,
9928 .set_coalesce = bnx2x_set_coalesce,
9929 .get_ringparam = bnx2x_get_ringparam,
9930 .set_ringparam = bnx2x_set_ringparam,
9931 .get_pauseparam = bnx2x_get_pauseparam,
9932 .set_pauseparam = bnx2x_set_pauseparam,
9933 .get_rx_csum = bnx2x_get_rx_csum,
9934 .set_rx_csum = bnx2x_set_rx_csum,
9935 .get_tx_csum = ethtool_op_get_tx_csum,
9936 .set_tx_csum = ethtool_op_set_tx_hw_csum,
9937 .set_flags = bnx2x_set_flags,
9938 .get_flags = ethtool_op_get_flags,
9939 .get_sg = ethtool_op_get_sg,
9940 .set_sg = ethtool_op_set_sg,
9941 .get_tso = ethtool_op_get_tso,
9942 .set_tso = bnx2x_set_tso,
9943 .self_test_count = bnx2x_self_test_count,
9944 .self_test = bnx2x_self_test,
9945 .get_strings = bnx2x_get_strings,
9946 .phys_id = bnx2x_phys_id,
9947 .get_stats_count = bnx2x_get_stats_count,
9948 .get_ethtool_stats = bnx2x_get_ethtool_stats,
9951 /* end of ethtool_ops */
9953 /****************************************************************************
9954 * General service functions
9955 ****************************************************************************/
9957 static int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
9961 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);
9965 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
9966 ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
9967 PCI_PM_CTRL_PME_STATUS));
9969 if (pmcsr & PCI_PM_CTRL_STATE_MASK)
9970 /* delay required during transition out of D3hot */
9975 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
9979 pmcsr |= PCI_PM_CTRL_PME_ENABLE;
9981 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
9984 /* No more memory access after this point until
9985 * device is brought back to D0.
9995 static inline int bnx2x_has_rx_work(struct bnx2x_fastpath *fp)
9999 /* Tell compiler that status block fields can change */
10001 rx_cons_sb = le16_to_cpu(*fp->rx_cons_sb);
10002 if ((rx_cons_sb & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
10004 return (fp->rx_comp_cons != rx_cons_sb);
10008 * net_device service functions
10011 static int bnx2x_poll(struct napi_struct *napi, int budget)
10013 struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
10015 struct bnx2x *bp = fp->bp;
10018 #ifdef BNX2X_STOP_ON_ERROR
10019 if (unlikely(bp->panic))
10023 prefetch(fp->tx_buf_ring[TX_BD(fp->tx_pkt_cons)].skb);
10024 prefetch(fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)].skb);
10025 prefetch((char *)(fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)].skb) + 256);
10027 bnx2x_update_fpsb_idx(fp);
10029 if (bnx2x_has_tx_work(fp))
10030 bnx2x_tx_int(fp, budget);
10032 if (bnx2x_has_rx_work(fp))
10033 work_done = bnx2x_rx_int(fp, budget);
10034 rmb(); /* BNX2X_HAS_WORK() reads the status block */
10036 /* must not complete if we consumed full budget */
10037 if ((work_done < budget) && !BNX2X_HAS_WORK(fp)) {
10039 #ifdef BNX2X_STOP_ON_ERROR
10042 napi_complete(napi);
10044 bnx2x_ack_sb(bp, FP_SB_ID(fp), USTORM_ID,
10045 le16_to_cpu(fp->fp_u_idx), IGU_INT_NOP, 1);
10046 bnx2x_ack_sb(bp, FP_SB_ID(fp), CSTORM_ID,
10047 le16_to_cpu(fp->fp_c_idx), IGU_INT_ENABLE, 1);
10053 /* we split the first BD into headers and data BDs
10054 * to ease the pain of our fellow microcode engineers
10055 * we use one mapping for both BDs
10056 * So far this has only been observed to happen
10057 * in Other Operating Systems(TM)
10059 static noinline u16 bnx2x_tx_split(struct bnx2x *bp,
10060 struct bnx2x_fastpath *fp,
10061 struct eth_tx_bd **tx_bd, u16 hlen,
10062 u16 bd_prod, int nbd)
10064 struct eth_tx_bd *h_tx_bd = *tx_bd;
10065 struct eth_tx_bd *d_tx_bd;
10066 dma_addr_t mapping;
10067 int old_len = le16_to_cpu(h_tx_bd->nbytes);
10069 /* first fix first BD */
10070 h_tx_bd->nbd = cpu_to_le16(nbd);
10071 h_tx_bd->nbytes = cpu_to_le16(hlen);
10073 DP(NETIF_MSG_TX_QUEUED, "TSO split header size is %d "
10074 "(%x:%x) nbd %d\n", h_tx_bd->nbytes, h_tx_bd->addr_hi,
10075 h_tx_bd->addr_lo, h_tx_bd->nbd);
10077 /* now get a new data BD
10078 * (after the pbd) and fill it */
10079 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
10080 d_tx_bd = &fp->tx_desc_ring[bd_prod];
10082 mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi),
10083 le32_to_cpu(h_tx_bd->addr_lo)) + hlen;
10085 d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
10086 d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
10087 d_tx_bd->nbytes = cpu_to_le16(old_len - hlen);
10089 /* this marks the BD as one that has no individual mapping
10090 * the FW ignores this flag in a BD not marked start
10092 d_tx_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_SW_LSO;
10093 DP(NETIF_MSG_TX_QUEUED,
10094 "TSO split data size is %d (%x:%x)\n",
10095 d_tx_bd->nbytes, d_tx_bd->addr_hi, d_tx_bd->addr_lo);
10097 /* update tx_bd for marking the last BD flag */
10103 static inline u16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
10106 csum = (u16) ~csum_fold(csum_sub(csum,
10107 csum_partial(t_header - fix, fix, 0)));
10110 csum = (u16) ~csum_fold(csum_add(csum,
10111 csum_partial(t_header, -fix, 0)));
10113 return swab16(csum);
10116 static inline u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
10120 if (skb->ip_summed != CHECKSUM_PARTIAL)
10124 if (skb->protocol == ntohs(ETH_P_IPV6)) {
10126 if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
10127 rc |= XMIT_CSUM_TCP;
10131 if (ip_hdr(skb)->protocol == IPPROTO_TCP)
10132 rc |= XMIT_CSUM_TCP;
10136 if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
10139 else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
10145 #if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
10146 /* check if packet requires linearization (packet is too fragmented) */
10147 static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
10152 int first_bd_sz = 0;
10154 /* 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */
10155 if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - 3)) {
10157 if (xmit_type & XMIT_GSO) {
10158 unsigned short lso_mss = skb_shinfo(skb)->gso_size;
10159 /* Check if LSO packet needs to be copied:
10160 3 = 1 (for headers BD) + 2 (for PBD and last BD) */
10161 int wnd_size = MAX_FETCH_BD - 3;
10162 /* Number of windows to check */
10163 int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size;
10168 /* Headers length */
10169 hlen = (int)(skb_transport_header(skb) - skb->data) +
10172 /* Amount of data (w/o headers) on linear part of SKB*/
10173 first_bd_sz = skb_headlen(skb) - hlen;
10175 wnd_sum = first_bd_sz;
10177 /* Calculate the first sum - it's special */
10178 for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++)
10180 skb_shinfo(skb)->frags[frag_idx].size;
10182 /* If there was data on linear skb data - check it */
10183 if (first_bd_sz > 0) {
10184 if (unlikely(wnd_sum < lso_mss)) {
10189 wnd_sum -= first_bd_sz;
10192 /* Others are easier: run through the frag list and
10193 check all windows */
10194 for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) {
10196 skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1].size;
10198 if (unlikely(wnd_sum < lso_mss)) {
10203 skb_shinfo(skb)->frags[wnd_idx].size;
10207 /* in non-LSO too fragmented packet should always
10214 if (unlikely(to_copy))
10215 DP(NETIF_MSG_TX_QUEUED,
10216 "Linearization IS REQUIRED for %s packet. "
10217 "num_frags %d hlen %d first_bd_sz %d\n",
10218 (xmit_type & XMIT_GSO) ? "LSO" : "non-LSO",
10219 skb_shinfo(skb)->nr_frags, hlen, first_bd_sz);
10225 /* called with netif_tx_lock
10226 * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
10227 * netif_wake_queue()
10229 static int bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
10231 struct bnx2x *bp = netdev_priv(dev);
10232 struct bnx2x_fastpath *fp;
10233 struct netdev_queue *txq;
10234 struct sw_tx_bd *tx_buf;
10235 struct eth_tx_bd *tx_bd;
10236 struct eth_tx_parse_bd *pbd = NULL;
10237 u16 pkt_prod, bd_prod;
10239 dma_addr_t mapping;
10240 u32 xmit_type = bnx2x_xmit_type(bp, skb);
10241 int vlan_off = (bp->e1hov ? 4 : 0);
10245 #ifdef BNX2X_STOP_ON_ERROR
10246 if (unlikely(bp->panic))
10247 return NETDEV_TX_BUSY;
10250 fp_index = skb_get_queue_mapping(skb);
10251 txq = netdev_get_tx_queue(dev, fp_index);
10253 fp = &bp->fp[fp_index];
10255 if (unlikely(bnx2x_tx_avail(fp) < (skb_shinfo(skb)->nr_frags + 3))) {
10256 fp->eth_q_stats.driver_xoff++,
10257 netif_tx_stop_queue(txq);
10258 BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
10259 return NETDEV_TX_BUSY;
10262 DP(NETIF_MSG_TX_QUEUED, "SKB: summed %x protocol %x protocol(%x,%x)"
10263 " gso type %x xmit_type %x\n",
10264 skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
10265 ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type);
10267 #if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
10268 /* First, check if we need to linearize the skb
10269 (due to FW restrictions) */
10270 if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) {
10271 /* Statistics of linearization */
10273 if (skb_linearize(skb) != 0) {
10274 DP(NETIF_MSG_TX_QUEUED, "SKB linearization failed - "
10275 "silently dropping this SKB\n");
10276 dev_kfree_skb_any(skb);
10277 return NETDEV_TX_OK;
10283 Please read carefully. First we use one BD which we mark as start,
10284 then for TSO or xsum we have a parsing info BD,
10285 and only then we have the rest of the TSO BDs.
10286 (don't forget to mark the last one as last,
10287 and to unmap only AFTER you write to the BD ...)
10288 And above all, all pdb sizes are in words - NOT DWORDS!
10291 pkt_prod = fp->tx_pkt_prod++;
10292 bd_prod = TX_BD(fp->tx_bd_prod);
10294 /* get a tx_buf and first BD */
10295 tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)];
10296 tx_bd = &fp->tx_desc_ring[bd_prod];
10298 tx_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
10299 tx_bd->general_data = (UNICAST_ADDRESS <<
10300 ETH_TX_BD_ETH_ADDR_TYPE_SHIFT);
10302 tx_bd->general_data |= (1 << ETH_TX_BD_HDR_NBDS_SHIFT);
10304 /* remember the first BD of the packet */
10305 tx_buf->first_bd = fp->tx_bd_prod;
10308 DP(NETIF_MSG_TX_QUEUED,
10309 "sending pkt %u @%p next_idx %u bd %u @%p\n",
10310 pkt_prod, tx_buf, fp->tx_pkt_prod, bd_prod, tx_bd);
10313 if ((bp->vlgrp != NULL) && vlan_tx_tag_present(skb) &&
10314 (bp->flags & HW_VLAN_TX_FLAG)) {
10315 tx_bd->vlan = cpu_to_le16(vlan_tx_tag_get(skb));
10316 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_VLAN_TAG;
10320 tx_bd->vlan = cpu_to_le16(pkt_prod);
10323 /* turn on parsing and get a BD */
10324 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
10325 pbd = (void *)&fp->tx_desc_ring[bd_prod];
10327 memset(pbd, 0, sizeof(struct eth_tx_parse_bd));
10330 if (xmit_type & XMIT_CSUM) {
10331 hlen = (skb_network_header(skb) - skb->data + vlan_off) / 2;
10333 /* for now NS flag is not used in Linux */
10334 pbd->global_data = (hlen |
10335 ((skb->protocol == ntohs(ETH_P_8021Q)) <<
10336 ETH_TX_PARSE_BD_LLC_SNAP_EN_SHIFT));
10338 pbd->ip_hlen = (skb_transport_header(skb) -
10339 skb_network_header(skb)) / 2;
10341 hlen += pbd->ip_hlen + tcp_hdrlen(skb) / 2;
10343 pbd->total_hlen = cpu_to_le16(hlen);
10344 hlen = hlen*2 - vlan_off;
10346 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_TCP_CSUM;
10348 if (xmit_type & XMIT_CSUM_V4)
10349 tx_bd->bd_flags.as_bitfield |=
10350 ETH_TX_BD_FLAGS_IP_CSUM;
10352 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IPV6;
10354 if (xmit_type & XMIT_CSUM_TCP) {
10355 pbd->tcp_pseudo_csum = swab16(tcp_hdr(skb)->check);
10358 s8 fix = SKB_CS_OFF(skb); /* signed! */
10360 pbd->global_data |= ETH_TX_PARSE_BD_CS_ANY_FLG;
10361 pbd->cs_offset = fix / 2;
10363 DP(NETIF_MSG_TX_QUEUED,
10364 "hlen %d offset %d fix %d csum before fix %x\n",
10365 le16_to_cpu(pbd->total_hlen), pbd->cs_offset, fix,
10368 /* HW bug: fixup the CSUM */
10369 pbd->tcp_pseudo_csum =
10370 bnx2x_csum_fix(skb_transport_header(skb),
10373 DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n",
10374 pbd->tcp_pseudo_csum);
10378 mapping = pci_map_single(bp->pdev, skb->data,
10379 skb_headlen(skb), PCI_DMA_TODEVICE);
10381 tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
10382 tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
10383 nbd = skb_shinfo(skb)->nr_frags + ((pbd == NULL) ? 1 : 2);
10384 tx_bd->nbd = cpu_to_le16(nbd);
10385 tx_bd->nbytes = cpu_to_le16(skb_headlen(skb));
10387 DP(NETIF_MSG_TX_QUEUED, "first bd @%p addr (%x:%x) nbd %d"
10388 " nbytes %d flags %x vlan %x\n",
10389 tx_bd, tx_bd->addr_hi, tx_bd->addr_lo, le16_to_cpu(tx_bd->nbd),
10390 le16_to_cpu(tx_bd->nbytes), tx_bd->bd_flags.as_bitfield,
10391 le16_to_cpu(tx_bd->vlan));
10393 if (xmit_type & XMIT_GSO) {
10395 DP(NETIF_MSG_TX_QUEUED,
10396 "TSO packet len %d hlen %d total len %d tso size %d\n",
10397 skb->len, hlen, skb_headlen(skb),
10398 skb_shinfo(skb)->gso_size);
10400 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;
10402 if (unlikely(skb_headlen(skb) > hlen))
10403 bd_prod = bnx2x_tx_split(bp, fp, &tx_bd, hlen,
10406 pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
10407 pbd->tcp_send_seq = swab32(tcp_hdr(skb)->seq);
10408 pbd->tcp_flags = pbd_tcp_flags(skb);
10410 if (xmit_type & XMIT_GSO_V4) {
10411 pbd->ip_id = swab16(ip_hdr(skb)->id);
10412 pbd->tcp_pseudo_csum =
10413 swab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
10414 ip_hdr(skb)->daddr,
10415 0, IPPROTO_TCP, 0));
10418 pbd->tcp_pseudo_csum =
10419 swab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
10420 &ipv6_hdr(skb)->daddr,
10421 0, IPPROTO_TCP, 0));
10423 pbd->global_data |= ETH_TX_PARSE_BD_PSEUDO_CS_WITHOUT_LEN;
10426 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
10427 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
10429 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
10430 tx_bd = &fp->tx_desc_ring[bd_prod];
10432 mapping = pci_map_page(bp->pdev, frag->page, frag->page_offset,
10433 frag->size, PCI_DMA_TODEVICE);
10435 tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
10436 tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
10437 tx_bd->nbytes = cpu_to_le16(frag->size);
10438 tx_bd->vlan = cpu_to_le16(pkt_prod);
10439 tx_bd->bd_flags.as_bitfield = 0;
10441 DP(NETIF_MSG_TX_QUEUED,
10442 "frag %d bd @%p addr (%x:%x) nbytes %d flags %x\n",
10443 i, tx_bd, tx_bd->addr_hi, tx_bd->addr_lo,
10444 le16_to_cpu(tx_bd->nbytes), tx_bd->bd_flags.as_bitfield);
10447 /* now at last mark the BD as the last BD */
10448 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_END_BD;
10450 DP(NETIF_MSG_TX_QUEUED, "last bd @%p flags %x\n",
10451 tx_bd, tx_bd->bd_flags.as_bitfield);
10453 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
10455 /* now send a tx doorbell, counting the next BD
10456 * if the packet contains or ends with it
10458 if (TX_BD_POFF(bd_prod) < nbd)
10462 DP(NETIF_MSG_TX_QUEUED,
10463 "PBD @%p ip_data %x ip_hlen %u ip_id %u lso_mss %u"
10464 " tcp_flags %x xsum %x seq %u hlen %u\n",
10465 pbd, pbd->global_data, pbd->ip_hlen, pbd->ip_id,
10466 pbd->lso_mss, pbd->tcp_flags, pbd->tcp_pseudo_csum,
10467 pbd->tcp_send_seq, le16_to_cpu(pbd->total_hlen));
10469 DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d bd %u\n", nbd, bd_prod);
10472 * Make sure that the BD data is updated before updating the producer
10473 * since FW might read the BD right after the producer is updated.
10474 * This is only applicable for weak-ordered memory model archs such
10475 * as IA-64. The following barrier is also mandatory since FW will
10476 * assumes packets must have BDs.
10480 fp->hw_tx_prods->bds_prod =
10481 cpu_to_le16(le16_to_cpu(fp->hw_tx_prods->bds_prod) + nbd);
10482 mb(); /* FW restriction: must not reorder writing nbd and packets */
10483 fp->hw_tx_prods->packets_prod =
10484 cpu_to_le32(le32_to_cpu(fp->hw_tx_prods->packets_prod) + 1);
10485 DOORBELL(bp, FP_IDX(fp), 0);
10489 fp->tx_bd_prod += nbd;
10490 dev->trans_start = jiffies;
10492 if (unlikely(bnx2x_tx_avail(fp) < MAX_SKB_FRAGS + 3)) {
10493 /* We want bnx2x_tx_int to "see" the updated tx_bd_prod
10494 if we put Tx into XOFF state. */
10496 netif_tx_stop_queue(txq);
10497 fp->eth_q_stats.driver_xoff++;
10498 if (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3)
10499 netif_tx_wake_queue(txq);
10503 return NETDEV_TX_OK;
10506 /* called with rtnl_lock */
10507 static int bnx2x_open(struct net_device *dev)
10509 struct bnx2x *bp = netdev_priv(dev);
10511 netif_carrier_off(dev);
10513 bnx2x_set_power_state(bp, PCI_D0);
10515 return bnx2x_nic_load(bp, LOAD_OPEN);
10518 /* called with rtnl_lock */
10519 static int bnx2x_close(struct net_device *dev)
10521 struct bnx2x *bp = netdev_priv(dev);
10523 /* Unload the driver, release IRQs */
10524 bnx2x_nic_unload(bp, UNLOAD_CLOSE);
10525 if (atomic_read(&bp->pdev->enable_cnt) == 1)
10526 if (!CHIP_REV_IS_SLOW(bp))
10527 bnx2x_set_power_state(bp, PCI_D3hot);
10532 /* called with netif_tx_lock from set_multicast */
10533 static void bnx2x_set_rx_mode(struct net_device *dev)
10535 struct bnx2x *bp = netdev_priv(dev);
10536 u32 rx_mode = BNX2X_RX_MODE_NORMAL;
10537 int port = BP_PORT(bp);
10539 if (bp->state != BNX2X_STATE_OPEN) {
10540 DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
10544 DP(NETIF_MSG_IFUP, "dev->flags = %x\n", dev->flags);
10546 if (dev->flags & IFF_PROMISC)
10547 rx_mode = BNX2X_RX_MODE_PROMISC;
10549 else if ((dev->flags & IFF_ALLMULTI) ||
10550 ((dev->mc_count > BNX2X_MAX_MULTICAST) && CHIP_IS_E1(bp)))
10551 rx_mode = BNX2X_RX_MODE_ALLMULTI;
10553 else { /* some multicasts */
10554 if (CHIP_IS_E1(bp)) {
10555 int i, old, offset;
10556 struct dev_mc_list *mclist;
10557 struct mac_configuration_cmd *config =
10558 bnx2x_sp(bp, mcast_config);
10560 for (i = 0, mclist = dev->mc_list;
10561 mclist && (i < dev->mc_count);
10562 i++, mclist = mclist->next) {
10564 config->config_table[i].
10565 cam_entry.msb_mac_addr =
10566 swab16(*(u16 *)&mclist->dmi_addr[0]);
10567 config->config_table[i].
10568 cam_entry.middle_mac_addr =
10569 swab16(*(u16 *)&mclist->dmi_addr[2]);
10570 config->config_table[i].
10571 cam_entry.lsb_mac_addr =
10572 swab16(*(u16 *)&mclist->dmi_addr[4]);
10573 config->config_table[i].cam_entry.flags =
10575 config->config_table[i].
10576 target_table_entry.flags = 0;
10577 config->config_table[i].
10578 target_table_entry.client_id = 0;
10579 config->config_table[i].
10580 target_table_entry.vlan_id = 0;
10583 "setting MCAST[%d] (%04x:%04x:%04x)\n", i,
10584 config->config_table[i].
10585 cam_entry.msb_mac_addr,
10586 config->config_table[i].
10587 cam_entry.middle_mac_addr,
10588 config->config_table[i].
10589 cam_entry.lsb_mac_addr);
10591 old = config->hdr.length;
10593 for (; i < old; i++) {
10594 if (CAM_IS_INVALID(config->
10595 config_table[i])) {
10596 /* already invalidated */
10600 CAM_INVALIDATE(config->
10605 if (CHIP_REV_IS_SLOW(bp))
10606 offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
10608 offset = BNX2X_MAX_MULTICAST*(1 + port);
10610 config->hdr.length = i;
10611 config->hdr.offset = offset;
10612 config->hdr.client_id = bp->fp->cl_id;
10613 config->hdr.reserved1 = 0;
10615 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
10616 U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
10617 U64_LO(bnx2x_sp_mapping(bp, mcast_config)),
10620 /* Accept one or more multicasts */
10621 struct dev_mc_list *mclist;
10622 u32 mc_filter[MC_HASH_SIZE];
10623 u32 crc, bit, regidx;
10626 memset(mc_filter, 0, 4 * MC_HASH_SIZE);
10628 for (i = 0, mclist = dev->mc_list;
10629 mclist && (i < dev->mc_count);
10630 i++, mclist = mclist->next) {
10632 DP(NETIF_MSG_IFUP, "Adding mcast MAC: %pM\n",
10635 crc = crc32c_le(0, mclist->dmi_addr, ETH_ALEN);
10636 bit = (crc >> 24) & 0xff;
10639 mc_filter[regidx] |= (1 << bit);
10642 for (i = 0; i < MC_HASH_SIZE; i++)
10643 REG_WR(bp, MC_HASH_OFFSET(bp, i),
10648 bp->rx_mode = rx_mode;
10649 bnx2x_set_storm_rx_mode(bp);
10652 /* called with rtnl_lock */
10653 static int bnx2x_change_mac_addr(struct net_device *dev, void *p)
10655 struct sockaddr *addr = p;
10656 struct bnx2x *bp = netdev_priv(dev);
10658 if (!is_valid_ether_addr((u8 *)(addr->sa_data)))
10661 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
10662 if (netif_running(dev)) {
10663 if (CHIP_IS_E1(bp))
10664 bnx2x_set_mac_addr_e1(bp, 1);
10666 bnx2x_set_mac_addr_e1h(bp, 1);
10672 /* called with rtnl_lock */
10673 static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
10675 struct mii_ioctl_data *data = if_mii(ifr);
10676 struct bnx2x *bp = netdev_priv(dev);
10677 int port = BP_PORT(bp);
10682 data->phy_id = bp->port.phy_addr;
10686 case SIOCGMIIREG: {
10689 if (!netif_running(dev))
10692 mutex_lock(&bp->port.phy_mutex);
10693 err = bnx2x_cl45_read(bp, port, 0, bp->port.phy_addr,
10694 DEFAULT_PHY_DEV_ADDR,
10695 (data->reg_num & 0x1f), &mii_regval);
10696 data->val_out = mii_regval;
10697 mutex_unlock(&bp->port.phy_mutex);
10702 if (!capable(CAP_NET_ADMIN))
10705 if (!netif_running(dev))
10708 mutex_lock(&bp->port.phy_mutex);
10709 err = bnx2x_cl45_write(bp, port, 0, bp->port.phy_addr,
10710 DEFAULT_PHY_DEV_ADDR,
10711 (data->reg_num & 0x1f), data->val_in);
10712 mutex_unlock(&bp->port.phy_mutex);
10720 return -EOPNOTSUPP;
10723 /* called with rtnl_lock */
10724 static int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
10726 struct bnx2x *bp = netdev_priv(dev);
10729 if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
10730 ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE))
10733 /* This does not race with packet allocation
10734 * because the actual alloc size is
10735 * only updated as part of load
10737 dev->mtu = new_mtu;
10739 if (netif_running(dev)) {
10740 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
10741 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
10747 static void bnx2x_tx_timeout(struct net_device *dev)
10749 struct bnx2x *bp = netdev_priv(dev);
10751 #ifdef BNX2X_STOP_ON_ERROR
10755 /* This allows the netif to be shutdown gracefully before resetting */
10756 schedule_work(&bp->reset_task);
10760 /* called with rtnl_lock */
10761 static void bnx2x_vlan_rx_register(struct net_device *dev,
10762 struct vlan_group *vlgrp)
10764 struct bnx2x *bp = netdev_priv(dev);
10768 /* Set flags according to the required capabilities */
10769 bp->flags &= ~(HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG);
10771 if (dev->features & NETIF_F_HW_VLAN_TX)
10772 bp->flags |= HW_VLAN_TX_FLAG;
10774 if (dev->features & NETIF_F_HW_VLAN_RX)
10775 bp->flags |= HW_VLAN_RX_FLAG;
10777 if (netif_running(dev))
10778 bnx2x_set_client_config(bp);
10783 #if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
10784 static void poll_bnx2x(struct net_device *dev)
10786 struct bnx2x *bp = netdev_priv(dev);
10788 disable_irq(bp->pdev->irq);
10789 bnx2x_interrupt(bp->pdev->irq, dev);
10790 enable_irq(bp->pdev->irq);
10794 static const struct net_device_ops bnx2x_netdev_ops = {
10795 .ndo_open = bnx2x_open,
10796 .ndo_stop = bnx2x_close,
10797 .ndo_start_xmit = bnx2x_start_xmit,
10798 .ndo_set_multicast_list = bnx2x_set_rx_mode,
10799 .ndo_set_mac_address = bnx2x_change_mac_addr,
10800 .ndo_validate_addr = eth_validate_addr,
10801 .ndo_do_ioctl = bnx2x_ioctl,
10802 .ndo_change_mtu = bnx2x_change_mtu,
10803 .ndo_tx_timeout = bnx2x_tx_timeout,
10805 .ndo_vlan_rx_register = bnx2x_vlan_rx_register,
10807 #if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
10808 .ndo_poll_controller = poll_bnx2x,
10813 static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
10814 struct net_device *dev)
10819 SET_NETDEV_DEV(dev, &pdev->dev);
10820 bp = netdev_priv(dev);
10825 bp->func = PCI_FUNC(pdev->devfn);
10827 rc = pci_enable_device(pdev);
10829 printk(KERN_ERR PFX "Cannot enable PCI device, aborting\n");
10833 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
10834 printk(KERN_ERR PFX "Cannot find PCI device base address,"
10837 goto err_out_disable;
10840 if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
10841 printk(KERN_ERR PFX "Cannot find second PCI device"
10842 " base address, aborting\n");
10844 goto err_out_disable;
10847 if (atomic_read(&pdev->enable_cnt) == 1) {
10848 rc = pci_request_regions(pdev, DRV_MODULE_NAME);
10850 printk(KERN_ERR PFX "Cannot obtain PCI resources,"
10852 goto err_out_disable;
10855 pci_set_master(pdev);
10856 pci_save_state(pdev);
10859 bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
10860 if (bp->pm_cap == 0) {
10861 printk(KERN_ERR PFX "Cannot find power management"
10862 " capability, aborting\n");
10864 goto err_out_release;
10867 bp->pcie_cap = pci_find_capability(pdev, PCI_CAP_ID_EXP);
10868 if (bp->pcie_cap == 0) {
10869 printk(KERN_ERR PFX "Cannot find PCI Express capability,"
10872 goto err_out_release;
10875 if (pci_set_dma_mask(pdev, DMA_64BIT_MASK) == 0) {
10876 bp->flags |= USING_DAC_FLAG;
10877 if (pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK) != 0) {
10878 printk(KERN_ERR PFX "pci_set_consistent_dma_mask"
10879 " failed, aborting\n");
10881 goto err_out_release;
10884 } else if (pci_set_dma_mask(pdev, DMA_32BIT_MASK) != 0) {
10885 printk(KERN_ERR PFX "System does not support DMA,"
10888 goto err_out_release;
10891 dev->mem_start = pci_resource_start(pdev, 0);
10892 dev->base_addr = dev->mem_start;
10893 dev->mem_end = pci_resource_end(pdev, 0);
10895 dev->irq = pdev->irq;
10897 bp->regview = pci_ioremap_bar(pdev, 0);
10898 if (!bp->regview) {
10899 printk(KERN_ERR PFX "Cannot map register space, aborting\n");
10901 goto err_out_release;
10904 bp->doorbells = ioremap_nocache(pci_resource_start(pdev, 2),
10905 min_t(u64, BNX2X_DB_SIZE,
10906 pci_resource_len(pdev, 2)));
10907 if (!bp->doorbells) {
10908 printk(KERN_ERR PFX "Cannot map doorbell space, aborting\n");
10910 goto err_out_unmap;
10913 bnx2x_set_power_state(bp, PCI_D0);
10915 /* clean indirect addresses */
10916 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
10917 PCICFG_VENDOR_ID_OFFSET);
10918 REG_WR(bp, PXP2_REG_PGL_ADDR_88_F0 + BP_PORT(bp)*16, 0);
10919 REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F0 + BP_PORT(bp)*16, 0);
10920 REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0 + BP_PORT(bp)*16, 0);
10921 REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0 + BP_PORT(bp)*16, 0);
10923 dev->watchdog_timeo = TX_TIMEOUT;
10925 dev->netdev_ops = &bnx2x_netdev_ops;
10926 dev->ethtool_ops = &bnx2x_ethtool_ops;
10927 dev->features |= NETIF_F_SG;
10928 dev->features |= NETIF_F_HW_CSUM;
10929 if (bp->flags & USING_DAC_FLAG)
10930 dev->features |= NETIF_F_HIGHDMA;
10932 dev->features |= (NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX);
10933 bp->flags |= (HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG);
10935 dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
10936 dev->features |= NETIF_F_TSO6;
10942 iounmap(bp->regview);
10943 bp->regview = NULL;
10945 if (bp->doorbells) {
10946 iounmap(bp->doorbells);
10947 bp->doorbells = NULL;
10951 if (atomic_read(&pdev->enable_cnt) == 1)
10952 pci_release_regions(pdev);
10955 pci_disable_device(pdev);
10956 pci_set_drvdata(pdev, NULL);
10962 static int __devinit bnx2x_get_pcie_width(struct bnx2x *bp)
10964 u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);
10966 val = (val & PCICFG_LINK_WIDTH) >> PCICFG_LINK_WIDTH_SHIFT;
10970 /* return value of 1=2.5GHz 2=5GHz */
10971 static int __devinit bnx2x_get_pcie_speed(struct bnx2x *bp)
10973 u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);
10975 val = (val & PCICFG_LINK_SPEED) >> PCICFG_LINK_SPEED_SHIFT;
10979 static int __devinit bnx2x_init_one(struct pci_dev *pdev,
10980 const struct pci_device_id *ent)
10982 static int version_printed;
10983 struct net_device *dev = NULL;
10987 if (version_printed++ == 0)
10988 printk(KERN_INFO "%s", version);
10990 /* dev zeroed in init_etherdev */
10991 dev = alloc_etherdev_mq(sizeof(*bp), MAX_CONTEXT);
10993 printk(KERN_ERR PFX "Cannot allocate net device\n");
10997 bp = netdev_priv(dev);
10998 bp->msglevel = debug;
11000 rc = bnx2x_init_dev(pdev, dev);
11006 pci_set_drvdata(pdev, dev);
11008 rc = bnx2x_init_bp(bp);
11010 goto init_one_exit;
11012 rc = register_netdev(dev);
11014 dev_err(&pdev->dev, "Cannot register net device\n");
11015 goto init_one_exit;
11018 printk(KERN_INFO "%s: %s (%c%d) PCI-E x%d %s found at mem %lx,"
11019 " IRQ %d, ", dev->name, board_info[ent->driver_data].name,
11020 (CHIP_REV(bp) >> 12) + 'A', (CHIP_METAL(bp) >> 4),
11021 bnx2x_get_pcie_width(bp),
11022 (bnx2x_get_pcie_speed(bp) == 2) ? "5GHz (Gen2)" : "2.5GHz",
11023 dev->base_addr, bp->pdev->irq);
11024 printk(KERN_CONT "node addr %pM\n", dev->dev_addr);
11029 iounmap(bp->regview);
11032 iounmap(bp->doorbells);
11036 if (atomic_read(&pdev->enable_cnt) == 1)
11037 pci_release_regions(pdev);
11039 pci_disable_device(pdev);
11040 pci_set_drvdata(pdev, NULL);
11045 static void __devexit bnx2x_remove_one(struct pci_dev *pdev)
11047 struct net_device *dev = pci_get_drvdata(pdev);
11051 printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
11054 bp = netdev_priv(dev);
11056 unregister_netdev(dev);
11059 iounmap(bp->regview);
11062 iounmap(bp->doorbells);
11066 if (atomic_read(&pdev->enable_cnt) == 1)
11067 pci_release_regions(pdev);
11069 pci_disable_device(pdev);
11070 pci_set_drvdata(pdev, NULL);
11073 static int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
11075 struct net_device *dev = pci_get_drvdata(pdev);
11079 printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
11082 bp = netdev_priv(dev);
11086 pci_save_state(pdev);
11088 if (!netif_running(dev)) {
11093 netif_device_detach(dev);
11095 bnx2x_nic_unload(bp, UNLOAD_CLOSE);
11097 bnx2x_set_power_state(bp, pci_choose_state(pdev, state));
11104 static int bnx2x_resume(struct pci_dev *pdev)
11106 struct net_device *dev = pci_get_drvdata(pdev);
11111 printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
11114 bp = netdev_priv(dev);
11118 pci_restore_state(pdev);
11120 if (!netif_running(dev)) {
11125 bnx2x_set_power_state(bp, PCI_D0);
11126 netif_device_attach(dev);
11128 rc = bnx2x_nic_load(bp, LOAD_OPEN);
11135 static int bnx2x_eeh_nic_unload(struct bnx2x *bp)
11139 bp->state = BNX2X_STATE_ERROR;
11141 bp->rx_mode = BNX2X_RX_MODE_NONE;
11143 bnx2x_netif_stop(bp, 0);
11145 del_timer_sync(&bp->timer);
11146 bp->stats_state = STATS_STATE_DISABLED;
11147 DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");
11150 bnx2x_free_irq(bp);
11152 if (CHIP_IS_E1(bp)) {
11153 struct mac_configuration_cmd *config =
11154 bnx2x_sp(bp, mcast_config);
11156 for (i = 0; i < config->hdr.length; i++)
11157 CAM_INVALIDATE(config->config_table[i]);
11160 /* Free SKBs, SGEs, TPA pool and driver internals */
11161 bnx2x_free_skbs(bp);
11162 for_each_rx_queue(bp, i)
11163 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
11164 for_each_rx_queue(bp, i)
11165 netif_napi_del(&bnx2x_fp(bp, i, napi));
11166 bnx2x_free_mem(bp);
11168 bp->state = BNX2X_STATE_CLOSED;
11170 netif_carrier_off(bp->dev);
11175 static void bnx2x_eeh_recover(struct bnx2x *bp)
11179 mutex_init(&bp->port.phy_mutex);
11181 bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
11182 bp->link_params.shmem_base = bp->common.shmem_base;
11183 BNX2X_DEV_INFO("shmem offset is 0x%x\n", bp->common.shmem_base);
11185 if (!bp->common.shmem_base ||
11186 (bp->common.shmem_base < 0xA0000) ||
11187 (bp->common.shmem_base >= 0xC0000)) {
11188 BNX2X_DEV_INFO("MCP not active\n");
11189 bp->flags |= NO_MCP_FLAG;
11193 val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
11194 if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
11195 != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
11196 BNX2X_ERR("BAD MCP validity signature\n");
11198 if (!BP_NOMCP(bp)) {
11199 bp->fw_seq = (SHMEM_RD(bp, func_mb[BP_FUNC(bp)].drv_mb_header)
11200 & DRV_MSG_SEQ_NUMBER_MASK);
11201 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
11206 * bnx2x_io_error_detected - called when PCI error is detected
11207 * @pdev: Pointer to PCI device
11208 * @state: The current pci connection state
11210 * This function is called after a PCI bus error affecting
11211 * this device has been detected.
11213 static pci_ers_result_t bnx2x_io_error_detected(struct pci_dev *pdev,
11214 pci_channel_state_t state)
11216 struct net_device *dev = pci_get_drvdata(pdev);
11217 struct bnx2x *bp = netdev_priv(dev);
11221 netif_device_detach(dev);
11223 if (netif_running(dev))
11224 bnx2x_eeh_nic_unload(bp);
11226 pci_disable_device(pdev);
11230 /* Request a slot reset */
11231 return PCI_ERS_RESULT_NEED_RESET;
11235 * bnx2x_io_slot_reset - called after the PCI bus has been reset
11236 * @pdev: Pointer to PCI device
11238 * Restart the card from scratch, as if from a cold-boot.
11240 static pci_ers_result_t bnx2x_io_slot_reset(struct pci_dev *pdev)
11242 struct net_device *dev = pci_get_drvdata(pdev);
11243 struct bnx2x *bp = netdev_priv(dev);
11247 if (pci_enable_device(pdev)) {
11248 dev_err(&pdev->dev,
11249 "Cannot re-enable PCI device after reset\n");
11251 return PCI_ERS_RESULT_DISCONNECT;
11254 pci_set_master(pdev);
11255 pci_restore_state(pdev);
11257 if (netif_running(dev))
11258 bnx2x_set_power_state(bp, PCI_D0);
11262 return PCI_ERS_RESULT_RECOVERED;
11266 * bnx2x_io_resume - called when traffic can start flowing again
11267 * @pdev: Pointer to PCI device
11269 * This callback is called when the error recovery driver tells us that
11270 * its OK to resume normal operation.
11272 static void bnx2x_io_resume(struct pci_dev *pdev)
11274 struct net_device *dev = pci_get_drvdata(pdev);
11275 struct bnx2x *bp = netdev_priv(dev);
11279 bnx2x_eeh_recover(bp);
11281 if (netif_running(dev))
11282 bnx2x_nic_load(bp, LOAD_NORMAL);
11284 netif_device_attach(dev);
11289 static struct pci_error_handlers bnx2x_err_handler = {
11290 .error_detected = bnx2x_io_error_detected,
11291 .slot_reset = bnx2x_io_slot_reset,
11292 .resume = bnx2x_io_resume,
11295 static struct pci_driver bnx2x_pci_driver = {
11296 .name = DRV_MODULE_NAME,
11297 .id_table = bnx2x_pci_tbl,
11298 .probe = bnx2x_init_one,
11299 .remove = __devexit_p(bnx2x_remove_one),
11300 .suspend = bnx2x_suspend,
11301 .resume = bnx2x_resume,
11302 .err_handler = &bnx2x_err_handler,
11305 static int __init bnx2x_init(void)
11307 bnx2x_wq = create_singlethread_workqueue("bnx2x");
11308 if (bnx2x_wq == NULL) {
11309 printk(KERN_ERR PFX "Cannot create workqueue\n");
11313 return pci_register_driver(&bnx2x_pci_driver);
11316 static void __exit bnx2x_cleanup(void)
11318 pci_unregister_driver(&bnx2x_pci_driver);
11320 destroy_workqueue(bnx2x_wq);
11323 module_init(bnx2x_init);
11324 module_exit(bnx2x_cleanup);