/* bnx2x_main.c: Broadcom Everest network driver.
 *
 * Copyright (c) 2007-2009 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
 * Written by: Eliezer Tamir
 * Based on code from Michael Chan's bnx2 driver
 * UDP CSUM errata workaround by Arik Gendelman
 * Slowpath rework by Vladislav Zolotarov
 * Statistics and Link management by Yitchak Gertner
 */
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/device.h>  /* for dev_info() */
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/dma-mapping.h>
#include <linux/bitops.h>
#include <linux/irq.h>
#include <linux/delay.h>
#include <asm/byteorder.h>
#include <linux/time.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/if_vlan.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <linux/workqueue.h>
#include <linux/crc32.h>
#include <linux/crc32c.h>
#include <linux/prefetch.h>
#include <linux/zlib.h>

#include "bnx2x_reg.h"
#include "bnx2x_fw_defs.h"
#include "bnx2x_hsi.h"
#include "bnx2x_link.h"
#include "bnx2x_init.h"
#define DRV_MODULE_VERSION	"1.45.26"
#define DRV_MODULE_RELDATE	"2009/01/26"
#define BNX2X_BC_VER		0x040200

/* Time in jiffies before concluding the transmitter is hung */
#define TX_TIMEOUT		(5*HZ)

static char version[] __devinitdata =
	"Broadcom NetXtreme II 5771x 10Gigabit Ethernet Driver "
	DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
MODULE_AUTHOR("Eliezer Tamir");
MODULE_DESCRIPTION("Broadcom NetXtreme II BCM57710/57711/57711E Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);

static int multi_mode = 1;
module_param(multi_mode, int, 0);

static int disable_tpa;
static int int_mode;
static int poll;
static int debug;
static int load_count[3]; /* 0-common, 1-port0, 2-port1 */

module_param(disable_tpa, int, 0);
module_param(int_mode, int, 0);
MODULE_PARM_DESC(int_mode, " Force interrupt mode (1 INT#x; 2 MSI)");
module_param(poll, int, 0);
module_param(debug, int, 0);
MODULE_PARM_DESC(disable_tpa, "disable the TPA (LRO) feature");
MODULE_PARM_DESC(poll, "use polling (for debug)");
MODULE_PARM_DESC(debug, "default debug msglevel");
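/* Usage note (illustrative, not part of the original source): since all of
 * the above use module_param(..., 0), they can only be set at load time,
 * e.g.
 *
 *	modprobe bnx2x multi_mode=1 int_mode=2 disable_tpa=1 debug=0x1
 *
 * where int_mode=1 forces INT#x and int_mode=2 forces MSI instead of the
 * default MSI-X, per the parameter descriptions above.
 */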
static struct workqueue_struct *bnx2x_wq;
enum bnx2x_board_type {
	BCM57710 = 0,
	BCM57711,
	BCM57711E,
};

/* indexed by board_type, above */
static struct {
	char *name;
} board_info[] __devinitdata = {
	{ "Broadcom NetXtreme II BCM57710 XGb" },
	{ "Broadcom NetXtreme II BCM57711 XGb" },
	{ "Broadcom NetXtreme II BCM57711E XGb" }
};
static const struct pci_device_id bnx2x_pci_tbl[] = {
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57710,
		PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57710 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57711,
		PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57711 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57711E,
		PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57711E },
	{ 0 }
};

MODULE_DEVICE_TABLE(pci, bnx2x_pci_tbl);
/****************************************************************************
* General service functions
****************************************************************************/

/* used only at init
 * locking is done by mcp
 */
static void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val)
{
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_DATA, val);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);
}

static u32 bnx2x_reg_rd_ind(struct bnx2x *bp, u32 addr)
{
	u32 val;

	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
	pci_read_config_dword(bp->pdev, PCICFG_GRC_DATA, &val);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);

	return val;
}
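/* Sketch (illustrative, not part of the original file): the two helpers
 * above implement indirect GRC access through the PCI config window -
 * the target GRC address goes into PCICFG_GRC_ADDRESS and the data moves
 * through PCICFG_GRC_DATA:
 *
 *	bnx2x_reg_wr_ind(bp, grc_offset, val);
 *	val = bnx2x_reg_rd_ind(bp, grc_offset);
 *
 * (grc_offset is a placeholder for any GRC register offset.)  Restoring
 * the window to PCICFG_VENDOR_ID_OFFSET afterwards keeps normal config
 * space reads working.
 */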
static const u32 dmae_reg_go_c[] = {
	DMAE_REG_GO_C0, DMAE_REG_GO_C1, DMAE_REG_GO_C2, DMAE_REG_GO_C3,
	DMAE_REG_GO_C4, DMAE_REG_GO_C5, DMAE_REG_GO_C6, DMAE_REG_GO_C7,
	DMAE_REG_GO_C8, DMAE_REG_GO_C9, DMAE_REG_GO_C10, DMAE_REG_GO_C11,
	DMAE_REG_GO_C12, DMAE_REG_GO_C13, DMAE_REG_GO_C14, DMAE_REG_GO_C15
};

/* copy command into DMAE command memory and set DMAE command go */
static void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae,
			    int idx)
{
	u32 cmd_offset;
	int i;

	cmd_offset = (DMAE_REG_CMD_MEM + sizeof(struct dmae_command) * idx);
	for (i = 0; i < (sizeof(struct dmae_command)/4); i++) {
		REG_WR(bp, cmd_offset + i*4, *(((u32 *)dmae) + i));

		DP(BNX2X_MSG_OFF, "DMAE cmd[%d].%d (0x%08x) : 0x%08x\n",
		   idx, i, cmd_offset + i*4, *(((u32 *)dmae) + i));
	}
	REG_WR(bp, dmae_reg_go_c[idx], 1);
}
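/* As the loop above shows, a DMAE command is just an array of dwords:
 * cell idx of DMAE_REG_CMD_MEM is filled word by word and then kicked by
 * writing 1 to the matching dmae_reg_go_c[idx] register.
 */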
void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
		      u32 len32)
{
	struct dmae_command *dmae = &bp->init_dmae;
	u32 *wb_comp = bnx2x_sp(bp, wb_comp);
	int cnt = 200;

	if (!bp->dmae_ready) {
		u32 *data = bnx2x_sp(bp, wb_data[0]);

		DP(BNX2X_MSG_OFF, "DMAE is not ready (dst_addr %08x len32 %d)"
		   " using indirect\n", dst_addr, len32);
		bnx2x_init_ind_wr(bp, dst_addr, data, len32);
		return;
	}

	mutex_lock(&bp->dmae_mutex);

	memset(dmae, 0, sizeof(struct dmae_command));

	dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
			DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
			DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
			DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
			DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
			(BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
			(BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
	dmae->src_addr_lo = U64_LO(dma_addr);
	dmae->src_addr_hi = U64_HI(dma_addr);
	dmae->dst_addr_lo = dst_addr >> 2;
	dmae->dst_addr_hi = 0;
	dmae->len = len32;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	DP(BNX2X_MSG_OFF, "dmae: opcode 0x%08x\n"
	   DP_LEVEL "src_addr [%x:%08x] len [%d *4] "
		    "dst_addr [%x:%08x (%08x)]\n"
	   DP_LEVEL "comp_addr [%x:%08x] comp_val 0x%08x\n",
	   dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
	   dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo, dst_addr,
	   dmae->comp_addr_hi, dmae->comp_addr_lo, dmae->comp_val);
	DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
	   bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
	   bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);

	*wb_comp = 0;

	bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));

	udelay(5);

	while (*wb_comp != DMAE_COMP_VAL) {
		DP(BNX2X_MSG_OFF, "wb_comp 0x%08x\n", *wb_comp);

		if (!cnt) {
			BNX2X_ERR("dmae timeout!\n");
			break;
		}
		cnt--;
		/* adjust delay for emulation/FPGA */
		if (CHIP_REV_IS_SLOW(bp))
			msleep(100);
		else
			udelay(5);
	}

	mutex_unlock(&bp->dmae_mutex);
}
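/* Usage sketch (illustrative, not part of the original file): to DMA four
 * dwords of slowpath memory into GRC space one could do
 *
 *	memcpy(bnx2x_sp(bp, wb_data[0]), src, 4 * sizeof(u32));
 *	bnx2x_write_dmae(bp, bnx2x_sp_mapping(bp, wb_data), dst_addr, 4);
 *
 * (src and dst_addr are placeholders.)  The call polls wb_comp under
 * dmae_mutex until the completion value arrives, so it must not be used
 * from atomic context.
 */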
void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
{
	struct dmae_command *dmae = &bp->init_dmae;
	u32 *wb_comp = bnx2x_sp(bp, wb_comp);
	int cnt = 200;

	if (!bp->dmae_ready) {
		u32 *data = bnx2x_sp(bp, wb_data[0]);
		int i;

		DP(BNX2X_MSG_OFF, "DMAE is not ready (src_addr %08x len32 %d)"
		   " using indirect\n", src_addr, len32);
		for (i = 0; i < len32; i++)
			data[i] = bnx2x_reg_rd_ind(bp, src_addr + i*4);
		return;
	}

	mutex_lock(&bp->dmae_mutex);

	memset(bnx2x_sp(bp, wb_data[0]), 0, sizeof(u32) * 4);
	memset(dmae, 0, sizeof(struct dmae_command));

	dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
			DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
			DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
			DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
			DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
			(BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
			(BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
	dmae->src_addr_lo = src_addr >> 2;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_data));
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_data));
	dmae->len = len32;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	DP(BNX2X_MSG_OFF, "dmae: opcode 0x%08x\n"
	   DP_LEVEL "src_addr [%x:%08x] len [%d *4] "
		    "dst_addr [%x:%08x (%08x)]\n"
	   DP_LEVEL "comp_addr [%x:%08x] comp_val 0x%08x\n",
	   dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
	   dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo, src_addr,
	   dmae->comp_addr_hi, dmae->comp_addr_lo, dmae->comp_val);

	*wb_comp = 0;

	bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));

	udelay(5);

	while (*wb_comp != DMAE_COMP_VAL) {

		if (!cnt) {
			BNX2X_ERR("dmae timeout!\n");
			break;
		}
		cnt--;
		/* adjust delay for emulation/FPGA */
		if (CHIP_REV_IS_SLOW(bp))
			msleep(100);
		else
			udelay(5);
	}
	DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
	   bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
	   bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);

	mutex_unlock(&bp->dmae_mutex);
}
/* used only for slowpath so not inlined */
static void bnx2x_wb_wr(struct bnx2x *bp, int reg, u32 val_hi, u32 val_lo)
{
	u32 wb_write[2];

	wb_write[0] = val_hi;
	wb_write[1] = val_lo;
	REG_WR_DMAE(bp, reg, wb_write, 2);
}

static u64 bnx2x_wb_rd(struct bnx2x *bp, int reg)
{
	u32 wb_data[2];

	REG_RD_DMAE(bp, reg, wb_data, 2);

	return HILO_U64(wb_data[0], wb_data[1]);
}
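/* The helpers above exist so that a "wide-bus" (64-bit) register can be
 * moved as one coherent two-dword DMAE transfer rather than two
 * independent register cycles; e.g.
 *
 *	bnx2x_wb_wr(bp, reg, U64_HI(val64), U64_LO(val64));
 *	val64 = bnx2x_wb_rd(bp, reg);
 *
 * (reg and val64 are placeholders in this illustrative sketch.)
 */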
static int bnx2x_mc_assert(struct bnx2x *bp)
{
	char last_idx;
	int i, rc = 0;
	u32 row0, row1, row2, row3;

	/* XSTORM */
	last_idx = REG_RD8(bp, BAR_XSTRORM_INTMEM +
			   XSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("XSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("XSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* TSTORM */
	last_idx = REG_RD8(bp, BAR_TSTRORM_INTMEM +
			   TSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("TSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("TSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* CSTORM */
	last_idx = REG_RD8(bp, BAR_CSTRORM_INTMEM +
			   CSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("CSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("CSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* USTORM */
	last_idx = REG_RD8(bp, BAR_USTRORM_INTMEM +
			   USTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("USTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("USTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	return rc;
}
static void bnx2x_fw_dump(struct bnx2x *bp)
{
	u32 mark, offset;
	__be32 data[9];
	int word;

	mark = REG_RD(bp, MCP_REG_MCPR_SCRATCH + 0xf104);
	mark = ((mark + 0x3) & ~0x3);
	printk(KERN_ERR PFX "begin fw dump (mark 0x%x)\n", mark);

	for (offset = mark - 0x08000000; offset <= 0xF900; offset += 0x8*4) {
		for (word = 0; word < 8; word++)
			data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
						  offset + 4*word));
		data[8] = 0x0;
		printk(KERN_CONT "%s", (char *)data);
	}
	for (offset = 0xF108; offset <= mark - 0x08000000; offset += 0x8*4) {
		for (word = 0; word < 8; word++)
			data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
						  offset + 4*word));
		data[8] = 0x0;
		printk(KERN_CONT "%s", (char *)data);
	}
	printk(KERN_CONT "\n");
	printk(KERN_ERR PFX "end of fw dump\n");
}
static void bnx2x_panic_dump(struct bnx2x *bp)
{
	int i;
	u16 j, start, end;

	bp->stats_state = STATS_STATE_DISABLED;
	DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");

	BNX2X_ERR("begin crash dump -----------------\n");

	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];
		struct eth_tx_db_data *hw_prods = fp->hw_tx_prods;

		BNX2X_ERR("queue[%d]: tx_pkt_prod(%x) tx_pkt_cons(%x)"
			  " tx_bd_prod(%x) tx_bd_cons(%x) *tx_cons_sb(%x)\n",
			  i, fp->tx_pkt_prod, fp->tx_pkt_cons, fp->tx_bd_prod,
			  fp->tx_bd_cons, le16_to_cpu(*fp->tx_cons_sb));
		BNX2X_ERR(" rx_bd_prod(%x) rx_bd_cons(%x)"
			  " *rx_bd_cons_sb(%x) rx_comp_prod(%x)"
			  " rx_comp_cons(%x) *rx_cons_sb(%x)\n",
			  fp->rx_bd_prod, fp->rx_bd_cons,
			  le16_to_cpu(*fp->rx_bd_cons_sb), fp->rx_comp_prod,
			  fp->rx_comp_cons, le16_to_cpu(*fp->rx_cons_sb));
		BNX2X_ERR(" rx_sge_prod(%x) last_max_sge(%x)"
			  " fp_c_idx(%x) *sb_c_idx(%x) fp_u_idx(%x)"
			  " *sb_u_idx(%x) bd data(%x,%x)\n",
			  fp->rx_sge_prod, fp->last_max_sge, fp->fp_c_idx,
			  fp->status_blk->c_status_block.status_block_index,
			  fp->fp_u_idx,
			  fp->status_blk->u_status_block.status_block_index,
			  hw_prods->packets_prod, hw_prods->bds_prod);

		start = TX_BD(le16_to_cpu(*fp->tx_cons_sb) - 10);
		end = TX_BD(le16_to_cpu(*fp->tx_cons_sb) + 245);
		for (j = start; j < end; j++) {
			struct sw_tx_bd *sw_bd = &fp->tx_buf_ring[j];

			BNX2X_ERR("packet[%x]=[%p,%x]\n", j,
				  sw_bd->skb, sw_bd->first_bd);
		}

		start = TX_BD(fp->tx_bd_cons - 10);
		end = TX_BD(fp->tx_bd_cons + 254);
		for (j = start; j < end; j++) {
			u32 *tx_bd = (u32 *)&fp->tx_desc_ring[j];

			BNX2X_ERR("tx_bd[%x]=[%x:%x:%x:%x]\n",
				  j, tx_bd[0], tx_bd[1], tx_bd[2], tx_bd[3]);
		}

		start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10);
		end = RX_BD(le16_to_cpu(*fp->rx_cons_sb) + 503);
		for (j = start; j < end; j++) {
			u32 *rx_bd = (u32 *)&fp->rx_desc_ring[j];
			struct sw_rx_bd *sw_bd = &fp->rx_buf_ring[j];

			BNX2X_ERR("rx_bd[%x]=[%x:%x] sw_bd=[%p]\n",
				  j, rx_bd[1], rx_bd[0], sw_bd->skb);
		}

		start = RX_SGE(fp->rx_sge_prod);
		end = RX_SGE(fp->last_max_sge);
		for (j = start; j < end; j++) {
			u32 *rx_sge = (u32 *)&fp->rx_sge_ring[j];
			struct sw_rx_page *sw_page = &fp->rx_page_ring[j];

			BNX2X_ERR("rx_sge[%x]=[%x:%x] sw_page=[%p]\n",
				  j, rx_sge[1], rx_sge[0], sw_page->page);
		}

		start = RCQ_BD(fp->rx_comp_cons - 10);
		end = RCQ_BD(fp->rx_comp_cons + 503);
		for (j = start; j < end; j++) {
			u32 *cqe = (u32 *)&fp->rx_comp_ring[j];

			BNX2X_ERR("cqe[%x]=[%x:%x:%x:%x]\n",
				  j, cqe[0], cqe[1], cqe[2], cqe[3]);
		}
	}

	BNX2X_ERR("def_c_idx(%u) def_u_idx(%u) def_x_idx(%u)"
		  " def_t_idx(%u) def_att_idx(%u) attn_state(%u)"
		  " spq_prod_idx(%u)\n",
		  bp->def_c_idx, bp->def_u_idx, bp->def_x_idx, bp->def_t_idx,
		  bp->def_att_idx, bp->attn_state, bp->spq_prod_idx);

	bnx2x_fw_dump(bp);
	bnx2x_mc_assert(bp);
	BNX2X_ERR("end crash dump -----------------\n");
}
static void bnx2x_int_enable(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
	u32 val = REG_RD(bp, addr);
	int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
	int msi = (bp->flags & USING_MSI_FLAG) ? 1 : 0;

	if (msix) {
		val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			 HC_CONFIG_0_REG_INT_LINE_EN_0);
		val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);
	} else if (msi) {
		val &= ~HC_CONFIG_0_REG_INT_LINE_EN_0;
		val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);
	} else {
		val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_INT_LINE_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);

		DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
		   val, port, addr);

		REG_WR(bp, addr, val);

		val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0;
	}

	DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x) mode %s\n",
	   val, port, addr, (msix ? "MSI-X" : (msi ? "MSI" : "INTx")));

	REG_WR(bp, addr, val);

	if (CHIP_IS_E1H(bp)) {
		/* init leading/trailing edge */
		if (IS_E1HMF(bp)) {
			val = (0xee0f | (1 << (BP_E1HVN(bp) + 4)));
			if (bp->port.pmf)
				/* enable nig and gpio3 attention */
				val |= 0x1100;
		} else
			val = 0xffff;

		REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
		REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
	}
}
static void bnx2x_int_disable(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
	u32 val = REG_RD(bp, addr);

	val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
		 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
		 HC_CONFIG_0_REG_INT_LINE_EN_0 |
		 HC_CONFIG_0_REG_ATTN_BIT_EN_0);

	DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
	   val, port, addr);

	/* flush all outstanding writes */
	mmiowb();

	REG_WR(bp, addr, val);
	if (REG_RD(bp, addr) != val)
		BNX2X_ERR("BUG! proper val not read from IGU!\n");
}
static void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw)
{
	int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
	int i, offset;

	/* disable interrupt handling */
	atomic_inc(&bp->intr_sem);
	if (disable_hw)
		/* prevent the HW from sending interrupts */
		bnx2x_int_disable(bp);

	/* make sure all ISRs are done */
	if (msix) {
		synchronize_irq(bp->msix_table[0].vector);
		offset = 1;
		for_each_queue(bp, i)
			synchronize_irq(bp->msix_table[i + offset].vector);
	} else
		synchronize_irq(bp->pdev->irq);

	/* make sure sp_task is not running */
	cancel_delayed_work(&bp->sp_task);
	flush_workqueue(bnx2x_wq);
}
/* fast path */

/*
 * General service functions
 */

static inline void bnx2x_ack_sb(struct bnx2x *bp, u8 sb_id,
				u8 storm, u16 index, u8 op, u8 update)
{
	u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
		       COMMAND_REG_INT_ACK);
	struct igu_ack_register igu_ack;

	igu_ack.status_block_index = index;
	igu_ack.sb_id_and_flags =
			((sb_id << IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT) |
			 (storm << IGU_ACK_REGISTER_STORM_ID_SHIFT) |
			 (update << IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT) |
			 (op << IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT));

	DP(BNX2X_MSG_OFF, "write 0x%08x to HC addr 0x%x\n",
	   (*(u32 *)&igu_ack), hc_addr);
	REG_WR(bp, hc_addr, (*(u32 *)&igu_ack));
}
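/* Example (illustrative, not part of the original file): a NAPI poller
 * would re-enable the line after consuming events, mirroring the
 * IGU_INT_DISABLE ack used in the ISRs below:
 *
 *	bnx2x_ack_sb(bp, FP_SB_ID(fp), USTORM_ID, fp->fp_u_idx,
 *		     IGU_INT_ENABLE, 1);
 *
 * All four fields are packed into one igu_ack_register dword, so an ack
 * costs a single register write.
 */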
static inline u16 bnx2x_update_fpsb_idx(struct bnx2x_fastpath *fp)
{
	struct host_status_block *fpsb = fp->status_blk;
	u16 rc = 0;

	barrier(); /* status block is written to by the chip */
	if (fp->fp_c_idx != fpsb->c_status_block.status_block_index) {
		fp->fp_c_idx = fpsb->c_status_block.status_block_index;
		rc |= 1;
	}
	if (fp->fp_u_idx != fpsb->u_status_block.status_block_index) {
		fp->fp_u_idx = fpsb->u_status_block.status_block_index;
		rc |= 2;
	}
	return rc;
}

static u16 bnx2x_ack_int(struct bnx2x *bp)
{
	u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
		       COMMAND_REG_SIMD_MASK);
	u32 result = REG_RD(bp, hc_addr);

	DP(BNX2X_MSG_OFF, "read 0x%08x from HC addr 0x%x\n",
	   result, hc_addr);

	return result;
}
/*
 * fast path service functions
 */

static inline int bnx2x_has_tx_work(struct bnx2x_fastpath *fp)
{
	u16 tx_cons_sb;

	/* Tell compiler that status block fields can change */
	barrier();
	tx_cons_sb = le16_to_cpu(*fp->tx_cons_sb);
	return (fp->tx_pkt_cons != tx_cons_sb);
}

static inline int bnx2x_has_tx_work_unload(struct bnx2x_fastpath *fp)
{
	/* Tell compiler that consumer and producer can change */
	barrier();
	return (fp->tx_pkt_prod != fp->tx_pkt_cons);
}
/* free skb in the packet ring at pos idx
 * return idx of last bd freed
 */
static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			     u16 idx)
{
	struct sw_tx_bd *tx_buf = &fp->tx_buf_ring[idx];
	struct eth_tx_bd *tx_bd;
	struct sk_buff *skb = tx_buf->skb;
	u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
	int nbd;

	DP(BNX2X_MSG_OFF, "pkt_idx %d buff @(%p)->skb %p\n",
	   idx, tx_buf, skb);

	/* unmap first bd */
	DP(BNX2X_MSG_OFF, "free bd_idx %d\n", bd_idx);
	tx_bd = &fp->tx_desc_ring[bd_idx];
	pci_unmap_single(bp->pdev, BD_UNMAP_ADDR(tx_bd),
			 BD_UNMAP_LEN(tx_bd), PCI_DMA_TODEVICE);

	nbd = le16_to_cpu(tx_bd->nbd) - 1;
	new_cons = nbd + tx_buf->first_bd;
#ifdef BNX2X_STOP_ON_ERROR
	if (nbd > (MAX_SKB_FRAGS + 2)) {
		BNX2X_ERR("BAD nbd!\n");
		bnx2x_panic();
	}
#endif

	/* Skip a parse bd and the TSO split header bd
	   since they have no mapping */
	if (nbd)
		bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));

	if (tx_bd->bd_flags.as_bitfield & (ETH_TX_BD_FLAGS_IP_CSUM |
					   ETH_TX_BD_FLAGS_TCP_CSUM |
					   ETH_TX_BD_FLAGS_SW_LSO)) {
		if (--nbd)
			bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
		tx_bd = &fp->tx_desc_ring[bd_idx];
		/* is this a TSO split header bd? */
		if (tx_bd->bd_flags.as_bitfield & ETH_TX_BD_FLAGS_SW_LSO) {
			if (--nbd)
				bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
		}
	}

	/* now free frags */
	while (nbd > 0) {
		DP(BNX2X_MSG_OFF, "free frag bd_idx %d\n", bd_idx);
		tx_bd = &fp->tx_desc_ring[bd_idx];
		pci_unmap_page(bp->pdev, BD_UNMAP_ADDR(tx_bd),
			       BD_UNMAP_LEN(tx_bd), PCI_DMA_TODEVICE);
		if (--nbd)
			bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
	}

	/* release skb */
	WARN_ON(!skb);
	dev_kfree_skb(skb);
	tx_buf->first_bd = 0;
	tx_buf->skb = NULL;

	return new_cons;
}
static inline u16 bnx2x_tx_avail(struct bnx2x_fastpath *fp)
{
	s16 used;
	u16 prod;
	u16 cons;

	barrier(); /* Tell compiler that prod and cons can change */
	prod = fp->tx_bd_prod;
	cons = fp->tx_bd_cons;

	/* NUM_TX_RINGS = number of "next-page" entries
	   It will be used as a threshold */
	used = SUB_S16(prod, cons) + (s16)NUM_TX_RINGS;

#ifdef BNX2X_STOP_ON_ERROR
	WARN_ON(used < 0);
	WARN_ON(used > fp->bp->tx_ring_size);
	WARN_ON((fp->bp->tx_ring_size - used) > MAX_TX_AVAIL);
#endif

	return (s16)(fp->bp->tx_ring_size) - used;
}
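/* Worked example (hypothetical numbers): with tx_ring_size = 4000,
 * prod = 120, cons = 100 and NUM_TX_RINGS = 16, used = 20 + 16 = 36 and
 * the function returns 4000 - 36 = 3964; the NUM_TX_RINGS term reserves
 * the "next-page" BDs, which can never carry data.
 */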
static void bnx2x_tx_int(struct bnx2x_fastpath *fp, int work)
{
	struct bnx2x *bp = fp->bp;
	struct netdev_queue *txq;
	u16 hw_cons, sw_cons, bd_cons = fp->tx_bd_cons;
	int done = 0;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return;
#endif

	txq = netdev_get_tx_queue(bp->dev, fp->index);
	hw_cons = le16_to_cpu(*fp->tx_cons_sb);
	sw_cons = fp->tx_pkt_cons;

	while (sw_cons != hw_cons) {
		u16 pkt_cons;

		pkt_cons = TX_BD(sw_cons);

		/* prefetch(bp->tx_buf_ring[pkt_cons].skb); */

		DP(NETIF_MSG_TX_DONE, "hw_cons %u sw_cons %u pkt_cons %u\n",
		   hw_cons, sw_cons, pkt_cons);

/*		if (NEXT_TX_IDX(sw_cons) != hw_cons) {
			rmb();
			prefetch(fp->tx_buf_ring[NEXT_TX_IDX(sw_cons)].skb);
		}
*/
		bd_cons = bnx2x_free_tx_pkt(bp, fp, pkt_cons);
		sw_cons++;
		done++;

		if (done == work)
			break;
	}

	fp->tx_pkt_cons = sw_cons;
	fp->tx_bd_cons = bd_cons;

	/* Need to make the tx_bd_cons update visible to start_xmit()
	 * before checking for netif_tx_queue_stopped(). Without the
	 * memory barrier, there is a small possibility that start_xmit()
	 * will miss it and cause the queue to be stopped forever.
	 */
	smp_mb();

	/* TBD need a thresh? */
	if (unlikely(netif_tx_queue_stopped(txq))) {
		__netif_tx_lock(txq, smp_processor_id());

		if ((netif_tx_queue_stopped(txq)) &&
		    (bp->state == BNX2X_STATE_OPEN) &&
		    (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3))
			netif_tx_wake_queue(txq);

		__netif_tx_unlock(txq);
	}
}
static void bnx2x_sp_event(struct bnx2x_fastpath *fp,
			   union eth_rx_cqe *rr_cqe)
{
	struct bnx2x *bp = fp->bp;
	int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data);
	int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data);

	DP(BNX2X_MSG_SP,
	   "fp %d cid %d got ramrod #%d state is %x type is %d\n",
	   FP_IDX(fp), cid, command, bp->state,
	   rr_cqe->ramrod_cqe.ramrod_type);

	bp->spq_left++;

	if (FP_IDX(fp)) {
		switch (command | fp->state) {
		case (RAMROD_CMD_ID_ETH_CLIENT_SETUP |
						BNX2X_FP_STATE_OPENING):
			DP(NETIF_MSG_IFUP, "got MULTI[%d] setup ramrod\n",
			   cid);
			fp->state = BNX2X_FP_STATE_OPEN;
			break;

		case (RAMROD_CMD_ID_ETH_HALT | BNX2X_FP_STATE_HALTING):
			DP(NETIF_MSG_IFDOWN, "got MULTI[%d] halt ramrod\n",
			   cid);
			fp->state = BNX2X_FP_STATE_HALTED;
			break;

		default:
			BNX2X_ERR("unexpected MC reply (%d) "
				  "fp->state is %x\n", command, fp->state);
			break;
		}
		mb(); /* force bnx2x_wait_ramrod() to see the change */
		return;
	}

	switch (command | bp->state) {
	case (RAMROD_CMD_ID_ETH_PORT_SETUP | BNX2X_STATE_OPENING_WAIT4_PORT):
		DP(NETIF_MSG_IFUP, "got setup ramrod\n");
		bp->state = BNX2X_STATE_OPEN;
		break;

	case (RAMROD_CMD_ID_ETH_HALT | BNX2X_STATE_CLOSING_WAIT4_HALT):
		DP(NETIF_MSG_IFDOWN, "got halt ramrod\n");
		bp->state = BNX2X_STATE_CLOSING_WAIT4_DELETE;
		fp->state = BNX2X_FP_STATE_HALTED;
		break;

	case (RAMROD_CMD_ID_ETH_CFC_DEL | BNX2X_STATE_CLOSING_WAIT4_HALT):
		DP(NETIF_MSG_IFDOWN, "got delete ramrod for MULTI[%d]\n", cid);
		bnx2x_fp(bp, cid, state) = BNX2X_FP_STATE_CLOSED;
		break;

	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_OPEN):
	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_DIAG):
		DP(NETIF_MSG_IFUP, "got set mac ramrod\n");
		bp->set_mac_pending = 0;
		break;

	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_CLOSING_WAIT4_HALT):
		DP(NETIF_MSG_IFDOWN, "got (un)set mac ramrod\n");
		break;

	default:
		BNX2X_ERR("unexpected MC reply (%d) bp->state is %x\n",
			  command, bp->state);
		break;
	}
	mb(); /* force bnx2x_wait_ramrod() to see the change */
}
static inline void bnx2x_free_rx_sge(struct bnx2x *bp,
				     struct bnx2x_fastpath *fp, u16 index)
{
	struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
	struct page *page = sw_buf->page;
	struct eth_rx_sge *sge = &fp->rx_sge_ring[index];

	/* Skip "next page" elements */
	if (!page)
		return;

	pci_unmap_page(bp->pdev, pci_unmap_addr(sw_buf, mapping),
		       SGE_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);
	__free_pages(page, PAGES_PER_SGE_SHIFT);

	sw_buf->page = NULL;
	sge->addr_hi = 0;
	sge->addr_lo = 0;
}

static inline void bnx2x_free_rx_sge_range(struct bnx2x *bp,
					   struct bnx2x_fastpath *fp, int last)
{
	int i;

	for (i = 0; i < last; i++)
		bnx2x_free_rx_sge(bp, fp, i);
}
static inline int bnx2x_alloc_rx_sge(struct bnx2x *bp,
				     struct bnx2x_fastpath *fp, u16 index)
{
	struct page *page = alloc_pages(GFP_ATOMIC, PAGES_PER_SGE_SHIFT);
	struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
	struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
	dma_addr_t mapping;

	if (unlikely(page == NULL))
		return -ENOMEM;

	mapping = pci_map_page(bp->pdev, page, 0, SGE_PAGE_SIZE*PAGES_PER_SGE,
			       PCI_DMA_FROMDEVICE);
	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
		__free_pages(page, PAGES_PER_SGE_SHIFT);
		return -ENOMEM;
	}

	sw_buf->page = page;
	pci_unmap_addr_set(sw_buf, mapping, mapping);

	sge->addr_hi = cpu_to_le32(U64_HI(mapping));
	sge->addr_lo = cpu_to_le32(U64_LO(mapping));

	return 0;
}
static inline int bnx2x_alloc_rx_skb(struct bnx2x *bp,
				     struct bnx2x_fastpath *fp, u16 index)
{
	struct sk_buff *skb;
	struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
	struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
	dma_addr_t mapping;

	skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
	if (unlikely(skb == NULL))
		return -ENOMEM;

	mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_size,
				 PCI_DMA_FROMDEVICE);
	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
		dev_kfree_skb(skb);
		return -ENOMEM;
	}

	rx_buf->skb = skb;
	pci_unmap_addr_set(rx_buf, mapping, mapping);

	rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

	return 0;
}
/* note that we are not allocating a new skb,
 * we are just moving one from cons to prod
 * we are not creating a new mapping,
 * so there is no need to check for dma_mapping_error().
 */
static void bnx2x_reuse_rx_skb(struct bnx2x_fastpath *fp,
			       struct sk_buff *skb, u16 cons, u16 prod)
{
	struct bnx2x *bp = fp->bp;
	struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
	struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
	struct eth_rx_bd *cons_bd = &fp->rx_desc_ring[cons];
	struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];

	pci_dma_sync_single_for_device(bp->pdev,
				       pci_unmap_addr(cons_rx_buf, mapping),
				       RX_COPY_THRESH, PCI_DMA_FROMDEVICE);

	prod_rx_buf->skb = cons_rx_buf->skb;
	pci_unmap_addr_set(prod_rx_buf, mapping,
			   pci_unmap_addr(cons_rx_buf, mapping));
	*prod_bd = *cons_bd;
}
static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
					     u16 idx)
{
	u16 last_max = fp->last_max_sge;

	if (SUB_S16(idx, last_max) > 0)
		fp->last_max_sge = idx;
}

static void bnx2x_clear_sge_mask_next_elems(struct bnx2x_fastpath *fp)
{
	int i, j;

	for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
		int idx = RX_SGE_CNT * i - 1;

		for (j = 0; j < 2; j++) {
			SGE_MASK_CLEAR_BIT(fp, idx);
			idx--;
		}
	}
}
static void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
				  struct eth_fast_path_rx_cqe *fp_cqe)
{
	struct bnx2x *bp = fp->bp;
	u16 sge_len = SGE_PAGE_ALIGN(le16_to_cpu(fp_cqe->pkt_len) -
				     le16_to_cpu(fp_cqe->len_on_bd)) >>
		      SGE_PAGE_SHIFT;
	u16 last_max, last_elem, first_elem;
	u16 delta = 0;
	int i;

	if (!sge_len)
		return;

	/* First mark all used pages */
	for (i = 0; i < sge_len; i++)
		SGE_MASK_CLEAR_BIT(fp, RX_SGE(le16_to_cpu(fp_cqe->sgl[i])));

	DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
	   sge_len - 1, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));

	/* Here we assume that the last SGE index is the biggest */
	prefetch((void *)(fp->sge_mask));
	bnx2x_update_last_max_sge(fp, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));

	last_max = RX_SGE(fp->last_max_sge);
	last_elem = last_max >> RX_SGE_MASK_ELEM_SHIFT;
	first_elem = RX_SGE(fp->rx_sge_prod) >> RX_SGE_MASK_ELEM_SHIFT;

	/* If ring is not full */
	if (last_elem + 1 != first_elem)
		last_elem++;

	/* Now update the prod */
	for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
		if (likely(fp->sge_mask[i]))
			break;

		fp->sge_mask[i] = RX_SGE_MASK_ELEM_ONE_MASK;
		delta += RX_SGE_MASK_ELEM_SZ;
	}

	if (delta > 0) {
		fp->rx_sge_prod += delta;
		/* clear page-end entries */
		bnx2x_clear_sge_mask_next_elems(fp);
	}

	DP(NETIF_MSG_RX_STATUS,
	   "fp->last_max_sge = %d fp->rx_sge_prod = %d\n",
	   fp->last_max_sge, fp->rx_sge_prod);
}
static inline void bnx2x_init_sge_ring_bit_mask(struct bnx2x_fastpath *fp)
{
	/* Set the mask to all 1-s: it's faster to compare to 0 than to 0xf-s */
	memset(fp->sge_mask, 0xff,
	       (NUM_RX_SGE >> RX_SGE_MASK_ELEM_SHIFT)*sizeof(u64));

	/* Clear the two last indices in the page to 1:
	   these are the indices that correspond to the "next" element,
	   hence will never be indicated and should be removed from
	   the calculations. */
	bnx2x_clear_sge_mask_next_elems(fp);
}
static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
			    struct sk_buff *skb, u16 cons, u16 prod)
{
	struct bnx2x *bp = fp->bp;
	struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
	struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
	struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
	dma_addr_t mapping;

	/* move empty skb from pool to prod and map it */
	prod_rx_buf->skb = fp->tpa_pool[queue].skb;
	mapping = pci_map_single(bp->pdev, fp->tpa_pool[queue].skb->data,
				 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
	pci_unmap_addr_set(prod_rx_buf, mapping, mapping);

	/* move partial skb from cons to pool (don't unmap yet) */
	fp->tpa_pool[queue] = *cons_rx_buf;

	/* mark bin state as start - print error if current state != stop */
	if (fp->tpa_state[queue] != BNX2X_TPA_STOP)
		BNX2X_ERR("start of bin not in stop [%d]\n", queue);

	fp->tpa_state[queue] = BNX2X_TPA_START;

	/* point prod_bd to new skb */
	prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

#ifdef BNX2X_STOP_ON_ERROR
	fp->tpa_queue_used |= (1 << queue);
#ifdef __powerpc64__
	DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n",
#else
	DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
#endif
	   fp->tpa_queue_used);
#endif
}
static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			       struct sk_buff *skb,
			       struct eth_fast_path_rx_cqe *fp_cqe,
			       u16 cqe_idx)
{
	struct sw_rx_page *rx_pg, old_rx_pg;
	u16 len_on_bd = le16_to_cpu(fp_cqe->len_on_bd);
	u32 i, frag_len, frag_size, pages;
	int err;
	int j;

	frag_size = le16_to_cpu(fp_cqe->pkt_len) - len_on_bd;
	pages = SGE_PAGE_ALIGN(frag_size) >> SGE_PAGE_SHIFT;

	/* This is needed in order to enable forwarding support */
	if (frag_size)
		skb_shinfo(skb)->gso_size = min((u32)SGE_PAGE_SIZE,
						max(frag_size, (u32)len_on_bd));

#ifdef BNX2X_STOP_ON_ERROR
	if (pages >
	    min((u32)8, (u32)MAX_SKB_FRAGS) * SGE_PAGE_SIZE * PAGES_PER_SGE) {
		BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
			  pages, cqe_idx);
		BNX2X_ERR("fp_cqe->pkt_len = %d fp_cqe->len_on_bd = %d\n",
			  fp_cqe->pkt_len, len_on_bd);
		bnx2x_panic();
		return -EINVAL;
	}
#endif

	/* Run through the SGL and compose the fragmented skb */
	for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
		u16 sge_idx = RX_SGE(le16_to_cpu(fp_cqe->sgl[j]));

		/* FW gives the indices of the SGE as if the ring is an array
		   (meaning that "next" element will consume 2 indices) */
		frag_len = min(frag_size, (u32)(SGE_PAGE_SIZE*PAGES_PER_SGE));
		rx_pg = &fp->rx_page_ring[sge_idx];
		old_rx_pg = *rx_pg;

		/* If we fail to allocate a substitute page, we simply stop
		   where we are and drop the whole packet */
		err = bnx2x_alloc_rx_sge(bp, fp, sge_idx);
		if (unlikely(err)) {
			fp->eth_q_stats.rx_skb_alloc_failed++;
			return err;
		}

		/* Unmap the page as we are going to pass it to the stack */
		pci_unmap_page(bp->pdev, pci_unmap_addr(&old_rx_pg, mapping),
			       SGE_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);

		/* Add one frag and update the appropriate fields in the skb */
		skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);

		skb->data_len += frag_len;
		skb->truesize += frag_len;
		skb->len += frag_len;

		frag_size -= frag_len;
	}

	return 0;
}
static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			   u16 queue, int pad, int len, union eth_rx_cqe *cqe,
			   u16 cqe_idx)
{
	struct sw_rx_bd *rx_buf = &fp->tpa_pool[queue];
	struct sk_buff *skb = rx_buf->skb;
	/* alloc new skb */
	struct sk_buff *new_skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);

	/* Unmap skb in the pool anyway, as we are going to change
	   pool entry status to BNX2X_TPA_STOP even if new skb allocation
	   fails. */
	pci_unmap_single(bp->pdev, pci_unmap_addr(rx_buf, mapping),
			 bp->rx_buf_size, PCI_DMA_FROMDEVICE);

	if (likely(new_skb)) {
		/* fix ip xsum and give it to the stack */
		/* (no need to map the new skb) */
		int is_vlan_cqe =
			(le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
			 PARSING_FLAGS_VLAN);
		int is_not_hwaccel_vlan_cqe =
			(is_vlan_cqe && (!(bp->flags & HW_VLAN_RX_FLAG)));

		prefetch(skb);
		prefetch(((char *)(skb)) + 128);

#ifdef BNX2X_STOP_ON_ERROR
		if (pad + len > bp->rx_buf_size) {
			BNX2X_ERR("skb_put is about to fail... "
				  "pad %d len %d rx_buf_size %d\n",
				  pad, len, bp->rx_buf_size);
			bnx2x_panic();
			return;
		}
#endif

		skb_reserve(skb, pad);
		skb_put(skb, len);

		skb->protocol = eth_type_trans(skb, bp->dev);
		skb->ip_summed = CHECKSUM_UNNECESSARY;

		{
			struct iphdr *iph;

			iph = (struct iphdr *)skb->data;

			/* If there is no Rx VLAN offloading -
			   take VLAN tag into an account */
			if (unlikely(is_not_hwaccel_vlan_cqe))
				iph = (struct iphdr *)((u8 *)iph + VLAN_HLEN);

			iph->check = 0;
			iph->check = ip_fast_csum((u8 *)iph, iph->ihl);
		}

		if (!bnx2x_fill_frag_skb(bp, fp, skb,
					 &cqe->fast_path_cqe, cqe_idx)) {
#ifdef BCM_VLAN
			if ((bp->vlgrp != NULL) && is_vlan_cqe &&
			    (!is_not_hwaccel_vlan_cqe))
				vlan_hwaccel_receive_skb(skb, bp->vlgrp,
						le16_to_cpu(cqe->fast_path_cqe.
							    vlan_tag));
			else
#endif
				netif_receive_skb(skb);
		} else {
			DP(NETIF_MSG_RX_STATUS, "Failed to allocate new pages"
			   " - dropping packet!\n");
			dev_kfree_skb(skb);
		}

		/* put new skb in bin */
		fp->tpa_pool[queue].skb = new_skb;

	} else {
		/* else drop the packet and keep the buffer in the bin */
		DP(NETIF_MSG_RX_STATUS,
		   "Failed to allocate new skb - dropping packet!\n");
		fp->eth_q_stats.rx_skb_alloc_failed++;
	}

	fp->tpa_state[queue] = BNX2X_TPA_STOP;
}
static inline void bnx2x_update_rx_prod(struct bnx2x *bp,
					struct bnx2x_fastpath *fp,
					u16 bd_prod, u16 rx_comp_prod,
					u16 rx_sge_prod)
{
	struct ustorm_eth_rx_producers rx_prods = {0};
	int i;

	/* Update producers */
	rx_prods.bd_prod = bd_prod;
	rx_prods.cqe_prod = rx_comp_prod;
	rx_prods.sge_prod = rx_sge_prod;

	/*
	 * Make sure that the BD and SGE data is updated before updating the
	 * producers since FW might read the BD/SGE right after the producer
	 * is updated.
	 * This is only applicable for weak-ordered memory model archs such
	 * as IA-64. The following barrier is also mandatory since FW
	 * assumes BDs must have buffers.
	 */
	wmb();

	for (i = 0; i < sizeof(struct ustorm_eth_rx_producers)/4; i++)
		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_RX_PRODS_OFFSET(BP_PORT(bp), FP_CL_ID(fp)) + i*4,
		       ((u32 *)&rx_prods)[i]);

	mmiowb(); /* keep prod updates ordered */

	DP(NETIF_MSG_RX_STATUS,
	   "queue[%d]: wrote bd_prod %u cqe_prod %u sge_prod %u\n",
	   fp->index, bd_prod, rx_comp_prod, rx_sge_prod);
}
static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
{
	struct bnx2x *bp = fp->bp;
	u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
	u16 hw_comp_cons, sw_comp_cons, sw_comp_prod;
	int rx_pkt = 0;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return 0;
#endif

	/* CQ "next element" is of the size of the regular element,
	   that's why it's ok here */
	hw_comp_cons = le16_to_cpu(*fp->rx_cons_sb);
	if ((hw_comp_cons & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
		hw_comp_cons++;

	bd_cons = fp->rx_bd_cons;
	bd_prod = fp->rx_bd_prod;
	bd_prod_fw = bd_prod;
	sw_comp_cons = fp->rx_comp_cons;
	sw_comp_prod = fp->rx_comp_prod;

	/* Memory barrier necessary as speculative reads of the rx
	 * buffer can be ahead of the index in the status block
	 */
	rmb();

	DP(NETIF_MSG_RX_STATUS,
	   "queue[%d]: hw_comp_cons %u sw_comp_cons %u\n",
	   FP_IDX(fp), hw_comp_cons, sw_comp_cons);
	while (sw_comp_cons != hw_comp_cons) {
		struct sw_rx_bd *rx_buf = NULL;
		struct sk_buff *skb;
		union eth_rx_cqe *cqe;
		u8 cqe_fp_flags;
		u16 len, pad;

		comp_ring_cons = RCQ_BD(sw_comp_cons);
		bd_prod = RX_BD(bd_prod);
		bd_cons = RX_BD(bd_cons);

		cqe = &fp->rx_comp_ring[comp_ring_cons];
		cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;

		DP(NETIF_MSG_RX_STATUS, "CQE type %x err %x status %x"
		   " queue %x vlan %x len %u\n", CQE_TYPE(cqe_fp_flags),
		   cqe_fp_flags, cqe->fast_path_cqe.status_flags,
		   le32_to_cpu(cqe->fast_path_cqe.rss_hash_result),
		   le16_to_cpu(cqe->fast_path_cqe.vlan_tag),
		   le16_to_cpu(cqe->fast_path_cqe.pkt_len));

		/* is this a slowpath msg? */
		if (unlikely(CQE_TYPE(cqe_fp_flags))) {
			bnx2x_sp_event(fp, cqe);
			goto next_cqe;

		/* this is an rx packet */
		} else {
			rx_buf = &fp->rx_buf_ring[bd_cons];
			skb = rx_buf->skb;
			len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
			pad = cqe->fast_path_cqe.placement_offset;

			/* If CQE is marked both TPA_START and TPA_END
			   it is a non-TPA CQE */
			if ((!fp->disable_tpa) &&
			    (TPA_TYPE(cqe_fp_flags) !=
			     (TPA_TYPE_START | TPA_TYPE_END))) {
				u16 queue = cqe->fast_path_cqe.queue_index;

				if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_START) {
					DP(NETIF_MSG_RX_STATUS,
					   "calling tpa_start on queue %d\n",
					   queue);

					bnx2x_tpa_start(fp, queue, skb,
							bd_cons, bd_prod);
					goto next_rx;
				}

				if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_END) {
					DP(NETIF_MSG_RX_STATUS,
					   "calling tpa_stop on queue %d\n",
					   queue);

					if (!BNX2X_RX_SUM_FIX(cqe))
						BNX2X_ERR("STOP on none TCP "
							  "data\n");

					/* This is a size of the linear data
					   on this skb */
					len = le16_to_cpu(cqe->fast_path_cqe.
								len_on_bd);
					bnx2x_tpa_stop(bp, fp, queue, pad,
						       len, cqe, comp_ring_cons);
#ifdef BNX2X_STOP_ON_ERROR
					if (bp->panic)
						return 0;
#endif

					bnx2x_update_sge_prod(fp,
							&cqe->fast_path_cqe);
					goto next_cqe;
				}
			}
			pci_dma_sync_single_for_device(bp->pdev,
					pci_unmap_addr(rx_buf, mapping),
						       pad + RX_COPY_THRESH,
						       PCI_DMA_FROMDEVICE);
			prefetch(skb);
			prefetch(((char *)(skb)) + 128);

			/* is this an error packet? */
			if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
				DP(NETIF_MSG_RX_ERR,
				   "ERROR flags %x rx packet %u\n",
				   cqe_fp_flags, sw_comp_cons);
				fp->eth_q_stats.rx_err_discard_pkt++;
				goto reuse_rx;
			}

			/* Since we don't have a jumbo ring
			 * copy small packets if mtu > 1500
			 */
			if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
			    (len <= RX_COPY_THRESH)) {
				struct sk_buff *new_skb;

				new_skb = netdev_alloc_skb(bp->dev,
							   len + pad);
				if (new_skb == NULL) {
					DP(NETIF_MSG_RX_ERR,
					   "ERROR packet dropped "
					   "because of alloc failure\n");
					fp->eth_q_stats.rx_skb_alloc_failed++;
					goto reuse_rx;
				}

				/* aligned copy */
				skb_copy_from_linear_data_offset(skb, pad,
						new_skb->data + pad, len);
				skb_reserve(new_skb, pad);
				skb_put(new_skb, len);

				bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);

				skb = new_skb;

			} else if (bnx2x_alloc_rx_skb(bp, fp, bd_prod) == 0) {
				pci_unmap_single(bp->pdev,
					pci_unmap_addr(rx_buf, mapping),
						 bp->rx_buf_size,
						 PCI_DMA_FROMDEVICE);
				skb_reserve(skb, pad);
				skb_put(skb, len);

			} else {
				DP(NETIF_MSG_RX_ERR,
				   "ERROR packet dropped because "
				   "of alloc failure\n");
				fp->eth_q_stats.rx_skb_alloc_failed++;
reuse_rx:
				bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
				goto next_rx;
			}

			skb->protocol = eth_type_trans(skb, bp->dev);

			skb->ip_summed = CHECKSUM_NONE;
			if (bp->rx_csum) {
				if (likely(BNX2X_RX_CSUM_OK(cqe)))
					skb->ip_summed = CHECKSUM_UNNECESSARY;
				else
					fp->eth_q_stats.hw_csum_err++;
			}
		}

		skb_record_rx_queue(skb, fp->index);
#ifdef BCM_VLAN
		if ((bp->vlgrp != NULL) && (bp->flags & HW_VLAN_RX_FLAG) &&
		    (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
		     PARSING_FLAGS_VLAN))
			vlan_hwaccel_receive_skb(skb, bp->vlgrp,
				le16_to_cpu(cqe->fast_path_cqe.vlan_tag));
		else
#endif
			netif_receive_skb(skb);

next_rx:
		rx_buf->skb = NULL;

		bd_cons = NEXT_RX_IDX(bd_cons);
		bd_prod = NEXT_RX_IDX(bd_prod);
		bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
		rx_pkt++;
next_cqe:
		sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
		sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);

		if (rx_pkt == budget)
			break;
	} /* while */

	fp->rx_bd_cons = bd_cons;
	fp->rx_bd_prod = bd_prod_fw;
	fp->rx_comp_cons = sw_comp_cons;
	fp->rx_comp_prod = sw_comp_prod;

	/* Update producers */
	bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
			     fp->rx_sge_prod);

	fp->rx_pkt += rx_pkt;
	fp->rx_calls++;

	return rx_pkt;
}
static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
{
	struct bnx2x_fastpath *fp = fp_cookie;
	struct bnx2x *bp = fp->bp;
	int index = FP_IDX(fp);

	/* Return here if interrupt is disabled */
	if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
		DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
		return IRQ_HANDLED;
	}

	DP(BNX2X_MSG_FP, "got an MSI-X interrupt on IDX:SB [%d:%d]\n",
	   index, FP_SB_ID(fp));
	bnx2x_ack_sb(bp, FP_SB_ID(fp), USTORM_ID, 0, IGU_INT_DISABLE, 0);

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return IRQ_HANDLED;
#endif

	prefetch(fp->rx_cons_sb);
	prefetch(fp->tx_cons_sb);
	prefetch(&fp->status_blk->c_status_block.status_block_index);
	prefetch(&fp->status_blk->u_status_block.status_block_index);

	napi_schedule(&bnx2x_fp(bp, index, napi));

	return IRQ_HANDLED;
}
static irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
{
	struct bnx2x *bp = netdev_priv(dev_instance);
	u16 status = bnx2x_ack_int(bp);
	u16 mask;

	/* Return here if interrupt is shared and it's not for us */
	if (unlikely(status == 0)) {
		DP(NETIF_MSG_INTR, "not our interrupt!\n");
		return IRQ_NONE;
	}
	DP(NETIF_MSG_INTR, "got an interrupt status %u\n", status);

	/* Return here if interrupt is disabled */
	if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
		DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
		return IRQ_HANDLED;
	}

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return IRQ_HANDLED;
#endif

	mask = 0x2 << bp->fp[0].sb_id;
	if (status & mask) {
		struct bnx2x_fastpath *fp = &bp->fp[0];

		prefetch(fp->rx_cons_sb);
		prefetch(fp->tx_cons_sb);
		prefetch(&fp->status_blk->c_status_block.status_block_index);
		prefetch(&fp->status_blk->u_status_block.status_block_index);

		napi_schedule(&bnx2x_fp(bp, 0, napi));

		status &= ~mask;
	}

	if (unlikely(status & 0x1)) {
		queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);

		status &= ~0x1;
		if (!status)
			return IRQ_HANDLED;
	}

	if (status)
		DP(NETIF_MSG_INTR, "got an unknown interrupt! (status %u)\n",
		   status);

	return IRQ_HANDLED;
}
/* end of fast path */

static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event);

/* Link */

/*
 * General service functions
 */
static int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource)
{
	u32 lock_status;
	u32 resource_bit = (1 << resource);
	int func = BP_FUNC(bp);
	u32 hw_lock_control_reg;
	int cnt;

	/* Validating that the resource is within range */
	if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
		DP(NETIF_MSG_HW,
		   "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
		   resource, HW_LOCK_MAX_RESOURCE_VALUE);
		return -EINVAL;
	}

	if (func <= 5) {
		hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
	} else {
		hw_lock_control_reg =
				(MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
	}

	/* Validating that the resource is not already taken */
	lock_status = REG_RD(bp, hw_lock_control_reg);
	if (lock_status & resource_bit) {
		DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n",
		   lock_status, resource_bit);
		return -EEXIST;
	}

	/* Try for 5 second every 5ms */
	for (cnt = 0; cnt < 1000; cnt++) {
		/* Try to acquire the lock */
		REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
		lock_status = REG_RD(bp, hw_lock_control_reg);
		if (lock_status & resource_bit)
			return 0;

		msleep(5);
	}
	DP(NETIF_MSG_HW, "Timeout\n");
	return -EAGAIN;
}
static int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource)
{
	u32 lock_status;
	u32 resource_bit = (1 << resource);
	int func = BP_FUNC(bp);
	u32 hw_lock_control_reg;

	/* Validating that the resource is within range */
	if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
		DP(NETIF_MSG_HW,
		   "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
		   resource, HW_LOCK_MAX_RESOURCE_VALUE);
		return -EINVAL;
	}

	if (func <= 5) {
		hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
	} else {
		hw_lock_control_reg =
				(MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
	}

	/* Validating that the resource is currently taken */
	lock_status = REG_RD(bp, hw_lock_control_reg);
	if (!(lock_status & resource_bit)) {
		DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n",
		   lock_status, resource_bit);
		return -EFAULT;
	}

	REG_WR(bp, hw_lock_control_reg, resource_bit);
	return 0;
}
/* HW Lock for shared dual port PHYs */
static void bnx2x_acquire_phy_lock(struct bnx2x *bp)
{
	mutex_lock(&bp->port.phy_mutex);

	if (bp->port.need_hw_lock)
		bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
}

static void bnx2x_release_phy_lock(struct bnx2x *bp)
{
	if (bp->port.need_hw_lock)
		bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);

	mutex_unlock(&bp->port.phy_mutex);
}
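/* The pairing above fixes the lock order: phy_mutex is taken before the
 * MDIO hardware lock and released after it, so the HW lock is never held
 * without the software mutex.  Callers follow the pattern
 *
 *	bnx2x_acquire_phy_lock(bp);
 *	rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars);
 *	bnx2x_release_phy_lock(bp);
 *
 * as in bnx2x_initial_phy_init() below.
 */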
int bnx2x_get_gpio(struct bnx2x *bp, int gpio_num, u8 port)
{
	/* The GPIO should be swapped if swap register is set and active */
	int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
			 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
	int gpio_shift = gpio_num +
			(gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
	u32 gpio_mask = (1 << gpio_shift);
	u32 gpio_reg;
	int value;

	if (gpio_num > MISC_REGISTERS_GPIO_3) {
		BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
		return -EINVAL;
	}

	/* read GPIO value */
	gpio_reg = REG_RD(bp, MISC_REG_GPIO);

	/* get the requested pin value */
	if ((gpio_reg & gpio_mask) == gpio_mask)
		value = 1;
	else
		value = 0;

	DP(NETIF_MSG_LINK, "pin %d value 0x%x\n", gpio_num, value);

	return value;
}
int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
{
	/* The GPIO should be swapped if swap register is set and active */
	int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
			 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
	int gpio_shift = gpio_num +
			(gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
	u32 gpio_mask = (1 << gpio_shift);
	u32 gpio_reg;

	if (gpio_num > MISC_REGISTERS_GPIO_3) {
		BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
		return -EINVAL;
	}

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
	/* read GPIO and mask except the float bits */
	gpio_reg = (REG_RD(bp, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT);

	switch (mode) {
	case MISC_REGISTERS_GPIO_OUTPUT_LOW:
		DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output low\n",
		   gpio_num, gpio_shift);
		/* clear FLOAT and set CLR */
		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
		gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS);
		break;

	case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
		DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output high\n",
		   gpio_num, gpio_shift);
		/* clear FLOAT and set SET */
		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
		gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_SET_POS);
		break;

	case MISC_REGISTERS_GPIO_INPUT_HI_Z:
		DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> input\n",
		   gpio_num, gpio_shift);
		/* set FLOAT */
		gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
		break;

	default:
		break;
	}

	REG_WR(bp, MISC_REG_GPIO, gpio_reg);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);

	return 0;
}
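/* Usage sketch (illustrative, not part of the original file): driving a
 * pin high on the current port, with the port swap handled internally:
 *
 *	bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_3,
 *		       MISC_REGISTERS_GPIO_OUTPUT_HIGH, BP_PORT(bp));
 *
 * Each output mode first clears the FLOAT bit, so the pin is actively
 * driven rather than left floating.
 */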
int bnx2x_set_gpio_int(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
{
	/* The GPIO should be swapped if swap register is set and active */
	int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
			 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
	int gpio_shift = gpio_num +
			(gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
	u32 gpio_mask = (1 << gpio_shift);
	u32 gpio_reg;

	if (gpio_num > MISC_REGISTERS_GPIO_3) {
		BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
		return -EINVAL;
	}

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
	/* read GPIO int */
	gpio_reg = REG_RD(bp, MISC_REG_GPIO_INT);

	switch (mode) {
	case MISC_REGISTERS_GPIO_INT_OUTPUT_CLR:
		DP(NETIF_MSG_LINK, "Clear GPIO INT %d (shift %d) -> "
				   "output low\n", gpio_num, gpio_shift);
		/* clear SET and set CLR */
		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
		gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
		break;

	case MISC_REGISTERS_GPIO_INT_OUTPUT_SET:
		DP(NETIF_MSG_LINK, "Set GPIO INT %d (shift %d) -> "
				   "output high\n", gpio_num, gpio_shift);
		/* clear CLR and set SET */
		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
		gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
		break;

	default:
		break;
	}

	REG_WR(bp, MISC_REG_GPIO_INT, gpio_reg);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);

	return 0;
}
static int bnx2x_set_spio(struct bnx2x *bp, int spio_num, u32 mode)
{
	u32 spio_mask = (1 << spio_num);
	u32 spio_reg;

	if ((spio_num < MISC_REGISTERS_SPIO_4) ||
	    (spio_num > MISC_REGISTERS_SPIO_7)) {
		BNX2X_ERR("Invalid SPIO %d\n", spio_num);
		return -EINVAL;
	}

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
	/* read SPIO and mask except the float bits */
	spio_reg = (REG_RD(bp, MISC_REG_SPIO) & MISC_REGISTERS_SPIO_FLOAT);

	switch (mode) {
	case MISC_REGISTERS_SPIO_OUTPUT_LOW:
		DP(NETIF_MSG_LINK, "Set SPIO %d -> output low\n", spio_num);
		/* clear FLOAT and set CLR */
		spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
		spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_CLR_POS);
		break;

	case MISC_REGISTERS_SPIO_OUTPUT_HIGH:
		DP(NETIF_MSG_LINK, "Set SPIO %d -> output high\n", spio_num);
		/* clear FLOAT and set SET */
		spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
		spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_SET_POS);
		break;

	case MISC_REGISTERS_SPIO_INPUT_HI_Z:
		DP(NETIF_MSG_LINK, "Set SPIO %d -> input\n", spio_num);
		/* set FLOAT */
		spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
		break;

	default:
		break;
	}

	REG_WR(bp, MISC_REG_SPIO, spio_reg);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);

	return 0;
}
static void bnx2x_calc_fc_adv(struct bnx2x *bp)
{
	switch (bp->link_vars.ieee_fc &
		MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK) {
	case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE:
		bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
					  ADVERTISED_Pause);
		break;
	case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH:
		bp->port.advertising |= (ADVERTISED_Asym_Pause |
					 ADVERTISED_Pause);
		break;
	case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC:
		bp->port.advertising |= ADVERTISED_Asym_Pause;
		break;
	default:
		bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
					  ADVERTISED_Pause);
		break;
	}
}
static void bnx2x_link_report(struct bnx2x *bp)
{
	if (bp->link_vars.link_up) {
		if (bp->state == BNX2X_STATE_OPEN)
			netif_carrier_on(bp->dev);
		printk(KERN_INFO PFX "%s NIC Link is Up, ", bp->dev->name);

		printk("%d Mbps ", bp->link_vars.line_speed);

		if (bp->link_vars.duplex == DUPLEX_FULL)
			printk("full duplex");
		else
			printk("half duplex");

		if (bp->link_vars.flow_ctrl != BNX2X_FLOW_CTRL_NONE) {
			if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) {
				printk(", receive ");
				if (bp->link_vars.flow_ctrl &
				    BNX2X_FLOW_CTRL_TX)
					printk("& transmit ");
			} else {
				printk(", transmit ");
			}
			printk("flow control ON");
		}
		printk("\n");

	} else { /* link_down */
		netif_carrier_off(bp->dev);
		printk(KERN_ERR PFX "%s NIC Link is Down\n", bp->dev->name);
	}
}
static u8 bnx2x_initial_phy_init(struct bnx2x *bp)
{
	if (!BP_NOMCP(bp)) {
		u8 rc;

		/* Initialize link parameters structure variables */
		/* It is recommended to turn off RX FC for jumbo frames
		   for better performance */
		if (IS_E1HMF(bp))
			bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;
		else if (bp->dev->mtu > 5000)
			bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_TX;
		else
			bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;

		bnx2x_acquire_phy_lock(bp);
		rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars);
		bnx2x_release_phy_lock(bp);

		bnx2x_calc_fc_adv(bp);

		if (bp->link_vars.link_up)
			bnx2x_link_report(bp);

		return rc;
	}
	BNX2X_ERR("Bootcode is missing - not initializing link\n");
	return -EINVAL;
}
static void bnx2x_link_set(struct bnx2x *bp)
{
	if (!BP_NOMCP(bp)) {
		bnx2x_acquire_phy_lock(bp);
		bnx2x_phy_init(&bp->link_params, &bp->link_vars);
		bnx2x_release_phy_lock(bp);

		bnx2x_calc_fc_adv(bp);
	} else
		BNX2X_ERR("Bootcode is missing - not setting link\n");
}

static void bnx2x__link_reset(struct bnx2x *bp)
{
	if (!BP_NOMCP(bp)) {
		bnx2x_acquire_phy_lock(bp);
		bnx2x_link_reset(&bp->link_params, &bp->link_vars, 1);
		bnx2x_release_phy_lock(bp);
	} else
		BNX2X_ERR("Bootcode is missing - not resetting link\n");
}

static u8 bnx2x_link_test(struct bnx2x *bp)
{
	u8 rc;

	bnx2x_acquire_phy_lock(bp);
	rc = bnx2x_test_link(&bp->link_params, &bp->link_vars);
	bnx2x_release_phy_lock(bp);

	return rc;
}
static void bnx2x_init_port_minmax(struct bnx2x *bp)
{
	u32 r_param = bp->link_vars.line_speed / 8;
	u32 fair_periodic_timeout_usec;
	u32 t_fair;

	memset(&(bp->cmng.rs_vars), 0,
	       sizeof(struct rate_shaping_vars_per_port));
	memset(&(bp->cmng.fair_vars), 0, sizeof(struct fairness_vars_per_port));

	/* 100 usec in SDM ticks = 25 since each tick is 4 usec */
	bp->cmng.rs_vars.rs_periodic_timeout = RS_PERIODIC_TIMEOUT_USEC / 4;

	/* this is the threshold below which no timer arming will occur
	   1.25 coefficient is for the threshold to be a little bigger
	   than the real time, to compensate for timer in-accuracy */
	bp->cmng.rs_vars.rs_threshold =
				(RS_PERIODIC_TIMEOUT_USEC * r_param * 5) / 4;

	/* resolution of fairness timer */
	fair_periodic_timeout_usec = QM_ARB_BYTES / r_param;
	/* for 10G it is 1000usec. for 1G it is 10000usec. */
	t_fair = T_FAIR_COEF / bp->link_vars.line_speed;

	/* this is the threshold below which we won't arm the timer anymore */
	bp->cmng.fair_vars.fair_threshold = QM_ARB_BYTES;

	/* we multiply by 1e3/8 to get bytes/msec.
	   We don't want the credits to pass a credit
	   of the t_fair*FAIR_MEM (algorithm resolution) */
	bp->cmng.fair_vars.upper_bound = r_param * t_fair * FAIR_MEM;
	/* since each tick is 4 usec */
	bp->cmng.fair_vars.fairness_timeout = fair_periodic_timeout_usec / 4;
}
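/* Worked numbers (derived from the comments above): at 10000 Mbps,
 * r_param = 10000/8 = 1250 bytes/usec and t_fair = T_FAIR_COEF/10000,
 * which is the quoted 1000 usec for 10G (implying T_FAIR_COEF = 10^7 in
 * these units); at 1000 Mbps the same formula gives 10000 usec.
 */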
2161 static void bnx2x_init_vn_minmax(struct bnx2x *bp, int func)
2163 struct rate_shaping_vars_per_vn m_rs_vn;
2164 struct fairness_vars_per_vn m_fair_vn;
2165 u32 vn_cfg = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
2166 u16 vn_min_rate, vn_max_rate;
2169 /* If function is hidden - set min and max to zeroes */
2170 if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) {
2175 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
2176 FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
2177 /* If fairness is enabled (not all min rates are zeroes) and
2178 if current min rate is zero - set it to 1.
2179 This is a requirement of the algorithm. */
2180 if (bp->vn_weight_sum && (vn_min_rate == 0))
2181 vn_min_rate = DEF_MIN_RATE;
2182 vn_max_rate = ((vn_cfg & FUNC_MF_CFG_MAX_BW_MASK) >>
2183 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
2187 "func %d: vn_min_rate=%d vn_max_rate=%d vn_weight_sum=%d\n",
2188 func, vn_min_rate, vn_max_rate, bp->vn_weight_sum);
2190 memset(&m_rs_vn, 0, sizeof(struct rate_shaping_vars_per_vn));
2191 memset(&m_fair_vn, 0, sizeof(struct fairness_vars_per_vn));
2193 /* global vn counter - maximal Mbps for this vn */
2194 m_rs_vn.vn_counter.rate = vn_max_rate;
2196 /* quota - number of bytes transmitted in this period */
2197 m_rs_vn.vn_counter.quota =
2198 (vn_max_rate * RS_PERIODIC_TIMEOUT_USEC) / 8;
2200 if (bp->vn_weight_sum) {
2201 /* credit for each period of the fairness algorithm:
2202 number of bytes in T_FAIR (the vn share the port rate).
2203 vn_weight_sum should not be larger than 10000, thus
2204 T_FAIR_COEF / (8 * vn_weight_sum) will always be greater
2206 m_fair_vn.vn_credit_delta =
2207 max((u32)(vn_min_rate * (T_FAIR_COEF /
2208 (8 * bp->vn_weight_sum))),
2209 (u32)(bp->cmng.fair_vars.fair_threshold * 2));
2210 DP(NETIF_MSG_IFUP, "m_fair_vn.vn_credit_delta=%d\n",
2211 m_fair_vn.vn_credit_delta);
2214 /* Store it to internal memory */
2215 for (i = 0; i < sizeof(struct rate_shaping_vars_per_vn)/4; i++)
2216 REG_WR(bp, BAR_XSTRORM_INTMEM +
2217 XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func) + i * 4,
2218 ((u32 *)(&m_rs_vn))[i]);
2220 for (i = 0; i < sizeof(struct fairness_vars_per_vn)/4; i++)
2221 REG_WR(bp, BAR_XSTRORM_INTMEM +
2222 XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func) + i * 4,
2223 ((u32 *)(&m_fair_vn))[i]);
2227 /* This function is called upon link interrupt */
2228 static void bnx2x_link_attn(struct bnx2x *bp)
2230 /* Make sure that we are synced with the current statistics */
2231 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2233 bnx2x_link_update(&bp->link_params, &bp->link_vars);
2235 if (bp->link_vars.link_up) {
2237 /* dropless flow control */
2238 if (CHIP_IS_E1H(bp)) {
2239 int port = BP_PORT(bp);
2240 u32 pause_enabled = 0;
2242 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
2245 REG_WR(bp, BAR_USTRORM_INTMEM +
2246 USTORM_PAUSE_ENABLED_OFFSET(port),
2250 if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
2251 struct host_port_stats *pstats;
2253 pstats = bnx2x_sp(bp, port_stats);
2254 /* reset old bmac stats */
2255 memset(&(pstats->mac_stx[0]), 0,
2256 sizeof(struct mac_stx));
2258 if ((bp->state == BNX2X_STATE_OPEN) ||
2259 (bp->state == BNX2X_STATE_DISABLED))
2260 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2263 /* indicate link status */
2264 bnx2x_link_report(bp);
2267 int port = BP_PORT(bp);
2271 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
2272 if (vn == BP_E1HVN(bp))
2275 func = ((vn << 1) | port);
2277 /* Set the attention towards other drivers on the same port */
2279 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
2280 (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
2283 if (bp->link_vars.link_up) {
2286 /* Init rate shaping and fairness contexts */
2287 bnx2x_init_port_minmax(bp);
2289 for (vn = VN_0; vn < E1HVN_MAX; vn++)
2290 bnx2x_init_vn_minmax(bp, 2*vn + port);
2292 /* Store it to internal memory */
2294 i < sizeof(struct cmng_struct_per_port) / 4; i++)
2295 REG_WR(bp, BAR_XSTRORM_INTMEM +
2296 XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i*4,
2297 ((u32 *)(&bp->cmng))[i]);
2302 static void bnx2x__link_status_update(struct bnx2x *bp)
2304 if (bp->state != BNX2X_STATE_OPEN)
2307 bnx2x_link_status_update(&bp->link_params, &bp->link_vars);
2309 if (bp->link_vars.link_up)
2310 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2312 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2314 /* indicate link status */
2315 bnx2x_link_report(bp);
2318 static void bnx2x_pmf_update(struct bnx2x *bp)
2320 int port = BP_PORT(bp);
2324 DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
2326 /* enable nig attention */
2327 val = (0xff0f | (1 << (BP_E1HVN(bp) + 4)));
2328 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
2329 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
2331 bnx2x_stats_handle(bp, STATS_EVENT_PMF);
2339 * General service functions
2342 /* the slow path queue is odd since completions arrive on the fastpath ring */
2343 static int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
2344 u32 data_hi, u32 data_lo, int common)
2346 int func = BP_FUNC(bp);
2348 DP(BNX2X_MSG_SP/*NETIF_MSG_TIMER*/,
2349 "SPQE (%x:%x) command %d hw_cid %x data (%x:%x) left %x\n",
2350 (u32)U64_HI(bp->spq_mapping), (u32)(U64_LO(bp->spq_mapping) +
2351 (void *)bp->spq_prod_bd - (void *)bp->spq), command,
2352 HW_CID(bp, cid), data_hi, data_lo, bp->spq_left);
2354 #ifdef BNX2X_STOP_ON_ERROR
2355 if (unlikely(bp->panic))
2359 spin_lock_bh(&bp->spq_lock);
2361 if (!bp->spq_left) {
2362 BNX2X_ERR("BUG! SPQ ring full!\n");
2363 spin_unlock_bh(&bp->spq_lock);
2368 /* CID needs port number to be encoded in it */
2369 bp->spq_prod_bd->hdr.conn_and_cmd_data =
2370 cpu_to_le32(((command << SPE_HDR_CMD_ID_SHIFT) |
2372 bp->spq_prod_bd->hdr.type = cpu_to_le16(ETH_CONNECTION_TYPE);
2374 bp->spq_prod_bd->hdr.type |=
2375 cpu_to_le16((1 << SPE_HDR_COMMON_RAMROD_SHIFT));
2377 bp->spq_prod_bd->data.mac_config_addr.hi = cpu_to_le32(data_hi);
2378 bp->spq_prod_bd->data.mac_config_addr.lo = cpu_to_le32(data_lo);
2382 if (bp->spq_prod_bd == bp->spq_last_bd) {
2383 bp->spq_prod_bd = bp->spq;
2384 bp->spq_prod_idx = 0;
2385 DP(NETIF_MSG_TIMER, "end of spq\n");
2392 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func),
2395 spin_unlock_bh(&bp->spq_lock);
2399 /* acquire split MCP access lock register */
2400 static int bnx2x_acquire_alr(struct bnx2x *bp)
2407 for (j = 0; j < i*10; j++) {
2409 REG_WR(bp, GRCBASE_MCP + 0x9c, val);
2410 val = REG_RD(bp, GRCBASE_MCP + 0x9c);
2411 if (val & (1L << 31))
2416 if (!(val & (1L << 31))) {
2417 BNX2X_ERR("Cannot acquire MCP access lock register\n");
2424 /* release split MCP access lock register */
2425 static void bnx2x_release_alr(struct bnx2x *bp)
2429 REG_WR(bp, GRCBASE_MCP + 0x9c, val);
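/*
 * Editor's sketch (illustrative only): the bounded-poll pattern used by
 * bnx2x_acquire_alr() above -- request the lock, then re-read the register
 * until bit 31 (the grant) appears or the retries run out. The register is
 * faked with a counter here; demo_* names are hypothetical.
 */
static unsigned int demo_polls_left = 3;	/* pretend HW grants on poll 3 */

static unsigned int demo_reg_read(void)
{
	if (demo_polls_left > 1) {
		demo_polls_left--;
		return 0;			/* not granted yet */
	}
	return 1u << 31;			/* grant bit set */
}

static int demo_acquire_lock(int retries)
{
	unsigned int val = 0;
	int j;

	for (j = 0; j < retries; j++) {
		/* the driver first writes the request value (REG_WR) */
		val = demo_reg_read();
		if (val & (1u << 31))
			break;
		/* the driver sleeps briefly between polls */
	}
	return (val & (1u << 31)) ? 0 : -1;	/* -1: cannot acquire lock */
}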
2432 static inline u16 bnx2x_update_dsb_idx(struct bnx2x *bp)
2434 struct host_def_status_block *def_sb = bp->def_status_blk;
2437 barrier(); /* status block is written to by the chip */
2438 if (bp->def_att_idx != def_sb->atten_status_block.attn_bits_index) {
2439 bp->def_att_idx = def_sb->atten_status_block.attn_bits_index;
2442 if (bp->def_c_idx != def_sb->c_def_status_block.status_block_index) {
2443 bp->def_c_idx = def_sb->c_def_status_block.status_block_index;
2446 if (bp->def_u_idx != def_sb->u_def_status_block.status_block_index) {
2447 bp->def_u_idx = def_sb->u_def_status_block.status_block_index;
2450 if (bp->def_x_idx != def_sb->x_def_status_block.status_block_index) {
2451 bp->def_x_idx = def_sb->x_def_status_block.status_block_index;
2454 if (bp->def_t_idx != def_sb->t_def_status_block.status_block_index) {
2455 bp->def_t_idx = def_sb->t_def_status_block.status_block_index;
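/*
 * Editor's sketch (illustrative only): the change-detection idea behind
 * bnx2x_update_dsb_idx() -- compare each cached index with the value the
 * chip last wrote, refresh the cache, and report which indices moved as a
 * bitmask (the rc |= ... accumulation is elided in the listing above).
 * demo_* names are hypothetical.
 */
#include <stdint.h>

static uint16_t demo_update_idx(uint16_t cache[], const uint16_t hw[], int n)
{
	uint16_t rc = 0;
	int i;

	for (i = 0; i < n; i++) {
		if (cache[i] != hw[i]) {
			cache[i] = hw[i];
			rc |= (uint16_t)(1u << i);	/* index i was updated */
		}
	}
	return rc;
}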
2462 * slow path service functions
2465 static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
2467 int port = BP_PORT(bp);
2468 u32 hc_addr = (HC_REG_COMMAND_REG + port*32 +
2469 COMMAND_REG_ATTN_BITS_SET);
2470 u32 aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
2471 MISC_REG_AEU_MASK_ATTN_FUNC_0;
2472 u32 nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 :
2473 NIG_REG_MASK_INTERRUPT_PORT0;
2477 if (bp->attn_state & asserted)
2478 BNX2X_ERR("IGU ERROR\n");
2480 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2481 aeu_mask = REG_RD(bp, aeu_addr);
2483 DP(NETIF_MSG_HW, "aeu_mask %x newly asserted %x\n",
2484 aeu_mask, asserted);
2485 aeu_mask &= ~(asserted & 0xff);
2486 DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
2488 REG_WR(bp, aeu_addr, aeu_mask);
2489 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2491 DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
2492 bp->attn_state |= asserted;
2493 DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
2495 if (asserted & ATTN_HARD_WIRED_MASK) {
2496 if (asserted & ATTN_NIG_FOR_FUNC) {
2498 bnx2x_acquire_phy_lock(bp);
2500 /* save nig interrupt mask */
2501 nig_mask = REG_RD(bp, nig_int_mask_addr);
2502 REG_WR(bp, nig_int_mask_addr, 0);
2504 bnx2x_link_attn(bp);
2506 /* handle unicore attn? */
2508 if (asserted & ATTN_SW_TIMER_4_FUNC)
2509 DP(NETIF_MSG_HW, "ATTN_SW_TIMER_4_FUNC!\n");
2511 if (asserted & GPIO_2_FUNC)
2512 DP(NETIF_MSG_HW, "GPIO_2_FUNC!\n");
2514 if (asserted & GPIO_3_FUNC)
2515 DP(NETIF_MSG_HW, "GPIO_3_FUNC!\n");
2517 if (asserted & GPIO_4_FUNC)
2518 DP(NETIF_MSG_HW, "GPIO_4_FUNC!\n");
2521 if (asserted & ATTN_GENERAL_ATTN_1) {
2522 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_1!\n");
2523 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_1, 0x0);
2525 if (asserted & ATTN_GENERAL_ATTN_2) {
2526 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_2!\n");
2527 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_2, 0x0);
2529 if (asserted & ATTN_GENERAL_ATTN_3) {
2530 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_3!\n");
2531 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_3, 0x0);
2534 if (asserted & ATTN_GENERAL_ATTN_4) {
2535 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_4!\n");
2536 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_4, 0x0);
2538 if (asserted & ATTN_GENERAL_ATTN_5) {
2539 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_5!\n");
2540 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_5, 0x0);
2542 if (asserted & ATTN_GENERAL_ATTN_6) {
2543 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_6!\n");
2544 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_6, 0x0);
2548 } /* if hardwired */
2550 DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
2552 REG_WR(bp, hc_addr, asserted);
2554 /* now set back the mask */
2555 if (asserted & ATTN_NIG_FOR_FUNC) {
2556 REG_WR(bp, nig_int_mask_addr, nig_mask);
2557 bnx2x_release_phy_lock(bp);
2561 static inline void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
2563 int port = BP_PORT(bp);
2567 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
2568 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
2570 if (attn & AEU_INPUTS_ATTN_BITS_SPIO5) {
2572 val = REG_RD(bp, reg_offset);
2573 val &= ~AEU_INPUTS_ATTN_BITS_SPIO5;
2574 REG_WR(bp, reg_offset, val);
2576 BNX2X_ERR("SPIO5 hw attention\n");
2578 switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
2579 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
2580 /* Fan failure attention */
2582 /* The PHY reset is controlled by GPIO 1 */
2583 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
2584 MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
2585 /* Low power mode is controlled by GPIO 2 */
2586 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
2587 MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
2588 /* mark the failure */
2589 bp->link_params.ext_phy_config &=
2590 ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
2591 bp->link_params.ext_phy_config |=
2592 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE;
2594 dev_info.port_hw_config[port].
2595 external_phy_config,
2596 bp->link_params.ext_phy_config);
2597 /* log the failure */
2598 printk(KERN_ERR PFX "Fan Failure on Network"
2599 " Controller %s has caused the driver to"
2600 " shutdown the card to prevent permanent"
2601 " damage. Please contact Dell Support for"
2602 " assistance\n", bp->dev->name);
2610 if (attn & (AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 |
2611 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1)) {
2612 bnx2x_acquire_phy_lock(bp);
2613 bnx2x_handle_module_detect_int(&bp->link_params);
2614 bnx2x_release_phy_lock(bp);
2617 if (attn & HW_INTERRUT_ASSERT_SET_0) {
2619 val = REG_RD(bp, reg_offset);
2620 val &= ~(attn & HW_INTERRUT_ASSERT_SET_0);
2621 REG_WR(bp, reg_offset, val);
2623 BNX2X_ERR("FATAL HW block attention set0 0x%x\n",
2624 (attn & HW_INTERRUT_ASSERT_SET_0));
2629 static inline void bnx2x_attn_int_deasserted1(struct bnx2x *bp, u32 attn)
2633 if (attn & BNX2X_DOORQ_ASSERT) {
2635 val = REG_RD(bp, DORQ_REG_DORQ_INT_STS_CLR);
2636 BNX2X_ERR("DB hw attention 0x%x\n", val);
2637 /* DORQ discard attention */
2639 BNX2X_ERR("FATAL error from DORQ\n");
2642 if (attn & HW_INTERRUT_ASSERT_SET_1) {
2644 int port = BP_PORT(bp);
2647 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 :
2648 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1);
2650 val = REG_RD(bp, reg_offset);
2651 val &= ~(attn & HW_INTERRUT_ASSERT_SET_1);
2652 REG_WR(bp, reg_offset, val);
2654 BNX2X_ERR("FATAL HW block attention set1 0x%x\n",
2655 (attn & HW_INTERRUT_ASSERT_SET_1));
2660 static inline void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn)
2664 if (attn & AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) {
2666 val = REG_RD(bp, CFC_REG_CFC_INT_STS_CLR);
2667 BNX2X_ERR("CFC hw attention 0x%x\n", val);
2668 /* CFC error attention */
2670 BNX2X_ERR("FATAL error from CFC\n");
2673 if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) {
2675 val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_0);
2676 BNX2X_ERR("PXP hw attention 0x%x\n", val);
2677 /* RQ_USDMDP_FIFO_OVERFLOW */
2679 BNX2X_ERR("FATAL error from PXP\n");
2682 if (attn & HW_INTERRUT_ASSERT_SET_2) {
2684 int port = BP_PORT(bp);
2687 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 :
2688 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2);
2690 val = REG_RD(bp, reg_offset);
2691 val &= ~(attn & HW_INTERRUT_ASSERT_SET_2);
2692 REG_WR(bp, reg_offset, val);
2694 BNX2X_ERR("FATAL HW block attention set2 0x%x\n",
2695 (attn & HW_INTERRUT_ASSERT_SET_2));
2700 static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
2704 if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) {
2706 if (attn & BNX2X_PMF_LINK_ASSERT) {
2707 int func = BP_FUNC(bp);
2709 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
2710 bnx2x__link_status_update(bp);
2711 if (SHMEM_RD(bp, func_mb[func].drv_status) &
2713 bnx2x_pmf_update(bp);
2715 } else if (attn & BNX2X_MC_ASSERT_BITS) {
2717 BNX2X_ERR("MC assert!\n");
2718 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_10, 0);
2719 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_9, 0);
2720 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_8, 0);
2721 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_7, 0);
2724 } else if (attn & BNX2X_MCP_ASSERT) {
2726 BNX2X_ERR("MCP assert!\n");
2727 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_11, 0);
2731 BNX2X_ERR("Unknown HW assert! (attn 0x%x)\n", attn);
2734 if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) {
2735 BNX2X_ERR("LATCHED attention 0x%08x (masked)\n", attn);
2736 if (attn & BNX2X_GRC_TIMEOUT) {
2737 val = CHIP_IS_E1H(bp) ?
2738 REG_RD(bp, MISC_REG_GRC_TIMEOUT_ATTN) : 0;
2739 BNX2X_ERR("GRC time-out 0x%08x\n", val);
2741 if (attn & BNX2X_GRC_RSV) {
2742 val = CHIP_IS_E1H(bp) ?
2743 REG_RD(bp, MISC_REG_GRC_RSV_ATTN) : 0;
2744 BNX2X_ERR("GRC reserved 0x%08x\n", val);
2746 REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff);
2750 static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
2752 struct attn_route attn;
2753 struct attn_route group_mask;
2754 int port = BP_PORT(bp);
2760 /* need to take HW lock because MCP or other port might also
2761 try to handle this event */
2762 bnx2x_acquire_alr(bp);
2764 attn.sig[0] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
2765 attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
2766 attn.sig[2] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
2767 attn.sig[3] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
2768 DP(NETIF_MSG_HW, "attn: %08x %08x %08x %08x\n",
2769 attn.sig[0], attn.sig[1], attn.sig[2], attn.sig[3]);
2771 for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
2772 if (deasserted & (1 << index)) {
2773 group_mask = bp->attn_group[index];
2775 DP(NETIF_MSG_HW, "group[%d]: %08x %08x %08x %08x\n",
2776 index, group_mask.sig[0], group_mask.sig[1],
2777 group_mask.sig[2], group_mask.sig[3]);
2779 bnx2x_attn_int_deasserted3(bp,
2780 attn.sig[3] & group_mask.sig[3]);
2781 bnx2x_attn_int_deasserted1(bp,
2782 attn.sig[1] & group_mask.sig[1]);
2783 bnx2x_attn_int_deasserted2(bp,
2784 attn.sig[2] & group_mask.sig[2]);
2785 bnx2x_attn_int_deasserted0(bp,
2786 attn.sig[0] & group_mask.sig[0]);
2788 if ((attn.sig[0] & group_mask.sig[0] &
2789 HW_PRTY_ASSERT_SET_0) ||
2790 (attn.sig[1] & group_mask.sig[1] &
2791 HW_PRTY_ASSERT_SET_1) ||
2792 (attn.sig[2] & group_mask.sig[2] &
2793 HW_PRTY_ASSERT_SET_2))
2794 BNX2X_ERR("FATAL HW block parity attention\n");
2798 bnx2x_release_alr(bp);
2800 reg_addr = (HC_REG_COMMAND_REG + port*32 + COMMAND_REG_ATTN_BITS_CLR);
2803 DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
2805 REG_WR(bp, reg_addr, val);
2807 if (~bp->attn_state & deasserted)
2808 BNX2X_ERR("IGU ERROR\n");
2810 reg_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
2811 MISC_REG_AEU_MASK_ATTN_FUNC_0;
2813 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2814 aeu_mask = REG_RD(bp, reg_addr);
2816 DP(NETIF_MSG_HW, "aeu_mask %x newly deasserted %x\n",
2817 aeu_mask, deasserted);
2818 aeu_mask |= (deasserted & 0xff);
2819 DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
2821 REG_WR(bp, reg_addr, aeu_mask);
2822 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2824 DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
2825 bp->attn_state &= ~deasserted;
2826 DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
2829 static void bnx2x_attn_int(struct bnx2x *bp)
2831 /* read local copy of bits */
2832 u32 attn_bits = le32_to_cpu(bp->def_status_blk->atten_status_block.
2834 u32 attn_ack = le32_to_cpu(bp->def_status_blk->atten_status_block.
2836 u32 attn_state = bp->attn_state;
2838 /* look for changed bits */
2839 u32 asserted = attn_bits & ~attn_ack & ~attn_state;
2840 u32 deasserted = ~attn_bits & attn_ack & attn_state;
2843 "attn_bits %x attn_ack %x asserted %x deasserted %x\n",
2844 attn_bits, attn_ack, asserted, deasserted);
2846 if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state))
2847 BNX2X_ERR("BAD attention state\n");
2849 /* handle bits that were raised */
2851 bnx2x_attn_int_asserted(bp, asserted);
2854 bnx2x_attn_int_deasserted(bp, deasserted);
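/*
 * Editor's sketch (illustrative only): the edge detection performed by
 * bnx2x_attn_int() above. A bit is newly asserted when it is raised in
 * attn_bits but present in neither the ack nor the driver state; it is
 * newly deasserted when it has dropped from attn_bits but is still set in
 * both. Values below are made up to show one of each.
 */
#include <stdio.h>

int main(void)
{
	unsigned int attn_bits  = 0x5;	/* bits currently raised by HW */
	unsigned int attn_ack   = 0x3;	/* bits already acknowledged */
	unsigned int attn_state = 0x3;	/* bits the driver believes are up */

	unsigned int asserted   =  attn_bits & ~attn_ack & ~attn_state;
	unsigned int deasserted = ~attn_bits &  attn_ack &  attn_state;

	/* prints asserted=0x4 deasserted=0x2 */
	printf("asserted=0x%x deasserted=0x%x\n", asserted, deasserted);
	return 0;
}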
2857 static void bnx2x_sp_task(struct work_struct *work)
2859 struct bnx2x *bp = container_of(work, struct bnx2x, sp_task.work);
2863 /* Return here if interrupt is disabled */
2864 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
2865 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
2869 status = bnx2x_update_dsb_idx(bp);
2870 /* if (status == 0) */
2871 /* BNX2X_ERR("spurious slowpath interrupt!\n"); */
2873 DP(NETIF_MSG_INTR, "got a slowpath interrupt (updated %x)\n", status);
2879 bnx2x_ack_sb(bp, DEF_SB_ID, ATTENTION_ID, le16_to_cpu(bp->def_att_idx),
2881 bnx2x_ack_sb(bp, DEF_SB_ID, USTORM_ID, le16_to_cpu(bp->def_u_idx),
2883 bnx2x_ack_sb(bp, DEF_SB_ID, CSTORM_ID, le16_to_cpu(bp->def_c_idx),
2885 bnx2x_ack_sb(bp, DEF_SB_ID, XSTORM_ID, le16_to_cpu(bp->def_x_idx),
2887 bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, le16_to_cpu(bp->def_t_idx),
2892 static irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
2894 struct net_device *dev = dev_instance;
2895 struct bnx2x *bp = netdev_priv(dev);
2897 /* Return here if interrupt is disabled */
2898 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
2899 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
2903 bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, 0, IGU_INT_DISABLE, 0);
2905 #ifdef BNX2X_STOP_ON_ERROR
2906 if (unlikely(bp->panic))
2910 queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
2915 /* end of slow path */
2919 /****************************************************************************
2921 ****************************************************************************/
2923 /* sum[hi:lo] += add[hi:lo] */
2924 #define ADD_64(s_hi, a_hi, s_lo, a_lo) \
2927 s_hi += a_hi + ((s_lo < a_lo) ? 1 : 0); \
2930 /* difference = minuend - subtrahend */
2931 #define DIFF_64(d_hi, m_hi, s_hi, d_lo, m_lo, s_lo) \
2933 if (m_lo < s_lo) { \
2935 d_hi = m_hi - s_hi; \
2937 /* we can 'loan' 1 */ \
2939 d_lo = m_lo + (UINT_MAX - s_lo) + 1; \
2941 /* m_hi <= s_hi */ \
2946 /* m_lo >= s_lo */ \
2947 if (m_hi < s_hi) { \
2951 /* m_hi >= s_hi */ \
2952 d_hi = m_hi - s_hi; \
2953 d_lo = m_lo - s_lo; \
2958 #define UPDATE_STAT64(s, t) \
2960 DIFF_64(diff.hi, new->s##_hi, pstats->mac_stx[0].t##_hi, \
2961 diff.lo, new->s##_lo, pstats->mac_stx[0].t##_lo); \
2962 pstats->mac_stx[0].t##_hi = new->s##_hi; \
2963 pstats->mac_stx[0].t##_lo = new->s##_lo; \
2964 ADD_64(pstats->mac_stx[1].t##_hi, diff.hi, \
2965 pstats->mac_stx[1].t##_lo, diff.lo); \
2968 #define UPDATE_STAT64_NIG(s, t) \
2970 DIFF_64(diff.hi, new->s##_hi, old->s##_hi, \
2971 diff.lo, new->s##_lo, old->s##_lo); \
2972 ADD_64(estats->t##_hi, diff.hi, \
2973 estats->t##_lo, diff.lo); \
2976 /* sum[hi:lo] += add */
2977 #define ADD_EXTEND_64(s_hi, s_lo, a) \
2980 s_hi += (s_lo < a) ? 1 : 0; \
2983 #define UPDATE_EXTEND_STAT(s) \
2985 ADD_EXTEND_64(pstats->mac_stx[1].s##_hi, \
2986 pstats->mac_stx[1].s##_lo, \
2990 #define UPDATE_EXTEND_TSTAT(s, t) \
2992 diff = le32_to_cpu(tclient->s) - old_tclient->s; \
2993 old_tclient->s = le32_to_cpu(tclient->s); \
2994 ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
2997 #define UPDATE_EXTEND_USTAT(s, t) \
2999 diff = le32_to_cpu(uclient->s) - le32_to_cpu(old_uclient->s); \
3000 old_uclient->s = uclient->s; \
3001 ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3004 #define UPDATE_EXTEND_XSTAT(s, t) \
3006 diff = le32_to_cpu(xclient->s) - old_xclient->s; \
3007 old_xclient->s = le32_to_cpu(xclient->s); \
3008 ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3011 /* minuend -= subtrahend */
3012 #define SUB_64(m_hi, s_hi, m_lo, s_lo) \
3014 DIFF_64(m_hi, m_hi, s_hi, m_lo, m_lo, s_lo); \
3017 /* minuend[hi:lo] -= subtrahend */
3018 #define SUB_EXTEND_64(m_hi, m_lo, s) \
3020 SUB_64(m_hi, 0, m_lo, s); \
3023 #define SUB_EXTEND_USTAT(s, t) \
3025 diff = le32_to_cpu(uclient->s) - le32_to_cpu(old_uclient->s); \
3026 SUB_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
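/*
 * Editor's sketch (illustrative only): the hand-rolled carry propagation
 * behind ADD_64 above, checked against native 64-bit math. The firmware
 * keeps wide counters as two 32-bit halves, so a carry out of the low
 * half must be added to the high half explicitly. demo_* names are
 * hypothetical.
 */
#include <stdio.h>
#include <stdint.h>

static void demo_add64(uint32_t *s_hi, uint32_t a_hi,
		       uint32_t *s_lo, uint32_t a_lo)
{
	*s_lo += a_lo;
	*s_hi += a_hi + (*s_lo < a_lo ? 1 : 0);	/* carry out of low half */
}

int main(void)
{
	uint32_t hi = 0, lo = 0xffffffffu;

	demo_add64(&hi, 0, &lo, 2);		/* forces a carry */
	/* both lines print 0x100000001 */
	printf("split : 0x%x%08x\n", hi, lo);
	printf("native: 0x%llx\n", (unsigned long long)0xffffffffu + 2);
	return 0;
}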
3030 * General service functions
3033 static inline long bnx2x_hilo(u32 *hiref)
3035 u32 lo = *(hiref + 1);
3036 #if (BITS_PER_LONG == 64)
3039 return HILO_U64(hi, lo);
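/*
 * Editor's sketch (illustrative only): what bnx2x_hilo() evaluates to.
 * With a 64-bit long the two halves are combined; with a 32-bit long only
 * the low word fits, so the high word is dropped. Uses LONG_MAX instead
 * of the kernel's BITS_PER_LONG; demo_* names are hypothetical.
 */
#include <stdint.h>
#include <limits.h>

static long demo_hilo(const uint32_t *hiref)
{
	uint32_t lo = *(hiref + 1);	/* counters store the high word first */
#if (LONG_MAX > 0x7fffffffL)		/* 64-bit long */
	uint32_t hi = *hiref;

	return (long)(((uint64_t)hi << 32) | lo);
#else
	return (long)lo;		/* high word lost on 32-bit longs */
#endif
}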
3046 * Init service functions
3049 static void bnx2x_storm_stats_post(struct bnx2x *bp)
3051 if (!bp->stats_pending) {
3052 struct eth_query_ramrod_data ramrod_data = {0};
3055 ramrod_data.drv_counter = bp->stats_counter++;
3056 ramrod_data.collect_port = bp->port.pmf ? 1 : 0;
3057 for_each_queue(bp, i)
3058 ramrod_data.ctr_id_vector |= (1 << bp->fp[i].cl_id);
3060 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_STAT_QUERY, 0,
3061 ((u32 *)&ramrod_data)[1],
3062 ((u32 *)&ramrod_data)[0], 0);
3064 /* stats ramrod has its own slot on the spq */
3066 bp->stats_pending = 1;
3071 static void bnx2x_stats_init(struct bnx2x *bp)
3073 int port = BP_PORT(bp);
3076 bp->stats_pending = 0;
3077 bp->executer_idx = 0;
3078 bp->stats_counter = 0;
3082 bp->port.port_stx = SHMEM_RD(bp, port_mb[port].port_stx);
3084 bp->port.port_stx = 0;
3085 DP(BNX2X_MSG_STATS, "port_stx 0x%x\n", bp->port.port_stx);
3087 memset(&(bp->port.old_nig_stats), 0, sizeof(struct nig_stats));
3088 bp->port.old_nig_stats.brb_discard =
3089 REG_RD(bp, NIG_REG_STAT0_BRB_DISCARD + port*0x38);
3090 bp->port.old_nig_stats.brb_truncate =
3091 REG_RD(bp, NIG_REG_STAT0_BRB_TRUNCATE + port*0x38);
3092 REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT0 + port*0x50,
3093 &(bp->port.old_nig_stats.egress_mac_pkt0_lo), 2);
3094 REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT1 + port*0x50,
3095 &(bp->port.old_nig_stats.egress_mac_pkt1_lo), 2);
3097 /* function stats */
3098 for_each_queue(bp, i) {
3099 struct bnx2x_fastpath *fp = &bp->fp[i];
3101 memset(&fp->old_tclient, 0,
3102 sizeof(struct tstorm_per_client_stats));
3103 memset(&fp->old_uclient, 0,
3104 sizeof(struct ustorm_per_client_stats));
3105 memset(&fp->old_xclient, 0,
3106 sizeof(struct xstorm_per_client_stats));
3107 memset(&fp->eth_q_stats, 0, sizeof(struct bnx2x_eth_q_stats));
3110 memset(&bp->dev->stats, 0, sizeof(struct net_device_stats));
3111 memset(&bp->eth_stats, 0, sizeof(struct bnx2x_eth_stats));
3113 bp->stats_state = STATS_STATE_DISABLED;
3114 if (IS_E1HMF(bp) && bp->port.pmf && bp->port.port_stx)
3115 bnx2x_stats_handle(bp, STATS_EVENT_PMF);
3118 static void bnx2x_hw_stats_post(struct bnx2x *bp)
3120 struct dmae_command *dmae = &bp->stats_dmae;
3121 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3123 *stats_comp = DMAE_COMP_VAL;
3124 if (CHIP_REV_IS_SLOW(bp))
3128 if (bp->executer_idx) {
3129 int loader_idx = PMF_DMAE_C(bp);
3131 memset(dmae, 0, sizeof(struct dmae_command));
3133 dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3134 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3135 DMAE_CMD_DST_RESET |
3137 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3139 DMAE_CMD_ENDIANITY_DW_SWAP |
3141 (BP_PORT(bp) ? DMAE_CMD_PORT_1 :
3143 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3144 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, dmae[0]));
3145 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, dmae[0]));
3146 dmae->dst_addr_lo = (DMAE_REG_CMD_MEM +
3147 sizeof(struct dmae_command) *
3148 (loader_idx + 1)) >> 2;
3149 dmae->dst_addr_hi = 0;
3150 dmae->len = sizeof(struct dmae_command) >> 2;
3153 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx + 1] >> 2;
3154 dmae->comp_addr_hi = 0;
3158 bnx2x_post_dmae(bp, dmae, loader_idx);
3160 } else if (bp->func_stx) {
3162 bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
3166 static int bnx2x_stats_comp(struct bnx2x *bp)
3168 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3172 while (*stats_comp != DMAE_COMP_VAL) {
3174 BNX2X_ERR("timeout waiting for stats finished\n");
3184 * Statistics service functions
3187 static void bnx2x_stats_pmf_update(struct bnx2x *bp)
3189 struct dmae_command *dmae;
3191 int loader_idx = PMF_DMAE_C(bp);
3192 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3195 if (!IS_E1HMF(bp) || !bp->port.pmf || !bp->port.port_stx) {
3196 BNX2X_ERR("BUG!\n");
3200 bp->executer_idx = 0;
3202 opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3204 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3206 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3208 DMAE_CMD_ENDIANITY_DW_SWAP |
3210 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3211 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3213 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3214 dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
3215 dmae->src_addr_lo = bp->port.port_stx >> 2;
3216 dmae->src_addr_hi = 0;
3217 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3218 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3219 dmae->len = DMAE_LEN32_RD_MAX;
3220 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3221 dmae->comp_addr_hi = 0;
3224 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3225 dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
3226 dmae->src_addr_lo = (bp->port.port_stx >> 2) + DMAE_LEN32_RD_MAX;
3227 dmae->src_addr_hi = 0;
3228 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats) +
3229 DMAE_LEN32_RD_MAX * 4);
3230 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats) +
3231 DMAE_LEN32_RD_MAX * 4);
3232 dmae->len = (sizeof(struct host_port_stats) >> 2) - DMAE_LEN32_RD_MAX;
3233 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3234 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3235 dmae->comp_val = DMAE_COMP_VAL;
3238 bnx2x_hw_stats_post(bp);
3239 bnx2x_stats_comp(bp);
3242 static void bnx2x_port_stats_init(struct bnx2x *bp)
3244 struct dmae_command *dmae;
3245 int port = BP_PORT(bp);
3246 int vn = BP_E1HVN(bp);
3248 int loader_idx = PMF_DMAE_C(bp);
3250 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3253 if (!bp->link_vars.link_up || !bp->port.pmf) {
3254 BNX2X_ERR("BUG!\n");
3258 bp->executer_idx = 0;
3261 opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3262 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3263 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3265 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3267 DMAE_CMD_ENDIANITY_DW_SWAP |
3269 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3270 (vn << DMAE_CMD_E1HVN_SHIFT));
3272 if (bp->port.port_stx) {
3274 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3275 dmae->opcode = opcode;
3276 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3277 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3278 dmae->dst_addr_lo = bp->port.port_stx >> 2;
3279 dmae->dst_addr_hi = 0;
3280 dmae->len = sizeof(struct host_port_stats) >> 2;
3281 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3282 dmae->comp_addr_hi = 0;
3288 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3289 dmae->opcode = opcode;
3290 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
3291 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
3292 dmae->dst_addr_lo = bp->func_stx >> 2;
3293 dmae->dst_addr_hi = 0;
3294 dmae->len = sizeof(struct host_func_stats) >> 2;
3295 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3296 dmae->comp_addr_hi = 0;
3301 opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3302 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3303 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3305 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3307 DMAE_CMD_ENDIANITY_DW_SWAP |
3309 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3310 (vn << DMAE_CMD_E1HVN_SHIFT));
3312 if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
3314 mac_addr = (port ? NIG_REG_INGRESS_BMAC1_MEM :
3315 NIG_REG_INGRESS_BMAC0_MEM);
3317 /* BIGMAC_REGISTER_TX_STAT_GTPKT ..
3318 BIGMAC_REGISTER_TX_STAT_GTBYT */
3319 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3320 dmae->opcode = opcode;
3321 dmae->src_addr_lo = (mac_addr +
3322 BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
3323 dmae->src_addr_hi = 0;
3324 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
3325 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
3326 dmae->len = (8 + BIGMAC_REGISTER_TX_STAT_GTBYT -
3327 BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
3328 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3329 dmae->comp_addr_hi = 0;
3332 /* BIGMAC_REGISTER_RX_STAT_GR64 ..
3333 BIGMAC_REGISTER_RX_STAT_GRIPJ */
3334 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3335 dmae->opcode = opcode;
3336 dmae->src_addr_lo = (mac_addr +
3337 BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
3338 dmae->src_addr_hi = 0;
3339 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
3340 offsetof(struct bmac_stats, rx_stat_gr64_lo));
3341 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
3342 offsetof(struct bmac_stats, rx_stat_gr64_lo));
3343 dmae->len = (8 + BIGMAC_REGISTER_RX_STAT_GRIPJ -
3344 BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
3345 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3346 dmae->comp_addr_hi = 0;
3349 } else if (bp->link_vars.mac_type == MAC_TYPE_EMAC) {
3351 mac_addr = (port ? GRCBASE_EMAC1 : GRCBASE_EMAC0);
3353 /* EMAC_REG_EMAC_RX_STAT_AC (EMAC_REG_EMAC_RX_STAT_AC_COUNT)*/
3354 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3355 dmae->opcode = opcode;
3356 dmae->src_addr_lo = (mac_addr +
3357 EMAC_REG_EMAC_RX_STAT_AC) >> 2;
3358 dmae->src_addr_hi = 0;
3359 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
3360 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
3361 dmae->len = EMAC_REG_EMAC_RX_STAT_AC_COUNT;
3362 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3363 dmae->comp_addr_hi = 0;
3366 /* EMAC_REG_EMAC_RX_STAT_AC_28 */
3367 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3368 dmae->opcode = opcode;
3369 dmae->src_addr_lo = (mac_addr +
3370 EMAC_REG_EMAC_RX_STAT_AC_28) >> 2;
3371 dmae->src_addr_hi = 0;
3372 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
3373 offsetof(struct emac_stats, rx_stat_falsecarriererrors));
3374 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
3375 offsetof(struct emac_stats, rx_stat_falsecarriererrors));
3377 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3378 dmae->comp_addr_hi = 0;
3381 /* EMAC_REG_EMAC_TX_STAT_AC (EMAC_REG_EMAC_TX_STAT_AC_COUNT)*/
3382 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3383 dmae->opcode = opcode;
3384 dmae->src_addr_lo = (mac_addr +
3385 EMAC_REG_EMAC_TX_STAT_AC) >> 2;
3386 dmae->src_addr_hi = 0;
3387 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
3388 offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
3389 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
3390 offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
3391 dmae->len = EMAC_REG_EMAC_TX_STAT_AC_COUNT;
3392 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3393 dmae->comp_addr_hi = 0;
3398 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3399 dmae->opcode = opcode;
3400 dmae->src_addr_lo = (port ? NIG_REG_STAT1_BRB_DISCARD :
3401 NIG_REG_STAT0_BRB_DISCARD) >> 2;
3402 dmae->src_addr_hi = 0;
3403 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats));
3404 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats));
3405 dmae->len = (sizeof(struct nig_stats) - 4*sizeof(u32)) >> 2;
3406 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3407 dmae->comp_addr_hi = 0;
3410 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3411 dmae->opcode = opcode;
3412 dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT0 :
3413 NIG_REG_STAT0_EGRESS_MAC_PKT0) >> 2;
3414 dmae->src_addr_hi = 0;
3415 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
3416 offsetof(struct nig_stats, egress_mac_pkt0_lo));
3417 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
3418 offsetof(struct nig_stats, egress_mac_pkt0_lo));
3419 dmae->len = (2*sizeof(u32)) >> 2;
3420 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3421 dmae->comp_addr_hi = 0;
3424 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3425 dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3426 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
3427 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3429 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3431 DMAE_CMD_ENDIANITY_DW_SWAP |
3433 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3434 (vn << DMAE_CMD_E1HVN_SHIFT));
3435 dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT1 :
3436 NIG_REG_STAT0_EGRESS_MAC_PKT1) >> 2;
3437 dmae->src_addr_hi = 0;
3438 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
3439 offsetof(struct nig_stats, egress_mac_pkt1_lo));
3440 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
3441 offsetof(struct nig_stats, egress_mac_pkt1_lo));
3442 dmae->len = (2*sizeof(u32)) >> 2;
3443 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3444 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3445 dmae->comp_val = DMAE_COMP_VAL;
3450 static void bnx2x_func_stats_init(struct bnx2x *bp)
3452 struct dmae_command *dmae = &bp->stats_dmae;
3453 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3456 if (!bp->func_stx) {
3457 BNX2X_ERR("BUG!\n");
3461 bp->executer_idx = 0;
3462 memset(dmae, 0, sizeof(struct dmae_command));
3464 dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3465 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
3466 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3468 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3470 DMAE_CMD_ENDIANITY_DW_SWAP |
3472 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3473 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3474 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
3475 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
3476 dmae->dst_addr_lo = bp->func_stx >> 2;
3477 dmae->dst_addr_hi = 0;
3478 dmae->len = sizeof(struct host_func_stats) >> 2;
3479 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3480 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3481 dmae->comp_val = DMAE_COMP_VAL;
3486 static void bnx2x_stats_start(struct bnx2x *bp)
3489 bnx2x_port_stats_init(bp);
3491 else if (bp->func_stx)
3492 bnx2x_func_stats_init(bp);
3494 bnx2x_hw_stats_post(bp);
3495 bnx2x_storm_stats_post(bp);
3498 static void bnx2x_stats_pmf_start(struct bnx2x *bp)
3500 bnx2x_stats_comp(bp);
3501 bnx2x_stats_pmf_update(bp);
3502 bnx2x_stats_start(bp);
3505 static void bnx2x_stats_restart(struct bnx2x *bp)
3507 bnx2x_stats_comp(bp);
3508 bnx2x_stats_start(bp);
3511 static void bnx2x_bmac_stats_update(struct bnx2x *bp)
3513 struct bmac_stats *new = bnx2x_sp(bp, mac_stats.bmac_stats);
3514 struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3515 struct bnx2x_eth_stats *estats = &bp->eth_stats;
3516 struct regpair diff;
3518 UPDATE_STAT64(rx_stat_grerb, rx_stat_ifhcinbadoctets);
3519 UPDATE_STAT64(rx_stat_grfcs, rx_stat_dot3statsfcserrors);
3520 UPDATE_STAT64(rx_stat_grund, rx_stat_etherstatsundersizepkts);
3521 UPDATE_STAT64(rx_stat_grovr, rx_stat_dot3statsframestoolong);
3522 UPDATE_STAT64(rx_stat_grfrg, rx_stat_etherstatsfragments);
3523 UPDATE_STAT64(rx_stat_grjbr, rx_stat_etherstatsjabbers);
3524 UPDATE_STAT64(rx_stat_grxcf, rx_stat_maccontrolframesreceived);
3525 UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffstateentered);
3526 UPDATE_STAT64(rx_stat_grxpf, rx_stat_bmac_xpf);
3527 UPDATE_STAT64(tx_stat_gtxpf, tx_stat_outxoffsent);
3528 UPDATE_STAT64(tx_stat_gtxpf, tx_stat_flowcontroldone);
3529 UPDATE_STAT64(tx_stat_gt64, tx_stat_etherstatspkts64octets);
3530 UPDATE_STAT64(tx_stat_gt127,
3531 tx_stat_etherstatspkts65octetsto127octets);
3532 UPDATE_STAT64(tx_stat_gt255,
3533 tx_stat_etherstatspkts128octetsto255octets);
3534 UPDATE_STAT64(tx_stat_gt511,
3535 tx_stat_etherstatspkts256octetsto511octets);
3536 UPDATE_STAT64(tx_stat_gt1023,
3537 tx_stat_etherstatspkts512octetsto1023octets);
3538 UPDATE_STAT64(tx_stat_gt1518,
3539 tx_stat_etherstatspkts1024octetsto1522octets);
3540 UPDATE_STAT64(tx_stat_gt2047, tx_stat_bmac_2047);
3541 UPDATE_STAT64(tx_stat_gt4095, tx_stat_bmac_4095);
3542 UPDATE_STAT64(tx_stat_gt9216, tx_stat_bmac_9216);
3543 UPDATE_STAT64(tx_stat_gt16383, tx_stat_bmac_16383);
3544 UPDATE_STAT64(tx_stat_gterr,
3545 tx_stat_dot3statsinternalmactransmiterrors);
3546 UPDATE_STAT64(tx_stat_gtufl, tx_stat_bmac_ufl);
3548 estats->pause_frames_received_hi =
3549 pstats->mac_stx[1].rx_stat_bmac_xpf_hi;
3550 estats->pause_frames_received_lo =
3551 pstats->mac_stx[1].rx_stat_bmac_xpf_lo;
3553 estats->pause_frames_sent_hi =
3554 pstats->mac_stx[1].tx_stat_outxoffsent_hi;
3555 estats->pause_frames_sent_lo =
3556 pstats->mac_stx[1].tx_stat_outxoffsent_lo;
3559 static void bnx2x_emac_stats_update(struct bnx2x *bp)
3561 struct emac_stats *new = bnx2x_sp(bp, mac_stats.emac_stats);
3562 struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3563 struct bnx2x_eth_stats *estats = &bp->eth_stats;
3565 UPDATE_EXTEND_STAT(rx_stat_ifhcinbadoctets);
3566 UPDATE_EXTEND_STAT(tx_stat_ifhcoutbadoctets);
3567 UPDATE_EXTEND_STAT(rx_stat_dot3statsfcserrors);
3568 UPDATE_EXTEND_STAT(rx_stat_dot3statsalignmenterrors);
3569 UPDATE_EXTEND_STAT(rx_stat_dot3statscarriersenseerrors);
3570 UPDATE_EXTEND_STAT(rx_stat_falsecarriererrors);
3571 UPDATE_EXTEND_STAT(rx_stat_etherstatsundersizepkts);
3572 UPDATE_EXTEND_STAT(rx_stat_dot3statsframestoolong);
3573 UPDATE_EXTEND_STAT(rx_stat_etherstatsfragments);
3574 UPDATE_EXTEND_STAT(rx_stat_etherstatsjabbers);
3575 UPDATE_EXTEND_STAT(rx_stat_maccontrolframesreceived);
3576 UPDATE_EXTEND_STAT(rx_stat_xoffstateentered);
3577 UPDATE_EXTEND_STAT(rx_stat_xonpauseframesreceived);
3578 UPDATE_EXTEND_STAT(rx_stat_xoffpauseframesreceived);
3579 UPDATE_EXTEND_STAT(tx_stat_outxonsent);
3580 UPDATE_EXTEND_STAT(tx_stat_outxoffsent);
3581 UPDATE_EXTEND_STAT(tx_stat_flowcontroldone);
3582 UPDATE_EXTEND_STAT(tx_stat_etherstatscollisions);
3583 UPDATE_EXTEND_STAT(tx_stat_dot3statssinglecollisionframes);
3584 UPDATE_EXTEND_STAT(tx_stat_dot3statsmultiplecollisionframes);
3585 UPDATE_EXTEND_STAT(tx_stat_dot3statsdeferredtransmissions);
3586 UPDATE_EXTEND_STAT(tx_stat_dot3statsexcessivecollisions);
3587 UPDATE_EXTEND_STAT(tx_stat_dot3statslatecollisions);
3588 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts64octets);
3589 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts65octetsto127octets);
3590 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts128octetsto255octets);
3591 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts256octetsto511octets);
3592 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts512octetsto1023octets);
3593 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts1024octetsto1522octets);
3594 UPDATE_EXTEND_STAT(tx_stat_etherstatspktsover1522octets);
3595 UPDATE_EXTEND_STAT(tx_stat_dot3statsinternalmactransmiterrors);
3597 estats->pause_frames_received_hi =
3598 pstats->mac_stx[1].rx_stat_xonpauseframesreceived_hi;
3599 estats->pause_frames_received_lo =
3600 pstats->mac_stx[1].rx_stat_xonpauseframesreceived_lo;
3601 ADD_64(estats->pause_frames_received_hi,
3602 pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_hi,
3603 estats->pause_frames_received_lo,
3604 pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_lo);
3606 estats->pause_frames_sent_hi =
3607 pstats->mac_stx[1].tx_stat_outxonsent_hi;
3608 estats->pause_frames_sent_lo =
3609 pstats->mac_stx[1].tx_stat_outxonsent_lo;
3610 ADD_64(estats->pause_frames_sent_hi,
3611 pstats->mac_stx[1].tx_stat_outxoffsent_hi,
3612 estats->pause_frames_sent_lo,
3613 pstats->mac_stx[1].tx_stat_outxoffsent_lo);
3616 static int bnx2x_hw_stats_update(struct bnx2x *bp)
3618 struct nig_stats *new = bnx2x_sp(bp, nig_stats);
3619 struct nig_stats *old = &(bp->port.old_nig_stats);
3620 struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3621 struct bnx2x_eth_stats *estats = &bp->eth_stats;
3622 struct regpair diff;
3625 if (bp->link_vars.mac_type == MAC_TYPE_BMAC)
3626 bnx2x_bmac_stats_update(bp);
3628 else if (bp->link_vars.mac_type == MAC_TYPE_EMAC)
3629 bnx2x_emac_stats_update(bp);
3631 else { /* unreached */
3632 BNX2X_ERR("stats updated by dmae but no MAC active\n");
3636 ADD_EXTEND_64(pstats->brb_drop_hi, pstats->brb_drop_lo,
3637 new->brb_discard - old->brb_discard);
3638 ADD_EXTEND_64(estats->brb_truncate_hi, estats->brb_truncate_lo,
3639 new->brb_truncate - old->brb_truncate);
3641 UPDATE_STAT64_NIG(egress_mac_pkt0,
3642 etherstatspkts1024octetsto1522octets);
3643 UPDATE_STAT64_NIG(egress_mac_pkt1, etherstatspktsover1522octets);
3645 memcpy(old, new, sizeof(struct nig_stats));
3647 memcpy(&(estats->rx_stat_ifhcinbadoctets_hi), &(pstats->mac_stx[1]),
3648 sizeof(struct mac_stx));
3649 estats->brb_drop_hi = pstats->brb_drop_hi;
3650 estats->brb_drop_lo = pstats->brb_drop_lo;
3652 pstats->host_port_stats_start = ++pstats->host_port_stats_end;
3654 nig_timer_max = SHMEM_RD(bp, port_mb[BP_PORT(bp)].stat_nig_timer);
3655 if (nig_timer_max != estats->nig_timer_max) {
3656 estats->nig_timer_max = nig_timer_max;
3657 BNX2X_ERR("NIG timer max (%u)\n", estats->nig_timer_max);
3663 static int bnx2x_storm_stats_update(struct bnx2x *bp)
3665 struct eth_stats_query *stats = bnx2x_sp(bp, fw_stats);
3666 struct tstorm_per_port_stats *tport =
3667 &stats->tstorm_common.port_statistics;
3668 struct host_func_stats *fstats = bnx2x_sp(bp, func_stats);
3669 struct bnx2x_eth_stats *estats = &bp->eth_stats;
3672 memset(&(fstats->total_bytes_received_hi), 0,
3673 sizeof(struct host_func_stats) - 2*sizeof(u32));
3674 estats->error_bytes_received_hi = 0;
3675 estats->error_bytes_received_lo = 0;
3676 estats->etherstatsoverrsizepkts_hi = 0;
3677 estats->etherstatsoverrsizepkts_lo = 0;
3678 estats->no_buff_discard_hi = 0;
3679 estats->no_buff_discard_lo = 0;
3681 for_each_queue(bp, i) {
3682 struct bnx2x_fastpath *fp = &bp->fp[i];
3683 int cl_id = fp->cl_id;
3684 struct tstorm_per_client_stats *tclient =
3685 &stats->tstorm_common.client_statistics[cl_id];
3686 struct tstorm_per_client_stats *old_tclient = &fp->old_tclient;
3687 struct ustorm_per_client_stats *uclient =
3688 &stats->ustorm_common.client_statistics[cl_id];
3689 struct ustorm_per_client_stats *old_uclient = &fp->old_uclient;
3690 struct xstorm_per_client_stats *xclient =
3691 &stats->xstorm_common.client_statistics[cl_id];
3692 struct xstorm_per_client_stats *old_xclient = &fp->old_xclient;
3693 struct bnx2x_eth_q_stats *qstats = &fp->eth_q_stats;
3696 /* are storm stats valid? */
3697 if ((u16)(le16_to_cpu(xclient->stats_counter) + 1) !=
3698 bp->stats_counter) {
3699 DP(BNX2X_MSG_STATS, "[%d] stats not updated by xstorm"
3700 " xstorm counter (%d) != stats_counter (%d)\n",
3701 i, xclient->stats_counter, bp->stats_counter);
3704 if ((u16)(le16_to_cpu(tclient->stats_counter) + 1) !=
3705 bp->stats_counter) {
3706 DP(BNX2X_MSG_STATS, "[%d] stats not updated by tstorm"
3707 " tstorm counter (%d) != stats_counter (%d)\n",
3708 i, tclient->stats_counter, bp->stats_counter);
3711 if ((u16)(le16_to_cpu(uclient->stats_counter) + 1) !=
3712 bp->stats_counter) {
3713 DP(BNX2X_MSG_STATS, "[%d] stats not updated by ustorm"
3714 " ustorm counter (%d) != stats_counter (%d)\n",
3715 i, uclient->stats_counter, bp->stats_counter);
3719 qstats->total_bytes_received_hi =
3720 qstats->valid_bytes_received_hi =
3721 le32_to_cpu(tclient->total_rcv_bytes.hi);
3722 qstats->total_bytes_received_lo =
3723 qstats->valid_bytes_received_lo =
3724 le32_to_cpu(tclient->total_rcv_bytes.lo);
3726 qstats->error_bytes_received_hi =
3727 le32_to_cpu(tclient->rcv_error_bytes.hi);
3728 qstats->error_bytes_received_lo =
3729 le32_to_cpu(tclient->rcv_error_bytes.lo);
3731 ADD_64(qstats->total_bytes_received_hi,
3732 qstats->error_bytes_received_hi,
3733 qstats->total_bytes_received_lo,
3734 qstats->error_bytes_received_lo);
3736 UPDATE_EXTEND_TSTAT(rcv_unicast_pkts,
3737 total_unicast_packets_received);
3738 UPDATE_EXTEND_TSTAT(rcv_multicast_pkts,
3739 total_multicast_packets_received);
3740 UPDATE_EXTEND_TSTAT(rcv_broadcast_pkts,
3741 total_broadcast_packets_received);
3742 UPDATE_EXTEND_TSTAT(packets_too_big_discard,
3743 etherstatsoverrsizepkts);
3744 UPDATE_EXTEND_TSTAT(no_buff_discard, no_buff_discard);
3746 SUB_EXTEND_USTAT(ucast_no_buff_pkts,
3747 total_unicast_packets_received);
3748 SUB_EXTEND_USTAT(mcast_no_buff_pkts,
3749 total_multicast_packets_received);
3750 SUB_EXTEND_USTAT(bcast_no_buff_pkts,
3751 total_broadcast_packets_received);
3752 UPDATE_EXTEND_USTAT(ucast_no_buff_pkts, no_buff_discard);
3753 UPDATE_EXTEND_USTAT(mcast_no_buff_pkts, no_buff_discard);
3754 UPDATE_EXTEND_USTAT(bcast_no_buff_pkts, no_buff_discard);
3756 qstats->total_bytes_transmitted_hi =
3757 le32_to_cpu(xclient->total_sent_bytes.hi);
3758 qstats->total_bytes_transmitted_lo =
3759 le32_to_cpu(xclient->total_sent_bytes.lo);
3761 UPDATE_EXTEND_XSTAT(unicast_pkts_sent,
3762 total_unicast_packets_transmitted);
3763 UPDATE_EXTEND_XSTAT(multicast_pkts_sent,
3764 total_multicast_packets_transmitted);
3765 UPDATE_EXTEND_XSTAT(broadcast_pkts_sent,
3766 total_broadcast_packets_transmitted);
3768 old_tclient->checksum_discard = tclient->checksum_discard;
3769 old_tclient->ttl0_discard = tclient->ttl0_discard;
3771 ADD_64(fstats->total_bytes_received_hi,
3772 qstats->total_bytes_received_hi,
3773 fstats->total_bytes_received_lo,
3774 qstats->total_bytes_received_lo);
3775 ADD_64(fstats->total_bytes_transmitted_hi,
3776 qstats->total_bytes_transmitted_hi,
3777 fstats->total_bytes_transmitted_lo,
3778 qstats->total_bytes_transmitted_lo);
3779 ADD_64(fstats->total_unicast_packets_received_hi,
3780 qstats->total_unicast_packets_received_hi,
3781 fstats->total_unicast_packets_received_lo,
3782 qstats->total_unicast_packets_received_lo);
3783 ADD_64(fstats->total_multicast_packets_received_hi,
3784 qstats->total_multicast_packets_received_hi,
3785 fstats->total_multicast_packets_received_lo,
3786 qstats->total_multicast_packets_received_lo);
3787 ADD_64(fstats->total_broadcast_packets_received_hi,
3788 qstats->total_broadcast_packets_received_hi,
3789 fstats->total_broadcast_packets_received_lo,
3790 qstats->total_broadcast_packets_received_lo);
3791 ADD_64(fstats->total_unicast_packets_transmitted_hi,
3792 qstats->total_unicast_packets_transmitted_hi,
3793 fstats->total_unicast_packets_transmitted_lo,
3794 qstats->total_unicast_packets_transmitted_lo);
3795 ADD_64(fstats->total_multicast_packets_transmitted_hi,
3796 qstats->total_multicast_packets_transmitted_hi,
3797 fstats->total_multicast_packets_transmitted_lo,
3798 qstats->total_multicast_packets_transmitted_lo);
3799 ADD_64(fstats->total_broadcast_packets_transmitted_hi,
3800 qstats->total_broadcast_packets_transmitted_hi,
3801 fstats->total_broadcast_packets_transmitted_lo,
3802 qstats->total_broadcast_packets_transmitted_lo);
3803 ADD_64(fstats->valid_bytes_received_hi,
3804 qstats->valid_bytes_received_hi,
3805 fstats->valid_bytes_received_lo,
3806 qstats->valid_bytes_received_lo);
3808 ADD_64(estats->error_bytes_received_hi,
3809 qstats->error_bytes_received_hi,
3810 estats->error_bytes_received_lo,
3811 qstats->error_bytes_received_lo);
3812 ADD_64(estats->etherstatsoverrsizepkts_hi,
3813 qstats->etherstatsoverrsizepkts_hi,
3814 estats->etherstatsoverrsizepkts_lo,
3815 qstats->etherstatsoverrsizepkts_lo);
3816 ADD_64(estats->no_buff_discard_hi, qstats->no_buff_discard_hi,
3817 estats->no_buff_discard_lo, qstats->no_buff_discard_lo);
3820 ADD_64(fstats->total_bytes_received_hi,
3821 estats->rx_stat_ifhcinbadoctets_hi,
3822 fstats->total_bytes_received_lo,
3823 estats->rx_stat_ifhcinbadoctets_lo);
3825 memcpy(estats, &(fstats->total_bytes_received_hi),
3826 sizeof(struct host_func_stats) - 2*sizeof(u32));
3828 ADD_64(estats->etherstatsoverrsizepkts_hi,
3829 estats->rx_stat_dot3statsframestoolong_hi,
3830 estats->etherstatsoverrsizepkts_lo,
3831 estats->rx_stat_dot3statsframestoolong_lo);
3832 ADD_64(estats->error_bytes_received_hi,
3833 estats->rx_stat_ifhcinbadoctets_hi,
3834 estats->error_bytes_received_lo,
3835 estats->rx_stat_ifhcinbadoctets_lo);
3838 estats->mac_filter_discard =
3839 le32_to_cpu(tport->mac_filter_discard);
3840 estats->xxoverflow_discard =
3841 le32_to_cpu(tport->xxoverflow_discard);
3842 estats->brb_truncate_discard =
3843 le32_to_cpu(tport->brb_truncate_discard);
3844 estats->mac_discard = le32_to_cpu(tport->mac_discard);
3847 fstats->host_func_stats_start = ++fstats->host_func_stats_end;
3849 bp->stats_pending = 0;
3854 static void bnx2x_net_stats_update(struct bnx2x *bp)
3856 struct bnx2x_eth_stats *estats = &bp->eth_stats;
3857 struct net_device_stats *nstats = &bp->dev->stats;
3860 nstats->rx_packets =
3861 bnx2x_hilo(&estats->total_unicast_packets_received_hi) +
3862 bnx2x_hilo(&estats->total_multicast_packets_received_hi) +
3863 bnx2x_hilo(&estats->total_broadcast_packets_received_hi);
3865 nstats->tx_packets =
3866 bnx2x_hilo(&estats->total_unicast_packets_transmitted_hi) +
3867 bnx2x_hilo(&estats->total_multicast_packets_transmitted_hi) +
3868 bnx2x_hilo(&estats->total_broadcast_packets_transmitted_hi);
3870 nstats->rx_bytes = bnx2x_hilo(&estats->total_bytes_received_hi);
3872 nstats->tx_bytes = bnx2x_hilo(&estats->total_bytes_transmitted_hi);
3874 nstats->rx_dropped = estats->mac_discard;
3875 for_each_queue(bp, i)
3876 nstats->rx_dropped +=
3877 le32_to_cpu(bp->fp[i].old_tclient.checksum_discard);
3879 nstats->tx_dropped = 0;
3882 bnx2x_hilo(&estats->total_multicast_packets_received_hi);
3884 nstats->collisions =
3885 bnx2x_hilo(&estats->tx_stat_etherstatscollisions_hi);
3887 nstats->rx_length_errors =
3888 bnx2x_hilo(&estats->rx_stat_etherstatsundersizepkts_hi) +
3889 bnx2x_hilo(&estats->etherstatsoverrsizepkts_hi);
3890 nstats->rx_over_errors = bnx2x_hilo(&estats->brb_drop_hi) +
3891 bnx2x_hilo(&estats->brb_truncate_hi);
3892 nstats->rx_crc_errors =
3893 bnx2x_hilo(&estats->rx_stat_dot3statsfcserrors_hi);
3894 nstats->rx_frame_errors =
3895 bnx2x_hilo(&estats->rx_stat_dot3statsalignmenterrors_hi);
3896 nstats->rx_fifo_errors = bnx2x_hilo(&estats->no_buff_discard_hi);
3897 nstats->rx_missed_errors = estats->xxoverflow_discard;
3899 nstats->rx_errors = nstats->rx_length_errors +
3900 nstats->rx_over_errors +
3901 nstats->rx_crc_errors +
3902 nstats->rx_frame_errors +
3903 nstats->rx_fifo_errors +
3904 nstats->rx_missed_errors;
3906 nstats->tx_aborted_errors =
3907 bnx2x_hilo(&estats->tx_stat_dot3statslatecollisions_hi) +
3908 bnx2x_hilo(&estats->tx_stat_dot3statsexcessivecollisions_hi);
3909 nstats->tx_carrier_errors =
3910 bnx2x_hilo(&estats->rx_stat_dot3statscarriersenseerrors_hi);
3911 nstats->tx_fifo_errors = 0;
3912 nstats->tx_heartbeat_errors = 0;
3913 nstats->tx_window_errors = 0;
3915 nstats->tx_errors = nstats->tx_aborted_errors +
3916 nstats->tx_carrier_errors +
3917 bnx2x_hilo(&estats->tx_stat_dot3statsinternalmactransmiterrors_hi);
3920 static void bnx2x_drv_stats_update(struct bnx2x *bp)
3922 struct bnx2x_eth_stats *estats = &bp->eth_stats;
3925 estats->driver_xoff = 0;
3926 estats->rx_err_discard_pkt = 0;
3927 estats->rx_skb_alloc_failed = 0;
3928 estats->hw_csum_err = 0;
3929 for_each_queue(bp, i) {
3930 struct bnx2x_eth_q_stats *qstats = &bp->fp[i].eth_q_stats;
3932 estats->driver_xoff += qstats->driver_xoff;
3933 estats->rx_err_discard_pkt += qstats->rx_err_discard_pkt;
3934 estats->rx_skb_alloc_failed += qstats->rx_skb_alloc_failed;
3935 estats->hw_csum_err += qstats->hw_csum_err;
3939 static void bnx2x_stats_update(struct bnx2x *bp)
3941 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3943 if (*stats_comp != DMAE_COMP_VAL)
3947 bnx2x_hw_stats_update(bp);
3949 if (bnx2x_storm_stats_update(bp) && (bp->stats_pending++ == 3)) {
3950 BNX2X_ERR("storm stats were not updated for 3 times\n");
3955 bnx2x_net_stats_update(bp);
3956 bnx2x_drv_stats_update(bp);
3958 if (bp->msglevel & NETIF_MSG_TIMER) {
3959 struct tstorm_per_client_stats *old_tclient =
3960 &bp->fp->old_tclient;
3961 struct bnx2x_eth_q_stats *qstats = &bp->fp->eth_q_stats;
3962 struct bnx2x_eth_stats *estats = &bp->eth_stats;
3963 struct net_device_stats *nstats = &bp->dev->stats;
3966 printk(KERN_DEBUG "%s:\n", bp->dev->name);
3967 printk(KERN_DEBUG " tx avail (%4x) tx hc idx (%x)"
3969 bnx2x_tx_avail(bp->fp),
3970 le16_to_cpu(*bp->fp->tx_cons_sb), nstats->tx_packets);
3971 printk(KERN_DEBUG " rx usage (%4x) rx hc idx (%x)"
3973 (u16)(le16_to_cpu(*bp->fp->rx_cons_sb) -
3974 bp->fp->rx_comp_cons),
3975 le16_to_cpu(*bp->fp->rx_cons_sb), nstats->rx_packets);
3976 printk(KERN_DEBUG " %s (Xoff events %u) brb drops %u "
3977 "brb truncate %u\n",
3978 (netif_queue_stopped(bp->dev) ? "Xoff" : "Xon"),
3979 qstats->driver_xoff,
3980 estats->brb_drop_lo, estats->brb_truncate_lo);
3981 printk(KERN_DEBUG "tstats: checksum_discard %u "
3982 "packets_too_big_discard %lu no_buff_discard %lu "
3983 "mac_discard %u mac_filter_discard %u "
3984 "xxovrflow_discard %u brb_truncate_discard %u "
3985 "ttl0_discard %u\n",
3986 old_tclient->checksum_discard,
3987 bnx2x_hilo(&qstats->etherstatsoverrsizepkts_hi),
3988 bnx2x_hilo(&qstats->no_buff_discard_hi),
3989 estats->mac_discard, estats->mac_filter_discard,
3990 estats->xxoverflow_discard, estats->brb_truncate_discard,
3991 old_tclient->ttl0_discard);
3993 for_each_queue(bp, i) {
3994 printk(KERN_DEBUG "[%d]: %lu\t%lu\t%lu\n", i,
3995 bnx2x_fp(bp, i, tx_pkt),
3996 bnx2x_fp(bp, i, rx_pkt),
3997 bnx2x_fp(bp, i, rx_calls));
4001 bnx2x_hw_stats_post(bp);
4002 bnx2x_storm_stats_post(bp);
4005 static void bnx2x_port_stats_stop(struct bnx2x *bp)
4007 struct dmae_command *dmae;
4009 int loader_idx = PMF_DMAE_C(bp);
4010 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
4012 bp->executer_idx = 0;
4014 opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
4016 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
4018 DMAE_CMD_ENDIANITY_B_DW_SWAP |
4020 DMAE_CMD_ENDIANITY_DW_SWAP |
4022 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
4023 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
4025 if (bp->port.port_stx) {
4027 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4029 dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
4031 dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
4032 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
4033 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
4034 dmae->dst_addr_lo = bp->port.port_stx >> 2;
4035 dmae->dst_addr_hi = 0;
4036 dmae->len = sizeof(struct host_port_stats) >> 2;
4038 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
4039 dmae->comp_addr_hi = 0;
4042 dmae->comp_addr_lo =
4043 U64_LO(bnx2x_sp_mapping(bp, stats_comp));
4044 dmae->comp_addr_hi =
4045 U64_HI(bnx2x_sp_mapping(bp, stats_comp));
4046 dmae->comp_val = DMAE_COMP_VAL;
4054 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4055 dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
4056 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
4057 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
4058 dmae->dst_addr_lo = bp->func_stx >> 2;
4059 dmae->dst_addr_hi = 0;
4060 dmae->len = sizeof(struct host_func_stats) >> 2;
4061 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
4062 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
4063 dmae->comp_val = DMAE_COMP_VAL;
4069 static void bnx2x_stats_stop(struct bnx2x *bp)
4073 bnx2x_stats_comp(bp);
4076 update = (bnx2x_hw_stats_update(bp) == 0);
4078 update |= (bnx2x_storm_stats_update(bp) == 0);
4081 bnx2x_net_stats_update(bp);
4084 bnx2x_port_stats_stop(bp);
4086 bnx2x_hw_stats_post(bp);
4087 bnx2x_stats_comp(bp);
4091 static void bnx2x_stats_do_nothing(struct bnx2x *bp)
4095 static const struct {
4096 void (*action)(struct bnx2x *bp);
4097 enum bnx2x_stats_state next_state;
4098 } bnx2x_stats_stm[STATS_STATE_MAX][STATS_EVENT_MAX] = {
4101 /* DISABLED PMF */ {bnx2x_stats_pmf_update, STATS_STATE_DISABLED},
4102 /* LINK_UP */ {bnx2x_stats_start, STATS_STATE_ENABLED},
4103 /* UPDATE */ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED},
4104 /* STOP */ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED}
4107 /* ENABLED PMF */ {bnx2x_stats_pmf_start, STATS_STATE_ENABLED},
4108 /* LINK_UP */ {bnx2x_stats_restart, STATS_STATE_ENABLED},
4109 /* UPDATE */ {bnx2x_stats_update, STATS_STATE_ENABLED},
4110 /* STOP */ {bnx2x_stats_stop, STATS_STATE_DISABLED}
4114 static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event)
4116 enum bnx2x_stats_state state = bp->stats_state;
4118 bnx2x_stats_stm[state][event].action(bp);
4119 bp->stats_state = bnx2x_stats_stm[state][event].next_state;
4121 if ((event != STATS_EVENT_UPDATE) || (bp->msglevel & NETIF_MSG_TIMER))
4122 DP(BNX2X_MSG_STATS, "state %d -> event %d -> state %d\n",
4123 state, event, bp->stats_state);
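/*
 * Editor's sketch (illustrative only): the table-driven state machine
 * shape used by bnx2x_stats_handle() above -- one {action, next_state}
 * cell per (state, event) pair, so dispatch is two array lookups and an
 * indirect call. States, events and demo_* names are hypothetical.
 */
#include <stdio.h>

enum demo_state { DEMO_DISABLED, DEMO_ENABLED, DEMO_STATE_MAX };
enum demo_event { DEMO_EV_LINK_UP, DEMO_EV_STOP, DEMO_EVENT_MAX };

static void demo_nop(void)   { }
static void demo_start(void) { printf("start stats\n"); }
static void demo_stop(void)  { printf("stop stats\n"); }

static const struct {
	void (*action)(void);
	enum demo_state next_state;
} demo_stm[DEMO_STATE_MAX][DEMO_EVENT_MAX] = {
	[DEMO_DISABLED] = {
		[DEMO_EV_LINK_UP] = { demo_start, DEMO_ENABLED },
		[DEMO_EV_STOP]    = { demo_nop,   DEMO_DISABLED },
	},
	[DEMO_ENABLED] = {
		[DEMO_EV_LINK_UP] = { demo_nop,   DEMO_ENABLED },
		[DEMO_EV_STOP]    = { demo_stop,  DEMO_DISABLED },
	},
};

static enum demo_state demo_handle(enum demo_state s, enum demo_event e)
{
	demo_stm[s][e].action();
	return demo_stm[s][e].next_state;
}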
4126 static void bnx2x_timer(unsigned long data)
4128 struct bnx2x *bp = (struct bnx2x *) data;
4130 if (!netif_running(bp->dev))
4133 if (atomic_read(&bp->intr_sem) != 0)
4137 struct bnx2x_fastpath *fp = &bp->fp[0];
4140 bnx2x_tx_int(fp, 1000);
4141 rc = bnx2x_rx_int(fp, 1000);
4144 if (!BP_NOMCP(bp)) {
4145 int func = BP_FUNC(bp);
4149 ++bp->fw_drv_pulse_wr_seq;
4150 bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK;
4151 /* TBD - add SYSTEM_TIME */
4152 drv_pulse = bp->fw_drv_pulse_wr_seq;
4153 SHMEM_WR(bp, func_mb[func].drv_pulse_mb, drv_pulse);
4155 mcp_pulse = (SHMEM_RD(bp, func_mb[func].mcp_pulse_mb) &
4156 MCP_PULSE_SEQ_MASK);
4157 /* The delta between driver pulse and mcp response
4158 * should be 1 (before mcp response) or 0 (after mcp response)
4160 if ((drv_pulse != mcp_pulse) &&
4161 (drv_pulse != ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK))) {
4162 /* someone lost a heartbeat... */
4163 BNX2X_ERR("drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
4164 drv_pulse, mcp_pulse);
4168 if ((bp->state == BNX2X_STATE_OPEN) ||
4169 (bp->state == BNX2X_STATE_DISABLED))
4170 bnx2x_stats_handle(bp, STATS_EVENT_UPDATE);
4173 mod_timer(&bp->timer, jiffies + bp->current_interval);
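/* Heartbeat example: if the driver just wrote pulse seq 0x12, a live
 * MCP is expected to show 0x12 (already answered) or 0x11 (one
 * behind); any other delta means someone missed a beat and triggers
 * the error print above.
 */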
4176 /* end of Statistics */
4181 * nic init service functions
4184 static void bnx2x_zero_sb(struct bnx2x *bp, int sb_id)
4186 int port = BP_PORT(bp);
4188 bnx2x_init_fill(bp, BAR_USTRORM_INTMEM +
4189 USTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), 0,
4190 sizeof(struct ustorm_status_block)/4);
4191 bnx2x_init_fill(bp, BAR_CSTRORM_INTMEM +
4192 CSTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), 0,
4193 sizeof(struct cstorm_status_block)/4);
4196 static void bnx2x_init_sb(struct bnx2x *bp, struct host_status_block *sb,
4197 dma_addr_t mapping, int sb_id)
4199 int port = BP_PORT(bp);
4200 int func = BP_FUNC(bp);
4205 section = ((u64)mapping) + offsetof(struct host_status_block,
4207 sb->u_status_block.status_block_id = sb_id;
4209 REG_WR(bp, BAR_USTRORM_INTMEM +
4210 USTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id), U64_LO(section));
4211 REG_WR(bp, BAR_USTRORM_INTMEM +
4212 ((USTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id)) + 4),
4214 REG_WR8(bp, BAR_USTRORM_INTMEM + FP_USB_FUNC_OFF +
4215 USTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), func);
4217 for (index = 0; index < HC_USTORM_SB_NUM_INDICES; index++)
4218 REG_WR16(bp, BAR_USTRORM_INTMEM +
4219 USTORM_SB_HC_DISABLE_OFFSET(port, sb_id, index), 1);
4222 section = ((u64)mapping) + offsetof(struct host_status_block,
4224 sb->c_status_block.status_block_id = sb_id;
4226 REG_WR(bp, BAR_CSTRORM_INTMEM +
4227 CSTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id), U64_LO(section));
4228 REG_WR(bp, BAR_CSTRORM_INTMEM +
4229 ((CSTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id)) + 4),
4231 REG_WR8(bp, BAR_CSTRORM_INTMEM + FP_CSB_FUNC_OFF +
4232 CSTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), func);
4234 for (index = 0; index < HC_CSTORM_SB_NUM_INDICES; index++)
4235 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4236 CSTORM_SB_HC_DISABLE_OFFSET(port, sb_id, index), 1);
4238 bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
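/* A non-default status block has a USTORM and a CSTORM section; init
 * means telling each storm the host DMA address of its section,
 * tagging the block with the owning function, and writing 1 to every
 * HC_DISABLE index so that nothing fires before
 * bnx2x_update_coalesce() programs real timeout values.
 */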
4241 static void bnx2x_zero_def_sb(struct bnx2x *bp)
4243 int func = BP_FUNC(bp);
4245 bnx2x_init_fill(bp, BAR_USTRORM_INTMEM +
4246 USTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4247 sizeof(struct ustorm_def_status_block)/4);
4248 bnx2x_init_fill(bp, BAR_CSTRORM_INTMEM +
4249 CSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4250 sizeof(struct cstorm_def_status_block)/4);
4251 bnx2x_init_fill(bp, BAR_XSTRORM_INTMEM +
4252 XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4253 sizeof(struct xstorm_def_status_block)/4);
4254 bnx2x_init_fill(bp, BAR_TSTRORM_INTMEM +
4255 TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4256 sizeof(struct tstorm_def_status_block)/4);
4259 static void bnx2x_init_def_sb(struct bnx2x *bp,
4260 struct host_def_status_block *def_sb,
4261 dma_addr_t mapping, int sb_id)
4263 int port = BP_PORT(bp);
4264 int func = BP_FUNC(bp);
4265 int index, val, reg_offset;
4269 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4270 atten_status_block);
4271 def_sb->atten_status_block.status_block_id = sb_id;
4275 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
4276 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
4278 for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
4279 bp->attn_group[index].sig[0] = REG_RD(bp,
4280 reg_offset + 0x10*index);
4281 bp->attn_group[index].sig[1] = REG_RD(bp,
4282 reg_offset + 0x4 + 0x10*index);
4283 bp->attn_group[index].sig[2] = REG_RD(bp,
4284 reg_offset + 0x8 + 0x10*index);
4285 bp->attn_group[index].sig[3] = REG_RD(bp,
4286 reg_offset + 0xc + 0x10*index);
4289 reg_offset = (port ? HC_REG_ATTN_MSG1_ADDR_L :
4290 HC_REG_ATTN_MSG0_ADDR_L);
4292 REG_WR(bp, reg_offset, U64_LO(section));
4293 REG_WR(bp, reg_offset + 4, U64_HI(section));
4295 reg_offset = (port ? HC_REG_ATTN_NUM_P1 : HC_REG_ATTN_NUM_P0);
4297 val = REG_RD(bp, reg_offset);
4299 REG_WR(bp, reg_offset, val);
4302 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4303 u_def_status_block);
4304 def_sb->u_def_status_block.status_block_id = sb_id;
4306 REG_WR(bp, BAR_USTRORM_INTMEM +
4307 USTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
4308 REG_WR(bp, BAR_USTRORM_INTMEM +
4309 ((USTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
4311 REG_WR8(bp, BAR_USTRORM_INTMEM + DEF_USB_FUNC_OFF +
4312 USTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4314 for (index = 0; index < HC_USTORM_DEF_SB_NUM_INDICES; index++)
4315 REG_WR16(bp, BAR_USTRORM_INTMEM +
4316 USTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
4319 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4320 c_def_status_block);
4321 def_sb->c_def_status_block.status_block_id = sb_id;
4323 REG_WR(bp, BAR_CSTRORM_INTMEM +
4324 CSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
4325 REG_WR(bp, BAR_CSTRORM_INTMEM +
4326 ((CSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
4328 REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_CSB_FUNC_OFF +
4329 CSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4331 for (index = 0; index < HC_CSTORM_DEF_SB_NUM_INDICES; index++)
4332 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4333 CSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
4336 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4337 t_def_status_block);
4338 def_sb->t_def_status_block.status_block_id = sb_id;
4340 REG_WR(bp, BAR_TSTRORM_INTMEM +
4341 TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
4342 REG_WR(bp, BAR_TSTRORM_INTMEM +
4343 ((TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
4345 REG_WR8(bp, BAR_TSTRORM_INTMEM + DEF_TSB_FUNC_OFF +
4346 TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4348 for (index = 0; index < HC_TSTORM_DEF_SB_NUM_INDICES; index++)
4349 REG_WR16(bp, BAR_TSTRORM_INTMEM +
4350 TSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
4353 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4354 x_def_status_block);
4355 def_sb->x_def_status_block.status_block_id = sb_id;
4357 REG_WR(bp, BAR_XSTRORM_INTMEM +
4358 XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
4359 REG_WR(bp, BAR_XSTRORM_INTMEM +
4360 ((XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
4362 REG_WR8(bp, BAR_XSTRORM_INTMEM + DEF_XSB_FUNC_OFF +
4363 XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4365 for (index = 0; index < HC_XSTORM_DEF_SB_NUM_INDICES; index++)
4366 REG_WR16(bp, BAR_XSTRORM_INTMEM +
4367 XSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
4369 bp->stats_pending = 0;
4370 bp->set_mac_pending = 0;
4372 bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
4375 static void bnx2x_update_coalesce(struct bnx2x *bp)
4377 int port = BP_PORT(bp);
4380 for_each_queue(bp, i) {
4381 int sb_id = bp->fp[i].sb_id;
4383 /* HC_INDEX_U_ETH_RX_CQ_CONS */
4384 REG_WR8(bp, BAR_USTRORM_INTMEM +
4385 USTORM_SB_HC_TIMEOUT_OFFSET(port, sb_id,
4386 U_SB_ETH_RX_CQ_INDEX),
4388 REG_WR16(bp, BAR_USTRORM_INTMEM +
4389 USTORM_SB_HC_DISABLE_OFFSET(port, sb_id,
4390 U_SB_ETH_RX_CQ_INDEX),
4391 bp->rx_ticks ? 0 : 1);
4393 /* HC_INDEX_C_ETH_TX_CQ_CONS */
4394 REG_WR8(bp, BAR_CSTRORM_INTMEM +
4395 CSTORM_SB_HC_TIMEOUT_OFFSET(port, sb_id,
4396 C_SB_ETH_TX_CQ_INDEX),
4398 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4399 CSTORM_SB_HC_DISABLE_OFFSET(port, sb_id,
4400 C_SB_ETH_TX_CQ_INDEX),
4401 bp->tx_ticks ? 0 : 1);
4405 static inline void bnx2x_free_tpa_pool(struct bnx2x *bp,
4406 struct bnx2x_fastpath *fp, int last)
4410 for (i = 0; i < last; i++) {
4411 struct sw_rx_bd *rx_buf = &(fp->tpa_pool[i]);
4412 struct sk_buff *skb = rx_buf->skb;
4415 DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i);
4419 if (fp->tpa_state[i] == BNX2X_TPA_START)
4420 pci_unmap_single(bp->pdev,
4421 pci_unmap_addr(rx_buf, mapping),
4423 PCI_DMA_FROMDEVICE);
4430 static void bnx2x_init_rx_rings(struct bnx2x *bp)
4432 int func = BP_FUNC(bp);
4433 int max_agg_queues = CHIP_IS_E1(bp) ? ETH_MAX_AGGREGATION_QUEUES_E1 :
4434 ETH_MAX_AGGREGATION_QUEUES_E1H;
4435 u16 ring_prod, cqe_ring_prod;
4438 bp->rx_buf_size = bp->dev->mtu + ETH_OVREHEAD + BNX2X_RX_ALIGN;
4440 "mtu %d rx_buf_size %d\n", bp->dev->mtu, bp->rx_buf_size);
4442 if (bp->flags & TPA_ENABLE_FLAG) {
4444 for_each_rx_queue(bp, j) {
4445 struct bnx2x_fastpath *fp = &bp->fp[j];
4447 for (i = 0; i < max_agg_queues; i++) {
4448 fp->tpa_pool[i].skb =
4449 netdev_alloc_skb(bp->dev, bp->rx_buf_size);
4450 if (!fp->tpa_pool[i].skb) {
4451 BNX2X_ERR("Failed to allocate TPA "
4452 "skb pool for queue[%d] - "
4453 "disabling TPA on this "
4455 bnx2x_free_tpa_pool(bp, fp, i);
4456 fp->disable_tpa = 1;
4459 pci_unmap_addr_set((struct sw_rx_bd *)
4460 &fp->tpa_pool[i],
4462 fp->tpa_state[i] = BNX2X_TPA_STOP;
4467 for_each_rx_queue(bp, j) {
4468 struct bnx2x_fastpath *fp = &bp->fp[j];
4471 fp->rx_cons_sb = BNX2X_RX_SB_INDEX;
4472 fp->rx_bd_cons_sb = BNX2X_RX_SB_BD_INDEX;
4474 /* "next page" elements initialization */
4476 for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
4477 struct eth_rx_sge *sge;
4479 sge = &fp->rx_sge_ring[RX_SGE_CNT * i - 2];
4481 cpu_to_le32(U64_HI(fp->rx_sge_mapping +
4482 BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
4484 cpu_to_le32(U64_LO(fp->rx_sge_mapping +
4485 BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
4488 bnx2x_init_sge_ring_bit_mask(fp);
4491 for (i = 1; i <= NUM_RX_RINGS; i++) {
4492 struct eth_rx_bd *rx_bd;
4494 rx_bd = &fp->rx_desc_ring[RX_DESC_CNT * i - 2];
4496 cpu_to_le32(U64_HI(fp->rx_desc_mapping +
4497 BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
4499 cpu_to_le32(U64_LO(fp->rx_desc_mapping +
4500 BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
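/* Ring chaining: the last two eth_rx_bd slots of each page (hence
 * "RX_DESC_CNT * i - 2") hold the 64-bit DMA address of the next page
 * instead of a buffer, with "% NUM_RX_RINGS" wrapping the last page
 * back to the first.  The completion queue below reserves only its
 * last slot ("- 1") since eth_rx_cqe_next_page fits in a single CQE.
 */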
4504 for (i = 1; i <= NUM_RCQ_RINGS; i++) {
4505 struct eth_rx_cqe_next_page *nextpg;
4507 nextpg = (struct eth_rx_cqe_next_page *)
4508 &fp->rx_comp_ring[RCQ_DESC_CNT * i - 1];
4510 cpu_to_le32(U64_HI(fp->rx_comp_mapping +
4511 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
4513 cpu_to_le32(U64_LO(fp->rx_comp_mapping +
4514 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
4517 /* Allocate SGEs and initialize the ring elements */
4518 for (i = 0, ring_prod = 0;
4519 i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {
4521 if (bnx2x_alloc_rx_sge(bp, fp, ring_prod) < 0) {
4522 BNX2X_ERR("was only able to allocate "
4524 BNX2X_ERR("disabling TPA for queue[%d]\n", j);
4525 /* Cleanup already allocated elements */
4526 bnx2x_free_rx_sge_range(bp, fp, ring_prod);
4527 bnx2x_free_tpa_pool(bp, fp, max_agg_queues);
4528 fp->disable_tpa = 1;
4532 ring_prod = NEXT_SGE_IDX(ring_prod);
4534 fp->rx_sge_prod = ring_prod;
4536 /* Allocate BDs and initialize BD ring */
4537 fp->rx_comp_cons = 0;
4538 cqe_ring_prod = ring_prod = 0;
4539 for (i = 0; i < bp->rx_ring_size; i++) {
4540 if (bnx2x_alloc_rx_skb(bp, fp, ring_prod) < 0) {
4541 BNX2X_ERR("was only able to allocate "
4542 "%d rx skbs on queue[%d]\n", i, j);
4543 fp->eth_q_stats.rx_skb_alloc_failed++;
4546 ring_prod = NEXT_RX_IDX(ring_prod);
4547 cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
4548 WARN_ON(ring_prod <= i);
4551 fp->rx_bd_prod = ring_prod;
4552 /* must not have more available CQEs than BDs */
4553 fp->rx_comp_prod = min((u16)(NUM_RCQ_RINGS*RCQ_DESC_CNT),
4555 fp->rx_pkt = fp->rx_calls = 0;
4558 * this will generate an interrupt (to the TSTORM)
4559 * and must only be done after the chip is initialized
4561 bnx2x_update_rx_prod(bp, fp, ring_prod, fp->rx_comp_prod,
4566 REG_WR(bp, BAR_USTRORM_INTMEM +
4567 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
4568 U64_LO(fp->rx_comp_mapping));
4569 REG_WR(bp, BAR_USTRORM_INTMEM +
4570 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
4571 U64_HI(fp->rx_comp_mapping));
4575 static void bnx2x_init_tx_ring(struct bnx2x *bp)
4579 for_each_tx_queue(bp, j) {
4580 struct bnx2x_fastpath *fp = &bp->fp[j];
4582 for (i = 1; i <= NUM_TX_RINGS; i++) {
4583 struct eth_tx_bd *tx_bd =
4584 &fp->tx_desc_ring[TX_DESC_CNT * i - 1];
4587 cpu_to_le32(U64_HI(fp->tx_desc_mapping +
4588 BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
4590 cpu_to_le32(U64_LO(fp->tx_desc_mapping +
4591 BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
4594 fp->tx_pkt_prod = 0;
4595 fp->tx_pkt_cons = 0;
4598 fp->tx_cons_sb = BNX2X_TX_SB_INDEX;
4603 static void bnx2x_init_sp_ring(struct bnx2x *bp)
4605 int func = BP_FUNC(bp);
4607 spin_lock_init(&bp->spq_lock);
4609 bp->spq_left = MAX_SPQ_PENDING;
4610 bp->spq_prod_idx = 0;
4611 bp->dsb_sp_prod = BNX2X_SP_DSB_INDEX;
4612 bp->spq_prod_bd = bp->spq;
4613 bp->spq_last_bd = bp->spq_prod_bd + MAX_SP_DESC_CNT;
4615 REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func),
4616 U64_LO(bp->spq_mapping));
4618 XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func) + 4,
4619 U64_HI(bp->spq_mapping));
4621 REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PROD_OFFSET(func),
4625 static void bnx2x_init_context(struct bnx2x *bp)
4629 for_each_queue(bp, i) {
4630 struct eth_context *context = bnx2x_sp(bp, context[i].eth);
4631 struct bnx2x_fastpath *fp = &bp->fp[i];
4632 u8 cl_id = fp->cl_id;
4633 u8 sb_id = FP_SB_ID(fp);
4635 context->ustorm_st_context.common.sb_index_numbers =
4636 BNX2X_RX_SB_INDEX_NUM;
4637 context->ustorm_st_context.common.clientId = FP_CL_ID(fp);
4638 context->ustorm_st_context.common.status_block_id = sb_id;
4639 context->ustorm_st_context.common.flags =
4640 (USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_MC_ALIGNMENT |
4641 USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_STATISTICS);
4642 context->ustorm_st_context.common.statistics_counter_id =
4644 context->ustorm_st_context.common.mc_alignment_log_size =
4645 BNX2X_RX_ALIGN_SHIFT;
4646 context->ustorm_st_context.common.bd_buff_size =
4648 context->ustorm_st_context.common.bd_page_base_hi =
4649 U64_HI(fp->rx_desc_mapping);
4650 context->ustorm_st_context.common.bd_page_base_lo =
4651 U64_LO(fp->rx_desc_mapping);
4652 if (!fp->disable_tpa) {
4653 context->ustorm_st_context.common.flags |=
4654 (USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_TPA |
4655 USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_SGE_RING);
4656 context->ustorm_st_context.common.sge_buff_size =
4657 (u16)min((u32)SGE_PAGE_SIZE*PAGES_PER_SGE,
4659 context->ustorm_st_context.common.sge_page_base_hi =
4660 U64_HI(fp->rx_sge_mapping);
4661 context->ustorm_st_context.common.sge_page_base_lo =
4662 U64_LO(fp->rx_sge_mapping);
4665 context->ustorm_ag_context.cdu_usage =
4666 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
4667 CDU_REGION_NUMBER_UCM_AG,
4668 ETH_CONNECTION_TYPE);
4670 context->xstorm_st_context.tx_bd_page_base_hi =
4671 U64_HI(fp->tx_desc_mapping);
4672 context->xstorm_st_context.tx_bd_page_base_lo =
4673 U64_LO(fp->tx_desc_mapping);
4674 context->xstorm_st_context.db_data_addr_hi =
4675 U64_HI(fp->tx_prods_mapping);
4676 context->xstorm_st_context.db_data_addr_lo =
4677 U64_LO(fp->tx_prods_mapping);
4678 context->xstorm_st_context.statistics_data = (fp->cl_id |
4679 XSTORM_ETH_ST_CONTEXT_STATISTICS_ENABLE);
4680 context->cstorm_st_context.sb_index_number =
4681 C_SB_ETH_TX_CQ_INDEX;
4682 context->cstorm_st_context.status_block_id = sb_id;
4684 context->xstorm_ag_context.cdu_reserved =
4685 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
4686 CDU_REGION_NUMBER_XCM_AG,
4687 ETH_CONNECTION_TYPE);
4691 static void bnx2x_init_ind_table(struct bnx2x *bp)
4693 int func = BP_FUNC(bp);
4696 if (bp->multi_mode == ETH_RSS_MODE_DISABLED)
4700 "Initializing indirection table multi_mode %d\n", bp->multi_mode);
4701 for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++)
4702 REG_WR8(bp, BAR_TSTRORM_INTMEM +
4703 TSTORM_INDIRECTION_TABLE_OFFSET(func) + i,
4704 BP_CL_ID(bp) + (i % bp->num_rx_queues));
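/* The indirection table is filled round-robin: entry i steers its RSS
 * hash bucket to client (BP_CL_ID(bp) + i % num_rx_queues), spreading
 * buckets evenly over this function's enabled rx queues.
 */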
4707 static void bnx2x_set_client_config(struct bnx2x *bp)
4709 struct tstorm_eth_client_config tstorm_client = {0};
4710 int port = BP_PORT(bp);
4713 tstorm_client.mtu = bp->dev->mtu;
4714 tstorm_client.config_flags =
4715 (TSTORM_ETH_CLIENT_CONFIG_STATSITICS_ENABLE |
4716 TSTORM_ETH_CLIENT_CONFIG_E1HOV_REM_ENABLE);
4718 if (bp->rx_mode && bp->vlgrp && (bp->flags & HW_VLAN_RX_FLAG)) {
4719 tstorm_client.config_flags |=
4720 TSTORM_ETH_CLIENT_CONFIG_VLAN_REM_ENABLE;
4721 DP(NETIF_MSG_IFUP, "vlan removal enabled\n");
4725 if (bp->flags & TPA_ENABLE_FLAG) {
4726 tstorm_client.max_sges_for_packet =
4727 SGE_PAGE_ALIGN(tstorm_client.mtu) >> SGE_PAGE_SHIFT;
4728 tstorm_client.max_sges_for_packet =
4729 ((tstorm_client.max_sges_for_packet +
4730 PAGES_PER_SGE - 1) & (~(PAGES_PER_SGE - 1))) >>
4731 PAGES_PER_SGE_SHIFT;
4733 tstorm_client.config_flags |=
4734 TSTORM_ETH_CLIENT_CONFIG_ENABLE_SGE_RING;
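/* max_sges_for_packet, as a sketch of the arithmetic above: round the
 * MTU up to whole SGE pages, then round that page count up to whole
 * SGE elements.  E.g. assuming 4K SGE pages, a 9000-byte MTU needs
 * ceil(9000/4096) = 3 pages before the SGE-element rounding.
 */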
4737 for_each_queue(bp, i) {
4738 tstorm_client.statistics_counter_id = bp->fp[i].cl_id;
4740 REG_WR(bp, BAR_TSTRORM_INTMEM +
4741 TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id),
4742 ((u32 *)&tstorm_client)[0]);
4743 REG_WR(bp, BAR_TSTRORM_INTMEM +
4744 TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id) + 4,
4745 ((u32 *)&tstorm_client)[1]);
4748 DP(BNX2X_MSG_OFF, "tstorm_client: 0x%08x 0x%08x\n",
4749 ((u32 *)&tstorm_client)[0], ((u32 *)&tstorm_client)[1]);
4752 static void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
4754 struct tstorm_eth_mac_filter_config tstorm_mac_filter = {0};
4755 int mode = bp->rx_mode;
4756 int mask = (1 << BP_L_ID(bp));
4757 int func = BP_FUNC(bp);
4760 DP(NETIF_MSG_IFUP, "rx mode %d mask 0x%x\n", mode, mask);
4763 case BNX2X_RX_MODE_NONE: /* no Rx */
4764 tstorm_mac_filter.ucast_drop_all = mask;
4765 tstorm_mac_filter.mcast_drop_all = mask;
4766 tstorm_mac_filter.bcast_drop_all = mask;
4768 case BNX2X_RX_MODE_NORMAL:
4769 tstorm_mac_filter.bcast_accept_all = mask;
4771 case BNX2X_RX_MODE_ALLMULTI:
4772 tstorm_mac_filter.mcast_accept_all = mask;
4773 tstorm_mac_filter.bcast_accept_all = mask;
4775 case BNX2X_RX_MODE_PROMISC:
4776 tstorm_mac_filter.ucast_accept_all = mask;
4777 tstorm_mac_filter.mcast_accept_all = mask;
4778 tstorm_mac_filter.bcast_accept_all = mask;
4781 BNX2X_ERR("BAD rx mode (%d)\n", mode);
4785 for (i = 0; i < sizeof(struct tstorm_eth_mac_filter_config)/4; i++) {
4786 REG_WR(bp, BAR_TSTRORM_INTMEM +
4787 TSTORM_MAC_FILTER_CONFIG_OFFSET(func) + i * 4,
4788 ((u32 *)&tstorm_mac_filter)[i]);
4790 /* DP(NETIF_MSG_IFUP, "tstorm_mac_filter[%d]: 0x%08x\n", i,
4791 ((u32 *)&tstorm_mac_filter)[i]); */
4794 if (mode != BNX2X_RX_MODE_NONE)
4795 bnx2x_set_client_config(bp);
4798 static void bnx2x_init_internal_common(struct bnx2x *bp)
4802 if (bp->flags & TPA_ENABLE_FLAG) {
4803 struct tstorm_eth_tpa_exist tpa = {0};
4807 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_TPA_EXIST_OFFSET,
4809 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_TPA_EXIST_OFFSET + 4,
4813 /* Zero this manually as its initialization is
4814 currently missing in the initTool */
4815 for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++)
4816 REG_WR(bp, BAR_USTRORM_INTMEM +
4817 USTORM_AGG_DATA_OFFSET + i * 4, 0);
4820 static void bnx2x_init_internal_port(struct bnx2x *bp)
4822 int port = BP_PORT(bp);
4824 REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
4825 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
4826 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
4827 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
4830 /* Calculates the sum of vn_min_rates.
4831 It's needed for further normalizing of the min_rates.
4833 Returns: the sum of vn_min_rates, or
4835 0 - if all the min_rates are 0.
4836 In the latter case the fairness algorithm should be deactivated.
4837 If not all min_rates are zero then those that are zeroes will be set to 1.
4839 static void bnx2x_calc_vn_weight_sum(struct bnx2x *bp)
4842 int port = BP_PORT(bp);
4845 bp->vn_weight_sum = 0;
4846 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
4847 int func = 2*vn + port;
4849 SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
4850 u32 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
4851 FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
4853 /* Skip hidden vns */
4854 if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE)
4857 /* If min rate is zero - set it to 1 */
4859 vn_min_rate = DEF_MIN_RATE;
4863 bp->vn_weight_sum += vn_min_rate;
4866 /* ... only if all min rates are zeros - disable fairness */
4868 bp->vn_weight_sum = 0;
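/* Worked example: two visible VNs configured with min rates 25 and 75
 * scale to 2500 and 7500 (the *100 above), so vn_weight_sum becomes
 * 10000 and each VN's fair share is its own rate divided by that sum.
 */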
4871 static void bnx2x_init_internal_func(struct bnx2x *bp)
4873 struct tstorm_eth_function_common_config tstorm_config = {0};
4874 struct stats_indication_flags stats_flags = {0};
4875 int port = BP_PORT(bp);
4876 int func = BP_FUNC(bp);
4882 tstorm_config.config_flags = MULTI_FLAGS(bp);
4883 tstorm_config.rss_result_mask = MULTI_MASK;
4886 tstorm_config.config_flags |=
4887 TSTORM_ETH_FUNCTION_COMMON_CONFIG_E1HOV_IN_CAM;
4889 tstorm_config.leading_client_id = BP_L_ID(bp);
4891 REG_WR(bp, BAR_TSTRORM_INTMEM +
4892 TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(func),
4893 (*(u32 *)&tstorm_config));
4895 bp->rx_mode = BNX2X_RX_MODE_NONE; /* no rx until link is up */
4896 bnx2x_set_storm_rx_mode(bp);
4898 for_each_queue(bp, i) {
4899 u8 cl_id = bp->fp[i].cl_id;
4901 /* reset xstorm per client statistics */
4902 offset = BAR_XSTRORM_INTMEM +
4903 XSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
4905 j < sizeof(struct xstorm_per_client_stats) / 4; j++)
4906 REG_WR(bp, offset + j*4, 0);
4908 /* reset tstorm per client statistics */
4909 offset = BAR_TSTRORM_INTMEM +
4910 TSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
4912 j < sizeof(struct tstorm_per_client_stats) / 4; j++)
4913 REG_WR(bp, offset + j*4, 0);
4915 /* reset ustorm per client statistics */
4916 offset = BAR_USTRORM_INTMEM +
4917 USTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
4919 j < sizeof(struct ustorm_per_client_stats) / 4; j++)
4920 REG_WR(bp, offset + j*4, 0);
4923 /* Init statistics related context */
4924 stats_flags.collect_eth = 1;
4926 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func),
4927 ((u32 *)&stats_flags)[0]);
4928 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func) + 4,
4929 ((u32 *)&stats_flags)[1]);
4931 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func),
4932 ((u32 *)&stats_flags)[0]);
4933 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func) + 4,
4934 ((u32 *)&stats_flags)[1]);
4936 REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(func),
4937 ((u32 *)&stats_flags)[0]);
4938 REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(func) + 4,
4939 ((u32 *)&stats_flags)[1]);
4941 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func),
4942 ((u32 *)&stats_flags)[0]);
4943 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func) + 4,
4944 ((u32 *)&stats_flags)[1]);
4946 REG_WR(bp, BAR_XSTRORM_INTMEM +
4947 XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
4948 U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
4949 REG_WR(bp, BAR_XSTRORM_INTMEM +
4950 XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
4951 U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
4953 REG_WR(bp, BAR_TSTRORM_INTMEM +
4954 TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
4955 U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
4956 REG_WR(bp, BAR_TSTRORM_INTMEM +
4957 TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
4958 U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
4960 REG_WR(bp, BAR_USTRORM_INTMEM +
4961 USTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
4962 U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
4963 REG_WR(bp, BAR_USTRORM_INTMEM +
4964 USTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
4965 U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
4967 if (CHIP_IS_E1H(bp)) {
4968 REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNCTION_MODE_OFFSET,
4970 REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNCTION_MODE_OFFSET,
4972 REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNCTION_MODE_OFFSET,
4974 REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNCTION_MODE_OFFSET,
4977 REG_WR16(bp, BAR_XSTRORM_INTMEM + XSTORM_E1HOV_OFFSET(func),
4981 /* Init CQ ring mapping and aggregation size, the FW limit is 8 frags */
4983 min((u32)(min((u32)8, (u32)MAX_SKB_FRAGS) *
4984 SGE_PAGE_SIZE * PAGES_PER_SGE),
4986 for_each_rx_queue(bp, i) {
4987 struct bnx2x_fastpath *fp = &bp->fp[i];
4989 REG_WR(bp, BAR_USTRORM_INTMEM +
4990 USTORM_CQE_PAGE_BASE_OFFSET(port, FP_CL_ID(fp)),
4991 U64_LO(fp->rx_comp_mapping));
4992 REG_WR(bp, BAR_USTRORM_INTMEM +
4993 USTORM_CQE_PAGE_BASE_OFFSET(port, FP_CL_ID(fp)) + 4,
4994 U64_HI(fp->rx_comp_mapping));
4996 REG_WR16(bp, BAR_USTRORM_INTMEM +
4997 USTORM_MAX_AGG_SIZE_OFFSET(port, FP_CL_ID(fp)),
5001 /* dropless flow control */
5002 if (CHIP_IS_E1H(bp)) {
5003 struct ustorm_eth_rx_pause_data_e1h rx_pause = {0};
5005 rx_pause.bd_thr_low = 250;
5006 rx_pause.cqe_thr_low = 250;
5008 rx_pause.sge_thr_low = 0;
5009 rx_pause.bd_thr_high = 350;
5010 rx_pause.cqe_thr_high = 350;
5011 rx_pause.sge_thr_high = 0;
5013 for_each_rx_queue(bp, i) {
5014 struct bnx2x_fastpath *fp = &bp->fp[i];
5016 if (!fp->disable_tpa) {
5017 rx_pause.sge_thr_low = 150;
5018 rx_pause.sge_thr_high = 250;
5022 offset = BAR_USTRORM_INTMEM +
5023 USTORM_ETH_RING_PAUSE_DATA_OFFSET(port,
5026 j < sizeof(struct ustorm_eth_rx_pause_data_e1h)/4;
5028 REG_WR(bp, offset + j*4,
5029 ((u32 *)&rx_pause)[j]);
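/* The threshold pairs read as watermarks: pause is asserted when the
 * number of free BDs/CQEs (and SGEs, if any queue runs TPA) falls
 * below the *_thr_low value and released once it climbs back above
 * *_thr_high; SGE thresholds stay 0 when TPA is disabled everywhere.
 */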
5033 memset(&(bp->cmng), 0, sizeof(struct cmng_struct_per_port));
5035 /* Init rate shaping and fairness contexts */
5039 /* During init there is no active link
5040 Until link is up, set link rate to 10Gbps */
5041 bp->link_vars.line_speed = SPEED_10000;
5042 bnx2x_init_port_minmax(bp);
5044 bnx2x_calc_vn_weight_sum(bp);
5046 for (vn = VN_0; vn < E1HVN_MAX; vn++)
5047 bnx2x_init_vn_minmax(bp, 2*vn + port);
5049 /* Enable rate shaping and fairness */
5050 bp->cmng.flags.cmng_enables =
5051 CMNG_FLAGS_PER_PORT_RATE_SHAPING_VN;
5052 if (bp->vn_weight_sum)
5053 bp->cmng.flags.cmng_enables |=
5054 CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
5056 DP(NETIF_MSG_IFUP, "All MIN values are zeroes;"
5057 " fairness will be disabled\n");
5059 /* rate shaping and fairness are disabled */
5061 "single function mode minmax will be disabled\n");
5065 /* Store it to internal memory */
5067 for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++)
5068 REG_WR(bp, BAR_XSTRORM_INTMEM +
5069 XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i * 4,
5070 ((u32 *)(&bp->cmng))[i]);
5073 static void bnx2x_init_internal(struct bnx2x *bp, u32 load_code)
5075 switch (load_code) {
5076 case FW_MSG_CODE_DRV_LOAD_COMMON:
5077 bnx2x_init_internal_common(bp);
5080 case FW_MSG_CODE_DRV_LOAD_PORT:
5081 bnx2x_init_internal_port(bp);
5084 case FW_MSG_CODE_DRV_LOAD_FUNCTION:
5085 bnx2x_init_internal_func(bp);
5089 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
5094 static void bnx2x_nic_init(struct bnx2x *bp, u32 load_code)
5098 for_each_queue(bp, i) {
5099 struct bnx2x_fastpath *fp = &bp->fp[i];
5102 fp->state = BNX2X_FP_STATE_CLOSED;
5104 fp->cl_id = BP_L_ID(bp) + i;
5105 fp->sb_id = fp->cl_id;
5107 "bnx2x_init_sb(%p,%p) index %d cl_id %d sb %d\n",
5108 bp, fp->status_blk, i, FP_CL_ID(fp), FP_SB_ID(fp));
5109 bnx2x_init_sb(bp, fp->status_blk, fp->status_blk_mapping,
5111 bnx2x_update_fpsb_idx(fp);
5114 bnx2x_init_def_sb(bp, bp->def_status_blk, bp->def_status_blk_mapping,
5116 bnx2x_update_dsb_idx(bp);
5117 bnx2x_update_coalesce(bp);
5118 bnx2x_init_rx_rings(bp);
5119 bnx2x_init_tx_ring(bp);
5120 bnx2x_init_sp_ring(bp);
5121 bnx2x_init_context(bp);
5122 bnx2x_init_internal(bp, load_code);
5123 bnx2x_init_ind_table(bp);
5124 bnx2x_stats_init(bp);
5126 /* At this point, we are ready for interrupts */
5127 atomic_set(&bp->intr_sem, 0);
5129 /* flush all before enabling interrupts */
5133 bnx2x_int_enable(bp);
5136 /* end of nic init */
5139 * gzip service functions
5142 static int bnx2x_gunzip_init(struct bnx2x *bp)
5144 bp->gunzip_buf = pci_alloc_consistent(bp->pdev, FW_BUF_SIZE,
5145 &bp->gunzip_mapping);
5146 if (bp->gunzip_buf == NULL)
5149 bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL);
5150 if (bp->strm == NULL)
5153 bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(),
5155 if (bp->strm->workspace == NULL)
5165 pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
5166 bp->gunzip_mapping);
5167 bp->gunzip_buf = NULL;
5170 printk(KERN_ERR PFX "%s: Cannot allocate firmware buffer for"
5171 " un-compression\n", bp->dev->name);
5175 static void bnx2x_gunzip_end(struct bnx2x *bp)
5177 kfree(bp->strm->workspace);
5182 if (bp->gunzip_buf) {
5183 pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
5184 bp->gunzip_mapping);
5185 bp->gunzip_buf = NULL;
5189 static int bnx2x_gunzip(struct bnx2x *bp, u8 *zbuf, int len)
5193 /* check gzip header */
5194 if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED))
5201 if (zbuf[3] & FNAME)
5202 while ((zbuf[n++] != 0) && (n < len));
5204 bp->strm->next_in = zbuf + n;
5205 bp->strm->avail_in = len - n;
5206 bp->strm->next_out = bp->gunzip_buf;
5207 bp->strm->avail_out = FW_BUF_SIZE;
5209 rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
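/* Note: the negative window-bits value (-MAX_WBITS) asks zlib for a
 * raw deflate stream - the gzip container (10-byte fixed header plus
 * the optional FNAME field) was already skipped by hand above, so no
 * zlib header or checksum is expected inside.
 */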
5213 rc = zlib_inflate(bp->strm, Z_FINISH);
5214 if ((rc != Z_OK) && (rc != Z_STREAM_END))
5215 printk(KERN_ERR PFX "%s: Firmware decompression error: %s\n",
5216 bp->dev->name, bp->strm->msg);
5218 bp->gunzip_outlen = (FW_BUF_SIZE - bp->strm->avail_out);
5219 if (bp->gunzip_outlen & 0x3)
5220 printk(KERN_ERR PFX "%s: Firmware decompression error:"
5221 " gunzip_outlen (%d) not aligned\n",
5222 bp->dev->name, bp->gunzip_outlen);
5223 bp->gunzip_outlen >>= 2;
5225 zlib_inflateEnd(bp->strm);
5227 if (rc == Z_STREAM_END)
5233 /* nic load/unload */
5236 * General service functions
5239 /* send a NIG loopback debug packet */
5240 static void bnx2x_lb_pckt(struct bnx2x *bp)
5244 /* Ethernet source and destination addresses */
5245 wb_write[0] = 0x55555555;
5246 wb_write[1] = 0x55555555;
5247 wb_write[2] = 0x20; /* SOP */
5248 REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
5250 /* NON-IP protocol */
5251 wb_write[0] = 0x09000000;
5252 wb_write[1] = 0x55555555;
5253 wb_write[2] = 0x10; /* EOP, eop_bvalid = 0 */
5254 REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
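/* The two 3-dword writes emit one minimal 16-byte (0x10) frame through
 * the NIG debug interface: the first carries dummy MAC addresses plus
 * the SOP marker, the second the EOP.  This is why the memory test
 * below waits for BRB octet counts in multiples of 0x10.
 */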
5257 /* some of the internal memories
5258 * are not directly readable from the driver,
5259 * so to test them we send debug packets
5261 static int bnx2x_int_mem_test(struct bnx2x *bp)
5267 if (CHIP_REV_IS_FPGA(bp))
5269 else if (CHIP_REV_IS_EMUL(bp))
5274 DP(NETIF_MSG_HW, "start part1\n");
5276 /* Disable inputs of parser neighbor blocks */
5277 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
5278 REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
5279 REG_WR(bp, CFC_REG_DEBUG0, 0x1);
5280 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
5282 /* Write 0 to parser credits for CFC search request */
5283 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
5285 /* send Ethernet packet */
5288 /* TODO: do I need to reset the NIG statistics? */
5289 /* Wait until NIG register shows 1 packet of size 0x10 */
5290 count = 1000 * factor;
5293 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5294 val = *bnx2x_sp(bp, wb_data[0]);
5302 BNX2X_ERR("NIG timeout val = 0x%x\n", val);
5306 /* Wait until PRS register shows 1 packet */
5307 count = 1000 * factor;
5309 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5317 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
5321 /* Reset and init BRB, PRS */
5322 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
5324 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
5326 bnx2x_init_block(bp, BRB1_COMMON_START, BRB1_COMMON_END);
5327 bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END);
5329 DP(NETIF_MSG_HW, "part2\n");
5331 /* Disable inputs of parser neighbor blocks */
5332 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
5333 REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
5334 REG_WR(bp, CFC_REG_DEBUG0, 0x1);
5335 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
5337 /* Write 0 to parser credits for CFC search request */
5338 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
5340 /* send 10 Ethernet packets */
5341 for (i = 0; i < 10; i++)
5344 /* Wait until NIG register shows 10 + 1
5345 packets of size 11*0x10 = 0xb0 */
5346 count = 1000 * factor;
5349 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5350 val = *bnx2x_sp(bp, wb_data[0]);
5358 BNX2X_ERR("NIG timeout val = 0x%x\n", val);
5362 /* Wait until PRS register shows 2 packets */
5363 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5365 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
5367 /* Write 1 to parser credits for CFC search request */
5368 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x1);
5370 /* Wait until PRS register shows 3 packets */
5371 msleep(10 * factor);
5372 /* Wait until NIG register shows 1 packet of size 0x10 */
5373 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5375 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
5377 /* clear NIG EOP FIFO */
5378 for (i = 0; i < 11; i++)
5379 REG_RD(bp, NIG_REG_INGRESS_EOP_LB_FIFO);
5380 val = REG_RD(bp, NIG_REG_INGRESS_EOP_LB_EMPTY);
5382 BNX2X_ERR("clear of NIG failed\n");
5386 /* Reset and init BRB, PRS, NIG */
5387 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
5389 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
5391 bnx2x_init_block(bp, BRB1_COMMON_START, BRB1_COMMON_END);
5392 bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END);
5395 REG_WR(bp, PRS_REG_NIC_MODE, 1);
5398 /* Enable inputs of parser neighbor blocks */
5399 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x7fffffff);
5400 REG_WR(bp, TCM_REG_PRS_IFEN, 0x1);
5401 REG_WR(bp, CFC_REG_DEBUG0, 0x0);
5402 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x1);
5404 DP(NETIF_MSG_HW, "done\n");
5409 static void enable_blocks_attention(struct bnx2x *bp)
5411 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
5412 REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0);
5413 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
5414 REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
5415 REG_WR(bp, QM_REG_QM_INT_MASK, 0);
5416 REG_WR(bp, TM_REG_TM_INT_MASK, 0);
5417 REG_WR(bp, XSDM_REG_XSDM_INT_MASK_0, 0);
5418 REG_WR(bp, XSDM_REG_XSDM_INT_MASK_1, 0);
5419 REG_WR(bp, XCM_REG_XCM_INT_MASK, 0);
5420 /* REG_WR(bp, XSEM_REG_XSEM_INT_MASK_0, 0); */
5421 /* REG_WR(bp, XSEM_REG_XSEM_INT_MASK_1, 0); */
5422 REG_WR(bp, USDM_REG_USDM_INT_MASK_0, 0);
5423 REG_WR(bp, USDM_REG_USDM_INT_MASK_1, 0);
5424 REG_WR(bp, UCM_REG_UCM_INT_MASK, 0);
5425 /* REG_WR(bp, USEM_REG_USEM_INT_MASK_0, 0); */
5426 /* REG_WR(bp, USEM_REG_USEM_INT_MASK_1, 0); */
5427 REG_WR(bp, GRCBASE_UPB + PB_REG_PB_INT_MASK, 0);
5428 REG_WR(bp, CSDM_REG_CSDM_INT_MASK_0, 0);
5429 REG_WR(bp, CSDM_REG_CSDM_INT_MASK_1, 0);
5430 REG_WR(bp, CCM_REG_CCM_INT_MASK, 0);
5431 /* REG_WR(bp, CSEM_REG_CSEM_INT_MASK_0, 0); */
5432 /* REG_WR(bp, CSEM_REG_CSEM_INT_MASK_1, 0); */
5433 if (CHIP_REV_IS_FPGA(bp))
5434 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x580000);
5436 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x480000);
5437 REG_WR(bp, TSDM_REG_TSDM_INT_MASK_0, 0);
5438 REG_WR(bp, TSDM_REG_TSDM_INT_MASK_1, 0);
5439 REG_WR(bp, TCM_REG_TCM_INT_MASK, 0);
5440 /* REG_WR(bp, TSEM_REG_TSEM_INT_MASK_0, 0); */
5441 /* REG_WR(bp, TSEM_REG_TSEM_INT_MASK_1, 0); */
5442 REG_WR(bp, CDU_REG_CDU_INT_MASK, 0);
5443 REG_WR(bp, DMAE_REG_DMAE_INT_MASK, 0);
5444 /* REG_WR(bp, MISC_REG_MISC_INT_MASK, 0); */
5445 REG_WR(bp, PBF_REG_PBF_INT_MASK, 0x18); /* bit 3,4 masked */
5449 static void bnx2x_reset_common(struct bnx2x *bp)
5452 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
5454 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, 0x1403);
5457 static int bnx2x_init_common(struct bnx2x *bp)
5461 DP(BNX2X_MSG_MCP, "starting common init func %d\n", BP_FUNC(bp));
5463 bnx2x_reset_common(bp);
5464 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0xffffffff);
5465 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, 0xfffc);
5467 bnx2x_init_block(bp, MISC_COMMON_START, MISC_COMMON_END);
5468 if (CHIP_IS_E1H(bp))
5469 REG_WR(bp, MISC_REG_E1HMF_MODE, IS_E1HMF(bp));
5471 REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x100);
5473 REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x0);
5475 bnx2x_init_block(bp, PXP_COMMON_START, PXP_COMMON_END);
5476 if (CHIP_IS_E1(bp)) {
5477 /* enable HW interrupt from PXP on USDM overflow
5478 bit 16 on INT_MASK_0 */
5479 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
5482 bnx2x_init_block(bp, PXP2_COMMON_START, PXP2_COMMON_END);
5486 REG_WR(bp, PXP2_REG_RQ_QM_ENDIAN_M, 1);
5487 REG_WR(bp, PXP2_REG_RQ_TM_ENDIAN_M, 1);
5488 REG_WR(bp, PXP2_REG_RQ_SRC_ENDIAN_M, 1);
5489 REG_WR(bp, PXP2_REG_RQ_CDU_ENDIAN_M, 1);
5490 REG_WR(bp, PXP2_REG_RQ_DBG_ENDIAN_M, 1);
5491 /* make sure this value is 0 */
5492 REG_WR(bp, PXP2_REG_RQ_HC_ENDIAN_M, 0);
5494 /* REG_WR(bp, PXP2_REG_RD_PBF_SWAP_MODE, 1); */
5495 REG_WR(bp, PXP2_REG_RD_QM_SWAP_MODE, 1);
5496 REG_WR(bp, PXP2_REG_RD_TM_SWAP_MODE, 1);
5497 REG_WR(bp, PXP2_REG_RD_SRC_SWAP_MODE, 1);
5498 REG_WR(bp, PXP2_REG_RD_CDURD_SWAP_MODE, 1);
5501 REG_WR(bp, PXP2_REG_RQ_CDU_P_SIZE, 2);
5503 REG_WR(bp, PXP2_REG_RQ_TM_P_SIZE, 5);
5504 REG_WR(bp, PXP2_REG_RQ_QM_P_SIZE, 5);
5505 REG_WR(bp, PXP2_REG_RQ_SRC_P_SIZE, 5);
5508 if (CHIP_REV_IS_FPGA(bp) && CHIP_IS_E1H(bp))
5509 REG_WR(bp, PXP2_REG_PGL_TAGS_LIMIT, 0x1);
5511 /* let the HW do its magic ... */
5513 /* finish PXP init */
5514 val = REG_RD(bp, PXP2_REG_RQ_CFG_DONE);
5516 BNX2X_ERR("PXP2 CFG failed\n");
5519 val = REG_RD(bp, PXP2_REG_RD_INIT_DONE);
5521 BNX2X_ERR("PXP2 RD_INIT failed\n");
5525 REG_WR(bp, PXP2_REG_RQ_DISABLE_INPUTS, 0);
5526 REG_WR(bp, PXP2_REG_RD_DISABLE_INPUTS, 0);
5528 bnx2x_init_block(bp, DMAE_COMMON_START, DMAE_COMMON_END);
5530 /* clean the DMAE memory */
5532 bnx2x_init_fill(bp, TSEM_REG_PRAM, 0, 8);
5534 bnx2x_init_block(bp, TCM_COMMON_START, TCM_COMMON_END);
5535 bnx2x_init_block(bp, UCM_COMMON_START, UCM_COMMON_END);
5536 bnx2x_init_block(bp, CCM_COMMON_START, CCM_COMMON_END);
5537 bnx2x_init_block(bp, XCM_COMMON_START, XCM_COMMON_END);
5539 bnx2x_read_dmae(bp, XSEM_REG_PASSIVE_BUFFER, 3);
5540 bnx2x_read_dmae(bp, CSEM_REG_PASSIVE_BUFFER, 3);
5541 bnx2x_read_dmae(bp, TSEM_REG_PASSIVE_BUFFER, 3);
5542 bnx2x_read_dmae(bp, USEM_REG_PASSIVE_BUFFER, 3);
5544 bnx2x_init_block(bp, QM_COMMON_START, QM_COMMON_END);
5545 /* soft reset pulse */
5546 REG_WR(bp, QM_REG_SOFT_RESET, 1);
5547 REG_WR(bp, QM_REG_SOFT_RESET, 0);
5550 bnx2x_init_block(bp, TIMERS_COMMON_START, TIMERS_COMMON_END);
5553 bnx2x_init_block(bp, DQ_COMMON_START, DQ_COMMON_END);
5554 REG_WR(bp, DORQ_REG_DPM_CID_OFST, BCM_PAGE_SHIFT);
5555 if (!CHIP_REV_IS_SLOW(bp)) {
5556 /* enable hw interrupt from doorbell Q */
5557 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
5560 bnx2x_init_block(bp, BRB1_COMMON_START, BRB1_COMMON_END);
5561 bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END);
5562 REG_WR(bp, PRS_REG_A_PRSU_20, 0xf);
5564 REG_WR(bp, PRS_REG_NIC_MODE, 1);
5565 if (CHIP_IS_E1H(bp))
5566 REG_WR(bp, PRS_REG_E1HOV_MODE, IS_E1HMF(bp));
5568 bnx2x_init_block(bp, TSDM_COMMON_START, TSDM_COMMON_END);
5569 bnx2x_init_block(bp, CSDM_COMMON_START, CSDM_COMMON_END);
5570 bnx2x_init_block(bp, USDM_COMMON_START, USDM_COMMON_END);
5571 bnx2x_init_block(bp, XSDM_COMMON_START, XSDM_COMMON_END);
5573 if (CHIP_IS_E1H(bp)) {
5574 bnx2x_init_fill(bp, TSTORM_INTMEM_ADDR, 0,
5575 STORM_INTMEM_SIZE_E1H/2);
5577 TSTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
5578 0, STORM_INTMEM_SIZE_E1H/2);
5579 bnx2x_init_fill(bp, CSTORM_INTMEM_ADDR, 0,
5580 STORM_INTMEM_SIZE_E1H/2);
5582 CSTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
5583 0, STORM_INTMEM_SIZE_E1H/2);
5584 bnx2x_init_fill(bp, XSTORM_INTMEM_ADDR, 0,
5585 STORM_INTMEM_SIZE_E1H/2);
5587 XSTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
5588 0, STORM_INTMEM_SIZE_E1H/2);
5589 bnx2x_init_fill(bp, USTORM_INTMEM_ADDR, 0,
5590 STORM_INTMEM_SIZE_E1H/2);
5592 USTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
5593 0, STORM_INTMEM_SIZE_E1H/2);
5595 bnx2x_init_fill(bp, TSTORM_INTMEM_ADDR, 0,
5596 STORM_INTMEM_SIZE_E1);
5597 bnx2x_init_fill(bp, CSTORM_INTMEM_ADDR, 0,
5598 STORM_INTMEM_SIZE_E1);
5599 bnx2x_init_fill(bp, XSTORM_INTMEM_ADDR, 0,
5600 STORM_INTMEM_SIZE_E1);
5601 bnx2x_init_fill(bp, USTORM_INTMEM_ADDR, 0,
5602 STORM_INTMEM_SIZE_E1);
5605 bnx2x_init_block(bp, TSEM_COMMON_START, TSEM_COMMON_END);
5606 bnx2x_init_block(bp, USEM_COMMON_START, USEM_COMMON_END);
5607 bnx2x_init_block(bp, CSEM_COMMON_START, CSEM_COMMON_END);
5608 bnx2x_init_block(bp, XSEM_COMMON_START, XSEM_COMMON_END);
5611 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
5613 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
5616 bnx2x_init_block(bp, UPB_COMMON_START, UPB_COMMON_END);
5617 bnx2x_init_block(bp, XPB_COMMON_START, XPB_COMMON_END);
5618 bnx2x_init_block(bp, PBF_COMMON_START, PBF_COMMON_END);
5620 REG_WR(bp, SRC_REG_SOFT_RST, 1);
5621 for (i = SRC_REG_KEYRSS0_0; i <= SRC_REG_KEYRSS1_9; i += 4) {
5622 REG_WR(bp, i, 0xc0cac01a);
5623 /* TODO: replace with something meaningful */
5625 bnx2x_init_block(bp, SRCH_COMMON_START, SRCH_COMMON_END);
5626 REG_WR(bp, SRC_REG_SOFT_RST, 0);
5628 if (sizeof(union cdu_context) != 1024)
5629 /* we currently assume that a context is 1024 bytes */
5630 printk(KERN_ALERT PFX "please adjust the size of"
5631 " cdu_context(%ld)\n", (long)sizeof(union cdu_context));
5633 bnx2x_init_block(bp, CDU_COMMON_START, CDU_COMMON_END);
5634 val = (4 << 24) + (0 << 12) + 1024;
5635 REG_WR(bp, CDU_REG_CDU_GLOBAL_PARAMS, val);
5636 if (CHIP_IS_E1(bp)) {
5637 /* !!! fix pxp client credit until excel update */
5638 REG_WR(bp, CDU_REG_CDU_DEBUG, 0x264);
5639 REG_WR(bp, CDU_REG_CDU_DEBUG, 0);
5642 bnx2x_init_block(bp, CFC_COMMON_START, CFC_COMMON_END);
5643 REG_WR(bp, CFC_REG_INIT_REG, 0x7FF);
5644 /* enable context validation interrupt from CFC */
5645 REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
5647 /* set the thresholds to prevent CFC/CDU race */
5648 REG_WR(bp, CFC_REG_DEBUG0, 0x20020000);
5650 bnx2x_init_block(bp, HC_COMMON_START, HC_COMMON_END);
5651 bnx2x_init_block(bp, MISC_AEU_COMMON_START, MISC_AEU_COMMON_END);
5653 /* PXPCS COMMON comes here */
5654 /* Reset PCIE errors for debug */
5655 REG_WR(bp, 0x2814, 0xffffffff);
5656 REG_WR(bp, 0x3820, 0xffffffff);
5658 /* EMAC0 COMMON comes here */
5659 /* EMAC1 COMMON comes here */
5660 /* DBU COMMON comes here */
5661 /* DBG COMMON comes here */
5663 bnx2x_init_block(bp, NIG_COMMON_START, NIG_COMMON_END);
5664 if (CHIP_IS_E1H(bp)) {
5665 REG_WR(bp, NIG_REG_LLH_MF_MODE, IS_E1HMF(bp));
5666 REG_WR(bp, NIG_REG_LLH_E1HOV_MODE, IS_E1HMF(bp));
5669 if (CHIP_REV_IS_SLOW(bp))
5672 /* finish CFC init */
5673 val = reg_poll(bp, CFC_REG_LL_INIT_DONE, 1, 100, 10);
5675 BNX2X_ERR("CFC LL_INIT failed\n");
5678 val = reg_poll(bp, CFC_REG_AC_INIT_DONE, 1, 100, 10);
5680 BNX2X_ERR("CFC AC_INIT failed\n");
5683 val = reg_poll(bp, CFC_REG_CAM_INIT_DONE, 1, 100, 10);
5685 BNX2X_ERR("CFC CAM_INIT failed\n");
5688 REG_WR(bp, CFC_REG_DEBUG0, 0);
5690 /* read NIG statistic
5691 to see if this is our first bring-up since power-up */
5692 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5693 val = *bnx2x_sp(bp, wb_data[0]);
5695 /* do internal memory self test */
5696 if ((CHIP_IS_E1(bp)) && (val == 0) && bnx2x_int_mem_test(bp)) {
5697 BNX2X_ERR("internal mem self test failed\n");
5701 switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
5702 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
5703 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
5704 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
5705 bp->port.need_hw_lock = 1;
5708 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
5709 /* Fan failure is indicated by SPIO 5 */
5710 bnx2x_set_spio(bp, MISC_REGISTERS_SPIO_5,
5711 MISC_REGISTERS_SPIO_INPUT_HI_Z);
5713 /* set to active low mode */
5714 val = REG_RD(bp, MISC_REG_SPIO_INT);
5715 val |= ((1 << MISC_REGISTERS_SPIO_5) <<
5716 MISC_REGISTERS_SPIO_INT_OLD_SET_POS);
5717 REG_WR(bp, MISC_REG_SPIO_INT, val);
5719 /* enable interrupt to signal the IGU */
5720 val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN);
5721 val |= (1 << MISC_REGISTERS_SPIO_5);
5722 REG_WR(bp, MISC_REG_SPIO_EVENT_EN, val);
5729 /* clear PXP2 attentions */
5730 REG_RD(bp, PXP2_REG_PXP2_INT_STS_CLR_0);
5732 enable_blocks_attention(bp);
5734 if (!BP_NOMCP(bp)) {
5735 bnx2x_acquire_phy_lock(bp);
5736 bnx2x_common_init_phy(bp, bp->common.shmem_base);
5737 bnx2x_release_phy_lock(bp);
5739 BNX2X_ERR("Bootcode is missing - can not initialize link\n");
5744 static int bnx2x_init_port(struct bnx2x *bp)
5746 int port = BP_PORT(bp);
5750 DP(BNX2X_MSG_MCP, "starting port init port %x\n", port);
5752 REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
5754 /* Port PXP comes here */
5755 /* Port PXP2 comes here */
5760 wb_write[0] = ONCHIP_ADDR1(bp->timers_mapping);
5761 wb_write[1] = ONCHIP_ADDR2(bp->timers_mapping);
5762 REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
5763 REG_WR(bp, PXP2_REG_PSWRQ_TM0_L2P + func*4, PXP_ONE_ILT(i));
5768 wb_write[0] = ONCHIP_ADDR1(bp->qm_mapping);
5769 wb_write[1] = ONCHIP_ADDR2(bp->qm_mapping);
5770 REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
5771 REG_WR(bp, PXP2_REG_PSWRQ_QM0_L2P + func*4, PXP_ONE_ILT(i));
5776 wb_write[0] = ONCHIP_ADDR1(bp->t1_mapping);
5777 wb_write[1] = ONCHIP_ADDR2(bp->t1_mapping);
5778 REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
5779 REG_WR(bp, PXP2_REG_PSWRQ_SRC0_L2P + func*4, PXP_ONE_ILT(i));
5781 /* Port CMs come here */
5782 bnx2x_init_block(bp, (port ? XCM_PORT1_START : XCM_PORT0_START),
5783 (port ? XCM_PORT1_END : XCM_PORT0_END));
5785 /* Port QM comes here */
5787 REG_WR(bp, TM_REG_LIN0_SCAN_TIME + func*4, 1024/64*20);
5788 REG_WR(bp, TM_REG_LIN0_MAX_ACTIVE_CID + func*4, 31);
5790 bnx2x_init_block(bp, func ? TIMERS_PORT1_START : TIMERS_PORT0_START,
5791 func ? TIMERS_PORT1_END : TIMERS_PORT0_END);
5793 /* Port DQ comes here */
5795 bnx2x_init_block(bp, (port ? BRB1_PORT1_START : BRB1_PORT0_START),
5796 (port ? BRB1_PORT1_END : BRB1_PORT0_END));
5797 if (CHIP_REV_IS_SLOW(bp) && !CHIP_IS_E1H(bp)) {
5798 /* no pause for emulation and FPGA */
5803 low = ((bp->flags & ONE_PORT_FLAG) ? 160 : 246);
5804 else if (bp->dev->mtu > 4096) {
5805 if (bp->flags & ONE_PORT_FLAG)
5809 /* (24*1024 + val*4)/256 */
5810 low = 96 + (val/64) + ((val % 64) ? 1 : 0);
5813 low = ((bp->flags & ONE_PORT_FLAG) ? 80 : 160);
5814 high = low + 56; /* 14*1024/256 */
5816 REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_0 + port*4, low);
5817 REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_0 + port*4, high);
5820 /* Port PRS comes here */
5821 /* Port TSDM comes here */
5822 /* Port CSDM comes here */
5823 /* Port USDM comes here */
5824 /* Port XSDM comes here */
5825 bnx2x_init_block(bp, port ? TSEM_PORT1_START : TSEM_PORT0_START,
5826 port ? TSEM_PORT1_END : TSEM_PORT0_END);
5827 bnx2x_init_block(bp, port ? USEM_PORT1_START : USEM_PORT0_START,
5828 port ? USEM_PORT1_END : USEM_PORT0_END);
5829 bnx2x_init_block(bp, port ? CSEM_PORT1_START : CSEM_PORT0_START,
5830 port ? CSEM_PORT1_END : CSEM_PORT0_END);
5831 bnx2x_init_block(bp, port ? XSEM_PORT1_START : XSEM_PORT0_START,
5832 port ? XSEM_PORT1_END : XSEM_PORT0_END);
5833 /* Port UPB comes here */
5834 /* Port XPB comes here */
5836 bnx2x_init_block(bp, port ? PBF_PORT1_START : PBF_PORT0_START,
5837 port ? PBF_PORT1_END : PBF_PORT0_END);
5839 /* configure PBF to work without PAUSE mtu 9000 */
5840 REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 0);
5842 /* update threshold */
5843 REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, (9040/16));
5844 /* update init credit */
5845 REG_WR(bp, PBF_REG_P0_INIT_CRD + port*4, (9040/16) + 553 - 22);
5848 REG_WR(bp, PBF_REG_INIT_P0 + port*4, 1);
5850 REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0);
5853 /* tell the searcher where the T2 table is */
5854 REG_WR(bp, SRC_REG_COUNTFREE0 + func*4, 16*1024/64);
5856 wb_write[0] = U64_LO(bp->t2_mapping);
5857 wb_write[1] = U64_HI(bp->t2_mapping);
5858 REG_WR_DMAE(bp, SRC_REG_FIRSTFREE0 + func*4, wb_write, 2);
5859 wb_write[0] = U64_LO((u64)bp->t2_mapping + 16*1024 - 64);
5860 wb_write[1] = U64_HI((u64)bp->t2_mapping + 16*1024 - 64);
5861 REG_WR_DMAE(bp, SRC_REG_LASTFREE0 + func*4, wb_write, 2);
5863 REG_WR(bp, SRC_REG_NUMBER_HASH_BITS0 + func*4, 10);
5864 /* Port SRCH comes here */
5866 /* Port CDU comes here */
5867 /* Port CFC comes here */
5869 if (CHIP_IS_E1(bp)) {
5870 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
5871 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
5873 bnx2x_init_block(bp, port ? HC_PORT1_START : HC_PORT0_START,
5874 port ? HC_PORT1_END : HC_PORT0_END);
5876 bnx2x_init_block(bp, port ? MISC_AEU_PORT1_START :
5877 MISC_AEU_PORT0_START,
5878 port ? MISC_AEU_PORT1_END : MISC_AEU_PORT0_END);
5879 /* init aeu_mask_attn_func_0/1:
5880 * - SF mode: bits 3-7 are masked. only bits 0-2 are in use
5881 * - MF mode: bit 3 is masked. bits 0-2 are in use as in SF
5882 * bits 4-7 are used for "per vn group attention" */
5883 REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4,
5884 (IS_E1HMF(bp) ? 0xF7 : 0x7));
5886 /* Port PXPCS comes here */
5887 /* Port EMAC0 comes here */
5888 /* Port EMAC1 comes here */
5889 /* Port DBU comes here */
5890 /* Port DBG comes here */
5891 bnx2x_init_block(bp, port ? NIG_PORT1_START : NIG_PORT0_START,
5892 port ? NIG_PORT1_END : NIG_PORT0_END);
5894 REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1);
5896 if (CHIP_IS_E1H(bp)) {
5897 /* 0x2 disable e1hov, 0x1 enable */
5898 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK_MF + port*4,
5899 (IS_E1HMF(bp) ? 0x1 : 0x2));
5901 /* support pause requests from USDM, TSDM and BRB */
5902 REG_WR(bp, NIG_REG_LLFC_EGRESS_SRC_ENABLE_0 + port*4, 0x7);
5905 REG_WR(bp, NIG_REG_LLFC_ENABLE_0 + port*4, 0);
5906 REG_WR(bp, NIG_REG_LLFC_OUT_EN_0 + port*4, 0);
5907 REG_WR(bp, NIG_REG_PAUSE_ENABLE_0 + port*4, 1);
5911 /* Port MCP comes here */
5912 /* Port DMAE comes here */
5914 switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
5915 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
5917 u32 swap_val, swap_override, aeu_gpio_mask, offset;
5919 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_3,
5920 MISC_REGISTERS_GPIO_INPUT_HI_Z, port);
5922 /* The GPIO should be swapped if the swap register is
5924 swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
5925 swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
5927 /* Select function upon port-swap configuration */
5929 offset = MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0;
5930 aeu_gpio_mask = (swap_val && swap_override) ?
5931 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1 :
5932 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0;
5934 offset = MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0;
5935 aeu_gpio_mask = (swap_val && swap_override) ?
5936 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 :
5937 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1;
5939 val = REG_RD(bp, offset);
5940 /* add GPIO3 to group */
5941 val |= aeu_gpio_mask;
5942 REG_WR(bp, offset, val);
5946 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
5947 /* add SPIO 5 to group 0 */
5948 val = REG_RD(bp, MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
5949 val |= AEU_INPUTS_ATTN_BITS_SPIO5;
5950 REG_WR(bp, MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0, val);
5957 bnx2x__link_reset(bp);
5962 #define ILT_PER_FUNC (768/2)
5963 #define FUNC_ILT_BASE(func) (func * ILT_PER_FUNC)
5964 /* the phys address is shifted right 12 bits and a valid bit (1)
5965 is added at the 53rd bit;
5966 since this is a wide register(TM)
5967 we split it into two 32-bit writes
5969 #define ONCHIP_ADDR1(x) ((u32)(((u64)x >> 12) & 0xFFFFFFFF))
5970 #define ONCHIP_ADDR2(x) ((u32)((1 << 20) | ((u64)x >> 44)))
5971 #define PXP_ONE_ILT(x) (((x) << 10) | x)
5972 #define PXP_ILT_RANGE(f, l) (((l) << 10) | f)
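/* Worked example with a hypothetical DMA address 0x12_3456_7000:
 * ONCHIP_ADDR1 = (addr >> 12) = 0x01234567 and
 * ONCHIP_ADDR2 = (1 << 20) | (addr >> 44) = 0x00100000,
 * i.e. the valid bit set and no high-order page bits.
 */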
5974 #define CNIC_ILT_LINES 0
5976 static void bnx2x_ilt_wr(struct bnx2x *bp, u32 index, dma_addr_t addr)
5980 if (CHIP_IS_E1H(bp))
5981 reg = PXP2_REG_RQ_ONCHIP_AT_B0 + index*8;
5983 reg = PXP2_REG_RQ_ONCHIP_AT + index*8;
5985 bnx2x_wb_wr(bp, reg, ONCHIP_ADDR1(addr), ONCHIP_ADDR2(addr));
5988 static int bnx2x_init_func(struct bnx2x *bp)
5990 int port = BP_PORT(bp);
5991 int func = BP_FUNC(bp);
5995 DP(BNX2X_MSG_MCP, "starting func init func %x\n", func);
5997 /* set MSI reconfigure capability */
5998 addr = (port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0);
5999 val = REG_RD(bp, addr);
6000 val |= HC_CONFIG_0_REG_MSI_ATTN_EN_0;
6001 REG_WR(bp, addr, val);
6003 i = FUNC_ILT_BASE(func);
6005 bnx2x_ilt_wr(bp, i, bnx2x_sp_mapping(bp, context));
6006 if (CHIP_IS_E1H(bp)) {
6007 REG_WR(bp, PXP2_REG_RQ_CDU_FIRST_ILT, i);
6008 REG_WR(bp, PXP2_REG_RQ_CDU_LAST_ILT, i + CNIC_ILT_LINES);
6010 REG_WR(bp, PXP2_REG_PSWRQ_CDU0_L2P + func*4,
6011 PXP_ILT_RANGE(i, i + CNIC_ILT_LINES));
6014 if (CHIP_IS_E1H(bp)) {
6015 for (i = 0; i < 9; i++)
6016 bnx2x_init_block(bp,
6017 cm_start[func][i], cm_end[func][i]);
6019 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
6020 REG_WR(bp, NIG_REG_LLH0_FUNC_VLAN_ID + port*8, bp->e1hov);
6023 /* HC init per function */
6024 if (CHIP_IS_E1H(bp)) {
6025 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
6027 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
6028 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
6030 bnx2x_init_block(bp, hc_limits[func][0], hc_limits[func][1]);
6032 /* Reset PCIE errors for debug */
6033 REG_WR(bp, 0x2114, 0xffffffff);
6034 REG_WR(bp, 0x2120, 0xffffffff);
6039 static int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
6043 DP(BNX2X_MSG_MCP, "function %d load_code %x\n",
6044 BP_FUNC(bp), load_code);
6047 mutex_init(&bp->dmae_mutex);
6048 bnx2x_gunzip_init(bp);
6050 switch (load_code) {
6051 case FW_MSG_CODE_DRV_LOAD_COMMON:
6052 rc = bnx2x_init_common(bp);
6057 case FW_MSG_CODE_DRV_LOAD_PORT:
6059 rc = bnx2x_init_port(bp);
6064 case FW_MSG_CODE_DRV_LOAD_FUNCTION:
6066 rc = bnx2x_init_func(bp);
6072 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
6076 if (!BP_NOMCP(bp)) {
6077 int func = BP_FUNC(bp);
6079 bp->fw_drv_pulse_wr_seq =
6080 (SHMEM_RD(bp, func_mb[func].drv_pulse_mb) &
6081 DRV_PULSE_SEQ_MASK);
6082 bp->func_stx = SHMEM_RD(bp, func_mb[func].fw_mb_param);
6083 DP(BNX2X_MSG_MCP, "drv_pulse 0x%x func_stx 0x%x\n",
6084 bp->fw_drv_pulse_wr_seq, bp->func_stx);
6088 /* this needs to be done before gunzip end */
6089 bnx2x_zero_def_sb(bp);
6090 for_each_queue(bp, i)
6091 bnx2x_zero_sb(bp, BP_L_ID(bp) + i);
6094 bnx2x_gunzip_end(bp);
6099 /* send the MCP a request, block until there is a reply */
6100 static u32 bnx2x_fw_command(struct bnx2x *bp, u32 command)
6102 int func = BP_FUNC(bp);
6103 u32 seq = ++bp->fw_seq;
6106 u8 delay = CHIP_REV_IS_SLOW(bp) ? 100 : 10;
6108 SHMEM_WR(bp, func_mb[func].drv_mb_header, (command | seq));
6109 DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB\n", (command | seq));
6112 /* let the FW do its magic ... */
6115 rc = SHMEM_RD(bp, func_mb[func].fw_mb_header);
6117 /* Give the FW up to 2 seconds (200*10ms) */
6118 } while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 200));
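/* The handshake is sequence-number based: each command is tagged with
 * ++bp->fw_seq and the FW acknowledges by echoing that seq in
 * fw_mb_header; the loop above polls up to 200 times at 10ms per
 * iteration (100ms on slow emulation/FPGA chips).
 */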
6120 DP(BNX2X_MSG_MCP, "[after %d ms] read (%x) seq is (%x) from FW MB\n",
6121 cnt*delay, rc, seq);
6123 /* is this a reply to our command? */
6124 if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK)) {
6125 rc &= FW_MSG_CODE_MASK;
6129 BNX2X_ERR("FW failed to respond!\n");
6137 static void bnx2x_free_mem(struct bnx2x *bp)
6140 #define BNX2X_PCI_FREE(x, y, size) \
6143 pci_free_consistent(bp->pdev, size, x, y); \
6149 #define BNX2X_FREE(x) \
6161 for_each_queue(bp, i) {
6164 BNX2X_PCI_FREE(bnx2x_fp(bp, i, status_blk),
6165 bnx2x_fp(bp, i, status_blk_mapping),
6166 sizeof(struct host_status_block) +
6167 sizeof(struct eth_tx_db_data));
6170 for_each_rx_queue(bp, i) {
6172 /* fastpath rx rings: rx_buf rx_desc rx_comp */
6173 BNX2X_FREE(bnx2x_fp(bp, i, rx_buf_ring));
6174 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_desc_ring),
6175 bnx2x_fp(bp, i, rx_desc_mapping),
6176 sizeof(struct eth_rx_bd) * NUM_RX_BD);
6178 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_comp_ring),
6179 bnx2x_fp(bp, i, rx_comp_mapping),
6180 sizeof(struct eth_fast_path_rx_cqe) *
6184 BNX2X_FREE(bnx2x_fp(bp, i, rx_page_ring));
6185 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_sge_ring),
6186 bnx2x_fp(bp, i, rx_sge_mapping),
6187 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
6190 for_each_tx_queue(bp, i) {
6192 /* fastpath tx rings: tx_buf tx_desc */
6193 BNX2X_FREE(bnx2x_fp(bp, i, tx_buf_ring));
6194 BNX2X_PCI_FREE(bnx2x_fp(bp, i, tx_desc_ring),
6195 bnx2x_fp(bp, i, tx_desc_mapping),
6196 sizeof(struct eth_tx_bd) * NUM_TX_BD);
6198 /* end of fastpath */
6200 BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping,
6201 sizeof(struct host_def_status_block));
6203 BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping,
6204 sizeof(struct bnx2x_slowpath));
6207 BNX2X_PCI_FREE(bp->t1, bp->t1_mapping, 64*1024);
6208 BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, 16*1024);
6209 BNX2X_PCI_FREE(bp->timers, bp->timers_mapping, 8*1024);
6210 BNX2X_PCI_FREE(bp->qm, bp->qm_mapping, 128*1024);
6212 BNX2X_PCI_FREE(bp->spq, bp->spq_mapping, BCM_PAGE_SIZE);
6214 #undef BNX2X_PCI_FREE
static int bnx2x_alloc_mem(struct bnx2x *bp)

#define BNX2X_PCI_ALLOC(x, y, size) \
		x = pci_alloc_consistent(bp->pdev, size, y); \
			goto alloc_mem_err; \
		memset(x, 0, size); \

#define BNX2X_ALLOC(x, size) \
		x = vmalloc(size); \
			goto alloc_mem_err; \
		memset(x, 0, size); \

	for_each_queue(bp, i) {
		bnx2x_fp(bp, i, bp) = bp;

		BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, status_blk),
				&bnx2x_fp(bp, i, status_blk_mapping),
				sizeof(struct host_status_block) +
				sizeof(struct eth_tx_db_data));

	for_each_rx_queue(bp, i) {

		/* fastpath rx rings: rx_buf rx_desc rx_comp */
		BNX2X_ALLOC(bnx2x_fp(bp, i, rx_buf_ring),
			    sizeof(struct sw_rx_bd) * NUM_RX_BD);
		BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_desc_ring),
				&bnx2x_fp(bp, i, rx_desc_mapping),
				sizeof(struct eth_rx_bd) * NUM_RX_BD);

		BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_comp_ring),
				&bnx2x_fp(bp, i, rx_comp_mapping),
				sizeof(struct eth_fast_path_rx_cqe) *

		BNX2X_ALLOC(bnx2x_fp(bp, i, rx_page_ring),
			    sizeof(struct sw_rx_page) * NUM_RX_SGE);
		BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_sge_ring),
				&bnx2x_fp(bp, i, rx_sge_mapping),
				BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);

	for_each_tx_queue(bp, i) {

		bnx2x_fp(bp, i, hw_tx_prods) =
				(void *)(bnx2x_fp(bp, i, status_blk) + 1);

		bnx2x_fp(bp, i, tx_prods_mapping) =
				bnx2x_fp(bp, i, status_blk_mapping) +
				sizeof(struct host_status_block);

		/* fastpath tx rings: tx_buf tx_desc */
		BNX2X_ALLOC(bnx2x_fp(bp, i, tx_buf_ring),
			    sizeof(struct sw_tx_bd) * NUM_TX_BD);
		BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, tx_desc_ring),
				&bnx2x_fp(bp, i, tx_desc_mapping),
				sizeof(struct eth_tx_bd) * NUM_TX_BD);

	/* end of fastpath */

	BNX2X_PCI_ALLOC(bp->def_status_blk, &bp->def_status_blk_mapping,
			sizeof(struct host_def_status_block));

	BNX2X_PCI_ALLOC(bp->slowpath, &bp->slowpath_mapping,
			sizeof(struct bnx2x_slowpath));

	BNX2X_PCI_ALLOC(bp->t1, &bp->t1_mapping, 64*1024);

	for (i = 0; i < 64*1024; i += 64) {
		*(u64 *)((char *)bp->t1 + i + 56) = 0x0UL;
		*(u64 *)((char *)bp->t1 + i + 3) = 0x0UL;

	/* allocate searcher T2 table
	   we allocate 1/4 of alloc num for T2
	   (which is not entered into the ILT) */
	BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, 16*1024);

	for (i = 0; i < 16*1024; i += 64)
		*(u64 *)((char *)bp->t2 + i + 56) = bp->t2_mapping + i + 64;

	/* now fixup the last line in the block to point to the next block */
	*(u64 *)((char *)bp->t2 + 1024*16 - 8) = bp->t2_mapping;

	/* Timer block array (MAX_CONN*8) phys uncached for now 1024 conns */
	BNX2X_PCI_ALLOC(bp->timers, &bp->timers_mapping, 8*1024);

	/* QM queues (128*MAX_CONN) */
	BNX2X_PCI_ALLOC(bp->qm, &bp->qm_mapping, 128*1024);

	/* Slow path ring */
	BNX2X_PCI_ALLOC(bp->spq, &bp->spq_mapping, BCM_PAGE_SIZE);

#undef BNX2X_PCI_ALLOC
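
/*
 * Illustrative sketch, not part of the driver: how the T2 loop above chains
 * the searcher table.  Each 64-byte line stores the DMA address of the next
 * line in its last 8 bytes, and the final line wraps back to the base, so
 * the hardware can walk the whole block as a linked list.
 */
static void bnx2x_t2_chain_sketch(void *t2, dma_addr_t t2_mapping, u32 size)
{
	u32 i;

	for (i = 0; i < size; i += 64)
		*(u64 *)((char *)t2 + i + 56) = t2_mapping + i + 64;

	/* last line points back to the first one */
	*(u64 *)((char *)t2 + size - 8) = t2_mapping;
}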
static void bnx2x_free_tx_skbs(struct bnx2x *bp)

	for_each_tx_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		u16 bd_cons = fp->tx_bd_cons;
		u16 sw_prod = fp->tx_pkt_prod;
		u16 sw_cons = fp->tx_pkt_cons;

		while (sw_cons != sw_prod) {
			bd_cons = bnx2x_free_tx_pkt(bp, fp, TX_BD(sw_cons));

static void bnx2x_free_rx_skbs(struct bnx2x *bp)

	for_each_rx_queue(bp, j) {
		struct bnx2x_fastpath *fp = &bp->fp[j];

		for (i = 0; i < NUM_RX_BD; i++) {
			struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
			struct sk_buff *skb = rx_buf->skb;

			pci_unmap_single(bp->pdev,
					 pci_unmap_addr(rx_buf, mapping),
					 PCI_DMA_FROMDEVICE);

		if (!fp->disable_tpa)
			bnx2x_free_tpa_pool(bp, fp, CHIP_IS_E1(bp) ?
					    ETH_MAX_AGGREGATION_QUEUES_E1 :
					    ETH_MAX_AGGREGATION_QUEUES_E1H);

static void bnx2x_free_skbs(struct bnx2x *bp)
	bnx2x_free_tx_skbs(bp);
	bnx2x_free_rx_skbs(bp);
static void bnx2x_free_msix_irqs(struct bnx2x *bp)

	free_irq(bp->msix_table[0].vector, bp->dev);
	DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
	   bp->msix_table[0].vector);

	for_each_queue(bp, i) {
		DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq "
		   "state %x\n", i, bp->msix_table[i + offset].vector,
		   bnx2x_fp(bp, i, state));

		free_irq(bp->msix_table[i + offset].vector, &bp->fp[i]);

static void bnx2x_free_irq(struct bnx2x *bp)
	if (bp->flags & USING_MSIX_FLAG) {
		bnx2x_free_msix_irqs(bp);
		pci_disable_msix(bp->pdev);
		bp->flags &= ~USING_MSIX_FLAG;

	} else if (bp->flags & USING_MSI_FLAG) {
		free_irq(bp->pdev->irq, bp->dev);
		pci_disable_msi(bp->pdev);
		bp->flags &= ~USING_MSI_FLAG;

		free_irq(bp->pdev->irq, bp->dev);
static int bnx2x_enable_msix(struct bnx2x *bp)
	int i, rc, offset = 1;

	bp->msix_table[0].entry = igu_vec;
	DP(NETIF_MSG_IFUP, "msix_table[0].entry = %d (slowpath)\n", igu_vec);

	for_each_queue(bp, i) {
		igu_vec = BP_L_ID(bp) + offset + i;
		bp->msix_table[i + offset].entry = igu_vec;
		DP(NETIF_MSG_IFUP, "msix_table[%d].entry = %d "
		   "(fastpath #%u)\n", i + offset, igu_vec, i);

	rc = pci_enable_msix(bp->pdev, &bp->msix_table[0],
			     BNX2X_NUM_QUEUES(bp) + offset);
		DP(NETIF_MSG_IFUP, "MSI-X is not attainable rc %d\n", rc);

	bp->flags |= USING_MSIX_FLAG;

static int bnx2x_req_msix_irqs(struct bnx2x *bp)
	int i, rc, offset = 1;

	rc = request_irq(bp->msix_table[0].vector, bnx2x_msix_sp_int, 0,
			 bp->dev->name, bp->dev);
		BNX2X_ERR("request sp irq failed\n");

	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		sprintf(fp->name, "%s.fp%d", bp->dev->name, i);
		rc = request_irq(bp->msix_table[i + offset].vector,
				 bnx2x_msix_fp_int, 0, fp->name, fp);
			BNX2X_ERR("request fp #%d irq failed rc %d\n", i, rc);
			bnx2x_free_msix_irqs(bp);

		fp->state = BNX2X_FP_STATE_IRQ;

	i = BNX2X_NUM_QUEUES(bp);
		printk(KERN_INFO PFX
		       "%s: using MSI-X IRQs: sp %d fp %d - %d\n",
		       bp->dev->name, bp->msix_table[0].vector,
		       bp->msix_table[offset].vector,
		       bp->msix_table[offset + i - 1].vector);
		printk(KERN_INFO PFX "%s: using MSI-X IRQs: sp %d fp %d\n",
		       bp->dev->name, bp->msix_table[0].vector,
		       bp->msix_table[offset + i - 1].vector);

static int bnx2x_enable_msi(struct bnx2x *bp)

	rc = pci_enable_msi(bp->pdev);
		DP(NETIF_MSG_IFUP, "MSI is not attainable\n");

	bp->flags |= USING_MSI_FLAG;
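
/*
 * Illustrative sketch, not part of the driver: the interrupt-mode fallback
 * order the load path uses - MSI-X with one vector per fastpath plus one
 * slowpath vector, then single-vector MSI, then legacy shared INTx.  This
 * is a simplified rendering of the logic in bnx2x_nic_load() below.
 */
static void bnx2x_irq_fallback_sketch(struct bnx2x *bp)
{
	if (!bnx2x_enable_msix(bp))
		return;		/* MSI-X granted: USING_MSIX_FLAG is set */
	if (!bnx2x_enable_msi(bp))
		return;		/* MSI granted: USING_MSI_FLAG is set */
	/* neither worked: fall back to shared INTx on bp->pdev->irq */
}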
static int bnx2x_req_irq(struct bnx2x *bp)
	unsigned long flags;

	if (bp->flags & USING_MSI_FLAG)

	flags = IRQF_SHARED;

	rc = request_irq(bp->pdev->irq, bnx2x_interrupt, flags,
			 bp->dev->name, bp->dev);
		bnx2x_fp(bp, 0, state) = BNX2X_FP_STATE_IRQ;

static void bnx2x_napi_enable(struct bnx2x *bp)

	for_each_rx_queue(bp, i)
		napi_enable(&bnx2x_fp(bp, i, napi));

static void bnx2x_napi_disable(struct bnx2x *bp)

	for_each_rx_queue(bp, i)
		napi_disable(&bnx2x_fp(bp, i, napi));

static void bnx2x_netif_start(struct bnx2x *bp)
	if (atomic_dec_and_test(&bp->intr_sem)) {
		if (netif_running(bp->dev)) {
			bnx2x_napi_enable(bp);
			bnx2x_int_enable(bp);
			if (bp->state == BNX2X_STATE_OPEN)
				netif_tx_wake_all_queues(bp->dev);

static void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
	bnx2x_int_disable_sync(bp, disable_hw);
	bnx2x_napi_disable(bp);
	if (netif_running(bp->dev)) {
		netif_tx_disable(bp->dev);
		bp->dev->trans_start = jiffies;	/* prevent tx timeout */
/*
 * Init service functions
 */

static void bnx2x_set_mac_addr_e1(struct bnx2x *bp, int set)
	struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
	int port = BP_PORT(bp);

	/*
	 * unicasts 0-31:port0 32-63:port1
	 * multicast 64-127:port0 128-191:port1
	 */
	config->hdr.length = 2;
	config->hdr.offset = port ? 32 : 0;
	config->hdr.client_id = BP_CL_ID(bp);
	config->hdr.reserved1 = 0;

	config->config_table[0].cam_entry.msb_mac_addr =
					swab16(*(u16 *)&bp->dev->dev_addr[0]);
	config->config_table[0].cam_entry.middle_mac_addr =
					swab16(*(u16 *)&bp->dev->dev_addr[2]);
	config->config_table[0].cam_entry.lsb_mac_addr =
					swab16(*(u16 *)&bp->dev->dev_addr[4]);
	config->config_table[0].cam_entry.flags = cpu_to_le16(port);
		config->config_table[0].target_table_entry.flags = 0;
		CAM_INVALIDATE(config->config_table[0]);
	config->config_table[0].target_table_entry.client_id = 0;
	config->config_table[0].target_table_entry.vlan_id = 0;

	DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x)\n",
	   (set ? "setting" : "clearing"),
	   config->config_table[0].cam_entry.msb_mac_addr,
	   config->config_table[0].cam_entry.middle_mac_addr,
	   config->config_table[0].cam_entry.lsb_mac_addr);

	config->config_table[1].cam_entry.msb_mac_addr = 0xffff;
	config->config_table[1].cam_entry.middle_mac_addr = 0xffff;
	config->config_table[1].cam_entry.lsb_mac_addr = 0xffff;
	config->config_table[1].cam_entry.flags = cpu_to_le16(port);
		config->config_table[1].target_table_entry.flags =
				TSTORM_CAM_TARGET_TABLE_ENTRY_BROADCAST;
		CAM_INVALIDATE(config->config_table[1]);
	config->config_table[1].target_table_entry.client_id = 0;
	config->config_table[1].target_table_entry.vlan_id = 0;

	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
		      U64_HI(bnx2x_sp_mapping(bp, mac_config)),
		      U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
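
/*
 * Illustrative sketch, not part of the driver: the MAC packing used by the
 * CAM entries above.  The six address bytes are paired in wire order and
 * byte-swapped, so aa:bb:cc:dd:ee:ff yields msb 0xaabb, middle 0xccdd and
 * lsb 0xeeff on a little-endian host.
 */
static void bnx2x_mac_to_cam_sketch(const u8 *mac,
				    u16 *msb, u16 *mid, u16 *lsb)
{
	*msb = swab16(*(const u16 *)&mac[0]);
	*mid = swab16(*(const u16 *)&mac[2]);
	*lsb = swab16(*(const u16 *)&mac[4]);
}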
static void bnx2x_set_mac_addr_e1h(struct bnx2x *bp, int set)
	struct mac_configuration_cmd_e1h *config =
		(struct mac_configuration_cmd_e1h *)bnx2x_sp(bp, mac_config);

	if (set && (bp->state != BNX2X_STATE_OPEN)) {
		DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);

	/* CAM allocation for E1H
	 * unicasts: by func number
	 * multicast: 20+FUNC*20, 20 each
	 */
	config->hdr.length = 1;
	config->hdr.offset = BP_FUNC(bp);
	config->hdr.client_id = BP_CL_ID(bp);
	config->hdr.reserved1 = 0;

	config->config_table[0].msb_mac_addr =
					swab16(*(u16 *)&bp->dev->dev_addr[0]);
	config->config_table[0].middle_mac_addr =
					swab16(*(u16 *)&bp->dev->dev_addr[2]);
	config->config_table[0].lsb_mac_addr =
					swab16(*(u16 *)&bp->dev->dev_addr[4]);
	config->config_table[0].client_id = BP_L_ID(bp);
	config->config_table[0].vlan_id = 0;
	config->config_table[0].e1hov_id = cpu_to_le16(bp->e1hov);
		config->config_table[0].flags = BP_PORT(bp);
		config->config_table[0].flags =
				MAC_CONFIGURATION_ENTRY_E1H_ACTION_TYPE;

	DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x) E1HOV %d CLID %d\n",
	   (set ? "setting" : "clearing"),
	   config->config_table[0].msb_mac_addr,
	   config->config_table[0].middle_mac_addr,
	   config->config_table[0].lsb_mac_addr, bp->e1hov, BP_L_ID(bp));

	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
		      U64_HI(bnx2x_sp_mapping(bp, mac_config)),
		      U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
static int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
			     int *state_p, int poll)
	/* can take a while if any port is running */

	DP(NETIF_MSG_IFUP, "%s for state to become %x on IDX [%d]\n",
	   poll ? "polling" : "waiting", state, idx);

			bnx2x_rx_int(bp->fp, 10);
			/* if index is different from 0
			 * the reply for some commands will
			 * be on the non default queue
			 */
				bnx2x_rx_int(&bp->fp[idx], 10);

		mb(); /* state is changed by bnx2x_sp_event() */
		if (*state_p == state)

	BNX2X_ERR("timeout %s for state %x on IDX [%d]\n",
		  poll ? "polling" : "waiting", state, idx);
#ifdef BNX2X_STOP_ON_ERROR

static int bnx2x_setup_leading(struct bnx2x *bp)

	/* reset IGU state */
	bnx2x_ack_sb(bp, bp->fp[0].sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);

	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_SETUP, 0, 0, 0, 0);

	/* Wait for completion */
	rc = bnx2x_wait_ramrod(bp, BNX2X_STATE_OPEN, 0, &(bp->state), 0);

static int bnx2x_setup_multi(struct bnx2x *bp, int index)
	struct bnx2x_fastpath *fp = &bp->fp[index];

	/* reset IGU state */
	bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);

	fp->state = BNX2X_FP_STATE_OPENING;
	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CLIENT_SETUP, index, 0,

	/* Wait for completion */
	return bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_OPEN, index,
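
/*
 * Illustrative sketch, not part of the driver: the completion-wait pattern
 * behind bnx2x_wait_ramrod() above.  The state word is updated from the
 * slowpath event handler, so every poll iteration re-reads it behind a
 * memory barrier instead of trusting a cached value.  Simplified: the real
 * code also services the rx ring while polling.
 */
static int bnx2x_wait_state_sketch(volatile int *state_p, int state,
				   int timeout_ms)
{
	while (timeout_ms--) {
		mb();		/* pairs with the writer in the sp event */
		if (*state_p == state)
			return 0;
		msleep(1);
	}
	return -EBUSY;
}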
static int bnx2x_poll(struct napi_struct *napi, int budget);

static void bnx2x_set_int_mode(struct bnx2x *bp)

		bp->num_rx_queues = num_queues;
		bp->num_tx_queues = num_queues;
		DP(NETIF_MSG_IFUP,
		   "set number of queues to %d\n", num_queues);

		if (bp->multi_mode == ETH_RSS_MODE_REGULAR)
			num_queues = min_t(u32, num_online_cpus(),
					   BNX2X_MAX_QUEUES(bp));

		bp->num_rx_queues = num_queues;
		bp->num_tx_queues = num_queues;
		DP(NETIF_MSG_IFUP, "set number of rx queues to %d"
		   " number of tx queues to %d\n",
		   bp->num_rx_queues, bp->num_tx_queues);
		/* if we can't use MSI-X we only need one fp,
		 * so try to enable MSI-X with the requested number of fp's
		 * and fall back to MSI or legacy INTx with one fp
		 */
		if (bnx2x_enable_msix(bp)) {
			/* failed to enable MSI-X */
			bp->num_rx_queues = num_queues;
			bp->num_tx_queues = num_queues;
				BNX2X_ERR("Multi requested but failed to "
					  "enable MSI-X; set number of "
					  "queues to %d\n", num_queues);

	bp->dev->real_num_tx_queues = bp->num_tx_queues;
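
/*
 * Illustrative sketch, not part of the driver: the queue-count rule
 * bnx2x_set_int_mode() applies above in RSS mode - one queue per online
 * CPU, capped by what the chip exposes.
 */
static int bnx2x_rss_queue_count_sketch(struct bnx2x *bp)
{
	return min_t(u32, num_online_cpus(), BNX2X_MAX_QUEUES(bp));
}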
static void bnx2x_set_rx_mode(struct net_device *dev);

/* must be called with rtnl_lock */
static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)

#ifdef BNX2X_STOP_ON_ERROR
	DP(NETIF_MSG_IFUP, "enter load_mode %d\n", load_mode);
	if (unlikely(bp->panic))

	bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;

	bnx2x_set_int_mode(bp);

	if (bnx2x_alloc_mem(bp))

	for_each_rx_queue(bp, i)
		bnx2x_fp(bp, i, disable_tpa) =
					((bp->flags & TPA_ENABLE_FLAG) == 0);

	for_each_rx_queue(bp, i)
		netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),

#ifdef BNX2X_STOP_ON_ERROR
	for_each_rx_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		fp->poll_no_work = 0;
		fp->poll_max_calls = 0;
		fp->poll_complete = 0;

	bnx2x_napi_enable(bp);

	if (bp->flags & USING_MSIX_FLAG) {
		rc = bnx2x_req_msix_irqs(bp);
			pci_disable_msix(bp->pdev);

		if ((rc != -ENOMEM) && (int_mode != INT_MODE_INTx))
			bnx2x_enable_msi(bp);
		rc = bnx2x_req_irq(bp);
			BNX2X_ERR("IRQ request failed rc %d, aborting\n", rc);
			if (bp->flags & USING_MSI_FLAG)
				pci_disable_msi(bp->pdev);
		if (bp->flags & USING_MSI_FLAG) {
			bp->dev->irq = bp->pdev->irq;
			printk(KERN_INFO PFX "%s: using MSI IRQ %d\n",
			       bp->dev->name, bp->pdev->irq);

	/* Send LOAD_REQUEST command to MCP.
	   Returns the type of LOAD command:
	   if it is the first port to be initialized,
	   common blocks should be initialized, otherwise not
	 */
	if (!BP_NOMCP(bp)) {
		load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ);
			BNX2X_ERR("MCP response failure, aborting\n");
		if (load_code == FW_MSG_CODE_DRV_LOAD_REFUSED) {
			rc = -EBUSY; /* other port in diagnostic mode */

		int port = BP_PORT(bp);

		DP(NETIF_MSG_IFUP, "NO MCP load counts before us %d, %d, %d\n",
		   load_count[0], load_count[1], load_count[2]);
		load_count[1 + port]++;
		DP(NETIF_MSG_IFUP, "NO MCP new load counts %d, %d, %d\n",
		   load_count[0], load_count[1], load_count[2]);
		if (load_count[0] == 1)
			load_code = FW_MSG_CODE_DRV_LOAD_COMMON;
		else if (load_count[1 + port] == 1)
			load_code = FW_MSG_CODE_DRV_LOAD_PORT;
		else
			load_code = FW_MSG_CODE_DRV_LOAD_FUNCTION;

	if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
	    (load_code == FW_MSG_CODE_DRV_LOAD_PORT))

	DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);

	rc = bnx2x_init_hw(bp, load_code);
		BNX2X_ERR("HW init failed, aborting\n");

	/* Setup NIC internals and enable interrupts */
	bnx2x_nic_init(bp, load_code);

	/* Send LOAD_DONE command to MCP */
	if (!BP_NOMCP(bp)) {
		load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE);
			BNX2X_ERR("MCP response failure, aborting\n");

	bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;

	rc = bnx2x_setup_leading(bp);
		BNX2X_ERR("Setup leading failed!\n");

	if (CHIP_IS_E1H(bp))
		if (bp->mf_config & FUNC_MF_CFG_FUNC_DISABLED) {
			BNX2X_ERR("!!! mf_cfg function disabled\n");
			bp->state = BNX2X_STATE_DISABLED;

	if (bp->state == BNX2X_STATE_OPEN)
		for_each_nondefault_queue(bp, i) {
			rc = bnx2x_setup_multi(bp, i);

		bnx2x_set_mac_addr_e1(bp, 1);
		bnx2x_set_mac_addr_e1h(bp, 1);

		bnx2x_initial_phy_init(bp);

	/* Start fast path */
	switch (load_mode) {
		/* Tx queues should only be re-enabled */
		netif_tx_wake_all_queues(bp->dev);
		/* Initialize the receive filter. */
		bnx2x_set_rx_mode(bp->dev);

		netif_tx_start_all_queues(bp->dev);
		/* Initialize the receive filter. */
		bnx2x_set_rx_mode(bp->dev);

		/* Initialize the receive filter. */
		bnx2x_set_rx_mode(bp->dev);
		bp->state = BNX2X_STATE_DIAG;

	bnx2x__link_status_update(bp);

	/* start the timer */
	mod_timer(&bp->timer, jiffies + bp->current_interval);

	bnx2x_int_disable_sync(bp, 1);
	if (!BP_NOMCP(bp)) {
		bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP);
		bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);

	/* Free SKBs, SGEs, TPA pool and driver internals */
	bnx2x_free_skbs(bp);
	for_each_rx_queue(bp, i)
		bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);

	bnx2x_napi_disable(bp);
	for_each_rx_queue(bp, i)
		netif_napi_del(&bnx2x_fp(bp, i, napi));

	/* TBD we really need to reset the chip
	   if we want to recover from this */
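
/*
 * Illustrative sketch, not part of the driver: the no-MCP bookkeeping used
 * in bnx2x_nic_load() above.  load_count[0] counts all loaded functions and
 * load_count[1 + port] counts those on one port; the first function overall
 * does COMMON init, the first on its port does PORT init, and everyone else
 * does FUNCTION init only.
 */
static u32 bnx2x_nomcp_load_code_sketch(int port)
{
	load_count[0]++;
	load_count[1 + port]++;

	if (load_count[0] == 1)
		return FW_MSG_CODE_DRV_LOAD_COMMON;
	if (load_count[1 + port] == 1)
		return FW_MSG_CODE_DRV_LOAD_PORT;
	return FW_MSG_CODE_DRV_LOAD_FUNCTION;
}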
static int bnx2x_stop_multi(struct bnx2x *bp, int index)
	struct bnx2x_fastpath *fp = &bp->fp[index];

	/* halt the connection */
	fp->state = BNX2X_FP_STATE_HALTING;
	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, index, 0, fp->cl_id, 0);

	/* Wait for completion */
	rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, index,
	if (rc) /* timeout */

	/* delete cfc entry */
	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CFC_DEL, index, 0, 0, 1);

	/* Wait for completion */
	rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_CLOSED, index,

static int bnx2x_stop_leading(struct bnx2x *bp)
	u16 dsb_sp_prod_idx;
	/* if the other port is handling traffic,
	   this can take a lot of time */

	/* Send HALT ramrod */
	bp->fp[0].state = BNX2X_FP_STATE_HALTING;
	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, 0, 0, BP_CL_ID(bp), 0);

	/* Wait for completion */
	rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, 0,
			       &(bp->fp[0].state), 1);
	if (rc) /* timeout */

	dsb_sp_prod_idx = *bp->dsb_sp_prod;

	/* Send PORT_DELETE ramrod */
	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_DEL, 0, 0, 0, 1);

	/* Wait for completion to arrive on default status block;
	   we are going to reset the chip anyway,
	   so there is not much to do if this times out
	 */
	while (dsb_sp_prod_idx == *bp->dsb_sp_prod) {
			DP(NETIF_MSG_IFDOWN, "timeout waiting for port del "
			   "dsb_sp_prod 0x%x != dsb_sp_prod_idx 0x%x\n",
			   *bp->dsb_sp_prod, dsb_sp_prod_idx);
#ifdef BNX2X_STOP_ON_ERROR

		rmb(); /* Refresh the dsb_sp_prod */

	bp->state = BNX2X_STATE_CLOSING_WAIT4_UNLOAD;
	bp->fp[0].state = BNX2X_FP_STATE_CLOSED;
static void bnx2x_reset_func(struct bnx2x *bp)
	int port = BP_PORT(bp);
	int func = BP_FUNC(bp);

	REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
	REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);

	base = FUNC_ILT_BASE(func);
	for (i = base; i < base + ILT_PER_FUNC; i++)
		bnx2x_ilt_wr(bp, i, 0);

static void bnx2x_reset_port(struct bnx2x *bp)
	int port = BP_PORT(bp);

	REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);

	/* Do not rcv packets to BRB */
	REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK + port*4, 0x0);
	/* Do not direct rcv packets that are not for MCP to the BRB */
	REG_WR(bp, (port ? NIG_REG_LLH1_BRB1_NOT_MCP :
			   NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);

	REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, 0);

	/* Check for BRB port occupancy */
	val = REG_RD(bp, BRB1_REG_PORT_NUM_OCC_BLOCKS_0 + port*4);
		DP(NETIF_MSG_IFDOWN,
		   "BRB1 is not empty %d blocks are occupied\n", val);

	/* TODO: Close Doorbell port? */

static void bnx2x_reset_chip(struct bnx2x *bp, u32 reset_code)
	DP(BNX2X_MSG_MCP, "function %d reset_code %x\n",
	   BP_FUNC(bp), reset_code);

	switch (reset_code) {
	case FW_MSG_CODE_DRV_UNLOAD_COMMON:
		bnx2x_reset_port(bp);
		bnx2x_reset_func(bp);
		bnx2x_reset_common(bp);

	case FW_MSG_CODE_DRV_UNLOAD_PORT:
		bnx2x_reset_port(bp);
		bnx2x_reset_func(bp);

	case FW_MSG_CODE_DRV_UNLOAD_FUNCTION:
		bnx2x_reset_func(bp);

	default:
		BNX2X_ERR("Unknown reset_code (0x%x) from MCP\n", reset_code);
/* must be called with rtnl_lock */
static int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
	int port = BP_PORT(bp);

	bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;

	bp->rx_mode = BNX2X_RX_MODE_NONE;
	bnx2x_set_storm_rx_mode(bp);

	bnx2x_netif_stop(bp, 1);

	del_timer_sync(&bp->timer);
	SHMEM_WR(bp, func_mb[BP_FUNC(bp)].drv_pulse_mb,
		 (DRV_PULSE_ALWAYS_ALIVE | bp->fw_drv_pulse_wr_seq));
	bnx2x_stats_handle(bp, STATS_EVENT_STOP);

	/* Wait until tx fastpath tasks complete */
	for_each_tx_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		while (bnx2x_has_tx_work_unload(fp)) {

			bnx2x_tx_int(fp, 1000);
				BNX2X_ERR("timeout waiting for queue[%d]\n",
#ifdef BNX2X_STOP_ON_ERROR

	/* Give HW time to discard old tx messages */

	if (CHIP_IS_E1(bp)) {
		struct mac_configuration_cmd *config =
						bnx2x_sp(bp, mcast_config);

		bnx2x_set_mac_addr_e1(bp, 0);

		for (i = 0; i < config->hdr.length; i++)
			CAM_INVALIDATE(config->config_table[i]);

		config->hdr.length = i;
		if (CHIP_REV_IS_SLOW(bp))
			config->hdr.offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
		else
			config->hdr.offset = BNX2X_MAX_MULTICAST*(1 + port);
		config->hdr.client_id = BP_CL_ID(bp);
		config->hdr.reserved1 = 0;

		bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
			      U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
			      U64_LO(bnx2x_sp_mapping(bp, mcast_config)), 0);

		REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
		bnx2x_set_mac_addr_e1h(bp, 0);

		for (i = 0; i < MC_HASH_SIZE; i++)
			REG_WR(bp, MC_HASH_OFFSET(bp, i), 0);

	if (unload_mode == UNLOAD_NORMAL)
		reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;

	else if (bp->flags & NO_WOL_FLAG) {
		reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP;
		if (CHIP_IS_E1H(bp))
			REG_WR(bp, MISC_REG_E1HMF_MODE, 0);

	} else if (bp->wol) {
		u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
		u8 *mac_addr = bp->dev->dev_addr;

		/* The mac address is written to entries 1-4 to
		   preserve entry 0 which is used by the PMF */
		u8 entry = (BP_E1HVN(bp) + 1)*8;

		val = (mac_addr[0] << 8) | mac_addr[1];
		EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry, val);

		val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
		      (mac_addr[4] << 8) | mac_addr[5];
		EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry + 4, val);

		reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN;

		reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;

	/* Close multi and leading connections;
	   completions for ramrods are collected in a synchronous way */
	for_each_nondefault_queue(bp, i)
		if (bnx2x_stop_multi(bp, i))

	rc = bnx2x_stop_leading(bp);
		BNX2X_ERR("Stop leading failed!\n");
#ifdef BNX2X_STOP_ON_ERROR

		reset_code = bnx2x_fw_command(bp, reset_code);

		DP(NETIF_MSG_IFDOWN, "NO MCP load counts %d, %d, %d\n",
		   load_count[0], load_count[1], load_count[2]);
		load_count[1 + port]--;
		DP(NETIF_MSG_IFDOWN, "NO MCP new load counts %d, %d, %d\n",
		   load_count[0], load_count[1], load_count[2]);
		if (load_count[0] == 0)
			reset_code = FW_MSG_CODE_DRV_UNLOAD_COMMON;
		else if (load_count[1 + port] == 0)
			reset_code = FW_MSG_CODE_DRV_UNLOAD_PORT;
		else
			reset_code = FW_MSG_CODE_DRV_UNLOAD_FUNCTION;

	if ((reset_code == FW_MSG_CODE_DRV_UNLOAD_COMMON) ||
	    (reset_code == FW_MSG_CODE_DRV_UNLOAD_PORT))
		bnx2x__link_reset(bp);

	/* Reset the chip */
	bnx2x_reset_chip(bp, reset_code);

	/* Report UNLOAD_DONE to MCP */
		bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);

	/* Free SKBs, SGEs, TPA pool and driver internals */
	bnx2x_free_skbs(bp);
	for_each_rx_queue(bp, i)
		bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
	for_each_rx_queue(bp, i)
		netif_napi_del(&bnx2x_fp(bp, i, napi));

	bp->state = BNX2X_STATE_CLOSED;

	netif_carrier_off(bp->dev);
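
/*
 * Illustrative sketch, not part of the driver: how the WoL path above splits
 * a MAC address across the two EMAC_REG_EMAC_MAC_MATCH words - the first
 * word carries bytes 0-1 in its low 16 bits, the second carries bytes 2-5,
 * most significant byte first.
 */
static void bnx2x_wol_mac_match_sketch(const u8 *mac, u32 *hi, u32 *lo)
{
	*hi = (mac[0] << 8) | mac[1];
	*lo = (mac[2] << 24) | (mac[3] << 16) | (mac[4] << 8) | mac[5];
}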
static void bnx2x_reset_task(struct work_struct *work)
	struct bnx2x *bp = container_of(work, struct bnx2x, reset_task);

#ifdef BNX2X_STOP_ON_ERROR
	BNX2X_ERR("reset task called but STOP_ON_ERROR defined"
		  " so reset not done to allow debug dump,\n"
	 KERN_ERR " you will need to reboot when done\n");

	if (!netif_running(bp->dev))
		goto reset_task_exit;

	bnx2x_nic_unload(bp, UNLOAD_NORMAL);
	bnx2x_nic_load(bp, LOAD_NORMAL);

/* end of nic load/unload */
/*
 * Init service functions
 */

static inline u32 bnx2x_get_pretend_reg(struct bnx2x *bp, int func)
	switch (func) {
	case 0: return PXP2_REG_PGL_PRETEND_FUNC_F0;
	case 1: return PXP2_REG_PGL_PRETEND_FUNC_F1;
	case 2: return PXP2_REG_PGL_PRETEND_FUNC_F2;
	case 3: return PXP2_REG_PGL_PRETEND_FUNC_F3;
	case 4: return PXP2_REG_PGL_PRETEND_FUNC_F4;
	case 5: return PXP2_REG_PGL_PRETEND_FUNC_F5;
	case 6: return PXP2_REG_PGL_PRETEND_FUNC_F6;
	case 7: return PXP2_REG_PGL_PRETEND_FUNC_F7;
	default:
		BNX2X_ERR("Unsupported function index: %d\n", func);

static void bnx2x_undi_int_disable_e1h(struct bnx2x *bp, int orig_func)
	u32 reg = bnx2x_get_pretend_reg(bp, orig_func), new_val;

	/* Flush all outstanding writes */

	/* Pretend to be function 0 */

	/* Flush the GRC transaction (in the chip) */
	new_val = REG_RD(bp, reg);
		BNX2X_ERR("Hmmm... Pretend register wasn't updated: (0,%d)!\n",

	/* From now we are in the "like-E1" mode */
	bnx2x_int_disable(bp);

	/* Flush all outstanding writes */

	/* Restore the original function settings */
	REG_WR(bp, reg, orig_func);
	new_val = REG_RD(bp, reg);
	if (new_val != orig_func) {
		BNX2X_ERR("Hmmm... Pretend register wasn't updated: (%d,%d)!\n",
			  orig_func, new_val);

static inline void bnx2x_undi_int_disable(struct bnx2x *bp, int func)
	if (CHIP_IS_E1H(bp))
		bnx2x_undi_int_disable_e1h(bp, func);
	else
		bnx2x_int_disable(bp);
static void __devinit bnx2x_undi_unload(struct bnx2x *bp)

	/* Check if there is any driver already loaded */
	val = REG_RD(bp, MISC_REG_UNPREPARED);

		/* Check if it is the UNDI driver
		 * UNDI driver initializes CID offset for normal bell to 0x7
		 */
		bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
		val = REG_RD(bp, DORQ_REG_NORM_CID_OFST);
			u32 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
			int func = BP_FUNC(bp);

			/* clear the UNDI indication */
			REG_WR(bp, DORQ_REG_NORM_CID_OFST, 0);

			BNX2X_DEV_INFO("UNDI is active! reset device\n");

			/* try unload UNDI on port 0 */
				(SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
				 DRV_MSG_SEQ_NUMBER_MASK);
			reset_code = bnx2x_fw_command(bp, reset_code);

			/* if UNDI is loaded on the other port */
			if (reset_code != FW_MSG_CODE_DRV_UNLOAD_COMMON) {

				/* send "DONE" for previous unload */
				bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);

				/* unload UNDI on port 1 */
					(SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
					 DRV_MSG_SEQ_NUMBER_MASK);
				reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;

				bnx2x_fw_command(bp, reset_code);

			/* now it's safe to release the lock */
			bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);

			bnx2x_undi_int_disable(bp, func);

			/* close input traffic and wait for it */
			/* Do not rcv packets to BRB */
			REG_WR(bp,
			       (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_DRV_MASK :
				NIG_REG_LLH0_BRB1_DRV_MASK), 0x0);
			/* Do not direct rcv packets that are not for MCP to
			   the BRB */
			REG_WR(bp,
			       (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_NOT_MCP :
				NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
			REG_WR(bp,
			       (BP_PORT(bp) ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
				MISC_REG_AEU_MASK_ATTN_FUNC_0), 0);

			/* save NIG port swap info */
			swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
			swap_en = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
				GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
				GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
			/* take the NIG out of reset and restore swap values */
				GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
				MISC_REGISTERS_RESET_REG_1_RST_NIG);
			REG_WR(bp, NIG_REG_PORT_SWAP, swap_val);
			REG_WR(bp, NIG_REG_STRAP_OVERRIDE, swap_en);

			/* send unload done to the MCP */
			bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);

			/* restore our func and fw_seq */
				(SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
				 DRV_MSG_SEQ_NUMBER_MASK);

		bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
	u32 val, val2, val3, val4, id;

	/* Get the chip revision id and number. */
	/* chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3 */
	val = REG_RD(bp, MISC_REG_CHIP_NUM);
	id = ((val & 0xffff) << 16);
	val = REG_RD(bp, MISC_REG_CHIP_REV);
	id |= ((val & 0xf) << 12);
	val = REG_RD(bp, MISC_REG_CHIP_METAL);
	id |= ((val & 0xff) << 4);
	val = REG_RD(bp, MISC_REG_BOND_ID);
	bp->common.chip_id = id;
	bp->link_params.chip_id = bp->common.chip_id;
	BNX2X_DEV_INFO("chip ID is 0x%x\n", id);

	val = (REG_RD(bp, 0x2874) & 0x55);
	if ((bp->common.chip_id & 0x1) ||
	    (CHIP_IS_E1(bp) && val) || (CHIP_IS_E1H(bp) && (val == 0x55))) {
		bp->flags |= ONE_PORT_FLAG;
		BNX2X_DEV_INFO("single port device\n");

	val = REG_RD(bp, MCP_REG_MCPR_NVM_CFG4);
	bp->common.flash_size = (NVRAM_1MB_SIZE <<
				 (val & MCPR_NVM_CFG4_FLASH_SIZE));
	BNX2X_DEV_INFO("flash_size 0x%x (%d)\n",
		       bp->common.flash_size, bp->common.flash_size);

	bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
	bp->link_params.shmem_base = bp->common.shmem_base;
	BNX2X_DEV_INFO("shmem offset is 0x%x\n", bp->common.shmem_base);

	if (!bp->common.shmem_base ||
	    (bp->common.shmem_base < 0xA0000) ||
	    (bp->common.shmem_base >= 0xC0000)) {
		BNX2X_DEV_INFO("MCP not active\n");
		bp->flags |= NO_MCP_FLAG;

	val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
	if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
		!= (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
		BNX2X_ERR("BAD MCP validity signature\n");

	bp->common.hw_config = SHMEM_RD(bp, dev_info.shared_hw_config.config);
	BNX2X_DEV_INFO("hw_config 0x%08x\n", bp->common.hw_config);

	bp->link_params.hw_led_mode = ((bp->common.hw_config &
					SHARED_HW_CFG_LED_MODE_MASK) >>
				       SHARED_HW_CFG_LED_MODE_SHIFT);

	val = SHMEM_RD(bp, dev_info.bc_rev) >> 8;
	bp->common.bc_ver = val;
	BNX2X_DEV_INFO("bc_ver %X\n", val);
	if (val < BNX2X_BC_VER) {
		/* for now only warn
		 * later we might need to enforce this */
		BNX2X_ERR("This driver needs bc_ver %X but found %X,"
			  " please upgrade BC\n", BNX2X_BC_VER, val);

	if (BP_E1HVN(bp) == 0) {
		pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_PMC, &pmc);
		bp->flags |= (pmc & PCI_PM_CAP_PME_D3cold) ? 0 : NO_WOL_FLAG;
	} else {
		/* no WOL capability for E1HVN != 0 */
		bp->flags |= NO_WOL_FLAG;
	}
	BNX2X_DEV_INFO("%sWoL capable\n",
		       (bp->flags & NO_WOL_FLAG) ? "Not " : "");

	val = SHMEM_RD(bp, dev_info.shared_hw_config.part_num);
	val2 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[4]);
	val3 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[8]);
	val4 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[12]);

	printk(KERN_INFO PFX "part number %X-%X-%X-%X\n",
	       val, val2, val3, val4);
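
/*
 * Illustrative sketch, not part of the driver: the chip-id layout assembled
 * above - chip num in bits 16-31, rev in 12-15, metal in 4-11 and bond_id
 * in 0-3.
 */
static u32 bnx2x_chip_id_sketch(u32 num, u32 rev, u32 metal, u32 bond_id)
{
	return ((num & 0xffff) << 16) | ((rev & 0xf) << 12) |
	       ((metal & 0xff) << 4) | (bond_id & 0xf);
}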
static void __devinit bnx2x_link_settings_supported(struct bnx2x *bp,
	int port = BP_PORT(bp);

	switch (switch_cfg) {
	case SWITCH_CFG_1G:
		BNX2X_DEV_INFO("switch_cfg 0x%x (1G)\n", switch_cfg);

			SERDES_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
		switch (ext_phy_type) {
		case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",

			bp->port.supported |= (SUPPORTED_10baseT_Half |
					       SUPPORTED_10baseT_Full |
					       SUPPORTED_100baseT_Half |
					       SUPPORTED_100baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_2500baseX_Full |
					       SUPPORTED_Asym_Pause);

		case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_BCM5482:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (5482)\n",

			bp->port.supported |= (SUPPORTED_10baseT_Half |
					       SUPPORTED_10baseT_Full |
					       SUPPORTED_100baseT_Half |
					       SUPPORTED_100baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_Asym_Pause);

		default:
			BNX2X_ERR("NVRAM config error. "
				  "BAD SerDes ext_phy_config 0x%x\n",
				  bp->link_params.ext_phy_config);

		bp->port.phy_addr = REG_RD(bp, NIG_REG_SERDES0_CTRL_PHY_ADDR +
		BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);

	case SWITCH_CFG_10G:
		BNX2X_DEV_INFO("switch_cfg 0x%x (10G)\n", switch_cfg);

			XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
		switch (ext_phy_type) {
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",

			bp->port.supported |= (SUPPORTED_10baseT_Half |
					       SUPPORTED_10baseT_Full |
					       SUPPORTED_100baseT_Half |
					       SUPPORTED_100baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_2500baseX_Full |
					       SUPPORTED_10000baseT_Full |
					       SUPPORTED_Asym_Pause);

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (8072)\n",

			bp->port.supported |= (SUPPORTED_10000baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_Asym_Pause);

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (8073)\n",

			bp->port.supported |= (SUPPORTED_10000baseT_Full |
					       SUPPORTED_2500baseX_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_Asym_Pause);

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (8705)\n",

			bp->port.supported |= (SUPPORTED_10000baseT_Full |
					       SUPPORTED_Asym_Pause);

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (8706)\n",

			bp->port.supported |= (SUPPORTED_10000baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_Asym_Pause);

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (8726)\n",

			bp->port.supported |= (SUPPORTED_10000baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_Asym_Pause);

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (SFX7101)\n",

			bp->port.supported |= (SUPPORTED_10000baseT_Full |
					       SUPPORTED_Asym_Pause);

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
			BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
				  bp->link_params.ext_phy_config);

		default:
			BNX2X_ERR("NVRAM config error. "
				  "BAD XGXS ext_phy_config 0x%x\n",
				  bp->link_params.ext_phy_config);

		bp->port.phy_addr = REG_RD(bp, NIG_REG_XGXS0_CTRL_PHY_ADDR +
		BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);

	default:
		BNX2X_ERR("BAD switch_cfg link_config 0x%x\n",
			  bp->port.link_config);

	bp->link_params.phy_addr = bp->port.phy_addr;

	/* mask what we support according to speed_cap_mask */
	if (!(bp->link_params.speed_cap_mask &
	      PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF))
		bp->port.supported &= ~SUPPORTED_10baseT_Half;

	if (!(bp->link_params.speed_cap_mask &
	      PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL))
		bp->port.supported &= ~SUPPORTED_10baseT_Full;

	if (!(bp->link_params.speed_cap_mask &
	      PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF))
		bp->port.supported &= ~SUPPORTED_100baseT_Half;

	if (!(bp->link_params.speed_cap_mask &
	      PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL))
		bp->port.supported &= ~SUPPORTED_100baseT_Full;

	if (!(bp->link_params.speed_cap_mask &
	      PORT_HW_CFG_SPEED_CAPABILITY_D0_1G))
		bp->port.supported &= ~(SUPPORTED_1000baseT_Half |
					SUPPORTED_1000baseT_Full);

	if (!(bp->link_params.speed_cap_mask &
	      PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G))
		bp->port.supported &= ~SUPPORTED_2500baseX_Full;

	if (!(bp->link_params.speed_cap_mask &
	      PORT_HW_CFG_SPEED_CAPABILITY_D0_10G))
		bp->port.supported &= ~SUPPORTED_10000baseT_Full;

	BNX2X_DEV_INFO("supported 0x%x\n", bp->port.supported);
static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
	bp->link_params.req_duplex = DUPLEX_FULL;

	switch (bp->port.link_config & PORT_FEATURE_LINK_SPEED_MASK) {
	case PORT_FEATURE_LINK_SPEED_AUTO:
		if (bp->port.supported & SUPPORTED_Autoneg) {
			bp->link_params.req_line_speed = SPEED_AUTO_NEG;
			bp->port.advertising = bp->port.supported;

				XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);

			if ((ext_phy_type ==
			     PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705) ||
			    (ext_phy_type ==
			     PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706)) {
				/* force 10G, no AN */
				bp->link_params.req_line_speed = SPEED_10000;
				bp->port.advertising =
						(ADVERTISED_10000baseT_Full |

			BNX2X_ERR("NVRAM config error. "
				  "Invalid link_config 0x%x"
				  " Autoneg not supported\n",
				  bp->port.link_config);

	case PORT_FEATURE_LINK_SPEED_10M_FULL:
		if (bp->port.supported & SUPPORTED_10baseT_Full) {
			bp->link_params.req_line_speed = SPEED_10;
			bp->port.advertising = (ADVERTISED_10baseT_Full |

			BNX2X_ERR("NVRAM config error. "
				  "Invalid link_config 0x%x"
				  " speed_cap_mask 0x%x\n",
				  bp->port.link_config,
				  bp->link_params.speed_cap_mask);

	case PORT_FEATURE_LINK_SPEED_10M_HALF:
		if (bp->port.supported & SUPPORTED_10baseT_Half) {
			bp->link_params.req_line_speed = SPEED_10;
			bp->link_params.req_duplex = DUPLEX_HALF;
			bp->port.advertising = (ADVERTISED_10baseT_Half |

			BNX2X_ERR("NVRAM config error. "
				  "Invalid link_config 0x%x"
				  " speed_cap_mask 0x%x\n",
				  bp->port.link_config,
				  bp->link_params.speed_cap_mask);

	case PORT_FEATURE_LINK_SPEED_100M_FULL:
		if (bp->port.supported & SUPPORTED_100baseT_Full) {
			bp->link_params.req_line_speed = SPEED_100;
			bp->port.advertising = (ADVERTISED_100baseT_Full |

			BNX2X_ERR("NVRAM config error. "
				  "Invalid link_config 0x%x"
				  " speed_cap_mask 0x%x\n",
				  bp->port.link_config,
				  bp->link_params.speed_cap_mask);

	case PORT_FEATURE_LINK_SPEED_100M_HALF:
		if (bp->port.supported & SUPPORTED_100baseT_Half) {
			bp->link_params.req_line_speed = SPEED_100;
			bp->link_params.req_duplex = DUPLEX_HALF;
			bp->port.advertising = (ADVERTISED_100baseT_Half |

			BNX2X_ERR("NVRAM config error. "
				  "Invalid link_config 0x%x"
				  " speed_cap_mask 0x%x\n",
				  bp->port.link_config,
				  bp->link_params.speed_cap_mask);

	case PORT_FEATURE_LINK_SPEED_1G:
		if (bp->port.supported & SUPPORTED_1000baseT_Full) {
			bp->link_params.req_line_speed = SPEED_1000;
			bp->port.advertising = (ADVERTISED_1000baseT_Full |

			BNX2X_ERR("NVRAM config error. "
				  "Invalid link_config 0x%x"
				  " speed_cap_mask 0x%x\n",
				  bp->port.link_config,
				  bp->link_params.speed_cap_mask);

	case PORT_FEATURE_LINK_SPEED_2_5G:
		if (bp->port.supported & SUPPORTED_2500baseX_Full) {
			bp->link_params.req_line_speed = SPEED_2500;
			bp->port.advertising = (ADVERTISED_2500baseX_Full |

			BNX2X_ERR("NVRAM config error. "
				  "Invalid link_config 0x%x"
				  " speed_cap_mask 0x%x\n",
				  bp->port.link_config,
				  bp->link_params.speed_cap_mask);

	case PORT_FEATURE_LINK_SPEED_10G_CX4:
	case PORT_FEATURE_LINK_SPEED_10G_KX4:
	case PORT_FEATURE_LINK_SPEED_10G_KR:
		if (bp->port.supported & SUPPORTED_10000baseT_Full) {
			bp->link_params.req_line_speed = SPEED_10000;
			bp->port.advertising = (ADVERTISED_10000baseT_Full |

			BNX2X_ERR("NVRAM config error. "
				  "Invalid link_config 0x%x"
				  " speed_cap_mask 0x%x\n",
				  bp->port.link_config,
				  bp->link_params.speed_cap_mask);

	default:
		BNX2X_ERR("NVRAM config error. "
			  "BAD link speed link_config 0x%x\n",
			  bp->port.link_config);
		bp->link_params.req_line_speed = SPEED_AUTO_NEG;
		bp->port.advertising = bp->port.supported;

	bp->link_params.req_flow_ctrl = (bp->port.link_config &
					 PORT_FEATURE_FLOW_CONTROL_MASK);
	if ((bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO) &&
	    !(bp->port.supported & SUPPORTED_Autoneg))
		bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_NONE;

	BNX2X_DEV_INFO("req_line_speed %d req_duplex %d req_flow_ctrl 0x%x"
		       " advertising 0x%x\n",
		       bp->link_params.req_line_speed,
		       bp->link_params.req_duplex,
		       bp->link_params.req_flow_ctrl, bp->port.advertising);
static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp)
	int port = BP_PORT(bp);

	bp->link_params.bp = bp;
	bp->link_params.port = port;

	bp->link_params.serdes_config =
		SHMEM_RD(bp, dev_info.port_hw_config[port].serdes_config);
	bp->link_params.lane_config =
		SHMEM_RD(bp, dev_info.port_hw_config[port].lane_config);
	bp->link_params.ext_phy_config =
		SHMEM_RD(bp,
			 dev_info.port_hw_config[port].external_phy_config);
	bp->link_params.speed_cap_mask =
		SHMEM_RD(bp,
			 dev_info.port_hw_config[port].speed_capability_mask);

	bp->port.link_config =
		SHMEM_RD(bp, dev_info.port_feature_config[port].link_config);

	config = SHMEM_RD(bp, dev_info.port_feature_config[port].config);
	if (config & PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_ENABLED)
		bp->link_params.feature_config_flags |=
				FEATURE_CONFIG_MODULE_ENFORCMENT_ENABLED;
	else
		bp->link_params.feature_config_flags &=
				~FEATURE_CONFIG_MODULE_ENFORCMENT_ENABLED;

	BNX2X_DEV_INFO("serdes_config 0x%08x lane_config 0x%08x\n"
		       KERN_INFO " ext_phy_config 0x%08x speed_cap_mask 0x%08x"
		       " link_config 0x%08x\n",
		       bp->link_params.serdes_config,
		       bp->link_params.lane_config,
		       bp->link_params.ext_phy_config,
		       bp->link_params.speed_cap_mask, bp->port.link_config);

	bp->link_params.switch_cfg = (bp->port.link_config &
				      PORT_FEATURE_CONNECTED_SWITCH_MASK);
	bnx2x_link_settings_supported(bp, bp->link_params.switch_cfg);

	bnx2x_link_settings_requested(bp);

	val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper);
	val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower);
	bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
	bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
	bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
	bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
	bp->dev->dev_addr[4] = (u8)(val >> 8 & 0xff);
	bp->dev->dev_addr[5] = (u8)(val & 0xff);
	memcpy(bp->link_params.mac_addr, bp->dev->dev_addr, ETH_ALEN);
	memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
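
/*
 * Illustrative sketch, not part of the driver: how the mac_upper/mac_lower
 * words read above unpack into a MAC address - mac_upper keeps bytes 0-1 in
 * its low 16 bits and mac_lower keeps bytes 2-5.
 */
static void bnx2x_shmem_mac_sketch(u32 upper, u32 lower, u8 *mac)
{
	mac[0] = (u8)(upper >> 8);
	mac[1] = (u8)upper;
	mac[2] = (u8)(lower >> 24);
	mac[3] = (u8)(lower >> 16);
	mac[4] = (u8)(lower >> 8);
	mac[5] = (u8)lower;
}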
static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
	int func = BP_FUNC(bp);

	bnx2x_get_common_hwinfo(bp);

	if (CHIP_IS_E1H(bp)) {
			SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);

		val = (SHMEM_RD(bp, mf_cfg.func_mf_config[func].e1hov_tag) &
		       FUNC_MF_CFG_E1HOV_TAG_MASK);
		if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {

				BNX2X_DEV_INFO("MF mode E1HOV for func %d is %d "
					       func, bp->e1hov, bp->e1hov);

				BNX2X_DEV_INFO("Single function mode\n");

				BNX2X_ERR("!!! No valid E1HOV for func %d,"
					  " aborting\n", func);

	if (!BP_NOMCP(bp)) {
		bnx2x_get_port_hwinfo(bp);

		bp->fw_seq = (SHMEM_RD(bp, func_mb[func].drv_mb_header) &
			      DRV_MSG_SEQ_NUMBER_MASK);
		BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);

		val2 = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_upper);
		val = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_lower);
		if ((val2 != FUNC_MF_CFG_UPPERMAC_DEFAULT) &&
		    (val != FUNC_MF_CFG_LOWERMAC_DEFAULT)) {
			bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
			bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
			bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
			bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
			bp->dev->dev_addr[4] = (u8)(val >> 8 & 0xff);
			bp->dev->dev_addr[5] = (u8)(val & 0xff);
			memcpy(bp->link_params.mac_addr, bp->dev->dev_addr,
			       ETH_ALEN);
			memcpy(bp->dev->perm_addr, bp->dev->dev_addr,
			       ETH_ALEN);

	/* only supposed to happen on emulation/FPGA */
	BNX2X_ERR("warning random MAC workaround active\n");
	random_ether_addr(bp->dev->dev_addr);
	memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
static int __devinit bnx2x_init_bp(struct bnx2x *bp)
	int func = BP_FUNC(bp);

	/* Disable interrupt handling until HW is initialized */
	atomic_set(&bp->intr_sem, 1);

	mutex_init(&bp->port.phy_mutex);

	INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task);
	INIT_WORK(&bp->reset_task, bnx2x_reset_task);

	rc = bnx2x_get_hwinfo(bp);

	/* need to reset chip if undi was active */
		bnx2x_undi_unload(bp);

	if (CHIP_REV_IS_FPGA(bp))
		printk(KERN_ERR PFX "FPGA detected\n");

	if (BP_NOMCP(bp) && (func == 0))
		printk(KERN_ERR PFX
		       "MCP disabled, must load devices in order!\n");

	/* Set multi queue mode */
	if ((multi_mode != ETH_RSS_MODE_DISABLED) &&
	    ((int_mode == INT_MODE_INTx) || (int_mode == INT_MODE_MSI))) {
		printk(KERN_ERR PFX
		       "Multi disabled since int_mode requested is not MSI-X\n");
		multi_mode = ETH_RSS_MODE_DISABLED;
	bp->multi_mode = multi_mode;

		bp->flags &= ~TPA_ENABLE_FLAG;
		bp->dev->features &= ~NETIF_F_LRO;

		bp->flags |= TPA_ENABLE_FLAG;
		bp->dev->features |= NETIF_F_LRO;

	bp->tx_ring_size = MAX_TX_AVAIL;
	bp->rx_ring_size = MAX_RX_AVAIL;

	timer_interval = (CHIP_REV_IS_SLOW(bp) ? 5*HZ : HZ);
	bp->current_interval = (poll ? poll : timer_interval);

	init_timer(&bp->timer);
	bp->timer.expires = jiffies + bp->current_interval;
	bp->timer.data = (unsigned long) bp;
	bp->timer.function = bnx2x_timer;
/*
 * ethtool service functions
 */

/* All ethtool functions called with rtnl_lock */

static int bnx2x_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
	struct bnx2x *bp = netdev_priv(dev);

	cmd->supported = bp->port.supported;
	cmd->advertising = bp->port.advertising;

	if (netif_carrier_ok(dev)) {
		cmd->speed = bp->link_vars.line_speed;
		cmd->duplex = bp->link_vars.duplex;

		cmd->speed = bp->link_params.req_line_speed;
		cmd->duplex = bp->link_params.req_duplex;

		vn_max_rate = ((bp->mf_config & FUNC_MF_CFG_MAX_BW_MASK) >>
				FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
		if (vn_max_rate < cmd->speed)
			cmd->speed = vn_max_rate;

	if (bp->link_params.switch_cfg == SWITCH_CFG_10G) {
			XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);

		switch (ext_phy_type) {
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
			cmd->port = PORT_FIBRE;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
			cmd->port = PORT_TP;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
			BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
				  bp->link_params.ext_phy_config);

		default:
			DP(NETIF_MSG_LINK, "BAD XGXS ext_phy_config 0x%x\n",
			   bp->link_params.ext_phy_config);

		cmd->port = PORT_TP;

	cmd->phy_address = bp->port.phy_addr;
	cmd->transceiver = XCVR_INTERNAL;

	if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
		cmd->autoneg = AUTONEG_ENABLE;
	else
		cmd->autoneg = AUTONEG_DISABLE;

	DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
	   DP_LEVEL " supported 0x%x advertising 0x%x speed %d\n"
	   DP_LEVEL " duplex %d port %d phy_address %d transceiver %d\n"
	   DP_LEVEL " autoneg %d maxtxpkt %d maxrxpkt %d\n",
	   cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
	   cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
	   cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);
static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
	struct bnx2x *bp = netdev_priv(dev);

	DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
	   DP_LEVEL " supported 0x%x advertising 0x%x speed %d\n"
	   DP_LEVEL " duplex %d port %d phy_address %d transceiver %d\n"
	   DP_LEVEL " autoneg %d maxtxpkt %d maxrxpkt %d\n",
	   cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
	   cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
	   cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);

	if (cmd->autoneg == AUTONEG_ENABLE) {
		if (!(bp->port.supported & SUPPORTED_Autoneg)) {
			DP(NETIF_MSG_LINK, "Autoneg not supported\n");

		/* advertise the requested speed and duplex if supported */
		cmd->advertising &= bp->port.supported;

		bp->link_params.req_line_speed = SPEED_AUTO_NEG;
		bp->link_params.req_duplex = DUPLEX_FULL;
		bp->port.advertising |= (ADVERTISED_Autoneg |

	} else { /* forced speed */
		/* advertise the requested speed and duplex if supported */
		switch (cmd->speed) {
		case SPEED_10:
			if (cmd->duplex == DUPLEX_FULL) {
				if (!(bp->port.supported &
				      SUPPORTED_10baseT_Full)) {
					DP(NETIF_MSG_LINK,
					   "10M full not supported\n");

				advertising = (ADVERTISED_10baseT_Full |

				if (!(bp->port.supported &
				      SUPPORTED_10baseT_Half)) {
					DP(NETIF_MSG_LINK,
					   "10M half not supported\n");

				advertising = (ADVERTISED_10baseT_Half |

		case SPEED_100:
			if (cmd->duplex == DUPLEX_FULL) {
				if (!(bp->port.supported &
				      SUPPORTED_100baseT_Full)) {
					DP(NETIF_MSG_LINK,
					   "100M full not supported\n");

				advertising = (ADVERTISED_100baseT_Full |

				if (!(bp->port.supported &
				      SUPPORTED_100baseT_Half)) {
					DP(NETIF_MSG_LINK,
					   "100M half not supported\n");

				advertising = (ADVERTISED_100baseT_Half |

		case SPEED_1000:
			if (cmd->duplex != DUPLEX_FULL) {
				DP(NETIF_MSG_LINK, "1G half not supported\n");

			if (!(bp->port.supported & SUPPORTED_1000baseT_Full)) {
				DP(NETIF_MSG_LINK, "1G full not supported\n");

			advertising = (ADVERTISED_1000baseT_Full |

		case SPEED_2500:
			if (cmd->duplex != DUPLEX_FULL) {
				DP(NETIF_MSG_LINK,
				   "2.5G half not supported\n");

			if (!(bp->port.supported & SUPPORTED_2500baseX_Full)) {
				DP(NETIF_MSG_LINK,
				   "2.5G full not supported\n");

			advertising = (ADVERTISED_2500baseX_Full |

		case SPEED_10000:
			if (cmd->duplex != DUPLEX_FULL) {
				DP(NETIF_MSG_LINK, "10G half not supported\n");

			if (!(bp->port.supported & SUPPORTED_10000baseT_Full)) {
				DP(NETIF_MSG_LINK, "10G full not supported\n");

			advertising = (ADVERTISED_10000baseT_Full |

		default:
			DP(NETIF_MSG_LINK, "Unsupported speed\n");

		bp->link_params.req_line_speed = cmd->speed;
		bp->link_params.req_duplex = cmd->duplex;
		bp->port.advertising = advertising;

	DP(NETIF_MSG_LINK, "req_line_speed %d\n"
	   DP_LEVEL " req_duplex %d advertising 0x%x\n",
	   bp->link_params.req_line_speed, bp->link_params.req_duplex,
	   bp->port.advertising);

	if (netif_running(dev)) {
		bnx2x_stats_handle(bp, STATS_EVENT_STOP);
#define PHY_FW_VER_LEN 10

static void bnx2x_get_drvinfo(struct net_device *dev,
			      struct ethtool_drvinfo *info)
	struct bnx2x *bp = netdev_priv(dev);
	u8 phy_fw_ver[PHY_FW_VER_LEN];

	strcpy(info->driver, DRV_MODULE_NAME);
	strcpy(info->version, DRV_MODULE_VERSION);

	phy_fw_ver[0] = '\0';
		bnx2x_acquire_phy_lock(bp);
		bnx2x_get_ext_phy_fw_version(&bp->link_params,
					     (bp->state != BNX2X_STATE_CLOSED),
					     phy_fw_ver, PHY_FW_VER_LEN);
		bnx2x_release_phy_lock(bp);

	snprintf(info->fw_version, 32, "BC:%d.%d.%d%s%s",
		 (bp->common.bc_ver & 0xff0000) >> 16,
		 (bp->common.bc_ver & 0xff00) >> 8,
		 (bp->common.bc_ver & 0xff),
		 ((phy_fw_ver[0] != '\0') ? " PHY:" : ""), phy_fw_ver);
	strcpy(info->bus_info, pci_name(bp->pdev));
	info->n_stats = BNX2X_NUM_STATS;
	info->testinfo_len = BNX2X_NUM_TESTS;
	info->eedump_len = bp->common.flash_size;
	info->regdump_len = 0;
static void bnx2x_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
	struct bnx2x *bp = netdev_priv(dev);

	if (bp->flags & NO_WOL_FLAG) {

		wol->supported = WAKE_MAGIC;
			wol->wolopts = WAKE_MAGIC;

	memset(&wol->sopass, 0, sizeof(wol->sopass));

static int bnx2x_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
	struct bnx2x *bp = netdev_priv(dev);

	if (wol->wolopts & ~WAKE_MAGIC)

	if (wol->wolopts & WAKE_MAGIC) {
		if (bp->flags & NO_WOL_FLAG)

static u32 bnx2x_get_msglevel(struct net_device *dev)
	struct bnx2x *bp = netdev_priv(dev);

	return bp->msglevel;

static void bnx2x_set_msglevel(struct net_device *dev, u32 level)
	struct bnx2x *bp = netdev_priv(dev);

	if (capable(CAP_NET_ADMIN))
		bp->msglevel = level;

static int bnx2x_nway_reset(struct net_device *dev)
	struct bnx2x *bp = netdev_priv(dev);

	if (netif_running(dev)) {
		bnx2x_stats_handle(bp, STATS_EVENT_STOP);

static int bnx2x_get_eeprom_len(struct net_device *dev)
	struct bnx2x *bp = netdev_priv(dev);

	return bp->common.flash_size;
static int bnx2x_acquire_nvram_lock(struct bnx2x *bp)
	int port = BP_PORT(bp);

	/* adjust timeout for emulation/FPGA */
	count = NVRAM_TIMEOUT_COUNT;
	if (CHIP_REV_IS_SLOW(bp))

	/* request access to nvram interface */
	REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
	       (MCPR_NVM_SW_ARB_ARB_REQ_SET1 << port));

	for (i = 0; i < count*10; i++) {
		val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
		if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))

	if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))) {
		DP(BNX2X_MSG_NVM, "cannot get access to nvram interface\n");

static int bnx2x_release_nvram_lock(struct bnx2x *bp)
	int port = BP_PORT(bp);

	/* adjust timeout for emulation/FPGA */
	count = NVRAM_TIMEOUT_COUNT;
	if (CHIP_REV_IS_SLOW(bp))

	/* relinquish nvram interface */
	REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
	       (MCPR_NVM_SW_ARB_ARB_REQ_CLR1 << port));

	for (i = 0; i < count*10; i++) {
		val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
		if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)))

	if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)) {
		DP(BNX2X_MSG_NVM, "cannot free access to nvram interface\n");

static void bnx2x_enable_nvram_access(struct bnx2x *bp)

	val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);

	/* enable both bits, even on read */
	REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
	       (val | MCPR_NVM_ACCESS_ENABLE_EN |
		MCPR_NVM_ACCESS_ENABLE_WR_EN));

static void bnx2x_disable_nvram_access(struct bnx2x *bp)

	val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);

	/* disable both bits, even after read */
	REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
	       (val & ~(MCPR_NVM_ACCESS_ENABLE_EN |
			MCPR_NVM_ACCESS_ENABLE_WR_EN)));
static int bnx2x_nvram_read_dword(struct bnx2x *bp, u32 offset, u32 *ret_val,

	/* build the command word */
	cmd_flags |= MCPR_NVM_COMMAND_DOIT;

	/* need to clear DONE bit separately */
	REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);

	/* address of the NVRAM to read from */
	REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
	       (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));

	/* issue a read command */
	REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);

	/* adjust timeout for emulation/FPGA */
	count = NVRAM_TIMEOUT_COUNT;
	if (CHIP_REV_IS_SLOW(bp))

	/* wait for completion */

	for (i = 0; i < count; i++) {

		val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);

		if (val & MCPR_NVM_COMMAND_DONE) {
			val = REG_RD(bp, MCP_REG_MCPR_NVM_READ);
			/* we read nvram data in cpu order,
			 * but ethtool sees it as an array of bytes;
			 * converting to big-endian does the work */
			val = cpu_to_be32(val);

static int bnx2x_nvram_read(struct bnx2x *bp, u32 offset, u8 *ret_buf,

	if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
		DP(BNX2X_MSG_NVM,
		   "Invalid parameter: offset 0x%x buf_size 0x%x\n",

	if (offset + buf_size > bp->common.flash_size) {
		DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
		   " buf_size (0x%x) > flash_size (0x%x)\n",
		   offset, buf_size, bp->common.flash_size);

	/* request access to nvram interface */
	rc = bnx2x_acquire_nvram_lock(bp);

	/* enable access to nvram interface */
	bnx2x_enable_nvram_access(bp);

	/* read the first word(s) */
	cmd_flags = MCPR_NVM_COMMAND_FIRST;
	while ((buf_size > sizeof(u32)) && (rc == 0)) {
		rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
		memcpy(ret_buf, &val, 4);

		/* advance to the next dword */
		offset += sizeof(u32);
		ret_buf += sizeof(u32);
		buf_size -= sizeof(u32);

		cmd_flags |= MCPR_NVM_COMMAND_LAST;
		rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
		memcpy(ret_buf, &val, 4);

	/* disable access to nvram interface */
	bnx2x_disable_nvram_access(bp);
	bnx2x_release_nvram_lock(bp);
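
/*
 * Illustrative sketch, not part of the driver: the byte-order step in
 * bnx2x_nvram_read_dword() above.  The NVRAM word arrives in CPU order but
 * ethtool treats the buffer as a plain byte stream, so the value is stored
 * big-endian before being copied out byte by byte.
 */
static void bnx2x_nvram_word_to_bytes_sketch(u32 val, u8 *out)
{
	__be32 be = cpu_to_be32(val);	/* fix the byte order once */

	memcpy(out, &be, sizeof(be));	/* expose as bytes, MSB first */
}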
8672 static int bnx2x_get_eeprom(struct net_device *dev,
8673 struct ethtool_eeprom *eeprom, u8 *eebuf)
8675 struct bnx2x *bp = netdev_priv(dev);
8678 if (!netif_running(dev))
8681 DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
8682 DP_LEVEL " magic 0x%x offset 0x%x (%d) len 0x%x (%d)\n",
8683 eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
8684 eeprom->len, eeprom->len);
8686 /* parameters already validated in ethtool_get_eeprom */
8688 rc = bnx2x_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
8693 static int bnx2x_nvram_write_dword(struct bnx2x *bp, u32 offset, u32 val,
8698 /* build the command word */
8699 cmd_flags |= MCPR_NVM_COMMAND_DOIT | MCPR_NVM_COMMAND_WR;
8701 /* need to clear DONE bit separately */
8702 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);
8704 /* write the data */
8705 REG_WR(bp, MCP_REG_MCPR_NVM_WRITE, val);
8707 /* address of the NVRAM to write to */
8708 REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
8709 (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));
8711 /* issue the write command */
8712 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);
8714 /* adjust timeout for emulation/FPGA */
8715 count = NVRAM_TIMEOUT_COUNT;
8716 if (CHIP_REV_IS_SLOW(bp))
8719 /* wait for completion */
8721 for (i = 0; i < count; i++) {
8723 val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
8724 if (val & MCPR_NVM_COMMAND_DONE) {
8733 #define BYTE_OFFSET(offset) (8 * (offset & 0x03))
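/*
 * BYTE_OFFSET() converts a byte offset into the bit position of that
 * byte within its aligned dword.  Worked example: for offset 0x102,
 * offset & 0x03 == 2, so BYTE_OFFSET(0x102) == 16 and
 * bnx2x_nvram_write1() below replaces the byte at index 2 of the dword
 * read from aligned offset 0x100 (bits 23:16 of its big-endian image).
 */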
8735 static int bnx2x_nvram_write1(struct bnx2x *bp, u32 offset, u8 *data_buf,
8743 if (offset + buf_size > bp->common.flash_size) {
8744 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
8745 " buf_size (0x%x) > flash_size (0x%x)\n",
8746 offset, buf_size, bp->common.flash_size);
8750 /* request access to nvram interface */
8751 rc = bnx2x_acquire_nvram_lock(bp);
8755 /* enable access to nvram interface */
8756 bnx2x_enable_nvram_access(bp);
8758 cmd_flags = (MCPR_NVM_COMMAND_FIRST | MCPR_NVM_COMMAND_LAST);
8759 align_offset = (offset & ~0x03);
8760 rc = bnx2x_nvram_read_dword(bp, align_offset, &val, cmd_flags);
8763 val &= ~(0xff << BYTE_OFFSET(offset));
8764 val |= (*data_buf << BYTE_OFFSET(offset));
8766 /* nvram data is returned as an array of bytes
8767 * convert it back to cpu order */
8768 val = be32_to_cpu(val);
8770 rc = bnx2x_nvram_write_dword(bp, align_offset, val,
8774 /* disable access to nvram interface */
8775 bnx2x_disable_nvram_access(bp);
8776 bnx2x_release_nvram_lock(bp);
8781 static int bnx2x_nvram_write(struct bnx2x *bp, u32 offset, u8 *data_buf,
8789 if (buf_size == 1) /* ethtool */
8790 return bnx2x_nvram_write1(bp, offset, data_buf, buf_size);
8792 if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
8794 "Invalid parameter: offset 0x%x buf_size 0x%x\n",
8799 if (offset + buf_size > bp->common.flash_size) {
8800 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
8801 " buf_size (0x%x) > flash_size (0x%x)\n",
8802 offset, buf_size, bp->common.flash_size);
8806 /* request access to nvram interface */
8807 rc = bnx2x_acquire_nvram_lock(bp);
8811 /* enable access to nvram interface */
8812 bnx2x_enable_nvram_access(bp);
8815 cmd_flags = MCPR_NVM_COMMAND_FIRST;
8816 while ((written_so_far < buf_size) && (rc == 0)) {
8817 if (written_so_far == (buf_size - sizeof(u32)))
8818 cmd_flags |= MCPR_NVM_COMMAND_LAST;
8819 else if (((offset + 4) % NVRAM_PAGE_SIZE) == 0)
8820 cmd_flags |= MCPR_NVM_COMMAND_LAST;
8821 else if ((offset % NVRAM_PAGE_SIZE) == 0)
8822 cmd_flags |= MCPR_NVM_COMMAND_FIRST;
8824 memcpy(&val, data_buf, 4);
8826 rc = bnx2x_nvram_write_dword(bp, offset, val, cmd_flags);
8828 /* advance to the next dword */
8829 offset += sizeof(u32);
8830 data_buf += sizeof(u32);
8831 written_so_far += sizeof(u32);
8835 /* disable access to nvram interface */
8836 bnx2x_disable_nvram_access(bp);
8837 bnx2x_release_nvram_lock(bp);
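/*
 * Command-flag bookkeeping in the write loop above, sketched assuming
 * NVRAM_PAGE_SIZE is 256: LAST is raised for the final dword of the
 * buffer and for the last dword of each page, FIRST for the dword that
 * opens a new page.  A 512-byte write at offset 0 thus issues FIRST at
 * 0x000, LAST at 0x0fc, FIRST again at 0x100 and LAST at 0x1fc.
 */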
8842 static int bnx2x_set_eeprom(struct net_device *dev,
8843 struct ethtool_eeprom *eeprom, u8 *eebuf)
8845 struct bnx2x *bp = netdev_priv(dev);
8848 if (!netif_running(dev))
8851 DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
8852 DP_LEVEL " magic 0x%x offset 0x%x (%d) len 0x%x (%d)\n",
8853 eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
8854 eeprom->len, eeprom->len);
8856 /* parameters already validated in ethtool_set_eeprom */
8858 /* If the magic number is PHY (0x00504859, ASCII "PHY") upgrade the PHY FW */
8859 if (eeprom->magic == 0x00504859)
8862 bnx2x_acquire_phy_lock(bp);
8863 rc = bnx2x_flash_download(bp, BP_PORT(bp),
8864 bp->link_params.ext_phy_config,
8865 (bp->state != BNX2X_STATE_CLOSED),
8866 eebuf, eeprom->len);
8867 if ((bp->state == BNX2X_STATE_OPEN) ||
8868 (bp->state == BNX2X_STATE_DISABLED)) {
8869 rc |= bnx2x_link_reset(&bp->link_params,
8871 rc |= bnx2x_phy_init(&bp->link_params,
8874 bnx2x_release_phy_lock(bp);
8876 } else /* Only the PMF can access the PHY */
8879 rc = bnx2x_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
8884 static int bnx2x_get_coalesce(struct net_device *dev,
8885 struct ethtool_coalesce *coal)
8887 struct bnx2x *bp = netdev_priv(dev);
8889 memset(coal, 0, sizeof(struct ethtool_coalesce));
8891 coal->rx_coalesce_usecs = bp->rx_ticks;
8892 coal->tx_coalesce_usecs = bp->tx_ticks;
8897 static int bnx2x_set_coalesce(struct net_device *dev,
8898 struct ethtool_coalesce *coal)
8900 struct bnx2x *bp = netdev_priv(dev);
8902 bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
8903 if (bp->rx_ticks > 3000)
8904 bp->rx_ticks = 3000;
8906 bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
8907 if (bp->tx_ticks > 0x3000)
8908 bp->tx_ticks = 0x3000;
8910 if (netif_running(dev))
8911 bnx2x_update_coalesce(bp);
8916 static void bnx2x_get_ringparam(struct net_device *dev,
8917 struct ethtool_ringparam *ering)
8919 struct bnx2x *bp = netdev_priv(dev);
8921 ering->rx_max_pending = MAX_RX_AVAIL;
8922 ering->rx_mini_max_pending = 0;
8923 ering->rx_jumbo_max_pending = 0;
8925 ering->rx_pending = bp->rx_ring_size;
8926 ering->rx_mini_pending = 0;
8927 ering->rx_jumbo_pending = 0;
8929 ering->tx_max_pending = MAX_TX_AVAIL;
8930 ering->tx_pending = bp->tx_ring_size;
8933 static int bnx2x_set_ringparam(struct net_device *dev,
8934 struct ethtool_ringparam *ering)
8936 struct bnx2x *bp = netdev_priv(dev);
8939 if ((ering->rx_pending > MAX_RX_AVAIL) ||
8940 (ering->tx_pending > MAX_TX_AVAIL) ||
8941 (ering->tx_pending <= MAX_SKB_FRAGS + 4))
8944 bp->rx_ring_size = ering->rx_pending;
8945 bp->tx_ring_size = ering->tx_pending;
8947 if (netif_running(dev)) {
8948 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
8949 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
8955 static void bnx2x_get_pauseparam(struct net_device *dev,
8956 struct ethtool_pauseparam *epause)
8958 struct bnx2x *bp = netdev_priv(dev);
8960 epause->autoneg = (bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO) &&
8961 (bp->link_params.req_line_speed == SPEED_AUTO_NEG);
8963 epause->rx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) ==
8964 BNX2X_FLOW_CTRL_RX);
8965 epause->tx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX) ==
8966 BNX2X_FLOW_CTRL_TX);
8968 DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
8969 DP_LEVEL " autoneg %d rx_pause %d tx_pause %d\n",
8970 epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
8973 static int bnx2x_set_pauseparam(struct net_device *dev,
8974 struct ethtool_pauseparam *epause)
8976 struct bnx2x *bp = netdev_priv(dev);
8981 DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
8982 DP_LEVEL " autoneg %d rx_pause %d tx_pause %d\n",
8983 epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
8985 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO;
8987 if (epause->rx_pause)
8988 bp->link_params.req_flow_ctrl |= BNX2X_FLOW_CTRL_RX;
8990 if (epause->tx_pause)
8991 bp->link_params.req_flow_ctrl |= BNX2X_FLOW_CTRL_TX;
8993 if (bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO)
8994 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_NONE;
8996 if (epause->autoneg) {
8997 if (!(bp->port.supported & SUPPORTED_Autoneg)) {
8998 DP(NETIF_MSG_LINK, "autoneg not supported\n");
9002 if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
9003 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO;
9007 "req_flow_ctrl 0x%x\n", bp->link_params.req_flow_ctrl);
9009 if (netif_running(dev)) {
9010 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
9017 static int bnx2x_set_flags(struct net_device *dev, u32 data)
9019 struct bnx2x *bp = netdev_priv(dev);
9023 /* TPA requires Rx CSUM offloading */
9024 if ((data & ETH_FLAG_LRO) && bp->rx_csum) {
9025 if (!(dev->features & NETIF_F_LRO)) {
9026 dev->features |= NETIF_F_LRO;
9027 bp->flags |= TPA_ENABLE_FLAG;
9031 } else if (dev->features & NETIF_F_LRO) {
9032 dev->features &= ~NETIF_F_LRO;
9033 bp->flags &= ~TPA_ENABLE_FLAG;
9037 if (changed && netif_running(dev)) {
9038 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
9039 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
9045 static u32 bnx2x_get_rx_csum(struct net_device *dev)
9047 struct bnx2x *bp = netdev_priv(dev);
9052 static int bnx2x_set_rx_csum(struct net_device *dev, u32 data)
9054 struct bnx2x *bp = netdev_priv(dev);
9059 /* Disable TPA when Rx CSUM is disabled; otherwise all
9060 TPA'ed packets will be discarded due to a wrong TCP CSUM */
9062 u32 flags = ethtool_op_get_flags(dev);
9064 rc = bnx2x_set_flags(dev, (flags & ~ETH_FLAG_LRO));
9070 static int bnx2x_set_tso(struct net_device *dev, u32 data)
9073 dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
9074 dev->features |= NETIF_F_TSO6;
9076 dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO_ECN);
9077 dev->features &= ~NETIF_F_TSO6;
9083 static const struct {
9084 char string[ETH_GSTRING_LEN];
9085 } bnx2x_tests_str_arr[BNX2X_NUM_TESTS] = {
9086 { "register_test (offline)" },
9087 { "memory_test (offline)" },
9088 { "loopback_test (offline)" },
9089 { "nvram_test (online)" },
9090 { "interrupt_test (online)" },
9091 { "link_test (online)" },
9092 { "idle check (online)" }
9095 static int bnx2x_self_test_count(struct net_device *dev)
9097 return BNX2X_NUM_TESTS;
9100 static int bnx2x_test_registers(struct bnx2x *bp)
9102 int idx, i, rc = -ENODEV;
9104 int port = BP_PORT(bp);
9105 static const struct {
9110 /* 0 */ { BRB1_REG_PAUSE_LOW_THRESHOLD_0, 4, 0x000003ff },
9111 { DORQ_REG_DB_ADDR0, 4, 0xffffffff },
9112 { HC_REG_AGG_INT_0, 4, 0x000003ff },
9113 { PBF_REG_MAC_IF0_ENABLE, 4, 0x00000001 },
9114 { PBF_REG_P0_INIT_CRD, 4, 0x000007ff },
9115 { PRS_REG_CID_PORT_0, 4, 0x00ffffff },
9116 { PXP2_REG_PSWRQ_CDU0_L2P, 4, 0x000fffff },
9117 { PXP2_REG_RQ_CDU0_EFIRST_MEM_ADDR, 8, 0x0003ffff },
9118 { PXP2_REG_PSWRQ_TM0_L2P, 4, 0x000fffff },
9119 { PXP2_REG_RQ_USDM0_EFIRST_MEM_ADDR, 8, 0x0003ffff },
9120 /* 10 */ { PXP2_REG_PSWRQ_TSDM0_L2P, 4, 0x000fffff },
9121 { QM_REG_CONNNUM_0, 4, 0x000fffff },
9122 { TM_REG_LIN0_MAX_ACTIVE_CID, 4, 0x0003ffff },
9123 { SRC_REG_KEYRSS0_0, 40, 0xffffffff },
9124 { SRC_REG_KEYRSS0_7, 40, 0xffffffff },
9125 { XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD00, 4, 0x00000001 },
9126 { XCM_REG_WU_DA_CNT_CMD00, 4, 0x00000003 },
9127 { XCM_REG_GLB_DEL_ACK_MAX_CNT_0, 4, 0x000000ff },
9128 { NIG_REG_EGRESS_MNG0_FIFO, 20, 0xffffffff },
9129 { NIG_REG_LLH0_T_BIT, 4, 0x00000001 },
9130 /* 20 */ { NIG_REG_EMAC0_IN_EN, 4, 0x00000001 },
9131 { NIG_REG_BMAC0_IN_EN, 4, 0x00000001 },
9132 { NIG_REG_XCM0_OUT_EN, 4, 0x00000001 },
9133 { NIG_REG_BRB0_OUT_EN, 4, 0x00000001 },
9134 { NIG_REG_LLH0_XCM_MASK, 4, 0x00000007 },
9135 { NIG_REG_LLH0_ACPI_PAT_6_LEN, 68, 0x000000ff },
9136 { NIG_REG_LLH0_ACPI_PAT_0_CRC, 68, 0xffffffff },
9137 { NIG_REG_LLH0_DEST_MAC_0_0, 160, 0xffffffff },
9138 { NIG_REG_LLH0_DEST_IP_0_1, 160, 0xffffffff },
9139 { NIG_REG_LLH0_IPV4_IPV6_0, 160, 0x00000001 },
9140 /* 30 */ { NIG_REG_LLH0_DEST_UDP_0, 160, 0x0000ffff },
9141 { NIG_REG_LLH0_DEST_TCP_0, 160, 0x0000ffff },
9142 { NIG_REG_LLH0_VLAN_ID_0, 160, 0x00000fff },
9143 { NIG_REG_XGXS_SERDES0_MODE_SEL, 4, 0x00000001 },
9144 { NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0, 4, 0x00000001 },
9145 { NIG_REG_STATUS_INTERRUPT_PORT0, 4, 0x07ffffff },
9146 { NIG_REG_XGXS0_CTRL_EXTREMOTEMDIOST, 24, 0x00000001 },
9147 { NIG_REG_SERDES0_CTRL_PHY_ADDR, 16, 0x0000001f },
9149 { 0xffffffff, 0, 0x00000000 }
9152 if (!netif_running(bp->dev))
9155 /* Run the test twice:
9156 first writing 0x00000000, then writing 0xffffffff */
9157 for (idx = 0; idx < 2; idx++) {
9164 wr_val = 0xffffffff;
9168 for (i = 0; reg_tbl[i].offset0 != 0xffffffff; i++) {
9169 u32 offset, mask, save_val, val;
9171 offset = reg_tbl[i].offset0 + port*reg_tbl[i].offset1;
9172 mask = reg_tbl[i].mask;
9174 save_val = REG_RD(bp, offset);
9176 REG_WR(bp, offset, wr_val);
9177 val = REG_RD(bp, offset);
9179 /* Restore the original register's value */
9180 REG_WR(bp, offset, save_val);
9182 /* verify the value matches the expected one */
9183 if ((val & mask) != (wr_val & mask))
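/*
 * The two passes above write all-zeros and then all-ones; only the
 * writable bits are compared, since read-only and reserved bits are
 * masked out via reg_tbl[].mask, and offset1 strides each entry to the
 * tested port's copy of the register.
 */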
9194 static int bnx2x_test_memory(struct bnx2x *bp)
9196 int i, j, rc = -ENODEV;
9198 static const struct {
9202 { CCM_REG_XX_DESCR_TABLE, CCM_REG_XX_DESCR_TABLE_SIZE },
9203 { CFC_REG_ACTIVITY_COUNTER, CFC_REG_ACTIVITY_COUNTER_SIZE },
9204 { CFC_REG_LINK_LIST, CFC_REG_LINK_LIST_SIZE },
9205 { DMAE_REG_CMD_MEM, DMAE_REG_CMD_MEM_SIZE },
9206 { TCM_REG_XX_DESCR_TABLE, TCM_REG_XX_DESCR_TABLE_SIZE },
9207 { UCM_REG_XX_DESCR_TABLE, UCM_REG_XX_DESCR_TABLE_SIZE },
9208 { XCM_REG_XX_DESCR_TABLE, XCM_REG_XX_DESCR_TABLE_SIZE },
9212 static const struct {
9218 { "CCM_PRTY_STS", CCM_REG_CCM_PRTY_STS, 0x3ffc0, 0 },
9219 { "CFC_PRTY_STS", CFC_REG_CFC_PRTY_STS, 0x2, 0x2 },
9220 { "DMAE_PRTY_STS", DMAE_REG_DMAE_PRTY_STS, 0, 0 },
9221 { "TCM_PRTY_STS", TCM_REG_TCM_PRTY_STS, 0x3ffc0, 0 },
9222 { "UCM_PRTY_STS", UCM_REG_UCM_PRTY_STS, 0x3ffc0, 0 },
9223 { "XCM_PRTY_STS", XCM_REG_XCM_PRTY_STS, 0x3ffc1, 0 },
9225 { NULL, 0xffffffff, 0, 0 }
9228 if (!netif_running(bp->dev))
9231 /* Go through all the memories */
9232 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++)
9233 for (j = 0; j < mem_tbl[i].size; j++)
9234 REG_RD(bp, mem_tbl[i].offset + j*4);
9236 /* Check the parity status */
9237 for (i = 0; prty_tbl[i].offset != 0xffffffff; i++) {
9238 val = REG_RD(bp, prty_tbl[i].offset);
9239 if ((CHIP_IS_E1(bp) && (val & ~(prty_tbl[i].e1_mask))) ||
9240 (CHIP_IS_E1H(bp) && (val & ~(prty_tbl[i].e1h_mask)))) {
9242 "%s is 0x%x\n", prty_tbl[i].name, val);
9253 static void bnx2x_wait_for_link(struct bnx2x *bp, u8 link_up)
9258 while (bnx2x_link_test(bp) && cnt--)
9262 static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode, u8 link_up)
9264 unsigned int pkt_size, num_pkts, i;
9265 struct sk_buff *skb;
9266 unsigned char *packet;
9267 struct bnx2x_fastpath *fp = &bp->fp[0];
9268 u16 tx_start_idx, tx_idx;
9269 u16 rx_start_idx, rx_idx;
9271 struct sw_tx_bd *tx_buf;
9272 struct eth_tx_bd *tx_bd;
9274 union eth_rx_cqe *cqe;
9276 struct sw_rx_bd *rx_buf;
9280 if (loopback_mode == BNX2X_MAC_LOOPBACK) {
9281 bp->link_params.loopback_mode = LOOPBACK_BMAC;
9282 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
9284 } else if (loopback_mode == BNX2X_PHY_LOOPBACK) {
9286 bp->link_params.loopback_mode = LOOPBACK_XGXS_10;
9287 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
9288 /* wait until link state is restored */
9290 while (cnt-- && bnx2x_test_link(&bp->link_params,
9297 skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
9300 goto test_loopback_exit;
9302 packet = skb_put(skb, pkt_size);
9303 memcpy(packet, bp->dev->dev_addr, ETH_ALEN);
9304 memset(packet + ETH_ALEN, 0, (ETH_HLEN - ETH_ALEN));
9305 for (i = ETH_HLEN; i < pkt_size; i++)
9306 packet[i] = (unsigned char) (i & 0xff);
9309 tx_start_idx = le16_to_cpu(*fp->tx_cons_sb);
9310 rx_start_idx = le16_to_cpu(*fp->rx_cons_sb);
9312 pkt_prod = fp->tx_pkt_prod++;
9313 tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)];
9314 tx_buf->first_bd = fp->tx_bd_prod;
9317 tx_bd = &fp->tx_desc_ring[TX_BD(fp->tx_bd_prod)];
9318 mapping = pci_map_single(bp->pdev, skb->data,
9319 skb_headlen(skb), PCI_DMA_TODEVICE);
9320 tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
9321 tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
9322 tx_bd->nbd = cpu_to_le16(1);
9323 tx_bd->nbytes = cpu_to_le16(skb_headlen(skb));
9324 tx_bd->vlan = cpu_to_le16(pkt_prod);
9325 tx_bd->bd_flags.as_bitfield = (ETH_TX_BD_FLAGS_START_BD |
9326 ETH_TX_BD_FLAGS_END_BD);
9327 tx_bd->general_data = ((UNICAST_ADDRESS <<
9328 ETH_TX_BD_ETH_ADDR_TYPE_SHIFT) | 1);
9332 fp->hw_tx_prods->bds_prod =
9333 cpu_to_le16(le16_to_cpu(fp->hw_tx_prods->bds_prod) + 1);
9334 mb(); /* FW restriction: must not reorder writing nbd and packets */
9335 fp->hw_tx_prods->packets_prod =
9336 cpu_to_le32(le32_to_cpu(fp->hw_tx_prods->packets_prod) + 1);
9337 DOORBELL(bp, FP_IDX(fp), 0);
9343 bp->dev->trans_start = jiffies;
9347 tx_idx = le16_to_cpu(*fp->tx_cons_sb);
9348 if (tx_idx != tx_start_idx + num_pkts)
9349 goto test_loopback_exit;
9351 rx_idx = le16_to_cpu(*fp->rx_cons_sb);
9352 if (rx_idx != rx_start_idx + num_pkts)
9353 goto test_loopback_exit;
9355 cqe = &fp->rx_comp_ring[RCQ_BD(fp->rx_comp_cons)];
9356 cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
9357 if (CQE_TYPE(cqe_fp_flags) || (cqe_fp_flags & ETH_RX_ERROR_FALGS))
9358 goto test_loopback_rx_exit;
9360 len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
9361 if (len != pkt_size)
9362 goto test_loopback_rx_exit;
9364 rx_buf = &fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)];
9366 skb_reserve(skb, cqe->fast_path_cqe.placement_offset);
9367 for (i = ETH_HLEN; i < pkt_size; i++)
9368 if (*(skb->data + i) != (unsigned char) (i & 0xff))
9369 goto test_loopback_rx_exit;
9373 test_loopback_rx_exit:
9375 fp->rx_bd_cons = NEXT_RX_IDX(fp->rx_bd_cons);
9376 fp->rx_bd_prod = NEXT_RX_IDX(fp->rx_bd_prod);
9377 fp->rx_comp_cons = NEXT_RCQ_IDX(fp->rx_comp_cons);
9378 fp->rx_comp_prod = NEXT_RCQ_IDX(fp->rx_comp_prod);
9380 /* Update producers */
9381 bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
9385 bp->link_params.loopback_mode = LOOPBACK_NONE;
9390 static int bnx2x_test_loopback(struct bnx2x *bp, u8 link_up)
9394 if (!netif_running(bp->dev))
9395 return BNX2X_LOOPBACK_FAILED;
9397 bnx2x_netif_stop(bp, 1);
9398 bnx2x_acquire_phy_lock(bp);
9400 if (bnx2x_run_loopback(bp, BNX2X_MAC_LOOPBACK, link_up)) {
9401 DP(NETIF_MSG_PROBE, "MAC loopback failed\n");
9402 rc |= BNX2X_MAC_LOOPBACK_FAILED;
9405 if (bnx2x_run_loopback(bp, BNX2X_PHY_LOOPBACK, link_up)) {
9406 DP(NETIF_MSG_PROBE, "PHY loopback failed\n");
9407 rc |= BNX2X_PHY_LOOPBACK_FAILED;
9410 bnx2x_release_phy_lock(bp);
9411 bnx2x_netif_start(bp);
9416 #define CRC32_RESIDUAL 0xdebb20e3
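/*
 * CRC32 residual check: each nvram_tbl[] region below ends with its own
 * CRC32, so running ether_crc_le() over the region *including* that
 * trailing CRC yields the constant residual 0xdebb20e3 for an intact
 * image, independent of the region's actual contents.
 */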
9418 static int bnx2x_test_nvram(struct bnx2x *bp)
9420 static const struct {
9424 { 0, 0x14 }, /* bootstrap */
9425 { 0x14, 0xec }, /* dir */
9426 { 0x100, 0x350 }, /* manuf_info */
9427 { 0x450, 0xf0 }, /* feature_info */
9428 { 0x640, 0x64 }, /* upgrade_key_info */
9430 { 0x708, 0x70 }, /* manuf_key_info */
9435 u8 *data = (u8 *)buf;
9439 rc = bnx2x_nvram_read(bp, 0, data, 4);
9441 DP(NETIF_MSG_PROBE, "magic value read (rc -%d)\n", -rc);
9442 goto test_nvram_exit;
9445 magic = be32_to_cpu(buf[0]);
9446 if (magic != 0x669955aa) {
9447 DP(NETIF_MSG_PROBE, "magic value (0x%08x)\n", magic);
9449 goto test_nvram_exit;
9452 for (i = 0; nvram_tbl[i].size; i++) {
9454 rc = bnx2x_nvram_read(bp, nvram_tbl[i].offset, data,
9458 "nvram_tbl[%d] read data (rc -%d)\n", i, -rc);
9459 goto test_nvram_exit;
9462 csum = ether_crc_le(nvram_tbl[i].size, data);
9463 if (csum != CRC32_RESIDUAL) {
9465 "nvram_tbl[%d] csum value (0x%08x)\n", i, csum);
9467 goto test_nvram_exit;
9475 static int bnx2x_test_intr(struct bnx2x *bp)
9477 struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
9480 if (!netif_running(bp->dev))
9483 config->hdr.length = 0;
9485 config->hdr.offset = (BP_PORT(bp) ? 32 : 0);
9487 config->hdr.offset = BP_FUNC(bp);
9488 config->hdr.client_id = BP_CL_ID(bp);
9489 config->hdr.reserved1 = 0;
9491 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
9492 U64_HI(bnx2x_sp_mapping(bp, mac_config)),
9493 U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
9495 bp->set_mac_pending++;
9496 for (i = 0; i < 10; i++) {
9497 if (!bp->set_mac_pending)
9499 msleep_interruptible(10);
9508 static void bnx2x_self_test(struct net_device *dev,
9509 struct ethtool_test *etest, u64 *buf)
9511 struct bnx2x *bp = netdev_priv(dev);
9513 memset(buf, 0, sizeof(u64) * BNX2X_NUM_TESTS);
9515 if (!netif_running(dev))
9518 /* offline tests are not supported in MF mode */
9520 etest->flags &= ~ETH_TEST_FL_OFFLINE;
9522 if (etest->flags & ETH_TEST_FL_OFFLINE) {
9525 link_up = bp->link_vars.link_up;
9526 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
9527 bnx2x_nic_load(bp, LOAD_DIAG);
9528 /* wait until link state is restored */
9529 bnx2x_wait_for_link(bp, link_up);
9531 if (bnx2x_test_registers(bp) != 0) {
9533 etest->flags |= ETH_TEST_FL_FAILED;
9535 if (bnx2x_test_memory(bp) != 0) {
9537 etest->flags |= ETH_TEST_FL_FAILED;
9539 buf[2] = bnx2x_test_loopback(bp, link_up);
9541 etest->flags |= ETH_TEST_FL_FAILED;
9543 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
9544 bnx2x_nic_load(bp, LOAD_NORMAL);
9545 /* wait until link state is restored */
9546 bnx2x_wait_for_link(bp, link_up);
9548 if (bnx2x_test_nvram(bp) != 0) {
9550 etest->flags |= ETH_TEST_FL_FAILED;
9552 if (bnx2x_test_intr(bp) != 0) {
9554 etest->flags |= ETH_TEST_FL_FAILED;
9557 if (bnx2x_link_test(bp) != 0) {
9559 etest->flags |= ETH_TEST_FL_FAILED;
9562 #ifdef BNX2X_EXTRA_DEBUG
9563 bnx2x_panic_dump(bp);
9567 static const struct {
9570 u8 string[ETH_GSTRING_LEN];
9571 } bnx2x_q_stats_arr[BNX2X_NUM_Q_STATS] = {
9572 /* 1 */ { Q_STATS_OFFSET32(total_bytes_received_hi), 8, "[%d]: rx_bytes" },
9573 { Q_STATS_OFFSET32(error_bytes_received_hi),
9574 8, "[%d]: rx_error_bytes" },
9575 { Q_STATS_OFFSET32(total_unicast_packets_received_hi),
9576 8, "[%d]: rx_ucast_packets" },
9577 { Q_STATS_OFFSET32(total_multicast_packets_received_hi),
9578 8, "[%d]: rx_mcast_packets" },
9579 { Q_STATS_OFFSET32(total_broadcast_packets_received_hi),
9580 8, "[%d]: rx_bcast_packets" },
9581 { Q_STATS_OFFSET32(no_buff_discard_hi), 8, "[%d]: rx_discards" },
9582 { Q_STATS_OFFSET32(rx_err_discard_pkt),
9583 4, "[%d]: rx_phy_ip_err_discards"},
9584 { Q_STATS_OFFSET32(rx_skb_alloc_failed),
9585 4, "[%d]: rx_skb_alloc_discard" },
9586 { Q_STATS_OFFSET32(hw_csum_err), 4, "[%d]: rx_csum_offload_errors" },
9588 /* 10 */{ Q_STATS_OFFSET32(total_bytes_transmitted_hi), 8, "[%d]: tx_bytes" },
9589 { Q_STATS_OFFSET32(total_unicast_packets_transmitted_hi),
9590 8, "[%d]: tx_packets" }
9593 static const struct {
9597 #define STATS_FLAGS_PORT 1
9598 #define STATS_FLAGS_FUNC 2
9599 #define STATS_FLAGS_BOTH (STATS_FLAGS_FUNC | STATS_FLAGS_PORT)
9600 u8 string[ETH_GSTRING_LEN];
9601 } bnx2x_stats_arr[BNX2X_NUM_STATS] = {
9602 /* 1 */ { STATS_OFFSET32(total_bytes_received_hi),
9603 8, STATS_FLAGS_BOTH, "rx_bytes" },
9604 { STATS_OFFSET32(error_bytes_received_hi),
9605 8, STATS_FLAGS_BOTH, "rx_error_bytes" },
9606 { STATS_OFFSET32(total_unicast_packets_received_hi),
9607 8, STATS_FLAGS_BOTH, "rx_ucast_packets" },
9608 { STATS_OFFSET32(total_multicast_packets_received_hi),
9609 8, STATS_FLAGS_BOTH, "rx_mcast_packets" },
9610 { STATS_OFFSET32(total_broadcast_packets_received_hi),
9611 8, STATS_FLAGS_BOTH, "rx_bcast_packets" },
9612 { STATS_OFFSET32(rx_stat_dot3statsfcserrors_hi),
9613 8, STATS_FLAGS_PORT, "rx_crc_errors" },
9614 { STATS_OFFSET32(rx_stat_dot3statsalignmenterrors_hi),
9615 8, STATS_FLAGS_PORT, "rx_align_errors" },
9616 { STATS_OFFSET32(rx_stat_etherstatsundersizepkts_hi),
9617 8, STATS_FLAGS_PORT, "rx_undersize_packets" },
9618 { STATS_OFFSET32(etherstatsoverrsizepkts_hi),
9619 8, STATS_FLAGS_PORT, "rx_oversize_packets" },
9620 /* 10 */{ STATS_OFFSET32(rx_stat_etherstatsfragments_hi),
9621 8, STATS_FLAGS_PORT, "rx_fragments" },
9622 { STATS_OFFSET32(rx_stat_etherstatsjabbers_hi),
9623 8, STATS_FLAGS_PORT, "rx_jabbers" },
9624 { STATS_OFFSET32(no_buff_discard_hi),
9625 8, STATS_FLAGS_BOTH, "rx_discards" },
9626 { STATS_OFFSET32(mac_filter_discard),
9627 4, STATS_FLAGS_PORT, "rx_filtered_packets" },
9628 { STATS_OFFSET32(xxoverflow_discard),
9629 4, STATS_FLAGS_PORT, "rx_fw_discards" },
9630 { STATS_OFFSET32(brb_drop_hi),
9631 8, STATS_FLAGS_PORT, "rx_brb_discard" },
9632 { STATS_OFFSET32(brb_truncate_hi),
9633 8, STATS_FLAGS_PORT, "rx_brb_truncate" },
9634 { STATS_OFFSET32(pause_frames_received_hi),
9635 8, STATS_FLAGS_PORT, "rx_pause_frames" },
9636 { STATS_OFFSET32(rx_stat_maccontrolframesreceived_hi),
9637 8, STATS_FLAGS_PORT, "rx_mac_ctrl_frames" },
9638 { STATS_OFFSET32(nig_timer_max),
9639 4, STATS_FLAGS_PORT, "rx_constant_pause_events" },
9640 /* 20 */{ STATS_OFFSET32(rx_err_discard_pkt),
9641 4, STATS_FLAGS_BOTH, "rx_phy_ip_err_discards"},
9642 { STATS_OFFSET32(rx_skb_alloc_failed),
9643 4, STATS_FLAGS_BOTH, "rx_skb_alloc_discard" },
9644 { STATS_OFFSET32(hw_csum_err),
9645 4, STATS_FLAGS_BOTH, "rx_csum_offload_errors" },
9647 { STATS_OFFSET32(total_bytes_transmitted_hi),
9648 8, STATS_FLAGS_BOTH, "tx_bytes" },
9649 { STATS_OFFSET32(tx_stat_ifhcoutbadoctets_hi),
9650 8, STATS_FLAGS_PORT, "tx_error_bytes" },
9651 { STATS_OFFSET32(total_unicast_packets_transmitted_hi),
9652 8, STATS_FLAGS_BOTH, "tx_packets" },
9653 { STATS_OFFSET32(tx_stat_dot3statsinternalmactransmiterrors_hi),
9654 8, STATS_FLAGS_PORT, "tx_mac_errors" },
9655 { STATS_OFFSET32(rx_stat_dot3statscarriersenseerrors_hi),
9656 8, STATS_FLAGS_PORT, "tx_carrier_errors" },
9657 { STATS_OFFSET32(tx_stat_dot3statssinglecollisionframes_hi),
9658 8, STATS_FLAGS_PORT, "tx_single_collisions" },
9659 { STATS_OFFSET32(tx_stat_dot3statsmultiplecollisionframes_hi),
9660 8, STATS_FLAGS_PORT, "tx_multi_collisions" },
9661 /* 30 */{ STATS_OFFSET32(tx_stat_dot3statsdeferredtransmissions_hi),
9662 8, STATS_FLAGS_PORT, "tx_deferred" },
9663 { STATS_OFFSET32(tx_stat_dot3statsexcessivecollisions_hi),
9664 8, STATS_FLAGS_PORT, "tx_excess_collisions" },
9665 { STATS_OFFSET32(tx_stat_dot3statslatecollisions_hi),
9666 8, STATS_FLAGS_PORT, "tx_late_collisions" },
9667 { STATS_OFFSET32(tx_stat_etherstatscollisions_hi),
9668 8, STATS_FLAGS_PORT, "tx_total_collisions" },
9669 { STATS_OFFSET32(tx_stat_etherstatspkts64octets_hi),
9670 8, STATS_FLAGS_PORT, "tx_64_byte_packets" },
9671 { STATS_OFFSET32(tx_stat_etherstatspkts65octetsto127octets_hi),
9672 8, STATS_FLAGS_PORT, "tx_65_to_127_byte_packets" },
9673 { STATS_OFFSET32(tx_stat_etherstatspkts128octetsto255octets_hi),
9674 8, STATS_FLAGS_PORT, "tx_128_to_255_byte_packets" },
9675 { STATS_OFFSET32(tx_stat_etherstatspkts256octetsto511octets_hi),
9676 8, STATS_FLAGS_PORT, "tx_256_to_511_byte_packets" },
9677 { STATS_OFFSET32(tx_stat_etherstatspkts512octetsto1023octets_hi),
9678 8, STATS_FLAGS_PORT, "tx_512_to_1023_byte_packets" },
9679 { STATS_OFFSET32(etherstatspkts1024octetsto1522octets_hi),
9680 8, STATS_FLAGS_PORT, "tx_1024_to_1522_byte_packets" },
9681 /* 40 */{ STATS_OFFSET32(etherstatspktsover1522octets_hi),
9682 8, STATS_FLAGS_PORT, "tx_1523_to_9022_byte_packets" },
9683 { STATS_OFFSET32(pause_frames_sent_hi),
9684 8, STATS_FLAGS_PORT, "tx_pause_frames" }
9687 #define IS_PORT_STAT(i) \
9688 ((bnx2x_stats_arr[i].flags & STATS_FLAGS_BOTH) == STATS_FLAGS_PORT)
9689 #define IS_FUNC_STAT(i) (bnx2x_stats_arr[i].flags & STATS_FLAGS_FUNC)
9690 #define IS_E1HMF_MODE_STAT(bp) \
9691 (IS_E1HMF(bp) && !(bp->msglevel & BNX2X_MSG_STATS))
9693 static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
9695 struct bnx2x *bp = netdev_priv(dev);
9698 switch (stringset) {
9702 for_each_queue(bp, i) {
9703 for (j = 0; j < BNX2X_NUM_Q_STATS; j++)
9704 sprintf(buf + (k + j)*ETH_GSTRING_LEN,
9705 bnx2x_q_stats_arr[j].string, i);
9706 k += BNX2X_NUM_Q_STATS;
9708 if (IS_E1HMF_MODE_STAT(bp))
9710 for (j = 0; j < BNX2X_NUM_STATS; j++)
9711 strcpy(buf + (k + j)*ETH_GSTRING_LEN,
9712 bnx2x_stats_arr[j].string);
9714 for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
9715 if (IS_E1HMF_MODE_STAT(bp) && IS_PORT_STAT(i))
9717 strcpy(buf + j*ETH_GSTRING_LEN,
9718 bnx2x_stats_arr[i].string);
9725 memcpy(buf, bnx2x_tests_str_arr, sizeof(bnx2x_tests_str_arr));
9730 static int bnx2x_get_stats_count(struct net_device *dev)
9732 struct bnx2x *bp = netdev_priv(dev);
9736 num_stats = BNX2X_NUM_Q_STATS * BNX2X_NUM_QUEUES(bp);
9737 if (!IS_E1HMF_MODE_STAT(bp))
9738 num_stats += BNX2X_NUM_STATS;
9740 if (IS_E1HMF_MODE_STAT(bp)) {
9742 for (i = 0; i < BNX2X_NUM_STATS; i++)
9743 if (IS_FUNC_STAT(i))
9746 num_stats = BNX2X_NUM_STATS;
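/*
 * Count sketch: with multiple queues, BNX2X_NUM_Q_STATS entries are
 * exported per queue, plus the full global array unless the device is
 * in E1H multi-function mode; a single-queue device in multi-function
 * mode exports only the per-function entries selected by IS_FUNC_STAT().
 */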
9752 static void bnx2x_get_ethtool_stats(struct net_device *dev,
9753 struct ethtool_stats *stats, u64 *buf)
9755 struct bnx2x *bp = netdev_priv(dev);
9756 u32 *hw_stats, *offset;
9761 for_each_queue(bp, i) {
9762 hw_stats = (u32 *)&bp->fp[i].eth_q_stats;
9763 for (j = 0; j < BNX2X_NUM_Q_STATS; j++) {
9764 if (bnx2x_q_stats_arr[j].size == 0) {
9765 /* skip this counter */
9769 offset = (hw_stats +
9770 bnx2x_q_stats_arr[j].offset);
9771 if (bnx2x_q_stats_arr[j].size == 4) {
9772 /* 4-byte counter */
9773 buf[k + j] = (u64) *offset;
9776 /* 8-byte counter */
9777 buf[k + j] = HILO_U64(*offset, *(offset + 1));
9779 k += BNX2X_NUM_Q_STATS;
9781 if (IS_E1HMF_MODE_STAT(bp))
9783 hw_stats = (u32 *)&bp->eth_stats;
9784 for (j = 0; j < BNX2X_NUM_STATS; j++) {
9785 if (bnx2x_stats_arr[j].size == 0) {
9786 /* skip this counter */
9790 offset = (hw_stats + bnx2x_stats_arr[j].offset);
9791 if (bnx2x_stats_arr[j].size == 4) {
9792 /* 4-byte counter */
9793 buf[k + j] = (u64) *offset;
9796 /* 8-byte counter */
9797 buf[k + j] = HILO_U64(*offset, *(offset + 1));
9800 hw_stats = (u32 *)&bp->eth_stats;
9801 for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
9802 if (IS_E1HMF_MODE_STAT(bp) && IS_PORT_STAT(i))
9804 if (bnx2x_stats_arr[i].size == 0) {
9805 /* skip this counter */
9810 offset = (hw_stats + bnx2x_stats_arr[i].offset);
9811 if (bnx2x_stats_arr[i].size == 4) {
9812 /* 4-byte counter */
9813 buf[j] = (u64) *offset;
9817 /* 8-byte counter */
9818 buf[j] = HILO_U64(*offset, *(offset + 1));
9824 static int bnx2x_phys_id(struct net_device *dev, u32 data)
9826 struct bnx2x *bp = netdev_priv(dev);
9827 int port = BP_PORT(bp);
9830 if (!netif_running(dev))
9839 for (i = 0; i < (data * 2); i++) {
9841 bnx2x_set_led(bp, port, LED_MODE_OPER, SPEED_1000,
9842 bp->link_params.hw_led_mode,
9843 bp->link_params.chip_id);
9845 bnx2x_set_led(bp, port, LED_MODE_OFF, 0,
9846 bp->link_params.hw_led_mode,
9847 bp->link_params.chip_id);
9849 msleep_interruptible(500);
9850 if (signal_pending(current))
9854 if (bp->link_vars.link_up)
9855 bnx2x_set_led(bp, port, LED_MODE_OPER,
9856 bp->link_vars.line_speed,
9857 bp->link_params.hw_led_mode,
9858 bp->link_params.chip_id);
9863 static struct ethtool_ops bnx2x_ethtool_ops = {
9864 .get_settings = bnx2x_get_settings,
9865 .set_settings = bnx2x_set_settings,
9866 .get_drvinfo = bnx2x_get_drvinfo,
9867 .get_wol = bnx2x_get_wol,
9868 .set_wol = bnx2x_set_wol,
9869 .get_msglevel = bnx2x_get_msglevel,
9870 .set_msglevel = bnx2x_set_msglevel,
9871 .nway_reset = bnx2x_nway_reset,
9872 .get_link = ethtool_op_get_link,
9873 .get_eeprom_len = bnx2x_get_eeprom_len,
9874 .get_eeprom = bnx2x_get_eeprom,
9875 .set_eeprom = bnx2x_set_eeprom,
9876 .get_coalesce = bnx2x_get_coalesce,
9877 .set_coalesce = bnx2x_set_coalesce,
9878 .get_ringparam = bnx2x_get_ringparam,
9879 .set_ringparam = bnx2x_set_ringparam,
9880 .get_pauseparam = bnx2x_get_pauseparam,
9881 .set_pauseparam = bnx2x_set_pauseparam,
9882 .get_rx_csum = bnx2x_get_rx_csum,
9883 .set_rx_csum = bnx2x_set_rx_csum,
9884 .get_tx_csum = ethtool_op_get_tx_csum,
9885 .set_tx_csum = ethtool_op_set_tx_hw_csum,
9886 .set_flags = bnx2x_set_flags,
9887 .get_flags = ethtool_op_get_flags,
9888 .get_sg = ethtool_op_get_sg,
9889 .set_sg = ethtool_op_set_sg,
9890 .get_tso = ethtool_op_get_tso,
9891 .set_tso = bnx2x_set_tso,
9892 .self_test_count = bnx2x_self_test_count,
9893 .self_test = bnx2x_self_test,
9894 .get_strings = bnx2x_get_strings,
9895 .phys_id = bnx2x_phys_id,
9896 .get_stats_count = bnx2x_get_stats_count,
9897 .get_ethtool_stats = bnx2x_get_ethtool_stats,
9900 /* end of ethtool_ops */
9902 /****************************************************************************
9903 * General service functions
9904 ****************************************************************************/
9906 static int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
9910 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);
9914 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
9915 ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
9916 PCI_PM_CTRL_PME_STATUS));
9918 if (pmcsr & PCI_PM_CTRL_STATE_MASK)
9919 /* delay required during transition out of D3hot */
9924 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
9928 pmcsr |= PCI_PM_CTRL_PME_ENABLE;
9930 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
9933 /* No more memory access after this point until
9934 * device is brought back to D0.
9944 static inline int bnx2x_has_rx_work(struct bnx2x_fastpath *fp)
9948 /* Tell compiler that status block fields can change */
9950 rx_cons_sb = le16_to_cpu(*fp->rx_cons_sb);
9951 if ((rx_cons_sb & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
9953 return (fp->rx_comp_cons != rx_cons_sb);
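/*
 * The boundary test above accounts for the ring layout: the last
 * descriptor of each RCQ page links to the next page, so a status-block
 * index landing exactly on MAX_RCQ_DESC_CNT is advanced past that
 * next-page entry before being compared with rx_comp_cons.
 */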
9957 * net_device service functions
9960 static int bnx2x_poll(struct napi_struct *napi, int budget)
9962 struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
9964 struct bnx2x *bp = fp->bp;
9967 #ifdef BNX2X_STOP_ON_ERROR
9968 if (unlikely(bp->panic))
9972 prefetch(fp->tx_buf_ring[TX_BD(fp->tx_pkt_cons)].skb);
9973 prefetch(fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)].skb);
9974 prefetch((char *)(fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)].skb) + 256);
9976 bnx2x_update_fpsb_idx(fp);
9978 if (bnx2x_has_tx_work(fp))
9979 bnx2x_tx_int(fp, budget);
9981 if (bnx2x_has_rx_work(fp))
9982 work_done = bnx2x_rx_int(fp, budget);
9983 rmb(); /* BNX2X_HAS_WORK() reads the status block */
9985 /* must not complete if we consumed full budget */
9986 if ((work_done < budget) && !BNX2X_HAS_WORK(fp)) {
9988 #ifdef BNX2X_STOP_ON_ERROR
9991 napi_complete(napi);
9993 bnx2x_ack_sb(bp, FP_SB_ID(fp), USTORM_ID,
9994 le16_to_cpu(fp->fp_u_idx), IGU_INT_NOP, 1);
9995 bnx2x_ack_sb(bp, FP_SB_ID(fp), CSTORM_ID,
9996 le16_to_cpu(fp->fp_c_idx), IGU_INT_ENABLE, 1);
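/*
 * Ordering sketch for the completion path above: the rmb() forces the
 * status block to be re-read after the TX/RX work, so BNX2X_HAS_WORK()
 * cannot miss an index that advanced while work was in progress.  Only
 * when nothing is pending is NAPI completed and the IGU re-armed
 * (IGU_INT_ENABLE); the USTORM ack is a NOP update that merely reports
 * the consumed index.
 */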
10002 /* we split the first BD into header and data BDs
10003 * to ease the pain of our fellow microcode engineers;
10004 * we use one mapping for both BDs.
10005 * So far this has only been observed to happen
10006 * in Other Operating Systems(TM)
10008 static noinline u16 bnx2x_tx_split(struct bnx2x *bp,
10009 struct bnx2x_fastpath *fp,
10010 struct eth_tx_bd **tx_bd, u16 hlen,
10011 u16 bd_prod, int nbd)
10013 struct eth_tx_bd *h_tx_bd = *tx_bd;
10014 struct eth_tx_bd *d_tx_bd;
10015 dma_addr_t mapping;
10016 int old_len = le16_to_cpu(h_tx_bd->nbytes);
10018 /* first fix first BD */
10019 h_tx_bd->nbd = cpu_to_le16(nbd);
10020 h_tx_bd->nbytes = cpu_to_le16(hlen);
10022 DP(NETIF_MSG_TX_QUEUED, "TSO split header size is %d "
10023 "(%x:%x) nbd %d\n", h_tx_bd->nbytes, h_tx_bd->addr_hi,
10024 h_tx_bd->addr_lo, h_tx_bd->nbd);
10026 /* now get a new data BD
10027 * (after the pbd) and fill it */
10028 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
10029 d_tx_bd = &fp->tx_desc_ring[bd_prod];
10031 mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi),
10032 le32_to_cpu(h_tx_bd->addr_lo)) + hlen;
10034 d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
10035 d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
10036 d_tx_bd->nbytes = cpu_to_le16(old_len - hlen);
10038 /* this marks the BD as one that has no individual mapping;
10039 * the FW ignores this flag in a BD not marked start
10041 d_tx_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_SW_LSO;
10042 DP(NETIF_MSG_TX_QUEUED,
10043 "TSO split data size is %d (%x:%x)\n",
10044 d_tx_bd->nbytes, d_tx_bd->addr_hi, d_tx_bd->addr_lo);
10046 /* update tx_bd for marking the last BD flag */
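/*
 * Split layout sketch: a TSO frame whose linear data exceeds its headers
 * ends up as
 *
 *	[header BD, hlen bytes][parse BD][data BD, old_len - hlen bytes]
 *
 * with both the header and data BDs pointing into the one DMA mapping of
 * the original linear buffer (the data BD at mapping + hlen).
 */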
10052 static inline u16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
10055 csum = (u16) ~csum_fold(csum_sub(csum,
10056 csum_partial(t_header - fix, fix, 0)));
10059 csum = (u16) ~csum_fold(csum_add(csum,
10060 csum_partial(t_header, -fix, 0)));
10062 return swab16(csum);
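/*
 * Rationale sketch for bnx2x_csum_fix(): the stack's partial checksum
 * may start before or after the transport header, while the parse BD
 * wants it anchored exactly there.  A positive fix subtracts the sum of
 * the extra bytes in front of the transport header; a negative fix adds
 * back the bytes that were skipped.  The folded result is byte-swapped
 * for the BD.
 */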
10065 static inline u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
10069 if (skb->ip_summed != CHECKSUM_PARTIAL)
10073 if (skb->protocol == ntohs(ETH_P_IPV6)) {
10075 if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
10076 rc |= XMIT_CSUM_TCP;
10080 if (ip_hdr(skb)->protocol == IPPROTO_TCP)
10081 rc |= XMIT_CSUM_TCP;
10085 if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
10088 else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
10094 #if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
10095 /* check if packet requires linearization (packet is too fragmented) */
10096 static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
10101 int first_bd_sz = 0;
10103 /* 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */
10104 if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - 3)) {
10106 if (xmit_type & XMIT_GSO) {
10107 unsigned short lso_mss = skb_shinfo(skb)->gso_size;
10108 /* Check if LSO packet needs to be copied:
10109 3 = 1 (for headers BD) + 2 (for PBD and last BD) */
10110 int wnd_size = MAX_FETCH_BD - 3;
10111 /* Number of windows to check */
10112 int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size;
10117 /* Headers length */
10118 hlen = (int)(skb_transport_header(skb) - skb->data) +
10121 /* Amount of data (w/o headers) in the linear part of the SKB */
10122 first_bd_sz = skb_headlen(skb) - hlen;
10124 wnd_sum = first_bd_sz;
10126 /* Calculate the first sum - it's special */
10127 for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++)
10129 skb_shinfo(skb)->frags[frag_idx].size;
10131 /* If there was data in the linear part of the skb - check it */
10132 if (first_bd_sz > 0) {
10133 if (unlikely(wnd_sum < lso_mss)) {
10138 wnd_sum -= first_bd_sz;
10141 /* Others are easier: run through the frag list and
10142 check all windows */
10143 for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) {
10145 skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1].size;
10147 if (unlikely(wnd_sum < lso_mss)) {
10152 skb_shinfo(skb)->frags[wnd_idx].size;
10156 /* in the non-LSO case, a too-fragmented packet should always
10163 if (unlikely(to_copy))
10164 DP(NETIF_MSG_TX_QUEUED,
10165 "Linearization IS REQUIRED for %s packet. "
10166 "num_frags %d hlen %d first_bd_sz %d\n",
10167 (xmit_type & XMIT_GSO) ? "LSO" : "non-LSO",
10168 skb_shinfo(skb)->nr_frags, hlen, first_bd_sz);
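/*
 * Window-check intuition: the FW fetches at most MAX_FETCH_BD BDs per
 * packet, three of which are spoken for (headers BD, parse BD, last BD).
 * For LSO, every run of wnd_size consecutive frags must therefore carry
 * at least one MSS of payload between them; if any window falls short,
 * some segment would need more BDs than the FW can fetch, so the skb is
 * linearized instead.
 */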
10174 /* called with netif_tx_lock
10175 * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
10176 * netif_wake_queue()
10178 static int bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
10180 struct bnx2x *bp = netdev_priv(dev);
10181 struct bnx2x_fastpath *fp;
10182 struct netdev_queue *txq;
10183 struct sw_tx_bd *tx_buf;
10184 struct eth_tx_bd *tx_bd;
10185 struct eth_tx_parse_bd *pbd = NULL;
10186 u16 pkt_prod, bd_prod;
10188 dma_addr_t mapping;
10189 u32 xmit_type = bnx2x_xmit_type(bp, skb);
10190 int vlan_off = (bp->e1hov ? 4 : 0);
10194 #ifdef BNX2X_STOP_ON_ERROR
10195 if (unlikely(bp->panic))
10196 return NETDEV_TX_BUSY;
10199 fp_index = skb_get_queue_mapping(skb);
10200 txq = netdev_get_tx_queue(dev, fp_index);
10202 fp = &bp->fp[fp_index];
10204 if (unlikely(bnx2x_tx_avail(fp) < (skb_shinfo(skb)->nr_frags + 3))) {
10205 fp->eth_q_stats.driver_xoff++;
10206 netif_tx_stop_queue(txq);
10207 BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
10208 return NETDEV_TX_BUSY;
10211 DP(NETIF_MSG_TX_QUEUED, "SKB: summed %x protocol %x protocol(%x,%x)"
10212 " gso type %x xmit_type %x\n",
10213 skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
10214 ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type);
10216 #if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
10217 /* First, check if we need to linearize the skb
10218 (due to FW restrictions) */
10219 if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) {
10220 /* Statistics of linearization */
10222 if (skb_linearize(skb) != 0) {
10223 DP(NETIF_MSG_TX_QUEUED, "SKB linearization failed - "
10224 "silently dropping this SKB\n");
10225 dev_kfree_skb_any(skb);
10226 return NETDEV_TX_OK;
10232 Please read carefully. First we use one BD which we mark as start,
10233 then for TSO or xsum we have a parsing info BD,
10234 and only then we have the rest of the TSO BDs.
10235 (don't forget to mark the last one as last,
10236 and to unmap only AFTER you write to the BD ...)
10237 And above all, all pbd sizes are in words - NOT DWORDS!
10240 pkt_prod = fp->tx_pkt_prod++;
10241 bd_prod = TX_BD(fp->tx_bd_prod);
10243 /* get a tx_buf and first BD */
10244 tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)];
10245 tx_bd = &fp->tx_desc_ring[bd_prod];
10247 tx_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
10248 tx_bd->general_data = (UNICAST_ADDRESS <<
10249 ETH_TX_BD_ETH_ADDR_TYPE_SHIFT);
10251 tx_bd->general_data |= (1 << ETH_TX_BD_HDR_NBDS_SHIFT);
10253 /* remember the first BD of the packet */
10254 tx_buf->first_bd = fp->tx_bd_prod;
10257 DP(NETIF_MSG_TX_QUEUED,
10258 "sending pkt %u @%p next_idx %u bd %u @%p\n",
10259 pkt_prod, tx_buf, fp->tx_pkt_prod, bd_prod, tx_bd);
10262 if ((bp->vlgrp != NULL) && vlan_tx_tag_present(skb) &&
10263 (bp->flags & HW_VLAN_TX_FLAG)) {
10264 tx_bd->vlan = cpu_to_le16(vlan_tx_tag_get(skb));
10265 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_VLAN_TAG;
10269 tx_bd->vlan = cpu_to_le16(pkt_prod);
10272 /* turn on parsing and get a BD */
10273 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
10274 pbd = (void *)&fp->tx_desc_ring[bd_prod];
10276 memset(pbd, 0, sizeof(struct eth_tx_parse_bd));
10279 if (xmit_type & XMIT_CSUM) {
10280 hlen = (skb_network_header(skb) - skb->data + vlan_off) / 2;
10282 /* for now NS flag is not used in Linux */
10283 pbd->global_data = (hlen |
10284 ((skb->protocol == ntohs(ETH_P_8021Q)) <<
10285 ETH_TX_PARSE_BD_LLC_SNAP_EN_SHIFT));
10287 pbd->ip_hlen = (skb_transport_header(skb) -
10288 skb_network_header(skb)) / 2;
10290 hlen += pbd->ip_hlen + tcp_hdrlen(skb) / 2;
10292 pbd->total_hlen = cpu_to_le16(hlen);
10293 hlen = hlen*2 - vlan_off;
10295 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_TCP_CSUM;
10297 if (xmit_type & XMIT_CSUM_V4)
10298 tx_bd->bd_flags.as_bitfield |=
10299 ETH_TX_BD_FLAGS_IP_CSUM;
10301 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IPV6;
10303 if (xmit_type & XMIT_CSUM_TCP) {
10304 pbd->tcp_pseudo_csum = swab16(tcp_hdr(skb)->check);
10307 s8 fix = SKB_CS_OFF(skb); /* signed! */
10309 pbd->global_data |= ETH_TX_PARSE_BD_CS_ANY_FLG;
10310 pbd->cs_offset = fix / 2;
10312 DP(NETIF_MSG_TX_QUEUED,
10313 "hlen %d offset %d fix %d csum before fix %x\n",
10314 le16_to_cpu(pbd->total_hlen), pbd->cs_offset, fix,
10317 /* HW bug: fixup the CSUM */
10318 pbd->tcp_pseudo_csum =
10319 bnx2x_csum_fix(skb_transport_header(skb),
10322 DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n",
10323 pbd->tcp_pseudo_csum);
10327 mapping = pci_map_single(bp->pdev, skb->data,
10328 skb_headlen(skb), PCI_DMA_TODEVICE);
10330 tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
10331 tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
10332 nbd = skb_shinfo(skb)->nr_frags + ((pbd == NULL) ? 1 : 2);
10333 tx_bd->nbd = cpu_to_le16(nbd);
10334 tx_bd->nbytes = cpu_to_le16(skb_headlen(skb));
10336 DP(NETIF_MSG_TX_QUEUED, "first bd @%p addr (%x:%x) nbd %d"
10337 " nbytes %d flags %x vlan %x\n",
10338 tx_bd, tx_bd->addr_hi, tx_bd->addr_lo, le16_to_cpu(tx_bd->nbd),
10339 le16_to_cpu(tx_bd->nbytes), tx_bd->bd_flags.as_bitfield,
10340 le16_to_cpu(tx_bd->vlan));
10342 if (xmit_type & XMIT_GSO) {
10344 DP(NETIF_MSG_TX_QUEUED,
10345 "TSO packet len %d hlen %d total len %d tso size %d\n",
10346 skb->len, hlen, skb_headlen(skb),
10347 skb_shinfo(skb)->gso_size);
10349 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;
10351 if (unlikely(skb_headlen(skb) > hlen))
10352 bd_prod = bnx2x_tx_split(bp, fp, &tx_bd, hlen,
10355 pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
10356 pbd->tcp_send_seq = swab32(tcp_hdr(skb)->seq);
10357 pbd->tcp_flags = pbd_tcp_flags(skb);
10359 if (xmit_type & XMIT_GSO_V4) {
10360 pbd->ip_id = swab16(ip_hdr(skb)->id);
10361 pbd->tcp_pseudo_csum =
10362 swab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
10363 ip_hdr(skb)->daddr,
10364 0, IPPROTO_TCP, 0));
10367 pbd->tcp_pseudo_csum =
10368 swab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
10369 &ipv6_hdr(skb)->daddr,
10370 0, IPPROTO_TCP, 0));
10372 pbd->global_data |= ETH_TX_PARSE_BD_PSEUDO_CS_WITHOUT_LEN;
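	/*
	 * The pseudo-header checksums above are computed with a zero
	 * length argument because the FW regenerates the checksum for
	 * each segment it carves out of the LSO frame;
	 * PSEUDO_CS_WITHOUT_LEN flags that the length was intentionally
	 * omitted from the pseudo header.
	 */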
10375 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
10376 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
10378 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
10379 tx_bd = &fp->tx_desc_ring[bd_prod];
10381 mapping = pci_map_page(bp->pdev, frag->page, frag->page_offset,
10382 frag->size, PCI_DMA_TODEVICE);
10384 tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
10385 tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
10386 tx_bd->nbytes = cpu_to_le16(frag->size);
10387 tx_bd->vlan = cpu_to_le16(pkt_prod);
10388 tx_bd->bd_flags.as_bitfield = 0;
10390 DP(NETIF_MSG_TX_QUEUED,
10391 "frag %d bd @%p addr (%x:%x) nbytes %d flags %x\n",
10392 i, tx_bd, tx_bd->addr_hi, tx_bd->addr_lo,
10393 le16_to_cpu(tx_bd->nbytes), tx_bd->bd_flags.as_bitfield);
10396 /* now at last mark the BD as the last BD */
10397 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_END_BD;
10399 DP(NETIF_MSG_TX_QUEUED, "last bd @%p flags %x\n",
10400 tx_bd, tx_bd->bd_flags.as_bitfield);
10402 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
10404 /* now send a tx doorbell, counting the next-page link BD
10405 * in nbd if this packet's BDs crossed or ended on a page boundary
10407 if (TX_BD_POFF(bd_prod) < nbd)
10411 DP(NETIF_MSG_TX_QUEUED,
10412 "PBD @%p ip_data %x ip_hlen %u ip_id %u lso_mss %u"
10413 " tcp_flags %x xsum %x seq %u hlen %u\n",
10414 pbd, pbd->global_data, pbd->ip_hlen, pbd->ip_id,
10415 pbd->lso_mss, pbd->tcp_flags, pbd->tcp_pseudo_csum,
10416 pbd->tcp_send_seq, le16_to_cpu(pbd->total_hlen));
10418 DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d bd %u\n", nbd, bd_prod);
10421 * Make sure that the BD data is updated before updating the producer
10422 * since FW might read the BD right after the producer is updated.
10423 * This is only applicable for weak-ordered memory model archs such
10424 * as IA-64. The following barrier is also mandatory since the FW
10425 * assumes packets always have BDs.
10429 fp->hw_tx_prods->bds_prod =
10430 cpu_to_le16(le16_to_cpu(fp->hw_tx_prods->bds_prod) + nbd);
10431 mb(); /* FW restriction: must not reorder writing nbd and packets */
10432 fp->hw_tx_prods->packets_prod =
10433 cpu_to_le32(le32_to_cpu(fp->hw_tx_prods->packets_prod) + 1);
10434 DOORBELL(bp, FP_IDX(fp), 0);
10438 fp->tx_bd_prod += nbd;
10439 dev->trans_start = jiffies;
10441 if (unlikely(bnx2x_tx_avail(fp) < MAX_SKB_FRAGS + 3)) {
10442 /* We want bnx2x_tx_int to "see" the updated tx_bd_prod
10443 if we put Tx into XOFF state. */
10445 netif_tx_stop_queue(txq);
10446 fp->eth_q_stats.driver_xoff++;
10447 if (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3)
10448 netif_tx_wake_queue(txq);
10452 return NETDEV_TX_OK;
10455 /* called with rtnl_lock */
10456 static int bnx2x_open(struct net_device *dev)
10458 struct bnx2x *bp = netdev_priv(dev);
10460 netif_carrier_off(dev);
10462 bnx2x_set_power_state(bp, PCI_D0);
10464 return bnx2x_nic_load(bp, LOAD_OPEN);
10467 /* called with rtnl_lock */
10468 static int bnx2x_close(struct net_device *dev)
10470 struct bnx2x *bp = netdev_priv(dev);
10472 /* Unload the driver, release IRQs */
10473 bnx2x_nic_unload(bp, UNLOAD_CLOSE);
10474 if (atomic_read(&bp->pdev->enable_cnt) == 1)
10475 if (!CHIP_REV_IS_SLOW(bp))
10476 bnx2x_set_power_state(bp, PCI_D3hot);
10481 /* called with netif_tx_lock from set_multicast */
10482 static void bnx2x_set_rx_mode(struct net_device *dev)
10484 struct bnx2x *bp = netdev_priv(dev);
10485 u32 rx_mode = BNX2X_RX_MODE_NORMAL;
10486 int port = BP_PORT(bp);
10488 if (bp->state != BNX2X_STATE_OPEN) {
10489 DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
10493 DP(NETIF_MSG_IFUP, "dev->flags = %x\n", dev->flags);
10495 if (dev->flags & IFF_PROMISC)
10496 rx_mode = BNX2X_RX_MODE_PROMISC;
10498 else if ((dev->flags & IFF_ALLMULTI) ||
10499 ((dev->mc_count > BNX2X_MAX_MULTICAST) && CHIP_IS_E1(bp)))
10500 rx_mode = BNX2X_RX_MODE_ALLMULTI;
10502 else { /* some multicasts */
10503 if (CHIP_IS_E1(bp)) {
10504 int i, old, offset;
10505 struct dev_mc_list *mclist;
10506 struct mac_configuration_cmd *config =
10507 bnx2x_sp(bp, mcast_config);
10509 for (i = 0, mclist = dev->mc_list;
10510 mclist && (i < dev->mc_count);
10511 i++, mclist = mclist->next) {
10513 config->config_table[i].
10514 cam_entry.msb_mac_addr =
10515 swab16(*(u16 *)&mclist->dmi_addr[0]);
10516 config->config_table[i].
10517 cam_entry.middle_mac_addr =
10518 swab16(*(u16 *)&mclist->dmi_addr[2]);
10519 config->config_table[i].
10520 cam_entry.lsb_mac_addr =
10521 swab16(*(u16 *)&mclist->dmi_addr[4]);
10522 config->config_table[i].cam_entry.flags =
10524 config->config_table[i].
10525 target_table_entry.flags = 0;
10526 config->config_table[i].
10527 target_table_entry.client_id = 0;
10528 config->config_table[i].
10529 target_table_entry.vlan_id = 0;
10532 "setting MCAST[%d] (%04x:%04x:%04x)\n", i,
10533 config->config_table[i].
10534 cam_entry.msb_mac_addr,
10535 config->config_table[i].
10536 cam_entry.middle_mac_addr,
10537 config->config_table[i].
10538 cam_entry.lsb_mac_addr);
10540 old = config->hdr.length;
10542 for (; i < old; i++) {
10543 if (CAM_IS_INVALID(config->
10544 config_table[i])) {
10545 /* already invalidated */
10549 CAM_INVALIDATE(config->
10554 if (CHIP_REV_IS_SLOW(bp))
10555 offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
10557 offset = BNX2X_MAX_MULTICAST*(1 + port);
10559 config->hdr.length = i;
10560 config->hdr.offset = offset;
10561 config->hdr.client_id = bp->fp->cl_id;
10562 config->hdr.reserved1 = 0;
10564 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
10565 U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
10566 U64_LO(bnx2x_sp_mapping(bp, mcast_config)),
10569 /* Accept one or more multicasts */
10570 struct dev_mc_list *mclist;
10571 u32 mc_filter[MC_HASH_SIZE];
10572 u32 crc, bit, regidx;
10575 memset(mc_filter, 0, 4 * MC_HASH_SIZE);
10577 for (i = 0, mclist = dev->mc_list;
10578 mclist && (i < dev->mc_count);
10579 i++, mclist = mclist->next) {
10581 DP(NETIF_MSG_IFUP, "Adding mcast MAC: %pM\n",
10584 crc = crc32c_le(0, mclist->dmi_addr, ETH_ALEN);
10585 bit = (crc >> 24) & 0xff;
10588 mc_filter[regidx] |= (1 << bit);
10591 for (i = 0; i < MC_HASH_SIZE; i++)
10592 REG_WR(bp, MC_HASH_OFFSET(bp, i),
10597 bp->rx_mode = rx_mode;
10598 bnx2x_set_storm_rx_mode(bp);
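/*
 * Multicast hash sketch (E1H path above): each address is hashed with
 * crc32c_le(); bits 31:24 of the CRC pick one of 256 filter bits kept
 * in MC_HASH_SIZE 32-bit words, i.e. regidx = bit >> 5 and the bit
 * within the word is bit & 0x1f.  Being an imperfect filter, hash
 * collisions may admit unwanted groups, which the stack then drops.
 */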
10601 /* called with rtnl_lock */
10602 static int bnx2x_change_mac_addr(struct net_device *dev, void *p)
10604 struct sockaddr *addr = p;
10605 struct bnx2x *bp = netdev_priv(dev);
10607 if (!is_valid_ether_addr((u8 *)(addr->sa_data)))
10610 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
10611 if (netif_running(dev)) {
10612 if (CHIP_IS_E1(bp))
10613 bnx2x_set_mac_addr_e1(bp, 1);
10615 bnx2x_set_mac_addr_e1h(bp, 1);
10621 /* called with rtnl_lock */
10622 static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
10624 struct mii_ioctl_data *data = if_mii(ifr);
10625 struct bnx2x *bp = netdev_priv(dev);
10626 int port = BP_PORT(bp);
10631 data->phy_id = bp->port.phy_addr;
10635 case SIOCGMIIREG: {
10638 if (!netif_running(dev))
10641 mutex_lock(&bp->port.phy_mutex);
10642 err = bnx2x_cl45_read(bp, port, 0, bp->port.phy_addr,
10643 DEFAULT_PHY_DEV_ADDR,
10644 (data->reg_num & 0x1f), &mii_regval);
10645 data->val_out = mii_regval;
10646 mutex_unlock(&bp->port.phy_mutex);
10651 if (!capable(CAP_NET_ADMIN))
10654 if (!netif_running(dev))
10657 mutex_lock(&bp->port.phy_mutex);
10658 err = bnx2x_cl45_write(bp, port, 0, bp->port.phy_addr,
10659 DEFAULT_PHY_DEV_ADDR,
10660 (data->reg_num & 0x1f), data->val_in);
10661 mutex_unlock(&bp->port.phy_mutex);
10669 return -EOPNOTSUPP;
10672 /* called with rtnl_lock */
10673 static int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
10675 struct bnx2x *bp = netdev_priv(dev);
10678 if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
10679 ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE))
10682 /* This does not race with packet allocation
10683 * because the actual alloc size is
10684 * only updated as part of load
10686 dev->mtu = new_mtu;
10688 if (netif_running(dev)) {
10689 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
10690 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
10696 static void bnx2x_tx_timeout(struct net_device *dev)
10698 struct bnx2x *bp = netdev_priv(dev);
10700 #ifdef BNX2X_STOP_ON_ERROR
10704 /* This allows the netif to be shut down gracefully before resetting */
10705 schedule_work(&bp->reset_task);
10709 /* called with rtnl_lock */
10710 static void bnx2x_vlan_rx_register(struct net_device *dev,
10711 struct vlan_group *vlgrp)
10713 struct bnx2x *bp = netdev_priv(dev);
10717 /* Set flags according to the required capabilities */
10718 bp->flags &= ~(HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG);
10720 if (dev->features & NETIF_F_HW_VLAN_TX)
10721 bp->flags |= HW_VLAN_TX_FLAG;
10723 if (dev->features & NETIF_F_HW_VLAN_RX)
10724 bp->flags |= HW_VLAN_RX_FLAG;
10726 if (netif_running(dev))
10727 bnx2x_set_client_config(bp);
10732 #if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
10733 static void poll_bnx2x(struct net_device *dev)
10735 struct bnx2x *bp = netdev_priv(dev);
10737 disable_irq(bp->pdev->irq);
10738 bnx2x_interrupt(bp->pdev->irq, dev);
10739 enable_irq(bp->pdev->irq);
10743 static const struct net_device_ops bnx2x_netdev_ops = {
10744 .ndo_open = bnx2x_open,
10745 .ndo_stop = bnx2x_close,
10746 .ndo_start_xmit = bnx2x_start_xmit,
10747 .ndo_set_multicast_list = bnx2x_set_rx_mode,
10748 .ndo_set_mac_address = bnx2x_change_mac_addr,
10749 .ndo_validate_addr = eth_validate_addr,
10750 .ndo_do_ioctl = bnx2x_ioctl,
10751 .ndo_change_mtu = bnx2x_change_mtu,
10752 .ndo_tx_timeout = bnx2x_tx_timeout,
10754 .ndo_vlan_rx_register = bnx2x_vlan_rx_register,
10756 #if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
10757 .ndo_poll_controller = poll_bnx2x,
10762 static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
10763 struct net_device *dev)
10768 SET_NETDEV_DEV(dev, &pdev->dev);
10769 bp = netdev_priv(dev);
10774 bp->func = PCI_FUNC(pdev->devfn);
10776 rc = pci_enable_device(pdev);
10778 printk(KERN_ERR PFX "Cannot enable PCI device, aborting\n");
10782 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
10783 printk(KERN_ERR PFX "Cannot find PCI device base address,"
10786 goto err_out_disable;
10789 if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
10790 printk(KERN_ERR PFX "Cannot find second PCI device"
10791 " base address, aborting\n");
10793 goto err_out_disable;
10796 if (atomic_read(&pdev->enable_cnt) == 1) {
10797 rc = pci_request_regions(pdev, DRV_MODULE_NAME);
10799 printk(KERN_ERR PFX "Cannot obtain PCI resources,"
10801 goto err_out_disable;
10804 pci_set_master(pdev);
10805 pci_save_state(pdev);
10808 bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
10809 if (bp->pm_cap == 0) {
10810 printk(KERN_ERR PFX "Cannot find power management"
10811 " capability, aborting\n");
10813 goto err_out_release;
10816 bp->pcie_cap = pci_find_capability(pdev, PCI_CAP_ID_EXP);
10817 if (bp->pcie_cap == 0) {
10818 printk(KERN_ERR PFX "Cannot find PCI Express capability,"
10821 goto err_out_release;
10824 if (pci_set_dma_mask(pdev, DMA_64BIT_MASK) == 0) {
10825 bp->flags |= USING_DAC_FLAG;
10826 if (pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK) != 0) {
10827 printk(KERN_ERR PFX "pci_set_consistent_dma_mask"
10828 " failed, aborting\n");
10830 goto err_out_release;
10833 } else if (pci_set_dma_mask(pdev, DMA_32BIT_MASK) != 0) {
10834 printk(KERN_ERR PFX "System does not support DMA,"
10837 goto err_out_release;
10840 dev->mem_start = pci_resource_start(pdev, 0);
10841 dev->base_addr = dev->mem_start;
10842 dev->mem_end = pci_resource_end(pdev, 0);
10844 dev->irq = pdev->irq;
10846 bp->regview = pci_ioremap_bar(pdev, 0);
10847 if (!bp->regview) {
10848 printk(KERN_ERR PFX "Cannot map register space, aborting\n");
10850 goto err_out_release;
10853 bp->doorbells = ioremap_nocache(pci_resource_start(pdev, 2),
10854 min_t(u64, BNX2X_DB_SIZE,
10855 pci_resource_len(pdev, 2)));
10856 if (!bp->doorbells) {
10857 printk(KERN_ERR PFX "Cannot map doorbell space, aborting\n");
10859 goto err_out_unmap;
10862 bnx2x_set_power_state(bp, PCI_D0);
10864 /* clean indirect addresses */
10865 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
10866 PCICFG_VENDOR_ID_OFFSET);
10867 REG_WR(bp, PXP2_REG_PGL_ADDR_88_F0 + BP_PORT(bp)*16, 0);
10868 REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F0 + BP_PORT(bp)*16, 0);
10869 REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0 + BP_PORT(bp)*16, 0);
10870 REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0 + BP_PORT(bp)*16, 0);
10872 dev->watchdog_timeo = TX_TIMEOUT;
10874 dev->netdev_ops = &bnx2x_netdev_ops;
10875 dev->ethtool_ops = &bnx2x_ethtool_ops;
10876 dev->features |= NETIF_F_SG;
10877 dev->features |= NETIF_F_HW_CSUM;
10878 if (bp->flags & USING_DAC_FLAG)
10879 dev->features |= NETIF_F_HIGHDMA;
10881 dev->features |= (NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX);
10882 bp->flags |= (HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG);
10884 dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
10885 dev->features |= NETIF_F_TSO6;
10891 iounmap(bp->regview);
10892 bp->regview = NULL;
10894 if (bp->doorbells) {
10895 iounmap(bp->doorbells);
10896 bp->doorbells = NULL;
10900 if (atomic_read(&pdev->enable_cnt) == 1)
10901 pci_release_regions(pdev);
10904 pci_disable_device(pdev);
10905 pci_set_drvdata(pdev, NULL);
static int __devinit bnx2x_get_pcie_width(struct bnx2x *bp)
{
	u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);

	return (val & PCICFG_LINK_WIDTH) >> PCICFG_LINK_WIDTH_SHIFT;
}

/* return value: 1 = 2.5GHz (Gen1), 2 = 5GHz (Gen2) */
static int __devinit bnx2x_get_pcie_speed(struct bnx2x *bp)
{
	u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);

	return (val & PCICFG_LINK_SPEED) >> PCICFG_LINK_SPEED_SHIFT;
}
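/*
 * The two helpers above read the negotiated link width/speed through the
 * chip's internal GRC window.  A minimal sketch of the equivalent query
 * through standard PCIe config space (illustrative only, not used by this
 * driver; bp->pcie_cap was located in bnx2x_init_dev()):
 *
 *	u16 link_status;
 *
 *	pci_read_config_word(bp->pdev, bp->pcie_cap + PCI_EXP_LNKSTA,
 *			     &link_status);
 *	speed = link_status & 0xf;		// 1 = 2.5GHz, 2 = 5GHz
 *	width = (link_status >> 4) & 0x3f;	// negotiated lanes (x1..x16)
 */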
static int __devinit bnx2x_init_one(struct pci_dev *pdev,
				    const struct pci_device_id *ent)
{
	static int version_printed;
	struct net_device *dev = NULL;
	struct bnx2x *bp;
	int rc;

	if (version_printed++ == 0)
		printk(KERN_INFO "%s", version);

	/* dev zeroed in init_etherdev */
	dev = alloc_etherdev_mq(sizeof(*bp), MAX_CONTEXT);
	if (!dev) {
		printk(KERN_ERR PFX "Cannot allocate net device\n");
		return -ENOMEM;
	}

	bp = netdev_priv(dev);
	bp->msglevel = debug;

	rc = bnx2x_init_dev(pdev, dev);
	if (rc < 0) {
		free_netdev(dev);
		return rc;
	}

	pci_set_drvdata(pdev, dev);

	rc = bnx2x_init_bp(bp);
	if (rc)
		goto init_one_exit;

	rc = register_netdev(dev);
	if (rc) {
		dev_err(&pdev->dev, "Cannot register net device\n");
		goto init_one_exit;
	}

	printk(KERN_INFO "%s: %s (%c%d) PCI-E x%d %s found at mem %lx,"
	       " IRQ %d, ", dev->name, board_info[ent->driver_data].name,
	       (CHIP_REV(bp) >> 12) + 'A', (CHIP_METAL(bp) >> 4),
	       bnx2x_get_pcie_width(bp),
	       (bnx2x_get_pcie_speed(bp) == 2) ? "5GHz (Gen2)" : "2.5GHz",
	       dev->base_addr, bp->pdev->irq);
	printk(KERN_CONT "node addr %pM\n", dev->dev_addr);

	return 0;

init_one_exit:
	/* unwind bnx2x_init_dev() in reverse order */
	if (bp->regview)
		iounmap(bp->regview);
	if (bp->doorbells)
		iounmap(bp->doorbells);
	free_netdev(dev);
	if (atomic_read(&pdev->enable_cnt) == 1)
		pci_release_regions(pdev);
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);

	return rc;
}
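/*
 * Layout note for bnx2x_init_one() above (illustrative, not driver code):
 * alloc_etherdev_mq(sizeof(*bp), MAX_CONTEXT) allocates one struct
 * net_device with sizeof(struct bnx2x) bytes of private area and
 * MAX_CONTEXT TX queues; netdev_priv() then returns that private area:
 *
 *	struct net_device *dev = alloc_etherdev_mq(sizeof(struct bnx2x), n);
 *	struct bnx2x *bp = netdev_priv(dev);	// priv area follows dev
 */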
static void __devexit bnx2x_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp;

	if (!dev) {
		printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
		return;
	}
	bp = netdev_priv(dev);

	unregister_netdev(dev);
	if (bp->regview)
		iounmap(bp->regview);
	if (bp->doorbells)
		iounmap(bp->doorbells);
	free_netdev(dev);
	if (atomic_read(&pdev->enable_cnt) == 1)
		pci_release_regions(pdev);
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
}
static int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp;

	if (!dev) {
		printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
		return -ENODEV;
	}
	bp = netdev_priv(dev);

	rtnl_lock();
	pci_save_state(pdev);
	if (!netif_running(dev)) {
		rtnl_unlock();
		return 0;
	}
	netif_device_detach(dev);
	bnx2x_nic_unload(bp, UNLOAD_CLOSE);
	bnx2x_set_power_state(bp, pci_choose_state(pdev, state));
	rtnl_unlock();

	return 0;
}

static int bnx2x_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp;
	int rc;

	if (!dev) {
		printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
		return -ENODEV;
	}
	bp = netdev_priv(dev);

	rtnl_lock();
	pci_restore_state(pdev);
	if (!netif_running(dev)) {
		rtnl_unlock();
		return 0;
	}
	bnx2x_set_power_state(bp, PCI_D0);
	netif_device_attach(dev);
	rc = bnx2x_nic_load(bp, LOAD_OPEN);
	rtnl_unlock();

	return rc;
}
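/*
 * Power-state note for suspend/resume above (illustrative, not driver
 * code): pci_choose_state() maps the PM message to a PCI D-state
 * (typically PCI_D3hot for PMSG_SUSPEND), and bnx2x_set_power_state()
 * programs it through the PM capability found in bnx2x_init_dev(),
 * roughly:
 *
 *	u16 pmcsr;
 *
 *	pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);
 *	pmcsr = (pmcsr & ~PCI_PM_CTRL_STATE_MASK) | PCI_D3hot;
 *	pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, pmcsr);
 */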
static int bnx2x_eeh_nic_unload(struct bnx2x *bp)
{
	int i;

	bp->state = BNX2X_STATE_ERROR;

	bp->rx_mode = BNX2X_RX_MODE_NONE;

	bnx2x_netif_stop(bp, 0);

	del_timer_sync(&bp->timer);
	bp->stats_state = STATS_STATE_DISABLED;
	DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");

	/* Release IRQs */
	bnx2x_free_irq(bp);

	/* Invalidate the multicast CAM entries on E1 */
	if (CHIP_IS_E1(bp)) {
		struct mac_configuration_cmd *config =
						bnx2x_sp(bp, mcast_config);

		for (i = 0; i < config->hdr.length; i++)
			CAM_INVALIDATE(config->config_table[i]);
	}

	/* Free SKBs, SGEs, TPA pool and driver internals */
	bnx2x_free_skbs(bp);
	for_each_rx_queue(bp, i)
		bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
	for_each_rx_queue(bp, i)
		netif_napi_del(&bnx2x_fp(bp, i, napi));
	bnx2x_free_mem(bp);

	bp->state = BNX2X_STATE_CLOSED;

	netif_carrier_off(bp->dev);

	return 0;
}
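/*
 * for_each_rx_queue() used above is a simple iterator over the active RX
 * queues; in bnx2x.h it is defined along the lines of (sketch, field name
 * per this driver's private struct):
 *
 *	#define for_each_rx_queue(bp, var) \
 *		for (var = 0; var < (bp)->num_rx_queues; var++)
 */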
static void bnx2x_eeh_recover(struct bnx2x *bp)
{
	u32 val;

	mutex_init(&bp->port.phy_mutex);

	bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
	bp->link_params.shmem_base = bp->common.shmem_base;
	BNX2X_DEV_INFO("shmem offset is 0x%x\n", bp->common.shmem_base);

	if (!bp->common.shmem_base ||
	    (bp->common.shmem_base < 0xA0000) ||
	    (bp->common.shmem_base >= 0xC0000)) {
		BNX2X_DEV_INFO("MCP not active\n");
		bp->flags |= NO_MCP_FLAG;
		return;
	}

	val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
	if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
		!= (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
		BNX2X_ERR("BAD MCP validity signature\n");

	if (!BP_NOMCP(bp)) {
		bp->fw_seq = (SHMEM_RD(bp, func_mb[BP_FUNC(bp)].drv_mb_header)
			      & DRV_MSG_SEQ_NUMBER_MASK);
		BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
	}
}
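/*
 * SHMEM_RD() used above resolves a field of the shared-memory region
 * exported by the MCP firmware; in bnx2x.h it expands roughly to (sketch):
 *
 *	REG_RD(bp, bp->common.shmem_base +
 *		   offsetof(struct shmem_region, field))
 *
 * which is why a shmem_base outside the expected window is treated as
 * "MCP not active".
 */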
/**
 * bnx2x_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current PCI connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t bnx2x_io_error_detected(struct pci_dev *pdev,
						pci_channel_state_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp = netdev_priv(dev);

	rtnl_lock();

	netif_device_detach(dev);

	if (netif_running(dev))
		bnx2x_eeh_nic_unload(bp);

	pci_disable_device(pdev);

	rtnl_unlock();

	/* Request a slot reset */
	return PCI_ERS_RESULT_NEED_RESET;
}
/**
 * bnx2x_io_slot_reset - called after the PCI bus has been reset
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot.
 */
static pci_ers_result_t bnx2x_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp = netdev_priv(dev);

	rtnl_lock();

	if (pci_enable_device(pdev)) {
		dev_err(&pdev->dev,
			"Cannot re-enable PCI device after reset\n");
		rtnl_unlock();
		return PCI_ERS_RESULT_DISCONNECT;
	}

	pci_set_master(pdev);
	pci_restore_state(pdev);

	if (netif_running(dev))
		bnx2x_set_power_state(bp, PCI_D0);

	rtnl_unlock();

	return PCI_ERS_RESULT_RECOVERED;
}
/**
 * bnx2x_io_resume - called when traffic can start flowing again
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells us that
 * it's OK to resume normal operation.
 */
static void bnx2x_io_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp = netdev_priv(dev);

	rtnl_lock();
	bnx2x_eeh_recover(bp);
	if (netif_running(dev))
		bnx2x_nic_load(bp, LOAD_NORMAL);
	netif_device_attach(dev);
	rtnl_unlock();
}
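/*
 * Recovery flow implemented by the three callbacks above: the PCI error
 * recovery core calls error_detected() (we detach and return
 * PCI_ERS_RESULT_NEED_RESET), then slot_reset() after the link reset (we
 * re-enable the device and return PCI_ERS_RESULT_RECOVERED), and finally
 * resume(), where the NIC is reloaded and traffic restarts.
 */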
static struct pci_error_handlers bnx2x_err_handler = {
	.error_detected = bnx2x_io_error_detected,
	.slot_reset     = bnx2x_io_slot_reset,
	.resume         = bnx2x_io_resume,
};

static struct pci_driver bnx2x_pci_driver = {
	.name        = DRV_MODULE_NAME,
	.id_table    = bnx2x_pci_tbl,
	.probe       = bnx2x_init_one,
	.remove      = __devexit_p(bnx2x_remove_one),
	.suspend     = bnx2x_suspend,
	.resume      = bnx2x_resume,
	.err_handler = &bnx2x_err_handler,
};
static int __init bnx2x_init(void)
{
	int rc;

	bnx2x_wq = create_singlethread_workqueue("bnx2x");
	if (bnx2x_wq == NULL) {
		printk(KERN_ERR PFX "Cannot create workqueue\n");
		return -ENOMEM;
	}
	rc = pci_register_driver(&bnx2x_pci_driver);
	if (rc) /* don't leak the workqueue if registration fails */
		destroy_workqueue(bnx2x_wq);
	return rc;
}

static void __exit bnx2x_cleanup(void)
{
	pci_unregister_driver(&bnx2x_pci_driver);
	destroy_workqueue(bnx2x_wq);
}

module_init(bnx2x_init);
module_exit(bnx2x_cleanup);