/* bnx2x_main.c: Broadcom Everest network driver.
 *
 * Copyright (c) 2007-2009 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
 * Written by: Eliezer Tamir
 * Based on code from Michael Chan's bnx2 driver
 * UDP CSUM errata workaround by Arik Gendelman
 * Slowpath and fastpath rework by Vladislav Zolotarov
 * Statistics and Link management by Yitchak Gertner
 *
 */
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/device.h>  /* for dev_info() */
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/dma-mapping.h>
#include <linux/bitops.h>
#include <linux/irq.h>
#include <linux/delay.h>
#include <asm/byteorder.h>
#include <linux/time.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/if_vlan.h>
#include <net/ip.h>
#include <net/tcp.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <linux/workqueue.h>
#include <linux/crc32.h>
#include <linux/crc32c.h>
#include <linux/prefetch.h>
#include <linux/zlib.h>
#include <linux/io.h>

#include "bnx2x.h"
#include "bnx2x_init.h"
#include "bnx2x_init_ops.h"
#include "bnx2x_dump.h"
#define DRV_MODULE_VERSION	"1.48.114-1"
#define DRV_MODULE_RELDATE	"2009/07/29"
#define BNX2X_BC_VER		0x040200

#include <linux/firmware.h>
#include "bnx2x_fw_file_hdr.h"
/* FW files */
#define FW_FILE_PREFIX_E1	"bnx2x-e1-"
#define FW_FILE_PREFIX_E1H	"bnx2x-e1h-"

/* Time in jiffies before concluding the transmitter is hung */
#define TX_TIMEOUT		(5*HZ)
static char version[] __devinitdata =
	"Broadcom NetXtreme II 5771x 10Gigabit Ethernet Driver "
	DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("Eliezer Tamir");
MODULE_DESCRIPTION("Broadcom NetXtreme II BCM57710/57711/57711E Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
static int multi_mode = 1;
module_param(multi_mode, int, 0);
MODULE_PARM_DESC(multi_mode, " Multi queue mode "
			     "(0 Disable; 1 Enable (default))");

static int num_rx_queues;
module_param(num_rx_queues, int, 0);
MODULE_PARM_DESC(num_rx_queues, " Number of Rx queues for multi_mode=1"
				" (default is half number of CPUs)");

static int num_tx_queues;
module_param(num_tx_queues, int, 0);
MODULE_PARM_DESC(num_tx_queues, " Number of Tx queues for multi_mode=1"
				" (default is half number of CPUs)");

static int disable_tpa;
module_param(disable_tpa, int, 0);
MODULE_PARM_DESC(disable_tpa, " Disable the TPA (LRO) feature");

static int int_mode;
module_param(int_mode, int, 0);
MODULE_PARM_DESC(int_mode, " Force interrupt mode (1 INT#x; 2 MSI)");

static int dropless_fc;
module_param(dropless_fc, int, 0);
MODULE_PARM_DESC(dropless_fc, " Pause on exhausted host ring");

static int poll;
module_param(poll, int, 0);
MODULE_PARM_DESC(poll, " Use polling (for debug)");

static int mrrs = -1;
module_param(mrrs, int, 0);
MODULE_PARM_DESC(mrrs, " Force Max Read Req Size (0..3) (for debug)");

static int debug;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, " Default debug msglevel");
static int load_count[3]; /* 0-common, 1-port0, 2-port1 */

static struct workqueue_struct *bnx2x_wq;
enum bnx2x_board_type {
	BCM57710 = 0,
	BCM57711,
	BCM57711E,
};

/* indexed by board_type, above */
static struct {
	char *name;
} board_info[] __devinitdata = {
	{ "Broadcom NetXtreme II BCM57710 XGb" },
	{ "Broadcom NetXtreme II BCM57711 XGb" },
	{ "Broadcom NetXtreme II BCM57711E XGb" }
};

static const struct pci_device_id bnx2x_pci_tbl[] = {
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57710,
		PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57710 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57711,
		PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57711 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57711E,
		PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57711E },
	{ 0 }
};

MODULE_DEVICE_TABLE(pci, bnx2x_pci_tbl);
/****************************************************************************
* General service functions
****************************************************************************/

/* used only at init
 * locking is done by mcp
 */
static void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val)
{
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_DATA, val);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);
}
static u32 bnx2x_reg_rd_ind(struct bnx2x *bp, u32 addr)
{
	u32 val;

	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
	pci_read_config_dword(bp->pdev, PCICFG_GRC_DATA, &val);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);

	return val;
}
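
/*
 * Both helpers above tunnel GRC accesses through the PCI configuration
 * space window: the target address goes into PCICFG_GRC_ADDRESS, the data
 * moves through PCICFG_GRC_DATA, and the window is then parked back on
 * PCICFG_VENDOR_ID_OFFSET so a stray config cycle cannot hit a live
 * register.  This is what makes them usable at init time, before the
 * BAR-based REG_RD/REG_WR fast path and the DMAE engine are ready.
 */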
static const u32 dmae_reg_go_c[] = {
	DMAE_REG_GO_C0, DMAE_REG_GO_C1, DMAE_REG_GO_C2, DMAE_REG_GO_C3,
	DMAE_REG_GO_C4, DMAE_REG_GO_C5, DMAE_REG_GO_C6, DMAE_REG_GO_C7,
	DMAE_REG_GO_C8, DMAE_REG_GO_C9, DMAE_REG_GO_C10, DMAE_REG_GO_C11,
	DMAE_REG_GO_C12, DMAE_REG_GO_C13, DMAE_REG_GO_C14, DMAE_REG_GO_C15
};

/* copy command into DMAE command memory and set DMAE command go */
static void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae,
			    int idx)
{
	u32 cmd_offset;
	int i;

	cmd_offset = (DMAE_REG_CMD_MEM + sizeof(struct dmae_command) * idx);
	for (i = 0; i < (sizeof(struct dmae_command)/4); i++) {
		REG_WR(bp, cmd_offset + i*4, *(((u32 *)dmae) + i));

		DP(BNX2X_MSG_OFF, "DMAE cmd[%d].%d (0x%08x) : 0x%08x\n",
		   idx, i, cmd_offset + i*4, *(((u32 *)dmae) + i));
	}
	REG_WR(bp, dmae_reg_go_c[idx], 1);
}
void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
		      u32 len32)
{
	struct dmae_command dmae;
	u32 *wb_comp = bnx2x_sp(bp, wb_comp);
	int cnt = 200;

	if (!bp->dmae_ready) {
		u32 *data = bnx2x_sp(bp, wb_data[0]);

		DP(BNX2X_MSG_OFF, "DMAE is not ready (dst_addr %08x len32 %d)"
		   " using indirect\n", dst_addr, len32);
		bnx2x_init_ind_wr(bp, dst_addr, data, len32);
		return;
	}

	memset(&dmae, 0, sizeof(struct dmae_command));

	dmae.opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
		       DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
		       DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
		       DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
		       DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
		       (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
		       (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
	dmae.src_addr_lo = U64_LO(dma_addr);
	dmae.src_addr_hi = U64_HI(dma_addr);
	dmae.dst_addr_lo = dst_addr >> 2;
	dmae.dst_addr_hi = 0;
	dmae.len = len32;
	dmae.comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
	dmae.comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
	dmae.comp_val = DMAE_COMP_VAL;

	DP(BNX2X_MSG_OFF, "DMAE: opcode 0x%08x\n"
	   DP_LEVEL "src_addr  [%x:%08x]  len [%d *4]  "
		    "dst_addr [%x:%08x (%08x)]\n"
	   DP_LEVEL "comp_addr [%x:%08x]  comp_val 0x%08x\n",
	   dmae.opcode, dmae.src_addr_hi, dmae.src_addr_lo,
	   dmae.len, dmae.dst_addr_hi, dmae.dst_addr_lo, dst_addr,
	   dmae.comp_addr_hi, dmae.comp_addr_lo, dmae.comp_val);
	DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
	   bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
	   bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);

	mutex_lock(&bp->dmae_mutex);

	*wb_comp = 0;

	bnx2x_post_dmae(bp, &dmae, INIT_DMAE_C(bp));

	udelay(5);

	while (*wb_comp != DMAE_COMP_VAL) {
		DP(BNX2X_MSG_OFF, "wb_comp 0x%08x\n", *wb_comp);

		if (!cnt) {
			BNX2X_ERR("DMAE timeout!\n");
			break;
		}
		cnt--;
		/* adjust delay for emulation/FPGA */
		if (CHIP_REV_IS_SLOW(bp))
			msleep(100);
		else
			udelay(5);
	}

	mutex_unlock(&bp->dmae_mutex);
}
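
/*
 * Typical use of the write path above is through the REG_WR_DMAE() macro:
 * stage the dwords in the slowpath write-back buffer and fire a single
 * PCI-to-GRC copy, e.g.
 *
 *	wb_write[0] = val_hi;
 *	wb_write[1] = val_lo;
 *	REG_WR_DMAE(bp, reg, wb_write, 2);
 *
 * (this is exactly what bnx2x_wb_wr() below does).  Completion is detected
 * by polling the wb_comp sentinel that the engine writes back over PCI.
 */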
void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
{
	struct dmae_command dmae;
	u32 *wb_comp = bnx2x_sp(bp, wb_comp);
	int cnt = 200;

	if (!bp->dmae_ready) {
		u32 *data = bnx2x_sp(bp, wb_data[0]);
		int i;

		DP(BNX2X_MSG_OFF, "DMAE is not ready (src_addr %08x len32 %d)"
		   " using indirect\n", src_addr, len32);
		for (i = 0; i < len32; i++)
			data[i] = bnx2x_reg_rd_ind(bp, src_addr + i*4);
		return;
	}

	memset(&dmae, 0, sizeof(struct dmae_command));

	dmae.opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
		       DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
		       DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
		       DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
		       DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
		       (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
		       (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
	dmae.src_addr_lo = src_addr >> 2;
	dmae.src_addr_hi = 0;
	dmae.dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_data));
	dmae.dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_data));
	dmae.len = len32;
	dmae.comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
	dmae.comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
	dmae.comp_val = DMAE_COMP_VAL;

	DP(BNX2X_MSG_OFF, "DMAE: opcode 0x%08x\n"
	   DP_LEVEL "src_addr  [%x:%08x]  len [%d *4]  "
		    "dst_addr [%x:%08x (%08x)]\n"
	   DP_LEVEL "comp_addr [%x:%08x]  comp_val 0x%08x\n",
	   dmae.opcode, dmae.src_addr_hi, dmae.src_addr_lo,
	   dmae.len, dmae.dst_addr_hi, dmae.dst_addr_lo, src_addr,
	   dmae.comp_addr_hi, dmae.comp_addr_lo, dmae.comp_val);

	mutex_lock(&bp->dmae_mutex);

	memset(bnx2x_sp(bp, wb_data[0]), 0, sizeof(u32) * 4);
	*wb_comp = 0;

	bnx2x_post_dmae(bp, &dmae, INIT_DMAE_C(bp));

	udelay(5);

	while (*wb_comp != DMAE_COMP_VAL) {

		if (!cnt) {
			BNX2X_ERR("DMAE timeout!\n");
			break;
		}
		cnt--;
		/* adjust delay for emulation/FPGA */
		if (CHIP_REV_IS_SLOW(bp))
			msleep(100);
		else
			udelay(5);
	}
	DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
	   bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
	   bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);

	mutex_unlock(&bp->dmae_mutex);
}
/* used only for slowpath so not inlined */
static void bnx2x_wb_wr(struct bnx2x *bp, int reg, u32 val_hi, u32 val_lo)
{
	u32 wb_write[2];

	wb_write[0] = val_hi;
	wb_write[1] = val_lo;
	REG_WR_DMAE(bp, reg, wb_write, 2);
}

static u64 bnx2x_wb_rd(struct bnx2x *bp, int reg)
{
	u32 wb_data[2];

	REG_RD_DMAE(bp, reg, wb_data, 2);

	return HILO_U64(wb_data[0], wb_data[1]);
}
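
/*
 * Wide-bus (64-bit) registers are moved as a {hi, lo} dword pair in a
 * single DMAE transaction; HILO_U64() reassembles the pair on the read
 * side.  These wrappers are slowpath-only, hence not inlined.
 */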
static int bnx2x_mc_assert(struct bnx2x *bp)
{
	char last_idx;
	int i, rc = 0;
	u32 row0, row1, row2, row3;

	/* XSTORM */
	last_idx = REG_RD8(bp, BAR_XSTRORM_INTMEM +
			   XSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("XSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("XSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* TSTORM */
	last_idx = REG_RD8(bp, BAR_TSTRORM_INTMEM +
			   TSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("TSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("TSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* CSTORM */
	last_idx = REG_RD8(bp, BAR_CSTRORM_INTMEM +
			   CSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("CSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("CSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* USTORM */
	last_idx = REG_RD8(bp, BAR_USTRORM_INTMEM +
			   USTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("USTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("USTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	return rc;
}
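
/*
 * The MCP keeps a circular trace buffer in its scratchpad; the word at
 * scratchpad offset 0xf104 marks the current write position.  The two
 * loops below therefore print the buffer in chronological order: from the
 * mark to the end of the buffer, then from the start back up to the mark.
 */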
static void bnx2x_fw_dump(struct bnx2x *bp)
{
	u32 mark, offset;
	__be32 data[9];
	int word;

	mark = REG_RD(bp, MCP_REG_MCPR_SCRATCH + 0xf104);
	mark = ((mark + 0x3) & ~0x3);
	printk(KERN_ERR PFX "begin fw dump (mark 0x%x)\n", mark);

	printk(KERN_ERR PFX);
	for (offset = mark - 0x08000000; offset <= 0xF900; offset += 0x8*4) {
		for (word = 0; word < 8; word++)
			data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
						  offset + 4*word));
		data[8] = 0x0;
		printk(KERN_CONT "%s", (char *)data);
	}
	for (offset = 0xF108; offset <= mark - 0x08000000; offset += 0x8*4) {
		for (word = 0; word < 8; word++)
			data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
						  offset + 4*word));
		data[8] = 0x0;
		printk(KERN_CONT "%s", (char *)data);
	}
	printk(KERN_ERR PFX "end of fw dump\n");
}
static void bnx2x_panic_dump(struct bnx2x *bp)
{
	int i;
	u16 j, start, end;

	bp->stats_state = STATS_STATE_DISABLED;
	DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");

	BNX2X_ERR("begin crash dump -----------------\n");

	/* Indices */
	/* Common */
	BNX2X_ERR("def_c_idx(%u)  def_u_idx(%u)  def_x_idx(%u)"
		  "  def_t_idx(%u)  def_att_idx(%u)  attn_state(%u)"
		  "  spq_prod_idx(%u)\n",
		  bp->def_c_idx, bp->def_u_idx, bp->def_x_idx, bp->def_t_idx,
		  bp->def_att_idx, bp->attn_state, bp->spq_prod_idx);

	/* Rx */
	for_each_rx_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		BNX2X_ERR("fp%d: rx_bd_prod(%x)  rx_bd_cons(%x)"
			  "  *rx_bd_cons_sb(%x)  rx_comp_prod(%x)"
			  "  rx_comp_cons(%x)  *rx_cons_sb(%x)\n",
			  i, fp->rx_bd_prod, fp->rx_bd_cons,
			  le16_to_cpu(*fp->rx_bd_cons_sb), fp->rx_comp_prod,
			  fp->rx_comp_cons, le16_to_cpu(*fp->rx_cons_sb));
		BNX2X_ERR("      rx_sge_prod(%x)  last_max_sge(%x)"
			  "  fp_u_idx(%x) *sb_u_idx(%x)\n",
			  fp->rx_sge_prod, fp->last_max_sge,
			  le16_to_cpu(fp->fp_u_idx),
			  fp->status_blk->u_status_block.status_block_index);
	}

	/* Tx */
	for_each_tx_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		BNX2X_ERR("fp%d: tx_pkt_prod(%x)  tx_pkt_cons(%x)"
			  "  tx_bd_prod(%x)  tx_bd_cons(%x)  *tx_cons_sb(%x)\n",
			  i, fp->tx_pkt_prod, fp->tx_pkt_cons, fp->tx_bd_prod,
			  fp->tx_bd_cons, le16_to_cpu(*fp->tx_cons_sb));
		BNX2X_ERR("      fp_c_idx(%x)  *sb_c_idx(%x)"
			  "  tx_db_prod(%x)\n", le16_to_cpu(fp->fp_c_idx),
			  fp->status_blk->c_status_block.status_block_index,
			  fp->tx_db.data.prod);
	}

	/* Rings */
	/* Rx */
	for_each_rx_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10);
		end = RX_BD(le16_to_cpu(*fp->rx_cons_sb) + 503);
		for (j = start; j != end; j = RX_BD(j + 1)) {
			u32 *rx_bd = (u32 *)&fp->rx_desc_ring[j];
			struct sw_rx_bd *sw_bd = &fp->rx_buf_ring[j];

			BNX2X_ERR("fp%d: rx_bd[%x]=[%x:%x]  sw_bd=[%p]\n",
				  i, j, rx_bd[1], rx_bd[0], sw_bd->skb);
		}

		start = RX_SGE(fp->rx_sge_prod);
		end = RX_SGE(fp->last_max_sge);
		for (j = start; j != end; j = RX_SGE(j + 1)) {
			u32 *rx_sge = (u32 *)&fp->rx_sge_ring[j];
			struct sw_rx_page *sw_page = &fp->rx_page_ring[j];

			BNX2X_ERR("fp%d: rx_sge[%x]=[%x:%x]  sw_page=[%p]\n",
				  i, j, rx_sge[1], rx_sge[0], sw_page->page);
		}

		start = RCQ_BD(fp->rx_comp_cons - 10);
		end = RCQ_BD(fp->rx_comp_cons + 503);
		for (j = start; j != end; j = RCQ_BD(j + 1)) {
			u32 *cqe = (u32 *)&fp->rx_comp_ring[j];

			BNX2X_ERR("fp%d: cqe[%x]=[%x:%x:%x:%x]\n",
				  i, j, cqe[0], cqe[1], cqe[2], cqe[3]);
		}
	}

	/* Tx */
	for_each_tx_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		start = TX_BD(le16_to_cpu(*fp->tx_cons_sb) - 10);
		end = TX_BD(le16_to_cpu(*fp->tx_cons_sb) + 245);
		for (j = start; j != end; j = TX_BD(j + 1)) {
			struct sw_tx_bd *sw_bd = &fp->tx_buf_ring[j];

			BNX2X_ERR("fp%d: packet[%x]=[%p,%x]\n",
				  i, j, sw_bd->skb, sw_bd->first_bd);
		}

		start = TX_BD(fp->tx_bd_cons - 10);
		end = TX_BD(fp->tx_bd_cons + 254);
		for (j = start; j != end; j = TX_BD(j + 1)) {
			u32 *tx_bd = (u32 *)&fp->tx_desc_ring[j];

			BNX2X_ERR("fp%d: tx_bd[%x]=[%x:%x:%x:%x]\n",
				  i, j, tx_bd[0], tx_bd[1], tx_bd[2], tx_bd[3]);
		}
	}

	bnx2x_fw_dump(bp);
	bnx2x_mc_assert(bp);
	BNX2X_ERR("end crash dump -----------------\n");
}
static void bnx2x_int_enable(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
	u32 val = REG_RD(bp, addr);
	int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
	int msi = (bp->flags & USING_MSI_FLAG) ? 1 : 0;

	if (msix) {
		val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			 HC_CONFIG_0_REG_INT_LINE_EN_0);
		val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);
	} else if (msi) {
		val &= ~HC_CONFIG_0_REG_INT_LINE_EN_0;
		val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);
	} else {
		val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_INT_LINE_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);

		DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
		   val, port, addr);

		REG_WR(bp, addr, val);

		val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0;
	}

	DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)  mode %s\n",
	   val, port, addr, (msix ? "MSI-X" : (msi ? "MSI" : "INTx")));

	REG_WR(bp, addr, val);
	/*
	 * Ensure that HC_CONFIG is written before leading/trailing edge config
	 */
	mmiowb();
	barrier();

	if (CHIP_IS_E1H(bp)) {
		/* init leading/trailing edge */
		if (IS_E1HMF(bp)) {
			val = (0xee0f | (1 << (BP_E1HVN(bp) + 4)));
			if (bp->port.pmf)
				/* enable nig and gpio3 attention */
				val |= 0x1100;
		} else
			val = 0xffff;

		REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
		REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
	}

	/* Make sure that interrupts are indeed enabled from here on */
	mmiowb();
}
static void bnx2x_int_disable(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
	u32 val = REG_RD(bp, addr);

	val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
		 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
		 HC_CONFIG_0_REG_INT_LINE_EN_0 |
		 HC_CONFIG_0_REG_ATTN_BIT_EN_0);

	DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
	   val, port, addr);

	/* flush all outstanding writes */
	mmiowb();

	REG_WR(bp, addr, val);
	if (REG_RD(bp, addr) != val)
		BNX2X_ERR("BUG! proper val not read from IGU!\n");
}
static void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw)
{
	int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
	int i, offset;

	/* disable interrupt handling */
	atomic_inc(&bp->intr_sem);
	smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */

	if (disable_hw)
		/* prevent the HW from sending interrupts */
		bnx2x_int_disable(bp);

	/* make sure all ISRs are done */
	if (msix) {
		synchronize_irq(bp->msix_table[0].vector);
		offset = 1;
		for_each_queue(bp, i)
			synchronize_irq(bp->msix_table[i + offset].vector);
	} else
		synchronize_irq(bp->pdev->irq);

	/* make sure sp_task is not running */
	cancel_delayed_work(&bp->sp_task);
	flush_workqueue(bnx2x_wq);
}
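
/*
 * The teardown above quiesces interrupts in three steps: raise intr_sem so
 * every ISR returns early, optionally mask the HC so the chip stops
 * asserting interrupts, then synchronize_irq() and flush the slowpath
 * workqueue so no handler is still running when the caller proceeds.
 */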
/* fast path */

/*
 * General service functions
 */

static inline void bnx2x_ack_sb(struct bnx2x *bp, u8 sb_id,
				u8 storm, u16 index, u8 op, u8 update)
{
	u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
		       COMMAND_REG_INT_ACK);
	struct igu_ack_register igu_ack;

	igu_ack.status_block_index = index;
	igu_ack.sb_id_and_flags =
			((sb_id << IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT) |
			 (storm << IGU_ACK_REGISTER_STORM_ID_SHIFT) |
			 (update << IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT) |
			 (op << IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT));

	DP(BNX2X_MSG_OFF, "write 0x%08x to HC addr 0x%x\n",
	   (*(u32 *)&igu_ack), hc_addr);
	REG_WR(bp, hc_addr, (*(u32 *)&igu_ack));

	/* Make sure that ACK is written */
	mmiowb();
	barrier();
}
static inline u16 bnx2x_update_fpsb_idx(struct bnx2x_fastpath *fp)
{
	struct host_status_block *fpsb = fp->status_blk;
	u16 rc = 0;

	barrier(); /* status block is written to by the chip */
	if (fp->fp_c_idx != fpsb->c_status_block.status_block_index) {
		fp->fp_c_idx = fpsb->c_status_block.status_block_index;
		rc |= 1;
	}
	if (fp->fp_u_idx != fpsb->u_status_block.status_block_index) {
		fp->fp_u_idx = fpsb->u_status_block.status_block_index;
		rc |= 2;
	}
	return rc;
}
static u16 bnx2x_ack_int(struct bnx2x *bp)
{
	u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
		       COMMAND_REG_SIMD_MASK);
	u32 result = REG_RD(bp, hc_addr);

	DP(BNX2X_MSG_OFF, "read 0x%08x from HC addr 0x%x\n",
	   result, hc_addr);

	return result;
}

/*
 * fast path service functions
 */
static inline int bnx2x_has_tx_work_unload(struct bnx2x_fastpath *fp)
{
	/* Tell compiler that consumer and producer can change */
	barrier();
	return (fp->tx_pkt_prod != fp->tx_pkt_cons);
}
/* free skb in the packet ring at pos idx
 * return idx of last bd freed
 */
static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			     u16 idx)
{
	struct sw_tx_bd *tx_buf = &fp->tx_buf_ring[idx];
	struct eth_tx_start_bd *tx_start_bd;
	struct eth_tx_bd *tx_data_bd;
	struct sk_buff *skb = tx_buf->skb;
	u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
	int nbd;

	DP(BNX2X_MSG_OFF, "pkt_idx %d  buff @(%p)->skb %p\n",
	   idx, tx_buf, skb);

	/* unmap first bd */
	DP(BNX2X_MSG_OFF, "free bd_idx %d\n", bd_idx);
	tx_start_bd = &fp->tx_desc_ring[bd_idx].start_bd;
	pci_unmap_single(bp->pdev, BD_UNMAP_ADDR(tx_start_bd),
			 BD_UNMAP_LEN(tx_start_bd), PCI_DMA_TODEVICE);

	nbd = le16_to_cpu(tx_start_bd->nbd) - 1;
#ifdef BNX2X_STOP_ON_ERROR
	if ((nbd - 1) > (MAX_SKB_FRAGS + 2)) {
		BNX2X_ERR("BAD nbd!\n");
		bnx2x_panic();
	}
#endif
	new_cons = nbd + tx_buf->first_bd;

	/* Get the next bd */
	bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));

	/* Skip a parse bd... */
	--nbd;
	bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));

	/* ...and the TSO split header bd since they have no mapping */
	if (tx_buf->flags & BNX2X_TSO_SPLIT_BD) {
		--nbd;
		bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
	}

	/* now free frags */
	while (nbd > 0) {

		DP(BNX2X_MSG_OFF, "free frag bd_idx %d\n", bd_idx);
		tx_data_bd = &fp->tx_desc_ring[bd_idx].reg_bd;
		pci_unmap_page(bp->pdev, BD_UNMAP_ADDR(tx_data_bd),
			       BD_UNMAP_LEN(tx_data_bd), PCI_DMA_TODEVICE);
		if (--nbd)
			bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
	}

	/* release skb */
	WARN_ON(!skb);
	dev_kfree_skb_any(skb);
	tx_buf->first_bd = 0;
	tx_buf->skb = NULL;

	return new_cons;
}
static inline u16 bnx2x_tx_avail(struct bnx2x_fastpath *fp)
{
	s16 used;
	u16 prod;
	u16 cons;

	barrier(); /* Tell compiler that prod and cons can change */
	prod = fp->tx_bd_prod;
	cons = fp->tx_bd_cons;

	/* NUM_TX_RINGS = number of "next-page" entries
	   It will be used as a threshold */
	used = SUB_S16(prod, cons) + (s16)NUM_TX_RINGS;

#ifdef BNX2X_STOP_ON_ERROR
	WARN_ON(used < 0);
	WARN_ON(used > fp->bp->tx_ring_size);
	WARN_ON((fp->bp->tx_ring_size - used) > MAX_TX_AVAIL);
#endif

	return (s16)(fp->bp->tx_ring_size) - used;
}
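
/*
 * Example: with prod == cons (an idle queue), "used" evaluates to
 * NUM_TX_RINGS - the per-page "next" BDs that can never carry data - so
 * the queue reports tx_ring_size - NUM_TX_RINGS available descriptors
 * rather than the raw ring size.
 */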
static void bnx2x_tx_int(struct bnx2x_fastpath *fp)
{
	struct bnx2x *bp = fp->bp;
	struct netdev_queue *txq;
	u16 hw_cons, sw_cons, bd_cons = fp->tx_bd_cons;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return;
#endif

	txq = netdev_get_tx_queue(bp->dev, fp->index - bp->num_rx_queues);
	hw_cons = le16_to_cpu(*fp->tx_cons_sb);
	sw_cons = fp->tx_pkt_cons;

	while (sw_cons != hw_cons) {
		u16 pkt_cons;

		pkt_cons = TX_BD(sw_cons);

		/* prefetch(bp->tx_buf_ring[pkt_cons].skb); */

		DP(NETIF_MSG_TX_DONE, "hw_cons %u  sw_cons %u  pkt_cons %u\n",
		   hw_cons, sw_cons, pkt_cons);

/*		if (NEXT_TX_IDX(sw_cons) != hw_cons) {
			rmb();
			prefetch(fp->tx_buf_ring[NEXT_TX_IDX(sw_cons)].skb);
		}
*/
		bd_cons = bnx2x_free_tx_pkt(bp, fp, pkt_cons);
		sw_cons++;
	}

	fp->tx_pkt_cons = sw_cons;
	fp->tx_bd_cons = bd_cons;

	/* TBD need a thresh? */
	if (unlikely(netif_tx_queue_stopped(txq))) {

		/* Need to make the tx_bd_cons update visible to start_xmit()
		 * before checking for netif_tx_queue_stopped().  Without the
		 * memory barrier, there is a small possibility that
		 * start_xmit() will miss it and cause the queue to be stopped
		 * forever.
		 */
		smp_mb();

		if ((netif_tx_queue_stopped(txq)) &&
		    (bp->state == BNX2X_STATE_OPEN) &&
		    (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3))
			netif_tx_wake_queue(txq);
	}
}
static void bnx2x_sp_event(struct bnx2x_fastpath *fp,
			   union eth_rx_cqe *rr_cqe)
{
	struct bnx2x *bp = fp->bp;
	int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data);
	int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data);

	DP(BNX2X_MSG_SP,
	   "fp %d  cid %d  got ramrod #%d  state is %x  type is %d\n",
	   fp->index, cid, command, bp->state,
	   rr_cqe->ramrod_cqe.ramrod_type);

	bp->spq_left++;

	if (fp->index) {
		switch (command | fp->state) {
		case (RAMROD_CMD_ID_ETH_CLIENT_SETUP |
						BNX2X_FP_STATE_OPENING):
			DP(NETIF_MSG_IFUP, "got MULTI[%d] setup ramrod\n",
			   cid);
			fp->state = BNX2X_FP_STATE_OPEN;
			break;

		case (RAMROD_CMD_ID_ETH_HALT | BNX2X_FP_STATE_HALTING):
			DP(NETIF_MSG_IFDOWN, "got MULTI[%d] halt ramrod\n",
			   cid);
			fp->state = BNX2X_FP_STATE_HALTED;
			break;

		default:
			BNX2X_ERR("unexpected MC reply (%d)  "
				  "fp->state is %x\n", command, fp->state);
			break;
		}
		mb(); /* force bnx2x_wait_ramrod() to see the change */
		return;
	}

	switch (command | bp->state) {
	case (RAMROD_CMD_ID_ETH_PORT_SETUP | BNX2X_STATE_OPENING_WAIT4_PORT):
		DP(NETIF_MSG_IFUP, "got setup ramrod\n");
		bp->state = BNX2X_STATE_OPEN;
		break;

	case (RAMROD_CMD_ID_ETH_HALT | BNX2X_STATE_CLOSING_WAIT4_HALT):
		DP(NETIF_MSG_IFDOWN, "got halt ramrod\n");
		bp->state = BNX2X_STATE_CLOSING_WAIT4_DELETE;
		fp->state = BNX2X_FP_STATE_HALTED;
		break;

	case (RAMROD_CMD_ID_ETH_CFC_DEL | BNX2X_STATE_CLOSING_WAIT4_HALT):
		DP(NETIF_MSG_IFDOWN, "got delete ramrod for MULTI[%d]\n", cid);
		bnx2x_fp(bp, cid, state) = BNX2X_FP_STATE_CLOSED;
		break;

	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_OPEN):
	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_DIAG):
		DP(NETIF_MSG_IFUP, "got set mac ramrod\n");
		bp->set_mac_pending = 0;
		break;

	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_CLOSING_WAIT4_HALT):
	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_DISABLED):
		DP(NETIF_MSG_IFDOWN, "got (un)set mac ramrod\n");
		break;

	default:
		BNX2X_ERR("unexpected MC reply (%d)  bp->state is %x\n",
			  command, bp->state);
		break;
	}
	mb(); /* force bnx2x_wait_ramrod() to see the change */
}
static inline void bnx2x_free_rx_sge(struct bnx2x *bp,
				     struct bnx2x_fastpath *fp, u16 index)
{
	struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
	struct page *page = sw_buf->page;
	struct eth_rx_sge *sge = &fp->rx_sge_ring[index];

	/* Skip "next page" elements */
	if (!page)
		return;

	pci_unmap_page(bp->pdev, pci_unmap_addr(sw_buf, mapping),
		       SGE_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);
	__free_pages(page, PAGES_PER_SGE_SHIFT);

	sw_buf->page = NULL;
	sge->addr_hi = 0;
	sge->addr_lo = 0;
}
static inline void bnx2x_free_rx_sge_range(struct bnx2x *bp,
					   struct bnx2x_fastpath *fp, int last)
{
	int i;

	for (i = 0; i < last; i++)
		bnx2x_free_rx_sge(bp, fp, i);
}
static inline int bnx2x_alloc_rx_sge(struct bnx2x *bp,
				     struct bnx2x_fastpath *fp, u16 index)
{
	struct page *page = alloc_pages(GFP_ATOMIC, PAGES_PER_SGE_SHIFT);
	struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
	struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
	dma_addr_t mapping;

	if (unlikely(page == NULL))
		return -ENOMEM;

	mapping = pci_map_page(bp->pdev, page, 0, SGE_PAGE_SIZE*PAGES_PER_SGE,
			       PCI_DMA_FROMDEVICE);
	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
		__free_pages(page, PAGES_PER_SGE_SHIFT);
		return -ENOMEM;
	}

	sw_buf->page = page;
	pci_unmap_addr_set(sw_buf, mapping, mapping);

	sge->addr_hi = cpu_to_le32(U64_HI(mapping));
	sge->addr_lo = cpu_to_le32(U64_LO(mapping));

	return 0;
}
static inline int bnx2x_alloc_rx_skb(struct bnx2x *bp,
				     struct bnx2x_fastpath *fp, u16 index)
{
	struct sk_buff *skb;
	struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
	struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
	dma_addr_t mapping;

	skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
	if (unlikely(skb == NULL))
		return -ENOMEM;

	mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_size,
				 PCI_DMA_FROMDEVICE);
	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
		dev_kfree_skb(skb);
		return -ENOMEM;
	}

	rx_buf->skb = skb;
	pci_unmap_addr_set(rx_buf, mapping, mapping);

	rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

	return 0;
}
/* note that we are not allocating a new skb,
 * we are just moving one from cons to prod
 * we are not creating a new mapping,
 * so there is no need to check for dma_mapping_error().
 */
static void bnx2x_reuse_rx_skb(struct bnx2x_fastpath *fp,
			       struct sk_buff *skb, u16 cons, u16 prod)
{
	struct bnx2x *bp = fp->bp;
	struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
	struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
	struct eth_rx_bd *cons_bd = &fp->rx_desc_ring[cons];
	struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];

	pci_dma_sync_single_for_device(bp->pdev,
				       pci_unmap_addr(cons_rx_buf, mapping),
				       RX_COPY_THRESH, PCI_DMA_FROMDEVICE);

	prod_rx_buf->skb = cons_rx_buf->skb;
	pci_unmap_addr_set(prod_rx_buf, mapping,
			   pci_unmap_addr(cons_rx_buf, mapping));
	*prod_bd = *cons_bd;
}
static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
					     u16 idx)
{
	u16 last_max = fp->last_max_sge;

	if (SUB_S16(idx, last_max) > 0)
		fp->last_max_sge = idx;
}
static void bnx2x_clear_sge_mask_next_elems(struct bnx2x_fastpath *fp)
{
	int i, j;

	for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
		int idx = RX_SGE_CNT * i - 1;

		for (j = 0; j < 2; j++) {
			SGE_MASK_CLEAR_BIT(fp, idx);
			idx--;
		}
	}
}
static void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
				  struct eth_fast_path_rx_cqe *fp_cqe)
{
	struct bnx2x *bp = fp->bp;
	u16 sge_len = SGE_PAGE_ALIGN(le16_to_cpu(fp_cqe->pkt_len) -
				     le16_to_cpu(fp_cqe->len_on_bd)) >>
		      SGE_PAGE_SHIFT;
	u16 last_max, last_elem, first_elem;
	u16 delta = 0;
	u16 i;

	if (!sge_len)
		return;

	/* First mark all used pages */
	for (i = 0; i < sge_len; i++)
		SGE_MASK_CLEAR_BIT(fp, RX_SGE(le16_to_cpu(fp_cqe->sgl[i])));

	DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
	   sge_len - 1, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));

	/* Here we assume that the last SGE index is the biggest */
	prefetch((void *)(fp->sge_mask));
	bnx2x_update_last_max_sge(fp, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));

	last_max = RX_SGE(fp->last_max_sge);
	last_elem = last_max >> RX_SGE_MASK_ELEM_SHIFT;
	first_elem = RX_SGE(fp->rx_sge_prod) >> RX_SGE_MASK_ELEM_SHIFT;

	/* If ring is not full */
	if (last_elem + 1 != first_elem)
		last_elem++;

	/* Now update the prod */
	for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
		if (likely(fp->sge_mask[i]))
			break;

		fp->sge_mask[i] = RX_SGE_MASK_ELEM_ONE_MASK;
		delta += RX_SGE_MASK_ELEM_SZ;
	}

	if (delta > 0) {
		fp->rx_sge_prod += delta;
		/* clear page-end entries */
		bnx2x_clear_sge_mask_next_elems(fp);
	}

	DP(NETIF_MSG_RX_STATUS,
	   "fp->last_max_sge = %d  fp->rx_sge_prod = %d\n",
	   fp->last_max_sge, fp->rx_sge_prod);
}
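
/*
 * The sge_mask drives this bookkeeping: a bit is cleared when the FW
 * consumes the corresponding SGE, and a whole 64-bit element is re-armed
 * to all-ones as the producer sweeps past it.  Since the sweep stops at
 * the first element that is not fully consumed, rx_sge_prod never
 * overtakes a page the stack still owns.
 */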
static inline void bnx2x_init_sge_ring_bit_mask(struct bnx2x_fastpath *fp)
{
	/* Set the mask to all 1-s: it's faster to compare to 0 than to 0xf-s */
	memset(fp->sge_mask, 0xff,
	       (NUM_RX_SGE >> RX_SGE_MASK_ELEM_SHIFT)*sizeof(u64));

	/* Clear the two last indices in the page to 1:
	   these are the indices that correspond to the "next" element,
	   hence will never be indicated and should be removed from
	   the calculations. */
	bnx2x_clear_sge_mask_next_elems(fp);
}
static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
			    struct sk_buff *skb, u16 cons, u16 prod)
{
	struct bnx2x *bp = fp->bp;
	struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
	struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
	struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
	dma_addr_t mapping;

	/* move empty skb from pool to prod and map it */
	prod_rx_buf->skb = fp->tpa_pool[queue].skb;
	mapping = pci_map_single(bp->pdev, fp->tpa_pool[queue].skb->data,
				 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
	pci_unmap_addr_set(prod_rx_buf, mapping, mapping);

	/* move partial skb from cons to pool (don't unmap yet) */
	fp->tpa_pool[queue] = *cons_rx_buf;

	/* mark bin state as start - print error if current state != stop */
	if (fp->tpa_state[queue] != BNX2X_TPA_STOP)
		BNX2X_ERR("start of bin not in stop [%d]\n", queue);

	fp->tpa_state[queue] = BNX2X_TPA_START;

	/* point prod_bd to new skb */
	prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

#ifdef BNX2X_STOP_ON_ERROR
	fp->tpa_queue_used |= (1 << queue);
#ifdef __powerpc64__
	DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n",
#else
	DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
#endif
	   fp->tpa_queue_used);
#endif
}
static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			       struct sk_buff *skb,
			       struct eth_fast_path_rx_cqe *fp_cqe,
			       u16 cqe_idx)
{
	struct sw_rx_page *rx_pg, old_rx_pg;
	u16 len_on_bd = le16_to_cpu(fp_cqe->len_on_bd);
	u32 i, frag_len, frag_size, pages;
	int err;
	int j;

	frag_size = le16_to_cpu(fp_cqe->pkt_len) - len_on_bd;
	pages = SGE_PAGE_ALIGN(frag_size) >> SGE_PAGE_SHIFT;

	/* This is needed in order to enable forwarding support */
	if (frag_size)
		skb_shinfo(skb)->gso_size = min((u32)SGE_PAGE_SIZE,
					       max(frag_size, (u32)len_on_bd));

#ifdef BNX2X_STOP_ON_ERROR
	if (pages >
	    min((u32)8, (u32)MAX_SKB_FRAGS) * SGE_PAGE_SIZE * PAGES_PER_SGE) {
		BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
			  pages, cqe_idx);
		BNX2X_ERR("fp_cqe->pkt_len = %d  fp_cqe->len_on_bd = %d\n",
			  fp_cqe->pkt_len, len_on_bd);
		bnx2x_panic();
		return -EINVAL;
	}
#endif

	/* Run through the SGL and compose the fragmented skb */
	for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
		u16 sge_idx = RX_SGE(le16_to_cpu(fp_cqe->sgl[j]));

		/* FW gives the indices of the SGE as if the ring is an array
		   (meaning that "next" element will consume 2 indices) */
		frag_len = min(frag_size, (u32)(SGE_PAGE_SIZE*PAGES_PER_SGE));
		rx_pg = &fp->rx_page_ring[sge_idx];
		old_rx_pg = *rx_pg;

		/* If we fail to allocate a substitute page, we simply stop
		   where we are and drop the whole packet */
		err = bnx2x_alloc_rx_sge(bp, fp, sge_idx);
		if (unlikely(err)) {
			fp->eth_q_stats.rx_skb_alloc_failed++;
			return err;
		}

		/* Unmap the page as we are going to pass it to the stack */
		pci_unmap_page(bp->pdev, pci_unmap_addr(&old_rx_pg, mapping),
			       SGE_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);

		/* Add one frag and update the appropriate fields in the skb */
		skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);

		skb->data_len += frag_len;
		skb->truesize += frag_len;
		skb->len += frag_len;

		frag_size -= frag_len;
	}

	return 0;
}
static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			   u16 queue, int pad, int len, union eth_rx_cqe *cqe,
			   u16 cqe_idx)
{
	struct sw_rx_bd *rx_buf = &fp->tpa_pool[queue];
	struct sk_buff *skb = rx_buf->skb;
	/* alloc new skb */
	struct sk_buff *new_skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);

	/* Unmap skb in the pool anyway, as we are going to change
	   pool entry status to BNX2X_TPA_STOP even if new skb allocation
	   fails. */
	pci_unmap_single(bp->pdev, pci_unmap_addr(rx_buf, mapping),
			 bp->rx_buf_size, PCI_DMA_FROMDEVICE);

	if (likely(new_skb)) {
		/* fix ip xsum and give it to the stack */
		/* (no need to map the new skb) */
#ifdef BCM_VLAN
		int is_vlan_cqe =
			(le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
			 PARSING_FLAGS_VLAN);
		int is_not_hwaccel_vlan_cqe =
			(is_vlan_cqe && (!(bp->flags & HW_VLAN_RX_FLAG)));
#endif

		prefetch(skb);
		prefetch(((char *)(skb)) + 128);

#ifdef BNX2X_STOP_ON_ERROR
		if (pad + len > bp->rx_buf_size) {
			BNX2X_ERR("skb_put is about to fail... "
				  "pad %d  len %d  rx_buf_size %d\n",
				  pad, len, bp->rx_buf_size);
			bnx2x_panic();
			return;
		}
#endif

		skb_reserve(skb, pad);
		skb_put(skb, len);

		skb->protocol = eth_type_trans(skb, bp->dev);
		skb->ip_summed = CHECKSUM_UNNECESSARY;

		{
			struct iphdr *iph;

			iph = (struct iphdr *)skb->data;
#ifdef BCM_VLAN
			/* If there is no Rx VLAN offloading -
			   take VLAN tag into account */
			if (unlikely(is_not_hwaccel_vlan_cqe))
				iph = (struct iphdr *)((u8 *)iph + VLAN_HLEN);
#endif
			iph->check = 0;
			iph->check = ip_fast_csum((u8 *)iph, iph->ihl);
		}

		if (!bnx2x_fill_frag_skb(bp, fp, skb,
					 &cqe->fast_path_cqe, cqe_idx)) {
#ifdef BCM_VLAN
			if ((bp->vlgrp != NULL) && is_vlan_cqe &&
			    (!is_not_hwaccel_vlan_cqe))
				vlan_hwaccel_receive_skb(skb, bp->vlgrp,
						le16_to_cpu(cqe->fast_path_cqe.
							    vlan_tag));
			else
#endif
				netif_receive_skb(skb);
		} else {
			DP(NETIF_MSG_RX_STATUS, "Failed to allocate new pages"
			   " - dropping packet!\n");
			dev_kfree_skb(skb);
		}

		/* put new skb in bin */
		fp->tpa_pool[queue].skb = new_skb;

	} else {
		/* else drop the packet and keep the buffer in the bin */
		DP(NETIF_MSG_RX_STATUS,
		   "Failed to allocate new skb - dropping packet!\n");
		fp->eth_q_stats.rx_skb_alloc_failed++;
	}

	fp->tpa_state[queue] = BNX2X_TPA_STOP;
}
static inline void bnx2x_update_rx_prod(struct bnx2x *bp,
					struct bnx2x_fastpath *fp,
					u16 bd_prod, u16 rx_comp_prod,
					u16 rx_sge_prod)
{
	struct ustorm_eth_rx_producers rx_prods = {0};
	int i;

	/* Update producers */
	rx_prods.bd_prod = bd_prod;
	rx_prods.cqe_prod = rx_comp_prod;
	rx_prods.sge_prod = rx_sge_prod;

	/*
	 * Make sure that the BD and SGE data is updated before updating the
	 * producers since FW might read the BD/SGE right after the producer
	 * is updated.
	 * This is only applicable for weak-ordered memory model archs such
	 * as IA-64. The following barrier is also mandatory since FW assumes
	 * BDs must have buffers.
	 */
	wmb();

	for (i = 0; i < sizeof(struct ustorm_eth_rx_producers)/4; i++)
		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_RX_PRODS_OFFSET(BP_PORT(bp), fp->cl_id) + i*4,
		       ((u32 *)&rx_prods)[i]);

	mmiowb(); /* keep prod updates ordered */

	DP(NETIF_MSG_RX_STATUS,
	   "queue[%d]:  wrote  bd_prod %u  cqe_prod %u  sge_prod %u\n",
	   fp->index, bd_prod, rx_comp_prod, rx_sge_prod);
}
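
/*
 * Note that all three producers are written as a single
 * struct ustorm_eth_rx_producers image, dword by dword, into the USTORM
 * internal memory slot for this client, so the FW reads the {bd, cqe, sge}
 * triplet from one place.
 */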
static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
{
	struct bnx2x *bp = fp->bp;
	u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
	u16 hw_comp_cons, sw_comp_cons, sw_comp_prod;
	int rx_pkt = 0;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return 0;
#endif

	/* CQ "next element" is of the size of the regular element,
	   that's why it's ok here */
	hw_comp_cons = le16_to_cpu(*fp->rx_cons_sb);
	if ((hw_comp_cons & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
		hw_comp_cons++;

	bd_cons = fp->rx_bd_cons;
	bd_prod = fp->rx_bd_prod;
	bd_prod_fw = bd_prod;
	sw_comp_cons = fp->rx_comp_cons;
	sw_comp_prod = fp->rx_comp_prod;

	/* Memory barrier necessary as speculative reads of the rx
	 * buffer can be ahead of the index in the status block
	 */
	rmb();

	DP(NETIF_MSG_RX_STATUS,
	   "queue[%d]:  hw_comp_cons %u  sw_comp_cons %u\n",
	   fp->index, hw_comp_cons, sw_comp_cons);

	while (sw_comp_cons != hw_comp_cons) {
		struct sw_rx_bd *rx_buf = NULL;
		struct sk_buff *skb;
		union eth_rx_cqe *cqe;
		u8 cqe_fp_flags;
		u16 len, pad;

		comp_ring_cons = RCQ_BD(sw_comp_cons);
		bd_prod = RX_BD(bd_prod);
		bd_cons = RX_BD(bd_cons);

		/* Prefetch the page containing the BD descriptor
		   at producer's index. It will be needed when new skb is
		   allocated */
		prefetch((void *)(PAGE_ALIGN((unsigned long)
					     (&fp->rx_desc_ring[bd_prod])) -
				  PAGE_SIZE + 1));

		cqe = &fp->rx_comp_ring[comp_ring_cons];
		cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;

		DP(NETIF_MSG_RX_STATUS, "CQE type %x  err %x  status %x"
		   "  queue %x  vlan %x  len %u\n", CQE_TYPE(cqe_fp_flags),
		   cqe_fp_flags, cqe->fast_path_cqe.status_flags,
		   le32_to_cpu(cqe->fast_path_cqe.rss_hash_result),
		   le16_to_cpu(cqe->fast_path_cqe.vlan_tag),
		   le16_to_cpu(cqe->fast_path_cqe.pkt_len));

		/* is this a slowpath msg? */
		if (unlikely(CQE_TYPE(cqe_fp_flags))) {
			bnx2x_sp_event(fp, cqe);
			goto next_cqe;

		/* this is an rx packet */
		} else {
			rx_buf = &fp->rx_buf_ring[bd_cons];
			skb = rx_buf->skb;
			len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
			pad = cqe->fast_path_cqe.placement_offset;

			/* If CQE is marked both TPA_START and TPA_END
			   it is a non-TPA CQE */
			if ((!fp->disable_tpa) &&
			    (TPA_TYPE(cqe_fp_flags) !=
			     (TPA_TYPE_START | TPA_TYPE_END))) {
				u16 queue = cqe->fast_path_cqe.queue_index;

				if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_START) {
					DP(NETIF_MSG_RX_STATUS,
					   "calling tpa_start on queue %d\n",
					   queue);

					bnx2x_tpa_start(fp, queue, skb,
							bd_cons, bd_prod);
					goto next_rx;
				}

				if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_END) {
					DP(NETIF_MSG_RX_STATUS,
					   "calling tpa_stop on queue %d\n",
					   queue);

					if (!BNX2X_RX_SUM_FIX(cqe))
						BNX2X_ERR("STOP on none TCP "
							  "data\n");

					/* This is a size of the linear data
					   on this skb */
					len = le16_to_cpu(cqe->fast_path_cqe.
								len_on_bd);
					bnx2x_tpa_stop(bp, fp, queue, pad,
						    len, cqe, comp_ring_cons);
#ifdef BNX2X_STOP_ON_ERROR
					if (bp->panic)
						return 0;
#endif

					bnx2x_update_sge_prod(fp,
							&cqe->fast_path_cqe);
					goto next_cqe;
				}
			}

			pci_dma_sync_single_for_device(bp->pdev,
					pci_unmap_addr(rx_buf, mapping),
						       pad + RX_COPY_THRESH,
						       PCI_DMA_FROMDEVICE);
			prefetch(skb);
			prefetch(((char *)(skb)) + 128);

			/* is this an error packet? */
			if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
				DP(NETIF_MSG_RX_ERR,
				   "ERROR  flags %x  rx packet %u\n",
				   cqe_fp_flags, sw_comp_cons);
				fp->eth_q_stats.rx_err_discard_pkt++;
				goto reuse_rx;
			}

			/* Since we don't have a jumbo ring
			 * copy small packets if mtu > 1500
			 */
			if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
			    (len <= RX_COPY_THRESH)) {
				struct sk_buff *new_skb;

				new_skb = netdev_alloc_skb(bp->dev,
							   len + pad);
				if (new_skb == NULL) {
					DP(NETIF_MSG_RX_ERR,
					   "ERROR  packet dropped "
					   "because of alloc failure\n");
					fp->eth_q_stats.rx_skb_alloc_failed++;
					goto reuse_rx;
				}

				/* aligned copy */
				skb_copy_from_linear_data_offset(skb, pad,
						    new_skb->data + pad, len);
				skb_reserve(new_skb, pad);
				skb_put(new_skb, len);

				bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);

				skb = new_skb;

			} else
			if (likely(bnx2x_alloc_rx_skb(bp, fp, bd_prod) == 0)) {
				pci_unmap_single(bp->pdev,
					pci_unmap_addr(rx_buf, mapping),
						 bp->rx_buf_size,
						 PCI_DMA_FROMDEVICE);
				skb_reserve(skb, pad);
				skb_put(skb, len);

			} else {
				DP(NETIF_MSG_RX_ERR,
				   "ERROR  packet dropped because "
				   "of alloc failure\n");
				fp->eth_q_stats.rx_skb_alloc_failed++;
reuse_rx:
				bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
				goto next_rx;
			}

			skb->protocol = eth_type_trans(skb, bp->dev);

			skb->ip_summed = CHECKSUM_NONE;
			if (bp->rx_csum) {
				if (likely(BNX2X_RX_CSUM_OK(cqe)))
					skb->ip_summed = CHECKSUM_UNNECESSARY;
				else
					fp->eth_q_stats.hw_csum_err++;
			}
		}

		skb_record_rx_queue(skb, fp->index);
#ifdef BCM_VLAN
		if ((bp->vlgrp != NULL) && (bp->flags & HW_VLAN_RX_FLAG) &&
		    (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
		     PARSING_FLAGS_VLAN))
			vlan_hwaccel_receive_skb(skb, bp->vlgrp,
				le16_to_cpu(cqe->fast_path_cqe.vlan_tag));
		else
#endif
			netif_receive_skb(skb);


next_rx:
		rx_buf->skb = NULL;

		bd_cons = NEXT_RX_IDX(bd_cons);
		bd_prod = NEXT_RX_IDX(bd_prod);
		bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
		rx_pkt++;
next_cqe:
		sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
		sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);

		if (rx_pkt == budget)
			break;
	} /* while */

	fp->rx_bd_cons = bd_cons;
	fp->rx_bd_prod = bd_prod_fw;
	fp->rx_comp_cons = sw_comp_cons;
	fp->rx_comp_prod = sw_comp_prod;

	/* Update producers */
	bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
			     fp->rx_sge_prod);

	fp->rx_pkt += rx_pkt;
	fp->rx_calls++;

	return rx_pkt;
}
static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
{
	struct bnx2x_fastpath *fp = fp_cookie;
	struct bnx2x *bp = fp->bp;

	/* Return here if interrupt is disabled */
	if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
		DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
		return IRQ_HANDLED;
	}

	DP(BNX2X_MSG_FP, "got an MSI-X interrupt on IDX:SB [%d:%d]\n",
	   fp->index, fp->sb_id);
	bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return IRQ_HANDLED;
#endif
	/* Handle Rx or Tx according to MSI-X vector */
	if (fp->is_rx_queue) {
		prefetch(fp->rx_cons_sb);
		prefetch(&fp->status_blk->u_status_block.status_block_index);

		napi_schedule(&bnx2x_fp(bp, fp->index, napi));

	} else {
		prefetch(fp->tx_cons_sb);
		prefetch(&fp->status_blk->c_status_block.status_block_index);

		bnx2x_update_fpsb_idx(fp);
		rmb();
		bnx2x_tx_int(fp);

		/* Re-enable interrupts */
		bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID,
			     le16_to_cpu(fp->fp_u_idx), IGU_INT_NOP, 1);
		bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID,
			     le16_to_cpu(fp->fp_c_idx), IGU_INT_ENABLE, 1);
	}

	return IRQ_HANDLED;
}
static irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
{
	struct bnx2x *bp = netdev_priv(dev_instance);
	u16 status = bnx2x_ack_int(bp);
	u16 mask;
	int i;

	/* Return here if interrupt is shared and it's not for us */
	if (unlikely(status == 0)) {
		DP(NETIF_MSG_INTR, "not our interrupt!\n");
		return IRQ_NONE;
	}
	DP(NETIF_MSG_INTR, "got an interrupt  status 0x%x\n", status);

	/* Return here if interrupt is disabled */
	if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
		DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
		return IRQ_HANDLED;
	}

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return IRQ_HANDLED;
#endif

	for (i = 0; i < BNX2X_NUM_QUEUES(bp); i++) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		mask = 0x2 << fp->sb_id;
		if (status & mask) {
			/* Handle Rx or Tx according to SB id */
			if (fp->is_rx_queue) {
				prefetch(fp->rx_cons_sb);
				prefetch(&fp->status_blk->u_status_block.
							status_block_index);

				napi_schedule(&bnx2x_fp(bp, fp->index, napi));

			} else {
				prefetch(fp->tx_cons_sb);
				prefetch(&fp->status_blk->c_status_block.
							status_block_index);

				bnx2x_update_fpsb_idx(fp);
				rmb();
				bnx2x_tx_int(fp);

				/* Re-enable interrupts */
				bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID,
					     le16_to_cpu(fp->fp_u_idx),
					     IGU_INT_NOP, 1);
				bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID,
					     le16_to_cpu(fp->fp_c_idx),
					     IGU_INT_ENABLE, 1);
			}
			status &= ~mask;
		}
	}

	if (unlikely(status & 0x1)) {
		queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);

		status &= ~0x1;
		if (!status)
			return IRQ_HANDLED;
	}

	if (status)
		DP(NETIF_MSG_INTR, "got an unknown interrupt! (status %u)\n",
		   status);

	return IRQ_HANDLED;
}
/* end of fast path */

static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event);

/*
 * General service functions
 */
static int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource)
{
	u32 lock_status;
	u32 resource_bit = (1 << resource);
	int func = BP_FUNC(bp);
	u32 hw_lock_control_reg;
	int cnt;

	/* Validating that the resource is within range */
	if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
		DP(NETIF_MSG_HW,
		   "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
		   resource, HW_LOCK_MAX_RESOURCE_VALUE);
		return -EINVAL;
	}

	if (func <= 5) {
		hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
	} else {
		hw_lock_control_reg =
				(MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
	}

	/* Validating that the resource is not already taken */
	lock_status = REG_RD(bp, hw_lock_control_reg);
	if (lock_status & resource_bit) {
		DP(NETIF_MSG_HW, "lock_status 0x%x  resource_bit 0x%x\n",
		   lock_status, resource_bit);
		return -EEXIST;
	}

	/* Try for 5 seconds every 5ms */
	for (cnt = 0; cnt < 1000; cnt++) {
		/* Try to acquire the lock */
		REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
		lock_status = REG_RD(bp, hw_lock_control_reg);
		if (lock_status & resource_bit)
			return 0;

		msleep(5);
	}
	DP(NETIF_MSG_HW, "Timeout\n");
	return -EAGAIN;
}
static int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource)
{
	u32 lock_status;
	u32 resource_bit = (1 << resource);
	int func = BP_FUNC(bp);
	u32 hw_lock_control_reg;

	/* Validating that the resource is within range */
	if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
		DP(NETIF_MSG_HW,
		   "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
		   resource, HW_LOCK_MAX_RESOURCE_VALUE);
		return -EINVAL;
	}

	if (func <= 5) {
		hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
	} else {
		hw_lock_control_reg =
				(MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
	}

	/* Validating that the resource is currently taken */
	lock_status = REG_RD(bp, hw_lock_control_reg);
	if (!(lock_status & resource_bit)) {
		DP(NETIF_MSG_HW, "lock_status 0x%x  resource_bit 0x%x\n",
		   lock_status, resource_bit);
		return -EFAULT;
	}

	REG_WR(bp, hw_lock_control_reg, resource_bit);
	return 0;
}
/* HW Lock for shared dual port PHYs */
static void bnx2x_acquire_phy_lock(struct bnx2x *bp)
{
	mutex_lock(&bp->port.phy_mutex);

	if (bp->port.need_hw_lock)
		bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
}

static void bnx2x_release_phy_lock(struct bnx2x *bp)
{
	if (bp->port.need_hw_lock)
		bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);

	mutex_unlock(&bp->port.phy_mutex);
}
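
/*
 * Lock ordering above: the software phy_mutex is taken before, and
 * released after, the HW_LOCK_RESOURCE_MDIO hardware lock.  The mutex
 * serializes callers within this driver instance while the hardware lock
 * arbitrates the MDIO bus shared with the other port.
 */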
int bnx2x_get_gpio(struct bnx2x *bp, int gpio_num, u8 port)
{
	/* The GPIO should be swapped if swap register is set and active */
	int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
			 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
	int gpio_shift = gpio_num +
			(gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
	u32 gpio_mask = (1 << gpio_shift);
	u32 gpio_reg;
	int value;

	if (gpio_num > MISC_REGISTERS_GPIO_3) {
		BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
		return -EINVAL;
	}

	/* read GPIO value */
	gpio_reg = REG_RD(bp, MISC_REG_GPIO);

	/* get the requested pin value */
	if ((gpio_reg & gpio_mask) == gpio_mask)
		value = 1;
	else
		value = 0;

	DP(NETIF_MSG_LINK, "pin %d  value 0x%x\n", gpio_num, value);

	return value;
}
int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
{
	/* The GPIO should be swapped if swap register is set and active */
	int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
			 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
	int gpio_shift = gpio_num +
			(gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
	u32 gpio_mask = (1 << gpio_shift);
	u32 gpio_reg;

	if (gpio_num > MISC_REGISTERS_GPIO_3) {
		BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
		return -EINVAL;
	}

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
	/* read GPIO and mask except the float bits */
	gpio_reg = (REG_RD(bp, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT);

	switch (mode) {
	case MISC_REGISTERS_GPIO_OUTPUT_LOW:
		DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output low\n",
		   gpio_num, gpio_shift);
		/* clear FLOAT and set CLR */
		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
		gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS);
		break;

	case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
		DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output high\n",
		   gpio_num, gpio_shift);
		/* clear FLOAT and set SET */
		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
		gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_SET_POS);
		break;

	case MISC_REGISTERS_GPIO_INPUT_HI_Z:
		DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> input\n",
		   gpio_num, gpio_shift);
		/* set FLOAT */
		gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
		break;

	default:
		break;
	}

	REG_WR(bp, MISC_REG_GPIO, gpio_reg);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);

	return 0;
}
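
/*
 * MISC_REG_GPIO exposes each pin through paired SET/CLR/FLOAT fields:
 * writing the pin's bit into CLR drives it low, into SET drives it high,
 * and into FLOAT releases it to high impedance.  That is why both output
 * modes above first knock the pin out of FLOAT, while the hi-Z case only
 * sets the FLOAT bit.
 */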
int bnx2x_set_gpio_int(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
{
	/* The GPIO should be swapped if swap register is set and active */
	int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
			 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
	int gpio_shift = gpio_num +
			(gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
	u32 gpio_mask = (1 << gpio_shift);
	u32 gpio_reg;

	if (gpio_num > MISC_REGISTERS_GPIO_3) {
		BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
		return -EINVAL;
	}

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
	/* read GPIO int */
	gpio_reg = REG_RD(bp, MISC_REG_GPIO_INT);

	switch (mode) {
	case MISC_REGISTERS_GPIO_INT_OUTPUT_CLR:
		DP(NETIF_MSG_LINK, "Clear GPIO INT %d (shift %d) -> "
				   "output low\n", gpio_num, gpio_shift);
		/* clear SET and set CLR */
		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
		gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
		break;

	case MISC_REGISTERS_GPIO_INT_OUTPUT_SET:
		DP(NETIF_MSG_LINK, "Set GPIO INT %d (shift %d) -> "
				   "output high\n", gpio_num, gpio_shift);
		/* clear CLR and set SET */
		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
		gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
		break;

	default:
		break;
	}

	REG_WR(bp, MISC_REG_GPIO_INT, gpio_reg);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);

	return 0;
}
static int bnx2x_set_spio(struct bnx2x *bp, int spio_num, u32 mode)
{
	u32 spio_mask = (1 << spio_num);
	u32 spio_reg;

	if ((spio_num < MISC_REGISTERS_SPIO_4) ||
	    (spio_num > MISC_REGISTERS_SPIO_7)) {
		BNX2X_ERR("Invalid SPIO %d\n", spio_num);
		return -EINVAL;
	}

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
	/* read SPIO and mask except the float bits */
	spio_reg = (REG_RD(bp, MISC_REG_SPIO) & MISC_REGISTERS_SPIO_FLOAT);

	switch (mode) {
	case MISC_REGISTERS_SPIO_OUTPUT_LOW:
		DP(NETIF_MSG_LINK, "Set SPIO %d -> output low\n", spio_num);
		/* clear FLOAT and set CLR */
		spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
		spio_reg |=  (spio_mask << MISC_REGISTERS_SPIO_CLR_POS);
		break;

	case MISC_REGISTERS_SPIO_OUTPUT_HIGH:
		DP(NETIF_MSG_LINK, "Set SPIO %d -> output high\n", spio_num);
		/* clear FLOAT and set SET */
		spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
		spio_reg |=  (spio_mask << MISC_REGISTERS_SPIO_SET_POS);
		break;

	case MISC_REGISTERS_SPIO_INPUT_HI_Z:
		DP(NETIF_MSG_LINK, "Set SPIO %d -> input\n", spio_num);
		/* set FLOAT */
		spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
		break;

	default:
		break;
	}

	REG_WR(bp, MISC_REG_SPIO, spio_reg);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);

	return 0;
}
static void bnx2x_calc_fc_adv(struct bnx2x *bp)
{
	switch (bp->link_vars.ieee_fc &
		MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK) {
	case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE:
		bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
					  ADVERTISED_Pause);
		break;

	case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH:
		bp->port.advertising |= (ADVERTISED_Asym_Pause |
					 ADVERTISED_Pause);
		break;

	case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC:
		bp->port.advertising |= ADVERTISED_Asym_Pause;
		break;

	default:
		bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
					  ADVERTISED_Pause);
		break;
	}
}
static void bnx2x_link_report(struct bnx2x *bp)
{
	if (bp->state == BNX2X_STATE_DISABLED) {
		netif_carrier_off(bp->dev);
		printk(KERN_ERR PFX "%s NIC Link is Down\n", bp->dev->name);
		return;
	}

	if (bp->link_vars.link_up) {
		if (bp->state == BNX2X_STATE_OPEN)
			netif_carrier_on(bp->dev);
		printk(KERN_INFO PFX "%s NIC Link is Up, ", bp->dev->name);

		printk("%d Mbps ", bp->link_vars.line_speed);

		if (bp->link_vars.duplex == DUPLEX_FULL)
			printk("full duplex");
		else
			printk("half duplex");

		if (bp->link_vars.flow_ctrl != BNX2X_FLOW_CTRL_NONE) {
			if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) {
				printk(", receive ");
				if (bp->link_vars.flow_ctrl &
				    BNX2X_FLOW_CTRL_TX)
					printk("& transmit ");
			} else {
				printk(", transmit ");
			}
			printk("flow control ON");
		}
		printk("\n");

	} else { /* link_down */
		netif_carrier_off(bp->dev);
		printk(KERN_ERR PFX "%s NIC Link is Down\n", bp->dev->name);
	}
}
static u8 bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode)
{
	if (!BP_NOMCP(bp)) {
		u8 rc;

		/* Initialize link parameters structure variables */
		/* It is recommended to turn off RX FC for jumbo frames
		   for better performance */
		if (bp->dev->mtu > 5000)
			bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_TX;
		else
			bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;

		bnx2x_acquire_phy_lock(bp);

		if (load_mode == LOAD_DIAG)
			bp->link_params.loopback_mode = LOOPBACK_XGXS_10;

		rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars);

		bnx2x_release_phy_lock(bp);

		bnx2x_calc_fc_adv(bp);

		if (CHIP_REV_IS_SLOW(bp) && bp->link_vars.link_up) {
			bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
			bnx2x_link_report(bp);
		}

		return rc;
	}
	BNX2X_ERR("Bootcode is missing - can not initialize link\n");
	return -EINVAL;
}
static void bnx2x_link_set(struct bnx2x *bp)
{
	if (!BP_NOMCP(bp)) {
		bnx2x_acquire_phy_lock(bp);
		bnx2x_phy_init(&bp->link_params, &bp->link_vars);
		bnx2x_release_phy_lock(bp);

		bnx2x_calc_fc_adv(bp);
	} else
		BNX2X_ERR("Bootcode is missing - can not set link\n");
}
static void bnx2x__link_reset(struct bnx2x *bp)
{
	if (!BP_NOMCP(bp)) {
		bnx2x_acquire_phy_lock(bp);
		bnx2x_link_reset(&bp->link_params, &bp->link_vars, 1);
		bnx2x_release_phy_lock(bp);
	} else
		BNX2X_ERR("Bootcode is missing - can not reset link\n");
}
static u8 bnx2x_link_test(struct bnx2x *bp)
{
	u8 rc;

	bnx2x_acquire_phy_lock(bp);
	rc = bnx2x_test_link(&bp->link_params, &bp->link_vars);
	bnx2x_release_phy_lock(bp);

	return rc;
}
2224 static void bnx2x_init_port_minmax(struct bnx2x *bp)
2226 u32 r_param = bp->link_vars.line_speed / 8;
2227 u32 fair_periodic_timeout_usec;
2230 memset(&(bp->cmng.rs_vars), 0,
2231 sizeof(struct rate_shaping_vars_per_port));
2232 memset(&(bp->cmng.fair_vars), 0, sizeof(struct fairness_vars_per_port));
2234 /* 100 usec in SDM ticks = 25 since each tick is 4 usec */
2235 bp->cmng.rs_vars.rs_periodic_timeout = RS_PERIODIC_TIMEOUT_USEC / 4;
2237 /* this is the threshold below which no timer arming will occur
2238 1.25 coefficient is for the threshold to be a little bigger
2239 than the real time, to compensate for timer in-accuracy */
2240 bp->cmng.rs_vars.rs_threshold =
2241 (RS_PERIODIC_TIMEOUT_USEC * r_param * 5) / 4;
2243 /* resolution of fairness timer */
2244 fair_periodic_timeout_usec = QM_ARB_BYTES / r_param;
2245 /* for 10G it is 1000usec. for 1G it is 10000usec. */
2246 t_fair = T_FAIR_COEF / bp->link_vars.line_speed;
2248 /* this is the threshold below which we won't arm the timer anymore */
2249 bp->cmng.fair_vars.fair_threshold = QM_ARB_BYTES;
2251 /* we multiply by 1e3/8 to get bytes/msec.
2252 We don't want the credits to exceed
2253 t_fair*FAIR_MEM (the algorithm resolution) */
2254 bp->cmng.fair_vars.upper_bound = r_param * t_fair * FAIR_MEM;
2255 /* since each tick is 4 usec */
2256 bp->cmng.fair_vars.fairness_timeout = fair_periodic_timeout_usec / 4;
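/* Worked example (illustrative): at 10G, r_param = 1250 bytes/usec
 * and t_fair = 1000 usec (per the comment above), so upper_bound =
 * 1250 * 1000 * FAIR_MEM bytes; QM_ARB_BYTES and FAIR_MEM are driver
 * constants defined elsewhere, so the absolute values depend on them. */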
2259 /* Calculates the sum of vn_min_rates.
2260 It's needed for further normalizing of the min_rates.
2261 Returns:
2262 sum of vn_min_rates.
2263 or
2264 0 - if all the min_rates are 0.
2265 In the latter case the fairness algorithm should be deactivated.
2266 If not all min_rates are zero then those that are zeroes will be set to 1.
2267 */
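/* Example (hypothetical configuration): two visible vns with min BW
 * fields of 10 and 40 yield vn_min_rate values of 1000 and 4000, so
 * vn_weight_sum = 5000; a vn whose field is 0 would contribute
 * DEF_MIN_RATE instead. */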
2268 static void bnx2x_calc_vn_weight_sum(struct bnx2x *bp)
2271 int port = BP_PORT(bp);
2274 bp->vn_weight_sum = 0;
2275 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
2276 int func = 2*vn + port;
2277 u32 vn_cfg = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
2278 u32 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
2279 FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
2281 /* Skip hidden vns */
2282 if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE)
2285 /* If min rate is zero - set it to 1 */
2287 vn_min_rate = DEF_MIN_RATE;
2291 bp->vn_weight_sum += vn_min_rate;
2294 /* ... only if all min rates are zero - disable fairness */
2296 bp->vn_weight_sum = 0;
2299 static void bnx2x_init_vn_minmax(struct bnx2x *bp, int func)
2301 struct rate_shaping_vars_per_vn m_rs_vn;
2302 struct fairness_vars_per_vn m_fair_vn;
2303 u32 vn_cfg = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
2304 u16 vn_min_rate, vn_max_rate;
2307 /* If function is hidden - set min and max to zeroes */
2308 if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) {
2313 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
2314 FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
2315 /* If fairness is enabled (not all min rates are zeroes) and
2316 if current min rate is zero - set it to 1.
2317 This is a requirement of the algorithm. */
2318 if (bp->vn_weight_sum && (vn_min_rate == 0))
2319 vn_min_rate = DEF_MIN_RATE;
2320 vn_max_rate = ((vn_cfg & FUNC_MF_CFG_MAX_BW_MASK) >>
2321 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
2325 "func %d: vn_min_rate=%d vn_max_rate=%d vn_weight_sum=%d\n",
2326 func, vn_min_rate, vn_max_rate, bp->vn_weight_sum);
2328 memset(&m_rs_vn, 0, sizeof(struct rate_shaping_vars_per_vn));
2329 memset(&m_fair_vn, 0, sizeof(struct fairness_vars_per_vn));
2331 /* global vn counter - maximal Mbps for this vn */
2332 m_rs_vn.vn_counter.rate = vn_max_rate;
2334 /* quota - number of bytes transmitted in this period */
2335 m_rs_vn.vn_counter.quota =
2336 (vn_max_rate * RS_PERIODIC_TIMEOUT_USEC) / 8;
2338 if (bp->vn_weight_sum) {
2339 /* credit for each period of the fairness algorithm:
2340 number of bytes in T_FAIR (the vns share the port rate).
2341 vn_weight_sum should not be larger than 10000, thus
2342 T_FAIR_COEF / (8 * vn_weight_sum) will always be greater
2343 than zero */
2344 m_fair_vn.vn_credit_delta =
2345 max((u32)(vn_min_rate * (T_FAIR_COEF /
2346 (8 * bp->vn_weight_sum))),
2347 (u32)(bp->cmng.fair_vars.fair_threshold * 2));
2348 DP(NETIF_MSG_IFUP, "m_fair_vn.vn_credit_delta=%d\n",
2349 m_fair_vn.vn_credit_delta);
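/* The max() above keeps vn_credit_delta at no less than twice
 * fair_threshold, so even a vn with a tiny min rate receives a
 * credit quantum safely above the threshold under which the
 * fairness timer is not armed. */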
2352 /* Store it to internal memory */
2353 for (i = 0; i < sizeof(struct rate_shaping_vars_per_vn)/4; i++)
2354 REG_WR(bp, BAR_XSTRORM_INTMEM +
2355 XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func) + i * 4,
2356 ((u32 *)(&m_rs_vn))[i]);
2358 for (i = 0; i < sizeof(struct fairness_vars_per_vn)/4; i++)
2359 REG_WR(bp, BAR_XSTRORM_INTMEM +
2360 XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func) + i * 4,
2361 ((u32 *)(&m_fair_vn))[i]);
2365 /* This function is called upon link interrupt */
2366 static void bnx2x_link_attn(struct bnx2x *bp)
2368 /* Make sure that we are synced with the current statistics */
2369 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2371 bnx2x_link_update(&bp->link_params, &bp->link_vars);
2373 if (bp->link_vars.link_up) {
2375 /* dropless flow control */
2376 if (CHIP_IS_E1H(bp) && bp->dropless_fc) {
2377 int port = BP_PORT(bp);
2378 u32 pause_enabled = 0;
2380 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
2383 REG_WR(bp, BAR_USTRORM_INTMEM +
2384 USTORM_ETH_PAUSE_ENABLED_OFFSET(port),
2388 if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
2389 struct host_port_stats *pstats;
2391 pstats = bnx2x_sp(bp, port_stats);
2392 /* reset old bmac stats */
2393 memset(&(pstats->mac_stx[0]), 0,
2394 sizeof(struct mac_stx));
2396 if ((bp->state == BNX2X_STATE_OPEN) ||
2397 (bp->state == BNX2X_STATE_DISABLED))
2398 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2401 /* indicate link status */
2402 bnx2x_link_report(bp);
2405 int port = BP_PORT(bp);
2409 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
2410 if (vn == BP_E1HVN(bp))
2413 func = ((vn << 1) | port);
2415 /* Set the attention towards other drivers
2416 on the same port */
2417 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
2418 (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
2421 if (bp->link_vars.link_up) {
2424 /* Init rate shaping and fairness contexts */
2425 bnx2x_init_port_minmax(bp);
2427 for (vn = VN_0; vn < E1HVN_MAX; vn++)
2428 bnx2x_init_vn_minmax(bp, 2*vn + port);
2430 /* Store it to internal memory */
2432 i < sizeof(struct cmng_struct_per_port) / 4; i++)
2433 REG_WR(bp, BAR_XSTRORM_INTMEM +
2434 XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i*4,
2435 ((u32 *)(&bp->cmng))[i]);
2440 static void bnx2x__link_status_update(struct bnx2x *bp)
2442 int func = BP_FUNC(bp);
2444 if (bp->state != BNX2X_STATE_OPEN)
2447 bnx2x_link_status_update(&bp->link_params, &bp->link_vars);
2449 if (bp->link_vars.link_up)
2450 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2452 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2454 bp->mf_config = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
2455 bnx2x_calc_vn_weight_sum(bp);
2457 /* indicate link status */
2458 bnx2x_link_report(bp);
2461 static void bnx2x_pmf_update(struct bnx2x *bp)
2463 int port = BP_PORT(bp);
2467 DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
2469 /* enable nig attention */
2470 val = (0xff0f | (1 << (BP_E1HVN(bp) + 4)));
2471 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
2472 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
2474 bnx2x_stats_handle(bp, STATS_EVENT_PMF);
2482 * General service functions
2485 /* send the MCP a request, block until there is a reply */
2486 u32 bnx2x_fw_command(struct bnx2x *bp, u32 command)
2488 int func = BP_FUNC(bp);
2489 u32 seq = ++bp->fw_seq;
2492 u8 delay = CHIP_REV_IS_SLOW(bp) ? 100 : 10;
2494 SHMEM_WR(bp, func_mb[func].drv_mb_header, (command | seq));
2495 DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB\n", (command | seq));
2498 /* let the FW do its magic ... */
2501 rc = SHMEM_RD(bp, func_mb[func].fw_mb_header);
2503 /* Give the FW up to 2 seconds (200*10ms) */
2504 } while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 200));
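/* The sequence number occupies the low bits of the mailbox header
 * (FW_MSG_SEQ_NUMBER_MASK) and is echoed back by the FW once the
 * command has been processed; only then is the reply code in the
 * FW_MSG_CODE_MASK bits below considered valid. */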
2506 DP(BNX2X_MSG_MCP, "[after %d ms] read (%x) seq is (%x) from FW MB\n",
2507 cnt*delay, rc, seq);
2509 /* is this a reply to our command? */
2510 if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK))
2511 rc &= FW_MSG_CODE_MASK;
2514 BNX2X_ERR("FW failed to respond!\n");
2522 static void bnx2x_set_storm_rx_mode(struct bnx2x *bp);
2523 static void bnx2x_set_mac_addr_e1h(struct bnx2x *bp, int set);
2524 static void bnx2x_set_rx_mode(struct net_device *dev);
2526 static void bnx2x_e1h_disable(struct bnx2x *bp)
2528 int port = BP_PORT(bp);
2531 bp->rx_mode = BNX2X_RX_MODE_NONE;
2532 bnx2x_set_storm_rx_mode(bp);
2534 netif_tx_disable(bp->dev);
2535 bp->dev->trans_start = jiffies; /* prevent tx timeout */
2537 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
2539 bnx2x_set_mac_addr_e1h(bp, 0);
2541 for (i = 0; i < MC_HASH_SIZE; i++)
2542 REG_WR(bp, MC_HASH_OFFSET(bp, i), 0);
2544 netif_carrier_off(bp->dev);
2547 static void bnx2x_e1h_enable(struct bnx2x *bp)
2549 int port = BP_PORT(bp);
2551 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
2553 bnx2x_set_mac_addr_e1h(bp, 1);
2555 /* Tx queues should only be re-enabled */
2556 netif_tx_wake_all_queues(bp->dev);
2558 /* Initialize the receive filter. */
2559 bnx2x_set_rx_mode(bp->dev);
2562 static void bnx2x_update_min_max(struct bnx2x *bp)
2564 int port = BP_PORT(bp);
2567 /* Init rate shaping and fairness contexts */
2568 bnx2x_init_port_minmax(bp);
2570 bnx2x_calc_vn_weight_sum(bp);
2572 for (vn = VN_0; vn < E1HVN_MAX; vn++)
2573 bnx2x_init_vn_minmax(bp, 2*vn + port);
2578 /* Set the attention towards other drivers on the same port */
2579 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
2580 if (vn == BP_E1HVN(bp))
2583 func = ((vn << 1) | port);
2584 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
2585 (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
2588 /* Store it to internal memory */
2589 for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++)
2590 REG_WR(bp, BAR_XSTRORM_INTMEM +
2591 XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i*4,
2592 ((u32 *)(&bp->cmng))[i]);
2596 static void bnx2x_dcc_event(struct bnx2x *bp, u32 dcc_event)
2598 int func = BP_FUNC(bp);
2600 DP(BNX2X_MSG_MCP, "dcc_event 0x%x\n", dcc_event);
2601 bp->mf_config = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
2603 if (dcc_event & DRV_STATUS_DCC_DISABLE_ENABLE_PF) {
2605 if (bp->mf_config & FUNC_MF_CFG_FUNC_DISABLED) {
2606 DP(NETIF_MSG_IFDOWN, "mf_cfg function disabled\n");
2607 bp->state = BNX2X_STATE_DISABLED;
2609 bnx2x_e1h_disable(bp);
2611 DP(NETIF_MSG_IFUP, "mf_cfg function enabled\n");
2612 bp->state = BNX2X_STATE_OPEN;
2614 bnx2x_e1h_enable(bp);
2616 dcc_event &= ~DRV_STATUS_DCC_DISABLE_ENABLE_PF;
2618 if (dcc_event & DRV_STATUS_DCC_BANDWIDTH_ALLOCATION) {
2620 bnx2x_update_min_max(bp);
2621 dcc_event &= ~DRV_STATUS_DCC_BANDWIDTH_ALLOCATION;
2624 /* Report results to MCP */
2626 bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_FAILURE);
2628 bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_OK);
2631 /* the slow path queue is odd since completions arrive on the fastpath ring */
2632 static int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
2633 u32 data_hi, u32 data_lo, int common)
2635 int func = BP_FUNC(bp);
2637 DP(BNX2X_MSG_SP/*NETIF_MSG_TIMER*/,
2638 "SPQE (%x:%x) command %d hw_cid %x data (%x:%x) left %x\n",
2639 (u32)U64_HI(bp->spq_mapping), (u32)(U64_LO(bp->spq_mapping) +
2640 (void *)bp->spq_prod_bd - (void *)bp->spq), command,
2641 HW_CID(bp, cid), data_hi, data_lo, bp->spq_left);
2643 #ifdef BNX2X_STOP_ON_ERROR
2644 if (unlikely(bp->panic))
2648 spin_lock_bh(&bp->spq_lock);
2650 if (!bp->spq_left) {
2651 BNX2X_ERR("BUG! SPQ ring full!\n");
2652 spin_unlock_bh(&bp->spq_lock);
2657 /* CID needs the port number to be encoded in it */
2658 bp->spq_prod_bd->hdr.conn_and_cmd_data =
2659 cpu_to_le32(((command << SPE_HDR_CMD_ID_SHIFT) |
2661 bp->spq_prod_bd->hdr.type = cpu_to_le16(ETH_CONNECTION_TYPE);
2662 if (common)
2663 bp->spq_prod_bd->hdr.type |=
2664 cpu_to_le16((1 << SPE_HDR_COMMON_RAMROD_SHIFT));
2666 bp->spq_prod_bd->data.mac_config_addr.hi = cpu_to_le32(data_hi);
2667 bp->spq_prod_bd->data.mac_config_addr.lo = cpu_to_le32(data_lo);
2671 if (bp->spq_prod_bd == bp->spq_last_bd) {
2672 bp->spq_prod_bd = bp->spq;
2673 bp->spq_prod_idx = 0;
2674 DP(NETIF_MSG_TIMER, "end of spq\n");
2681 /* Make sure that BD data is updated before writing the producer */
2684 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func),
2689 spin_unlock_bh(&bp->spq_lock);
2693 /* acquire split MCP access lock register */
2694 static int bnx2x_acquire_alr(struct bnx2x *bp)
2701 for (j = 0; j < i*10; j++) {
2703 REG_WR(bp, GRCBASE_MCP + 0x9c, val);
2704 val = REG_RD(bp, GRCBASE_MCP + 0x9c);
2705 if (val & (1L << 31))
2710 if (!(val & (1L << 31))) {
2711 BNX2X_ERR("Cannot acquire MCP access lock register\n");
2718 /* release split MCP access lock register */
2719 static void bnx2x_release_alr(struct bnx2x *bp)
2723 REG_WR(bp, GRCBASE_MCP + 0x9c, val);
2726 static inline u16 bnx2x_update_dsb_idx(struct bnx2x *bp)
2728 struct host_def_status_block *def_sb = bp->def_status_blk;
2731 barrier(); /* status block is written to by the chip */
2732 if (bp->def_att_idx != def_sb->atten_status_block.attn_bits_index) {
2733 bp->def_att_idx = def_sb->atten_status_block.attn_bits_index;
2736 if (bp->def_c_idx != def_sb->c_def_status_block.status_block_index) {
2737 bp->def_c_idx = def_sb->c_def_status_block.status_block_index;
2740 if (bp->def_u_idx != def_sb->u_def_status_block.status_block_index) {
2741 bp->def_u_idx = def_sb->u_def_status_block.status_block_index;
2744 if (bp->def_x_idx != def_sb->x_def_status_block.status_block_index) {
2745 bp->def_x_idx = def_sb->x_def_status_block.status_block_index;
2748 if (bp->def_t_idx != def_sb->t_def_status_block.status_block_index) {
2749 bp->def_t_idx = def_sb->t_def_status_block.status_block_index;
2756 * slow path service functions
2759 static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
2761 int port = BP_PORT(bp);
2762 u32 hc_addr = (HC_REG_COMMAND_REG + port*32 +
2763 COMMAND_REG_ATTN_BITS_SET);
2764 u32 aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
2765 MISC_REG_AEU_MASK_ATTN_FUNC_0;
2766 u32 nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 :
2767 NIG_REG_MASK_INTERRUPT_PORT0;
2771 if (bp->attn_state & asserted)
2772 BNX2X_ERR("IGU ERROR\n");
2774 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2775 aeu_mask = REG_RD(bp, aeu_addr);
2777 DP(NETIF_MSG_HW, "aeu_mask %x newly asserted %x\n",
2778 aeu_mask, asserted);
2779 aeu_mask &= ~(asserted & 0xff);
2780 DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
2782 REG_WR(bp, aeu_addr, aeu_mask);
2783 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2785 DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
2786 bp->attn_state |= asserted;
2787 DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
2789 if (asserted & ATTN_HARD_WIRED_MASK) {
2790 if (asserted & ATTN_NIG_FOR_FUNC) {
2792 bnx2x_acquire_phy_lock(bp);
2794 /* save nig interrupt mask */
2795 nig_mask = REG_RD(bp, nig_int_mask_addr);
2796 REG_WR(bp, nig_int_mask_addr, 0);
2798 bnx2x_link_attn(bp);
2800 /* handle unicore attn? */
2802 if (asserted & ATTN_SW_TIMER_4_FUNC)
2803 DP(NETIF_MSG_HW, "ATTN_SW_TIMER_4_FUNC!\n");
2805 if (asserted & GPIO_2_FUNC)
2806 DP(NETIF_MSG_HW, "GPIO_2_FUNC!\n");
2808 if (asserted & GPIO_3_FUNC)
2809 DP(NETIF_MSG_HW, "GPIO_3_FUNC!\n");
2811 if (asserted & GPIO_4_FUNC)
2812 DP(NETIF_MSG_HW, "GPIO_4_FUNC!\n");
2815 if (asserted & ATTN_GENERAL_ATTN_1) {
2816 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_1!\n");
2817 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_1, 0x0);
2819 if (asserted & ATTN_GENERAL_ATTN_2) {
2820 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_2!\n");
2821 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_2, 0x0);
2823 if (asserted & ATTN_GENERAL_ATTN_3) {
2824 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_3!\n");
2825 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_3, 0x0);
2828 if (asserted & ATTN_GENERAL_ATTN_4) {
2829 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_4!\n");
2830 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_4, 0x0);
2832 if (asserted & ATTN_GENERAL_ATTN_5) {
2833 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_5!\n");
2834 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_5, 0x0);
2836 if (asserted & ATTN_GENERAL_ATTN_6) {
2837 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_6!\n");
2838 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_6, 0x0);
2842 } /* if hardwired */
2844 DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
2846 REG_WR(bp, hc_addr, asserted);
2848 /* now set back the mask */
2849 if (asserted & ATTN_NIG_FOR_FUNC) {
2850 REG_WR(bp, nig_int_mask_addr, nig_mask);
2851 bnx2x_release_phy_lock(bp);
2855 static inline void bnx2x_fan_failure(struct bnx2x *bp)
2857 int port = BP_PORT(bp);
2859 /* mark the failure */
2860 bp->link_params.ext_phy_config &= ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
2861 bp->link_params.ext_phy_config |= PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE;
2862 SHMEM_WR(bp, dev_info.port_hw_config[port].external_phy_config,
2863 bp->link_params.ext_phy_config);
2865 /* log the failure */
2866 printk(KERN_ERR PFX "Fan Failure on Network Controller %s has caused"
2867 " the driver to shut down the card to prevent permanent"
2868 " damage. Please contact Dell Support for assistance\n",
2871 static inline void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
2873 int port = BP_PORT(bp);
2875 u32 val, swap_val, swap_override;
2877 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
2878 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
2880 if (attn & AEU_INPUTS_ATTN_BITS_SPIO5) {
2882 val = REG_RD(bp, reg_offset);
2883 val &= ~AEU_INPUTS_ATTN_BITS_SPIO5;
2884 REG_WR(bp, reg_offset, val);
2886 BNX2X_ERR("SPIO5 hw attention\n");
2888 /* Fan failure attention */
2889 switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
2890 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
2891 /* Low power mode is controlled by GPIO 2 */
2892 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
2893 MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
2894 /* The PHY reset is controlled by GPIO 1 */
2895 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
2896 MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
2899 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
2900 /* The PHY reset is controlled by GPIO 1 */
2901 /* fake the port number to cancel the swap done in
2902 set_gpio() */
2903 swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
2904 swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
2905 port = (swap_val && swap_override) ^ 1;
2906 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
2907 MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
2913 bnx2x_fan_failure(bp);
2916 if (attn & (AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 |
2917 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1)) {
2918 bnx2x_acquire_phy_lock(bp);
2919 bnx2x_handle_module_detect_int(&bp->link_params);
2920 bnx2x_release_phy_lock(bp);
2923 if (attn & HW_INTERRUT_ASSERT_SET_0) {
2925 val = REG_RD(bp, reg_offset);
2926 val &= ~(attn & HW_INTERRUT_ASSERT_SET_0);
2927 REG_WR(bp, reg_offset, val);
2929 BNX2X_ERR("FATAL HW block attention set0 0x%x\n",
2930 (attn & HW_INTERRUT_ASSERT_SET_0));
2935 static inline void bnx2x_attn_int_deasserted1(struct bnx2x *bp, u32 attn)
2939 if (attn & AEU_INPUTS_ATTN_BITS_DOORBELLQ_HW_INTERRUPT) {
2941 val = REG_RD(bp, DORQ_REG_DORQ_INT_STS_CLR);
2942 BNX2X_ERR("DB hw attention 0x%x\n", val);
2943 /* DORQ discard attention */
2945 BNX2X_ERR("FATAL error from DORQ\n");
2948 if (attn & HW_INTERRUT_ASSERT_SET_1) {
2950 int port = BP_PORT(bp);
2953 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 :
2954 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1);
2956 val = REG_RD(bp, reg_offset);
2957 val &= ~(attn & HW_INTERRUT_ASSERT_SET_1);
2958 REG_WR(bp, reg_offset, val);
2960 BNX2X_ERR("FATAL HW block attention set1 0x%x\n",
2961 (attn & HW_INTERRUT_ASSERT_SET_1));
2966 static inline void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn)
2970 if (attn & AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) {
2972 val = REG_RD(bp, CFC_REG_CFC_INT_STS_CLR);
2973 BNX2X_ERR("CFC hw attention 0x%x\n", val);
2974 /* CFC error attention */
2976 BNX2X_ERR("FATAL error from CFC\n");
2979 if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) {
2981 val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_0);
2982 BNX2X_ERR("PXP hw attention 0x%x\n", val);
2983 /* RQ_USDMDP_FIFO_OVERFLOW */
2985 BNX2X_ERR("FATAL error from PXP\n");
2988 if (attn & HW_INTERRUT_ASSERT_SET_2) {
2990 int port = BP_PORT(bp);
2993 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 :
2994 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2);
2996 val = REG_RD(bp, reg_offset);
2997 val &= ~(attn & HW_INTERRUT_ASSERT_SET_2);
2998 REG_WR(bp, reg_offset, val);
3000 BNX2X_ERR("FATAL HW block attention set2 0x%x\n",
3001 (attn & HW_INTERRUT_ASSERT_SET_2));
3006 static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
3010 if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) {
3012 if (attn & BNX2X_PMF_LINK_ASSERT) {
3013 int func = BP_FUNC(bp);
3015 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
3016 val = SHMEM_RD(bp, func_mb[func].drv_status);
3017 if (val & DRV_STATUS_DCC_EVENT_MASK)
3019 (val & DRV_STATUS_DCC_EVENT_MASK));
3020 bnx2x__link_status_update(bp);
3021 if ((bp->port.pmf == 0) && (val & DRV_STATUS_PMF))
3022 bnx2x_pmf_update(bp);
3024 } else if (attn & BNX2X_MC_ASSERT_BITS) {
3026 BNX2X_ERR("MC assert!\n");
3027 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_10, 0);
3028 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_9, 0);
3029 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_8, 0);
3030 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_7, 0);
3033 } else if (attn & BNX2X_MCP_ASSERT) {
3035 BNX2X_ERR("MCP assert!\n");
3036 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_11, 0);
3040 BNX2X_ERR("Unknown HW assert! (attn 0x%x)\n", attn);
3043 if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) {
3044 BNX2X_ERR("LATCHED attention 0x%08x (masked)\n", attn);
3045 if (attn & BNX2X_GRC_TIMEOUT) {
3046 val = CHIP_IS_E1H(bp) ?
3047 REG_RD(bp, MISC_REG_GRC_TIMEOUT_ATTN) : 0;
3048 BNX2X_ERR("GRC time-out 0x%08x\n", val);
3050 if (attn & BNX2X_GRC_RSV) {
3051 val = CHIP_IS_E1H(bp) ?
3052 REG_RD(bp, MISC_REG_GRC_RSV_ATTN) : 0;
3053 BNX2X_ERR("GRC reserved 0x%08x\n", val);
3055 REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff);
3059 static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
3061 struct attn_route attn;
3062 struct attn_route group_mask;
3063 int port = BP_PORT(bp);
3069 /* need to take the HW lock because the MCP or the other port
3070 might also try to handle this event */
3071 bnx2x_acquire_alr(bp);
3073 attn.sig[0] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
3074 attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
3075 attn.sig[2] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
3076 attn.sig[3] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
3077 DP(NETIF_MSG_HW, "attn: %08x %08x %08x %08x\n",
3078 attn.sig[0], attn.sig[1], attn.sig[2], attn.sig[3]);
3080 for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
3081 if (deasserted & (1 << index)) {
3082 group_mask = bp->attn_group[index];
3084 DP(NETIF_MSG_HW, "group[%d]: %08x %08x %08x %08x\n",
3085 index, group_mask.sig[0], group_mask.sig[1],
3086 group_mask.sig[2], group_mask.sig[3]);
3088 bnx2x_attn_int_deasserted3(bp,
3089 attn.sig[3] & group_mask.sig[3]);
3090 bnx2x_attn_int_deasserted1(bp,
3091 attn.sig[1] & group_mask.sig[1]);
3092 bnx2x_attn_int_deasserted2(bp,
3093 attn.sig[2] & group_mask.sig[2]);
3094 bnx2x_attn_int_deasserted0(bp,
3095 attn.sig[0] & group_mask.sig[0]);
3097 if ((attn.sig[0] & group_mask.sig[0] &
3098 HW_PRTY_ASSERT_SET_0) ||
3099 (attn.sig[1] & group_mask.sig[1] &
3100 HW_PRTY_ASSERT_SET_1) ||
3101 (attn.sig[2] & group_mask.sig[2] &
3102 HW_PRTY_ASSERT_SET_2))
3103 BNX2X_ERR("FATAL HW block parity attention\n");
3107 bnx2x_release_alr(bp);
3109 reg_addr = (HC_REG_COMMAND_REG + port*32 + COMMAND_REG_ATTN_BITS_CLR);
3112 DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
3114 REG_WR(bp, reg_addr, val);
3116 if (~bp->attn_state & deasserted)
3117 BNX2X_ERR("IGU ERROR\n");
3119 reg_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
3120 MISC_REG_AEU_MASK_ATTN_FUNC_0;
3122 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
3123 aeu_mask = REG_RD(bp, reg_addr);
3125 DP(NETIF_MSG_HW, "aeu_mask %x newly deasserted %x\n",
3126 aeu_mask, deasserted);
3127 aeu_mask |= (deasserted & 0xff);
3128 DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
3130 REG_WR(bp, reg_addr, aeu_mask);
3131 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
3133 DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
3134 bp->attn_state &= ~deasserted;
3135 DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
3138 static void bnx2x_attn_int(struct bnx2x *bp)
3140 /* read local copy of bits */
3141 u32 attn_bits = le32_to_cpu(bp->def_status_blk->atten_status_block.
3143 u32 attn_ack = le32_to_cpu(bp->def_status_blk->atten_status_block.
3145 u32 attn_state = bp->attn_state;
3147 /* look for changed bits */
3148 u32 asserted = attn_bits & ~attn_ack & ~attn_state;
3149 u32 deasserted = ~attn_bits & attn_ack & attn_state;
3152 "attn_bits %x attn_ack %x asserted %x deasserted %x\n",
3153 attn_bits, attn_ack, asserted, deasserted);
3155 if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state))
3156 BNX2X_ERR("BAD attention state\n");
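/* Consistency check, spelled out: once a transition has been acked,
 * attn_ack equals attn_bits and attn_state should have been updated
 * to match. A bit where attn_bits == attn_ack but attn_bits !=
 * attn_state is neither a pending assert nor a pending deassert,
 * hence the error above. */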
3158 /* handle bits that were raised */
3160 bnx2x_attn_int_asserted(bp, asserted);
3163 bnx2x_attn_int_deasserted(bp, deasserted);
3166 static void bnx2x_sp_task(struct work_struct *work)
3168 struct bnx2x *bp = container_of(work, struct bnx2x, sp_task.work);
3172 /* Return here if interrupt is disabled */
3173 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
3174 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
3178 status = bnx2x_update_dsb_idx(bp);
3179 /* if (status == 0) */
3180 /* BNX2X_ERR("spurious slowpath interrupt!\n"); */
3182 DP(NETIF_MSG_INTR, "got a slowpath interrupt (updated %x)\n", status);
3188 bnx2x_ack_sb(bp, DEF_SB_ID, ATTENTION_ID, le16_to_cpu(bp->def_att_idx),
3190 bnx2x_ack_sb(bp, DEF_SB_ID, USTORM_ID, le16_to_cpu(bp->def_u_idx),
3192 bnx2x_ack_sb(bp, DEF_SB_ID, CSTORM_ID, le16_to_cpu(bp->def_c_idx),
3194 bnx2x_ack_sb(bp, DEF_SB_ID, XSTORM_ID, le16_to_cpu(bp->def_x_idx),
3196 bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, le16_to_cpu(bp->def_t_idx),
3201 static irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
3203 struct net_device *dev = dev_instance;
3204 struct bnx2x *bp = netdev_priv(dev);
3206 /* Return here if interrupt is disabled */
3207 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
3208 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
3212 bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, 0, IGU_INT_DISABLE, 0);
3214 #ifdef BNX2X_STOP_ON_ERROR
3215 if (unlikely(bp->panic))
3219 queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
3224 /* end of slow path */
3228 /****************************************************************************
3230 ****************************************************************************/
3232 /* sum[hi:lo] += add[hi:lo] */
3233 #define ADD_64(s_hi, a_hi, s_lo, a_lo) \
3234 do { \
3235 s_lo += a_lo; \
3236 s_hi += a_hi + ((s_lo < a_lo) ? 1 : 0); \
3237 } while (0)
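/* Worked example (illustrative): with s_hi = 0x1, s_lo = 0xFFFFFFFF
 * (i.e. the 64-bit value 0x1FFFFFFFF), ADD_64(s_hi, 0, s_lo, 1) wraps
 * s_lo to 0; the (s_lo < a_lo) test detects the wrap and carries 1
 * into s_hi, giving 0x200000000 as expected. */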
3239 /* difference = minuend - subtrahend */
3240 #define DIFF_64(d_hi, m_hi, s_hi, d_lo, m_lo, s_lo) \
3241 do { \
3242 if (m_lo < s_lo) { \
3243 /* underflow */ \
3244 d_hi = m_hi - s_hi; \
3245 if (d_hi > 0) { \
3246 /* we can 'loan' 1 */ \
3247 d_hi--; \
3248 d_lo = m_lo + (UINT_MAX - s_lo) + 1; \
3249 } else { \
3250 /* m_hi <= s_hi */ \
3251 d_hi = 0; \
3252 d_lo = 0; \
3253 } \
3254 } else { \
3255 /* m_lo >= s_lo */ \
3256 if (m_hi < s_hi) { \
3257 d_hi = 0; \
3258 d_lo = 0; \
3259 } else { \
3260 /* m_hi >= s_hi */ \
3261 d_hi = m_hi - s_hi; \
3262 d_lo = m_lo - s_lo; \
3263 } \
3264 } \
3265 } while (0)
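/* Worked example (illustrative): m = 0x100000000, s = 0x1, i.e.
 * m_hi = 1, m_lo = 0, s_hi = 0, s_lo = 1. Since m_lo < s_lo and
 * d_hi = 1 > 0, we 'loan' 1: d_hi becomes 0 and
 * d_lo = 0 + (0xFFFFFFFF - 1) + 1 = 0xFFFFFFFF, the correct result. */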
3267 #define UPDATE_STAT64(s, t) \
3269 DIFF_64(diff.hi, new->s##_hi, pstats->mac_stx[0].t##_hi, \
3270 diff.lo, new->s##_lo, pstats->mac_stx[0].t##_lo); \
3271 pstats->mac_stx[0].t##_hi = new->s##_hi; \
3272 pstats->mac_stx[0].t##_lo = new->s##_lo; \
3273 ADD_64(pstats->mac_stx[1].t##_hi, diff.hi, \
3274 pstats->mac_stx[1].t##_lo, diff.lo); \
3277 #define UPDATE_STAT64_NIG(s, t) \
3279 DIFF_64(diff.hi, new->s##_hi, old->s##_hi, \
3280 diff.lo, new->s##_lo, old->s##_lo); \
3281 ADD_64(estats->t##_hi, diff.hi, \
3282 estats->t##_lo, diff.lo); \
3285 /* sum[hi:lo] += add */
3286 #define ADD_EXTEND_64(s_hi, s_lo, a) \
3287 do { \
3288 s_lo += a; \
3289 s_hi += (s_lo < a) ? 1 : 0; \
3290 } while (0)
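/* Same carry trick as ADD_64, but the addend is a plain u32; the
 * UPDATE_EXTEND_* macros below use it to fold 32-bit deltas read
 * from the storm stats into 64-bit hi/lo counters. */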
3292 #define UPDATE_EXTEND_STAT(s) \
3294 ADD_EXTEND_64(pstats->mac_stx[1].s##_hi, \
3295 pstats->mac_stx[1].s##_lo, \
3299 #define UPDATE_EXTEND_TSTAT(s, t) \
3301 diff = le32_to_cpu(tclient->s) - le32_to_cpu(old_tclient->s); \
3302 old_tclient->s = tclient->s; \
3303 ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3306 #define UPDATE_EXTEND_USTAT(s, t) \
3308 diff = le32_to_cpu(uclient->s) - le32_to_cpu(old_uclient->s); \
3309 old_uclient->s = uclient->s; \
3310 ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3313 #define UPDATE_EXTEND_XSTAT(s, t) \
3315 diff = le32_to_cpu(xclient->s) - le32_to_cpu(old_xclient->s); \
3316 old_xclient->s = xclient->s; \
3317 ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3320 /* minuend -= subtrahend */
3321 #define SUB_64(m_hi, s_hi, m_lo, s_lo) \
3323 DIFF_64(m_hi, m_hi, s_hi, m_lo, m_lo, s_lo); \
3326 /* minuend[hi:lo] -= subtrahend */
3327 #define SUB_EXTEND_64(m_hi, m_lo, s) \
3329 SUB_64(m_hi, 0, m_lo, s); \
3332 #define SUB_EXTEND_USTAT(s, t) \
3334 diff = le32_to_cpu(uclient->s) - le32_to_cpu(old_uclient->s); \
3335 SUB_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3339 * General service functions
3342 static inline long bnx2x_hilo(u32 *hiref)
3343 {
3344 u32 lo = *(hiref + 1);
3345 #if (BITS_PER_LONG == 64)
3346 u32 hi = *hiref;
3348 return HILO_U64(hi, lo);
3349 #else
3350 return lo;
3351 #endif
3352 }
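/* Note: on 32-bit kernels this returns only the low 32 bits, so the
 * values exported via net_device_stats can wrap even though the
 * driver's internal hi/lo pairs hold the full 64-bit counters. */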
3355 * Init service functions
3358 static void bnx2x_storm_stats_post(struct bnx2x *bp)
3360 if (!bp->stats_pending) {
3361 struct eth_query_ramrod_data ramrod_data = {0};
3364 ramrod_data.drv_counter = bp->stats_counter++;
3365 ramrod_data.collect_port = bp->port.pmf ? 1 : 0;
3366 for_each_queue(bp, i)
3367 ramrod_data.ctr_id_vector |= (1 << bp->fp[i].cl_id);
3369 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_STAT_QUERY, 0,
3370 ((u32 *)&ramrod_data)[1],
3371 ((u32 *)&ramrod_data)[0], 0);
3373 /* stats ramrod has its own slot on the spq */
3375 bp->stats_pending = 1;
3380 static void bnx2x_hw_stats_post(struct bnx2x *bp)
3382 struct dmae_command *dmae = &bp->stats_dmae;
3383 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3385 *stats_comp = DMAE_COMP_VAL;
3386 if (CHIP_REV_IS_SLOW(bp))
3390 if (bp->executer_idx) {
3391 int loader_idx = PMF_DMAE_C(bp);
3393 memset(dmae, 0, sizeof(struct dmae_command));
3395 dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3396 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3397 DMAE_CMD_DST_RESET |
3399 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3401 DMAE_CMD_ENDIANITY_DW_SWAP |
3403 (BP_PORT(bp) ? DMAE_CMD_PORT_1 :
3405 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3406 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, dmae[0]));
3407 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, dmae[0]));
3408 dmae->dst_addr_lo = (DMAE_REG_CMD_MEM +
3409 sizeof(struct dmae_command) *
3410 (loader_idx + 1)) >> 2;
3411 dmae->dst_addr_hi = 0;
3412 dmae->len = sizeof(struct dmae_command) >> 2;
3415 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx + 1] >> 2;
3416 dmae->comp_addr_hi = 0;
3420 bnx2x_post_dmae(bp, dmae, loader_idx);
3422 } else if (bp->func_stx) {
3424 bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
3428 static int bnx2x_stats_comp(struct bnx2x *bp)
3430 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3434 while (*stats_comp != DMAE_COMP_VAL) {
3436 BNX2X_ERR("timeout waiting for stats finished\n");
3446 * Statistics service functions
3449 static void bnx2x_stats_pmf_update(struct bnx2x *bp)
3451 struct dmae_command *dmae;
3453 int loader_idx = PMF_DMAE_C(bp);
3454 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3457 if (!IS_E1HMF(bp) || !bp->port.pmf || !bp->port.port_stx) {
3458 BNX2X_ERR("BUG!\n");
3462 bp->executer_idx = 0;
3464 opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3466 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3468 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3470 DMAE_CMD_ENDIANITY_DW_SWAP |
3472 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3473 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3475 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3476 dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
3477 dmae->src_addr_lo = bp->port.port_stx >> 2;
3478 dmae->src_addr_hi = 0;
3479 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3480 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3481 dmae->len = DMAE_LEN32_RD_MAX;
3482 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3483 dmae->comp_addr_hi = 0;
3486 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3487 dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
3488 dmae->src_addr_lo = (bp->port.port_stx >> 2) + DMAE_LEN32_RD_MAX;
3489 dmae->src_addr_hi = 0;
3490 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats) +
3491 DMAE_LEN32_RD_MAX * 4);
3492 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats) +
3493 DMAE_LEN32_RD_MAX * 4);
3494 dmae->len = (sizeof(struct host_port_stats) >> 2) - DMAE_LEN32_RD_MAX;
3495 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3496 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3497 dmae->comp_val = DMAE_COMP_VAL;
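/* A single DMAE read is capped at DMAE_LEN32_RD_MAX dwords, so the
 * port stats are fetched with two chained commands: the first
 * completes into the next GO register to trigger the second, which
 * transfers the remainder and signals DMAE_COMP_VAL to the host. */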
3500 bnx2x_hw_stats_post(bp);
3501 bnx2x_stats_comp(bp);
3504 static void bnx2x_port_stats_init(struct bnx2x *bp)
3506 struct dmae_command *dmae;
3507 int port = BP_PORT(bp);
3508 int vn = BP_E1HVN(bp);
3510 int loader_idx = PMF_DMAE_C(bp);
3512 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3515 if (!bp->link_vars.link_up || !bp->port.pmf) {
3516 BNX2X_ERR("BUG!\n");
3520 bp->executer_idx = 0;
3523 opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3524 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3525 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3527 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3529 DMAE_CMD_ENDIANITY_DW_SWAP |
3531 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3532 (vn << DMAE_CMD_E1HVN_SHIFT));
3534 if (bp->port.port_stx) {
3536 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3537 dmae->opcode = opcode;
3538 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3539 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3540 dmae->dst_addr_lo = bp->port.port_stx >> 2;
3541 dmae->dst_addr_hi = 0;
3542 dmae->len = sizeof(struct host_port_stats) >> 2;
3543 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3544 dmae->comp_addr_hi = 0;
3550 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3551 dmae->opcode = opcode;
3552 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
3553 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
3554 dmae->dst_addr_lo = bp->func_stx >> 2;
3555 dmae->dst_addr_hi = 0;
3556 dmae->len = sizeof(struct host_func_stats) >> 2;
3557 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3558 dmae->comp_addr_hi = 0;
3563 opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3564 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3565 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3567 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3569 DMAE_CMD_ENDIANITY_DW_SWAP |
3571 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3572 (vn << DMAE_CMD_E1HVN_SHIFT));
3574 if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
3576 mac_addr = (port ? NIG_REG_INGRESS_BMAC1_MEM :
3577 NIG_REG_INGRESS_BMAC0_MEM);
3579 /* BIGMAC_REGISTER_TX_STAT_GTPKT ..
3580 BIGMAC_REGISTER_TX_STAT_GTBYT */
3581 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3582 dmae->opcode = opcode;
3583 dmae->src_addr_lo = (mac_addr +
3584 BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
3585 dmae->src_addr_hi = 0;
3586 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
3587 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
3588 dmae->len = (8 + BIGMAC_REGISTER_TX_STAT_GTBYT -
3589 BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
3590 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3591 dmae->comp_addr_hi = 0;
3594 /* BIGMAC_REGISTER_RX_STAT_GR64 ..
3595 BIGMAC_REGISTER_RX_STAT_GRIPJ */
3596 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3597 dmae->opcode = opcode;
3598 dmae->src_addr_lo = (mac_addr +
3599 BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
3600 dmae->src_addr_hi = 0;
3601 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
3602 offsetof(struct bmac_stats, rx_stat_gr64_lo));
3603 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
3604 offsetof(struct bmac_stats, rx_stat_gr64_lo));
3605 dmae->len = (8 + BIGMAC_REGISTER_RX_STAT_GRIPJ -
3606 BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
3607 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3608 dmae->comp_addr_hi = 0;
3611 } else if (bp->link_vars.mac_type == MAC_TYPE_EMAC) {
3613 mac_addr = (port ? GRCBASE_EMAC1 : GRCBASE_EMAC0);
3615 /* EMAC_REG_EMAC_RX_STAT_AC (EMAC_REG_EMAC_RX_STAT_AC_COUNT)*/
3616 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3617 dmae->opcode = opcode;
3618 dmae->src_addr_lo = (mac_addr +
3619 EMAC_REG_EMAC_RX_STAT_AC) >> 2;
3620 dmae->src_addr_hi = 0;
3621 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
3622 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
3623 dmae->len = EMAC_REG_EMAC_RX_STAT_AC_COUNT;
3624 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3625 dmae->comp_addr_hi = 0;
3628 /* EMAC_REG_EMAC_RX_STAT_AC_28 */
3629 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3630 dmae->opcode = opcode;
3631 dmae->src_addr_lo = (mac_addr +
3632 EMAC_REG_EMAC_RX_STAT_AC_28) >> 2;
3633 dmae->src_addr_hi = 0;
3634 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
3635 offsetof(struct emac_stats, rx_stat_falsecarriererrors));
3636 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
3637 offsetof(struct emac_stats, rx_stat_falsecarriererrors));
3639 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3640 dmae->comp_addr_hi = 0;
3643 /* EMAC_REG_EMAC_TX_STAT_AC (EMAC_REG_EMAC_TX_STAT_AC_COUNT)*/
3644 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3645 dmae->opcode = opcode;
3646 dmae->src_addr_lo = (mac_addr +
3647 EMAC_REG_EMAC_TX_STAT_AC) >> 2;
3648 dmae->src_addr_hi = 0;
3649 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
3650 offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
3651 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
3652 offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
3653 dmae->len = EMAC_REG_EMAC_TX_STAT_AC_COUNT;
3654 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3655 dmae->comp_addr_hi = 0;
3660 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3661 dmae->opcode = opcode;
3662 dmae->src_addr_lo = (port ? NIG_REG_STAT1_BRB_DISCARD :
3663 NIG_REG_STAT0_BRB_DISCARD) >> 2;
3664 dmae->src_addr_hi = 0;
3665 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats));
3666 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats));
3667 dmae->len = (sizeof(struct nig_stats) - 4*sizeof(u32)) >> 2;
3668 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3669 dmae->comp_addr_hi = 0;
3672 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3673 dmae->opcode = opcode;
3674 dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT0 :
3675 NIG_REG_STAT0_EGRESS_MAC_PKT0) >> 2;
3676 dmae->src_addr_hi = 0;
3677 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
3678 offsetof(struct nig_stats, egress_mac_pkt0_lo));
3679 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
3680 offsetof(struct nig_stats, egress_mac_pkt0_lo));
3681 dmae->len = (2*sizeof(u32)) >> 2;
3682 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3683 dmae->comp_addr_hi = 0;
3686 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3687 dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3688 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
3689 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3691 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3693 DMAE_CMD_ENDIANITY_DW_SWAP |
3695 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3696 (vn << DMAE_CMD_E1HVN_SHIFT));
3697 dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT1 :
3698 NIG_REG_STAT0_EGRESS_MAC_PKT1) >> 2;
3699 dmae->src_addr_hi = 0;
3700 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
3701 offsetof(struct nig_stats, egress_mac_pkt1_lo));
3702 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
3703 offsetof(struct nig_stats, egress_mac_pkt1_lo));
3704 dmae->len = (2*sizeof(u32)) >> 2;
3705 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3706 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3707 dmae->comp_val = DMAE_COMP_VAL;
3712 static void bnx2x_func_stats_init(struct bnx2x *bp)
3714 struct dmae_command *dmae = &bp->stats_dmae;
3715 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3718 if (!bp->func_stx) {
3719 BNX2X_ERR("BUG!\n");
3723 bp->executer_idx = 0;
3724 memset(dmae, 0, sizeof(struct dmae_command));
3726 dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3727 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
3728 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3730 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3732 DMAE_CMD_ENDIANITY_DW_SWAP |
3734 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3735 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3736 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
3737 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
3738 dmae->dst_addr_lo = bp->func_stx >> 2;
3739 dmae->dst_addr_hi = 0;
3740 dmae->len = sizeof(struct host_func_stats) >> 2;
3741 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3742 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3743 dmae->comp_val = DMAE_COMP_VAL;
3748 static void bnx2x_stats_start(struct bnx2x *bp)
3751 bnx2x_port_stats_init(bp);
3753 else if (bp->func_stx)
3754 bnx2x_func_stats_init(bp);
3756 bnx2x_hw_stats_post(bp);
3757 bnx2x_storm_stats_post(bp);
3760 static void bnx2x_stats_pmf_start(struct bnx2x *bp)
3762 bnx2x_stats_comp(bp);
3763 bnx2x_stats_pmf_update(bp);
3764 bnx2x_stats_start(bp);
3767 static void bnx2x_stats_restart(struct bnx2x *bp)
3769 bnx2x_stats_comp(bp);
3770 bnx2x_stats_start(bp);
3773 static void bnx2x_bmac_stats_update(struct bnx2x *bp)
3775 struct bmac_stats *new = bnx2x_sp(bp, mac_stats.bmac_stats);
3776 struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3777 struct bnx2x_eth_stats *estats = &bp->eth_stats;
3783 UPDATE_STAT64(rx_stat_grerb, rx_stat_ifhcinbadoctets);
3784 UPDATE_STAT64(rx_stat_grfcs, rx_stat_dot3statsfcserrors);
3785 UPDATE_STAT64(rx_stat_grund, rx_stat_etherstatsundersizepkts);
3786 UPDATE_STAT64(rx_stat_grovr, rx_stat_dot3statsframestoolong);
3787 UPDATE_STAT64(rx_stat_grfrg, rx_stat_etherstatsfragments);
3788 UPDATE_STAT64(rx_stat_grjbr, rx_stat_etherstatsjabbers);
3789 UPDATE_STAT64(rx_stat_grxcf, rx_stat_maccontrolframesreceived);
3790 UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffstateentered);
3791 UPDATE_STAT64(rx_stat_grxpf, rx_stat_bmac_xpf);
3792 UPDATE_STAT64(tx_stat_gtxpf, tx_stat_outxoffsent);
3793 UPDATE_STAT64(tx_stat_gtxpf, tx_stat_flowcontroldone);
3794 UPDATE_STAT64(tx_stat_gt64, tx_stat_etherstatspkts64octets);
3795 UPDATE_STAT64(tx_stat_gt127,
3796 tx_stat_etherstatspkts65octetsto127octets);
3797 UPDATE_STAT64(tx_stat_gt255,
3798 tx_stat_etherstatspkts128octetsto255octets);
3799 UPDATE_STAT64(tx_stat_gt511,
3800 tx_stat_etherstatspkts256octetsto511octets);
3801 UPDATE_STAT64(tx_stat_gt1023,
3802 tx_stat_etherstatspkts512octetsto1023octets);
3803 UPDATE_STAT64(tx_stat_gt1518,
3804 tx_stat_etherstatspkts1024octetsto1522octets);
3805 UPDATE_STAT64(tx_stat_gt2047, tx_stat_bmac_2047);
3806 UPDATE_STAT64(tx_stat_gt4095, tx_stat_bmac_4095);
3807 UPDATE_STAT64(tx_stat_gt9216, tx_stat_bmac_9216);
3808 UPDATE_STAT64(tx_stat_gt16383, tx_stat_bmac_16383);
3809 UPDATE_STAT64(tx_stat_gterr,
3810 tx_stat_dot3statsinternalmactransmiterrors);
3811 UPDATE_STAT64(tx_stat_gtufl, tx_stat_bmac_ufl);
3813 estats->pause_frames_received_hi =
3814 pstats->mac_stx[1].rx_stat_bmac_xpf_hi;
3815 estats->pause_frames_received_lo =
3816 pstats->mac_stx[1].rx_stat_bmac_xpf_lo;
3818 estats->pause_frames_sent_hi =
3819 pstats->mac_stx[1].tx_stat_outxoffsent_hi;
3820 estats->pause_frames_sent_lo =
3821 pstats->mac_stx[1].tx_stat_outxoffsent_lo;
3824 static void bnx2x_emac_stats_update(struct bnx2x *bp)
3826 struct emac_stats *new = bnx2x_sp(bp, mac_stats.emac_stats);
3827 struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3828 struct bnx2x_eth_stats *estats = &bp->eth_stats;
3830 UPDATE_EXTEND_STAT(rx_stat_ifhcinbadoctets);
3831 UPDATE_EXTEND_STAT(tx_stat_ifhcoutbadoctets);
3832 UPDATE_EXTEND_STAT(rx_stat_dot3statsfcserrors);
3833 UPDATE_EXTEND_STAT(rx_stat_dot3statsalignmenterrors);
3834 UPDATE_EXTEND_STAT(rx_stat_dot3statscarriersenseerrors);
3835 UPDATE_EXTEND_STAT(rx_stat_falsecarriererrors);
3836 UPDATE_EXTEND_STAT(rx_stat_etherstatsundersizepkts);
3837 UPDATE_EXTEND_STAT(rx_stat_dot3statsframestoolong);
3838 UPDATE_EXTEND_STAT(rx_stat_etherstatsfragments);
3839 UPDATE_EXTEND_STAT(rx_stat_etherstatsjabbers);
3840 UPDATE_EXTEND_STAT(rx_stat_maccontrolframesreceived);
3841 UPDATE_EXTEND_STAT(rx_stat_xoffstateentered);
3842 UPDATE_EXTEND_STAT(rx_stat_xonpauseframesreceived);
3843 UPDATE_EXTEND_STAT(rx_stat_xoffpauseframesreceived);
3844 UPDATE_EXTEND_STAT(tx_stat_outxonsent);
3845 UPDATE_EXTEND_STAT(tx_stat_outxoffsent);
3846 UPDATE_EXTEND_STAT(tx_stat_flowcontroldone);
3847 UPDATE_EXTEND_STAT(tx_stat_etherstatscollisions);
3848 UPDATE_EXTEND_STAT(tx_stat_dot3statssinglecollisionframes);
3849 UPDATE_EXTEND_STAT(tx_stat_dot3statsmultiplecollisionframes);
3850 UPDATE_EXTEND_STAT(tx_stat_dot3statsdeferredtransmissions);
3851 UPDATE_EXTEND_STAT(tx_stat_dot3statsexcessivecollisions);
3852 UPDATE_EXTEND_STAT(tx_stat_dot3statslatecollisions);
3853 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts64octets);
3854 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts65octetsto127octets);
3855 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts128octetsto255octets);
3856 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts256octetsto511octets);
3857 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts512octetsto1023octets);
3858 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts1024octetsto1522octets);
3859 UPDATE_EXTEND_STAT(tx_stat_etherstatspktsover1522octets);
3860 UPDATE_EXTEND_STAT(tx_stat_dot3statsinternalmactransmiterrors);
3862 estats->pause_frames_received_hi =
3863 pstats->mac_stx[1].rx_stat_xonpauseframesreceived_hi;
3864 estats->pause_frames_received_lo =
3865 pstats->mac_stx[1].rx_stat_xonpauseframesreceived_lo;
3866 ADD_64(estats->pause_frames_received_hi,
3867 pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_hi,
3868 estats->pause_frames_received_lo,
3869 pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_lo);
3871 estats->pause_frames_sent_hi =
3872 pstats->mac_stx[1].tx_stat_outxonsent_hi;
3873 estats->pause_frames_sent_lo =
3874 pstats->mac_stx[1].tx_stat_outxonsent_lo;
3875 ADD_64(estats->pause_frames_sent_hi,
3876 pstats->mac_stx[1].tx_stat_outxoffsent_hi,
3877 estats->pause_frames_sent_lo,
3878 pstats->mac_stx[1].tx_stat_outxoffsent_lo);
3881 static int bnx2x_hw_stats_update(struct bnx2x *bp)
3883 struct nig_stats *new = bnx2x_sp(bp, nig_stats);
3884 struct nig_stats *old = &(bp->port.old_nig_stats);
3885 struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3886 struct bnx2x_eth_stats *estats = &bp->eth_stats;
3893 if (bp->link_vars.mac_type == MAC_TYPE_BMAC)
3894 bnx2x_bmac_stats_update(bp);
3896 else if (bp->link_vars.mac_type == MAC_TYPE_EMAC)
3897 bnx2x_emac_stats_update(bp);
3899 else { /* unreached */
3900 BNX2X_ERR("stats updated by DMAE but no MAC active\n");
3904 ADD_EXTEND_64(pstats->brb_drop_hi, pstats->brb_drop_lo,
3905 new->brb_discard - old->brb_discard);
3906 ADD_EXTEND_64(estats->brb_truncate_hi, estats->brb_truncate_lo,
3907 new->brb_truncate - old->brb_truncate);
3909 UPDATE_STAT64_NIG(egress_mac_pkt0,
3910 etherstatspkts1024octetsto1522octets);
3911 UPDATE_STAT64_NIG(egress_mac_pkt1, etherstatspktsover1522octets);
3913 memcpy(old, new, sizeof(struct nig_stats));
3915 memcpy(&(estats->rx_stat_ifhcinbadoctets_hi), &(pstats->mac_stx[1]),
3916 sizeof(struct mac_stx));
3917 estats->brb_drop_hi = pstats->brb_drop_hi;
3918 estats->brb_drop_lo = pstats->brb_drop_lo;
3920 pstats->host_port_stats_start = ++pstats->host_port_stats_end;
3922 nig_timer_max = SHMEM_RD(bp, port_mb[BP_PORT(bp)].stat_nig_timer);
3923 if (nig_timer_max != estats->nig_timer_max) {
3924 estats->nig_timer_max = nig_timer_max;
3925 BNX2X_ERR("NIG timer max (%u)\n", estats->nig_timer_max);
3931 static int bnx2x_storm_stats_update(struct bnx2x *bp)
3933 struct eth_stats_query *stats = bnx2x_sp(bp, fw_stats);
3934 struct tstorm_per_port_stats *tport =
3935 &stats->tstorm_common.port_statistics;
3936 struct host_func_stats *fstats = bnx2x_sp(bp, func_stats);
3937 struct bnx2x_eth_stats *estats = &bp->eth_stats;
3940 memcpy(&(fstats->total_bytes_received_hi),
3941 &(bnx2x_sp(bp, func_stats_base)->total_bytes_received_hi),
3942 sizeof(struct host_func_stats) - 2*sizeof(u32));
3943 estats->error_bytes_received_hi = 0;
3944 estats->error_bytes_received_lo = 0;
3945 estats->etherstatsoverrsizepkts_hi = 0;
3946 estats->etherstatsoverrsizepkts_lo = 0;
3947 estats->no_buff_discard_hi = 0;
3948 estats->no_buff_discard_lo = 0;
3950 for_each_rx_queue(bp, i) {
3951 struct bnx2x_fastpath *fp = &bp->fp[i];
3952 int cl_id = fp->cl_id;
3953 struct tstorm_per_client_stats *tclient =
3954 &stats->tstorm_common.client_statistics[cl_id];
3955 struct tstorm_per_client_stats *old_tclient = &fp->old_tclient;
3956 struct ustorm_per_client_stats *uclient =
3957 &stats->ustorm_common.client_statistics[cl_id];
3958 struct ustorm_per_client_stats *old_uclient = &fp->old_uclient;
3959 struct xstorm_per_client_stats *xclient =
3960 &stats->xstorm_common.client_statistics[cl_id];
3961 struct xstorm_per_client_stats *old_xclient = &fp->old_xclient;
3962 struct bnx2x_eth_q_stats *qstats = &fp->eth_q_stats;
3965 /* are storm stats valid? */
3966 if ((u16)(le16_to_cpu(xclient->stats_counter) + 1) !=
3967 bp->stats_counter) {
3968 DP(BNX2X_MSG_STATS, "[%d] stats not updated by xstorm"
3969 " xstorm counter (%d) != stats_counter (%d)\n",
3970 i, xclient->stats_counter, bp->stats_counter);
3973 if ((u16)(le16_to_cpu(tclient->stats_counter) + 1) !=
3974 bp->stats_counter) {
3975 DP(BNX2X_MSG_STATS, "[%d] stats not updated by tstorm"
3976 " tstorm counter (%d) != stats_counter (%d)\n",
3977 i, tclient->stats_counter, bp->stats_counter);
3980 if ((u16)(le16_to_cpu(uclient->stats_counter) + 1) !=
3981 bp->stats_counter) {
3982 DP(BNX2X_MSG_STATS, "[%d] stats not updated by ustorm"
3983 " ustorm counter (%d) != stats_counter (%d)\n",
3984 i, uclient->stats_counter, bp->stats_counter);
3988 qstats->total_bytes_received_hi =
3989 le32_to_cpu(tclient->rcv_broadcast_bytes.hi);
3990 qstats->total_bytes_received_lo =
3991 le32_to_cpu(tclient->rcv_broadcast_bytes.lo);
3993 ADD_64(qstats->total_bytes_received_hi,
3994 le32_to_cpu(tclient->rcv_multicast_bytes.hi),
3995 qstats->total_bytes_received_lo,
3996 le32_to_cpu(tclient->rcv_multicast_bytes.lo));
3998 ADD_64(qstats->total_bytes_received_hi,
3999 le32_to_cpu(tclient->rcv_unicast_bytes.hi),
4000 qstats->total_bytes_received_lo,
4001 le32_to_cpu(tclient->rcv_unicast_bytes.lo));
4003 qstats->valid_bytes_received_hi =
4004 qstats->total_bytes_received_hi;
4005 qstats->valid_bytes_received_lo =
4006 qstats->total_bytes_received_lo;
4008 qstats->error_bytes_received_hi =
4009 le32_to_cpu(tclient->rcv_error_bytes.hi);
4010 qstats->error_bytes_received_lo =
4011 le32_to_cpu(tclient->rcv_error_bytes.lo);
4013 ADD_64(qstats->total_bytes_received_hi,
4014 qstats->error_bytes_received_hi,
4015 qstats->total_bytes_received_lo,
4016 qstats->error_bytes_received_lo);
4018 UPDATE_EXTEND_TSTAT(rcv_unicast_pkts,
4019 total_unicast_packets_received);
4020 UPDATE_EXTEND_TSTAT(rcv_multicast_pkts,
4021 total_multicast_packets_received);
4022 UPDATE_EXTEND_TSTAT(rcv_broadcast_pkts,
4023 total_broadcast_packets_received);
4024 UPDATE_EXTEND_TSTAT(packets_too_big_discard,
4025 etherstatsoverrsizepkts);
4026 UPDATE_EXTEND_TSTAT(no_buff_discard, no_buff_discard);
4028 SUB_EXTEND_USTAT(ucast_no_buff_pkts,
4029 total_unicast_packets_received);
4030 SUB_EXTEND_USTAT(mcast_no_buff_pkts,
4031 total_multicast_packets_received);
4032 SUB_EXTEND_USTAT(bcast_no_buff_pkts,
4033 total_broadcast_packets_received);
4034 UPDATE_EXTEND_USTAT(ucast_no_buff_pkts, no_buff_discard);
4035 UPDATE_EXTEND_USTAT(mcast_no_buff_pkts, no_buff_discard);
4036 UPDATE_EXTEND_USTAT(bcast_no_buff_pkts, no_buff_discard);
4038 qstats->total_bytes_transmitted_hi =
4039 le32_to_cpu(xclient->unicast_bytes_sent.hi);
4040 qstats->total_bytes_transmitted_lo =
4041 le32_to_cpu(xclient->unicast_bytes_sent.lo);
4043 ADD_64(qstats->total_bytes_transmitted_hi,
4044 le32_to_cpu(xclient->multicast_bytes_sent.hi),
4045 qstats->total_bytes_transmitted_lo,
4046 le32_to_cpu(xclient->multicast_bytes_sent.lo));
4048 ADD_64(qstats->total_bytes_transmitted_hi,
4049 le32_to_cpu(xclient->broadcast_bytes_sent.hi),
4050 qstats->total_bytes_transmitted_lo,
4051 le32_to_cpu(xclient->broadcast_bytes_sent.lo));
4053 UPDATE_EXTEND_XSTAT(unicast_pkts_sent,
4054 total_unicast_packets_transmitted);
4055 UPDATE_EXTEND_XSTAT(multicast_pkts_sent,
4056 total_multicast_packets_transmitted);
4057 UPDATE_EXTEND_XSTAT(broadcast_pkts_sent,
4058 total_broadcast_packets_transmitted);
4060 old_tclient->checksum_discard = tclient->checksum_discard;
4061 old_tclient->ttl0_discard = tclient->ttl0_discard;
4063 ADD_64(fstats->total_bytes_received_hi,
4064 qstats->total_bytes_received_hi,
4065 fstats->total_bytes_received_lo,
4066 qstats->total_bytes_received_lo);
4067 ADD_64(fstats->total_bytes_transmitted_hi,
4068 qstats->total_bytes_transmitted_hi,
4069 fstats->total_bytes_transmitted_lo,
4070 qstats->total_bytes_transmitted_lo);
4071 ADD_64(fstats->total_unicast_packets_received_hi,
4072 qstats->total_unicast_packets_received_hi,
4073 fstats->total_unicast_packets_received_lo,
4074 qstats->total_unicast_packets_received_lo);
4075 ADD_64(fstats->total_multicast_packets_received_hi,
4076 qstats->total_multicast_packets_received_hi,
4077 fstats->total_multicast_packets_received_lo,
4078 qstats->total_multicast_packets_received_lo);
4079 ADD_64(fstats->total_broadcast_packets_received_hi,
4080 qstats->total_broadcast_packets_received_hi,
4081 fstats->total_broadcast_packets_received_lo,
4082 qstats->total_broadcast_packets_received_lo);
4083 ADD_64(fstats->total_unicast_packets_transmitted_hi,
4084 qstats->total_unicast_packets_transmitted_hi,
4085 fstats->total_unicast_packets_transmitted_lo,
4086 qstats->total_unicast_packets_transmitted_lo);
4087 ADD_64(fstats->total_multicast_packets_transmitted_hi,
4088 qstats->total_multicast_packets_transmitted_hi,
4089 fstats->total_multicast_packets_transmitted_lo,
4090 qstats->total_multicast_packets_transmitted_lo);
4091 ADD_64(fstats->total_broadcast_packets_transmitted_hi,
4092 qstats->total_broadcast_packets_transmitted_hi,
4093 fstats->total_broadcast_packets_transmitted_lo,
4094 qstats->total_broadcast_packets_transmitted_lo);
4095 ADD_64(fstats->valid_bytes_received_hi,
4096 qstats->valid_bytes_received_hi,
4097 fstats->valid_bytes_received_lo,
4098 qstats->valid_bytes_received_lo);
4100 ADD_64(estats->error_bytes_received_hi,
4101 qstats->error_bytes_received_hi,
4102 estats->error_bytes_received_lo,
4103 qstats->error_bytes_received_lo);
4104 ADD_64(estats->etherstatsoverrsizepkts_hi,
4105 qstats->etherstatsoverrsizepkts_hi,
4106 estats->etherstatsoverrsizepkts_lo,
4107 qstats->etherstatsoverrsizepkts_lo);
4108 ADD_64(estats->no_buff_discard_hi, qstats->no_buff_discard_hi,
4109 estats->no_buff_discard_lo, qstats->no_buff_discard_lo);
4112 ADD_64(fstats->total_bytes_received_hi,
4113 estats->rx_stat_ifhcinbadoctets_hi,
4114 fstats->total_bytes_received_lo,
4115 estats->rx_stat_ifhcinbadoctets_lo);
4117 memcpy(estats, &(fstats->total_bytes_received_hi),
4118 sizeof(struct host_func_stats) - 2*sizeof(u32));
4120 ADD_64(estats->etherstatsoverrsizepkts_hi,
4121 estats->rx_stat_dot3statsframestoolong_hi,
4122 estats->etherstatsoverrsizepkts_lo,
4123 estats->rx_stat_dot3statsframestoolong_lo);
4124 ADD_64(estats->error_bytes_received_hi,
4125 estats->rx_stat_ifhcinbadoctets_hi,
4126 estats->error_bytes_received_lo,
4127 estats->rx_stat_ifhcinbadoctets_lo);
4130 estats->mac_filter_discard =
4131 le32_to_cpu(tport->mac_filter_discard);
4132 estats->xxoverflow_discard =
4133 le32_to_cpu(tport->xxoverflow_discard);
4134 estats->brb_truncate_discard =
4135 le32_to_cpu(tport->brb_truncate_discard);
4136 estats->mac_discard = le32_to_cpu(tport->mac_discard);
4139 fstats->host_func_stats_start = ++fstats->host_func_stats_end;
4141 bp->stats_pending = 0;
4146 static void bnx2x_net_stats_update(struct bnx2x *bp)
4148 struct bnx2x_eth_stats *estats = &bp->eth_stats;
4149 struct net_device_stats *nstats = &bp->dev->stats;
4152 nstats->rx_packets =
4153 bnx2x_hilo(&estats->total_unicast_packets_received_hi) +
4154 bnx2x_hilo(&estats->total_multicast_packets_received_hi) +
4155 bnx2x_hilo(&estats->total_broadcast_packets_received_hi);
4157 nstats->tx_packets =
4158 bnx2x_hilo(&estats->total_unicast_packets_transmitted_hi) +
4159 bnx2x_hilo(&estats->total_multicast_packets_transmitted_hi) +
4160 bnx2x_hilo(&estats->total_broadcast_packets_transmitted_hi);
4162 nstats->rx_bytes = bnx2x_hilo(&estats->total_bytes_received_hi);
4164 nstats->tx_bytes = bnx2x_hilo(&estats->total_bytes_transmitted_hi);
4166 nstats->rx_dropped = estats->mac_discard;
4167 for_each_rx_queue(bp, i)
4168 nstats->rx_dropped +=
4169 le32_to_cpu(bp->fp[i].old_tclient.checksum_discard);
4171 nstats->tx_dropped = 0;
4174 bnx2x_hilo(&estats->total_multicast_packets_received_hi);
4176 nstats->collisions =
4177 bnx2x_hilo(&estats->tx_stat_etherstatscollisions_hi);
4179 nstats->rx_length_errors =
4180 bnx2x_hilo(&estats->rx_stat_etherstatsundersizepkts_hi) +
4181 bnx2x_hilo(&estats->etherstatsoverrsizepkts_hi);
4182 nstats->rx_over_errors = bnx2x_hilo(&estats->brb_drop_hi) +
4183 bnx2x_hilo(&estats->brb_truncate_hi);
4184 nstats->rx_crc_errors =
4185 bnx2x_hilo(&estats->rx_stat_dot3statsfcserrors_hi);
4186 nstats->rx_frame_errors =
4187 bnx2x_hilo(&estats->rx_stat_dot3statsalignmenterrors_hi);
4188 nstats->rx_fifo_errors = bnx2x_hilo(&estats->no_buff_discard_hi);
4189 nstats->rx_missed_errors = estats->xxoverflow_discard;
4191 nstats->rx_errors = nstats->rx_length_errors +
4192 nstats->rx_over_errors +
4193 nstats->rx_crc_errors +
4194 nstats->rx_frame_errors +
4195 nstats->rx_fifo_errors +
4196 nstats->rx_missed_errors;
4198 nstats->tx_aborted_errors =
4199 bnx2x_hilo(&estats->tx_stat_dot3statslatecollisions_hi) +
4200 bnx2x_hilo(&estats->tx_stat_dot3statsexcessivecollisions_hi);
4201 nstats->tx_carrier_errors =
4202 bnx2x_hilo(&estats->rx_stat_dot3statscarriersenseerrors_hi);
4203 nstats->tx_fifo_errors = 0;
4204 nstats->tx_heartbeat_errors = 0;
4205 nstats->tx_window_errors = 0;
4207 nstats->tx_errors = nstats->tx_aborted_errors +
4208 nstats->tx_carrier_errors +
4209 bnx2x_hilo(&estats->tx_stat_dot3statsinternalmactransmiterrors_hi);
4212 static void bnx2x_drv_stats_update(struct bnx2x *bp)
4213 {
4214 struct bnx2x_eth_stats *estats = &bp->eth_stats;
4215 int i;
4217 estats->driver_xoff = 0;
4218 estats->rx_err_discard_pkt = 0;
4219 estats->rx_skb_alloc_failed = 0;
4220 estats->hw_csum_err = 0;
4221 for_each_rx_queue(bp, i) {
4222 struct bnx2x_eth_q_stats *qstats = &bp->fp[i].eth_q_stats;
4224 estats->driver_xoff += qstats->driver_xoff;
4225 estats->rx_err_discard_pkt += qstats->rx_err_discard_pkt;
4226 estats->rx_skb_alloc_failed += qstats->rx_skb_alloc_failed;
4227 estats->hw_csum_err += qstats->hw_csum_err;
4228 }
4229 }
4231 static void bnx2x_stats_update(struct bnx2x *bp)
4232 {
4233 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
4235 if (*stats_comp != DMAE_COMP_VAL)
4236 return;
4238 if (bp->port.pmf)
4239 bnx2x_hw_stats_update(bp);
4241 if (bnx2x_storm_stats_update(bp) && (bp->stats_pending++ == 3)) {
4242 BNX2X_ERR("storm stats were not updated for 3 times\n");
4243 bnx2x_panic();
4244 return;
4245 }
4247 bnx2x_net_stats_update(bp);
4248 bnx2x_drv_stats_update(bp);
4250 if (bp->msglevel & NETIF_MSG_TIMER) {
4251 struct bnx2x_fastpath *fp0_rx = bp->fp;
4252 struct bnx2x_fastpath *fp0_tx = &(bp->fp[bp->num_rx_queues]);
4253 struct tstorm_per_client_stats *old_tclient =
4254 &bp->fp->old_tclient;
4255 struct bnx2x_eth_q_stats *qstats = &bp->fp->eth_q_stats;
4256 struct bnx2x_eth_stats *estats = &bp->eth_stats;
4257 struct net_device_stats *nstats = &bp->dev->stats;
4258 int i;
4260 printk(KERN_DEBUG "%s:\n", bp->dev->name);
4261 printk(KERN_DEBUG " tx avail (%4x) tx hc idx (%x)"
4262 " tx pkt (%lx)\n",
4263 bnx2x_tx_avail(fp0_tx),
4264 le16_to_cpu(*fp0_tx->tx_cons_sb), nstats->tx_packets);
4265 printk(KERN_DEBUG " rx usage (%4x) rx hc idx (%x)"
4266 " rx pkt (%lx)\n",
4267 (u16)(le16_to_cpu(*fp0_rx->rx_cons_sb) -
4268 fp0_rx->rx_comp_cons),
4269 le16_to_cpu(*fp0_rx->rx_cons_sb), nstats->rx_packets);
4270 printk(KERN_DEBUG " %s (Xoff events %u) brb drops %u "
4271 "brb truncate %u\n",
4272 (netif_queue_stopped(bp->dev) ? "Xoff" : "Xon"),
4273 qstats->driver_xoff,
4274 estats->brb_drop_lo, estats->brb_truncate_lo);
4275 printk(KERN_DEBUG "tstats: checksum_discard %u "
4276 "packets_too_big_discard %lu no_buff_discard %lu "
4277 "mac_discard %u mac_filter_discard %u "
4278 "xxovrflow_discard %u brb_truncate_discard %u "
4279 "ttl0_discard %u\n",
4280 le32_to_cpu(old_tclient->checksum_discard),
4281 bnx2x_hilo(&qstats->etherstatsoverrsizepkts_hi),
4282 bnx2x_hilo(&qstats->no_buff_discard_hi),
4283 estats->mac_discard, estats->mac_filter_discard,
4284 estats->xxoverflow_discard, estats->brb_truncate_discard,
4285 le32_to_cpu(old_tclient->ttl0_discard));
4287 for_each_queue(bp, i) {
4288 printk(KERN_DEBUG "[%d]: %lu\t%lu\t%lu\n", i,
4289 bnx2x_fp(bp, i, tx_pkt),
4290 bnx2x_fp(bp, i, rx_pkt),
4291 bnx2x_fp(bp, i, rx_calls));
4292 }
4293 }
4295 bnx2x_hw_stats_post(bp);
4296 bnx2x_storm_stats_post(bp);
4297 }
4299 static void bnx2x_port_stats_stop(struct bnx2x *bp)
4300 {
4301 struct dmae_command *dmae;
4302 u32 opcode;
4303 int loader_idx = PMF_DMAE_C(bp);
4304 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
4306 bp->executer_idx = 0;
4308 opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
4309 DMAE_CMD_C_ENABLE |
4310 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
4311 #ifdef __BIG_ENDIAN
4312 DMAE_CMD_ENDIANITY_B_DW_SWAP |
4313 #else
4314 DMAE_CMD_ENDIANITY_DW_SWAP |
4315 #endif
4316 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
4317 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
4319 if (bp->port.port_stx) {
4321 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4322 if (bp->func_stx)
4323 dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
4324 else
4325 dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
4326 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
4327 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
4328 dmae->dst_addr_lo = bp->port.port_stx >> 2;
4329 dmae->dst_addr_hi = 0;
4330 dmae->len = sizeof(struct host_port_stats) >> 2;
4331 if (bp->func_stx) {
4332 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
4333 dmae->comp_addr_hi = 0;
4334 dmae->comp_val = 1;
4335 } else {
4336 dmae->comp_addr_lo =
4337 U64_LO(bnx2x_sp_mapping(bp, stats_comp));
4338 dmae->comp_addr_hi =
4339 U64_HI(bnx2x_sp_mapping(bp, stats_comp));
4340 dmae->comp_val = DMAE_COMP_VAL;
4341 }
4342 }
4346 if (bp->func_stx) {
4348 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4349 dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
4350 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
4351 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
4352 dmae->dst_addr_lo = bp->func_stx >> 2;
4353 dmae->dst_addr_hi = 0;
4354 dmae->len = sizeof(struct host_func_stats) >> 2;
4355 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
4356 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
4357 dmae->comp_val = DMAE_COMP_VAL;
4358 }
4359 }
4363 static void bnx2x_stats_stop(struct bnx2x *bp)
4364 {
4365 int update = 0;
4367 bnx2x_stats_comp(bp);
4369 if (bp->port.pmf)
4370 update = (bnx2x_hw_stats_update(bp) == 0);
4372 update |= (bnx2x_storm_stats_update(bp) == 0);
4374 if (update) {
4375 bnx2x_net_stats_update(bp);
4377 if (bp->port.pmf)
4378 bnx2x_port_stats_stop(bp);
4380 bnx2x_hw_stats_post(bp);
4381 bnx2x_stats_comp(bp);
4382 }
4383 }
4385 static void bnx2x_stats_do_nothing(struct bnx2x *bp)
4386 {
4387 }
4389 static const struct {
4390 void (*action)(struct bnx2x *bp);
4391 enum bnx2x_stats_state next_state;
4392 } bnx2x_stats_stm[STATS_STATE_MAX][STATS_EVENT_MAX] = {
4393 /* state event */
4394 {
4395 /* DISABLED PMF */ {bnx2x_stats_pmf_update, STATS_STATE_DISABLED},
4396 /* LINK_UP */ {bnx2x_stats_start, STATS_STATE_ENABLED},
4397 /* UPDATE */ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED},
4398 /* STOP */ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED}
4399 },
4400 {
4401 /* ENABLED PMF */ {bnx2x_stats_pmf_start, STATS_STATE_ENABLED},
4402 /* LINK_UP */ {bnx2x_stats_restart, STATS_STATE_ENABLED},
4403 /* UPDATE */ {bnx2x_stats_update, STATS_STATE_ENABLED},
4404 /* STOP */ {bnx2x_stats_stop, STATS_STATE_DISABLED}
4405 }
4406 };
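/* This table is the whole statistics state machine: bnx2x_stats_handle()
 * below looks up (current state, event), runs the action and adopts
 * next_state.  E.g. STATS_EVENT_LINK_UP while DISABLED runs
 * bnx2x_stats_start and moves to ENABLED; a STOP while ENABLED flushes the
 * final numbers via bnx2x_stats_stop and returns to DISABLED.
 */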
4408 static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event)
4409 {
4410 enum bnx2x_stats_state state = bp->stats_state;
4412 bnx2x_stats_stm[state][event].action(bp);
4413 bp->stats_state = bnx2x_stats_stm[state][event].next_state;
4415 /* Make sure the state has been "changed" */
4416 smp_wmb();
4418 if ((event != STATS_EVENT_UPDATE) || (bp->msglevel & NETIF_MSG_TIMER))
4419 DP(BNX2X_MSG_STATS, "state %d -> event %d -> state %d\n",
4420 state, event, bp->stats_state);
4421 }
4423 static void bnx2x_port_stats_base_init(struct bnx2x *bp)
4424 {
4425 struct dmae_command *dmae;
4426 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
4428 /* sanity */
4429 if (!bp->port.pmf || !bp->port.port_stx) {
4430 BNX2X_ERR("BUG!\n");
4431 return;
4432 }
4434 bp->executer_idx = 0;
4436 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4437 dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
4438 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
4439 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
4441 DMAE_CMD_ENDIANITY_B_DW_SWAP |
4443 DMAE_CMD_ENDIANITY_DW_SWAP |
4445 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
4446 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
4447 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
4448 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
4449 dmae->dst_addr_lo = bp->port.port_stx >> 2;
4450 dmae->dst_addr_hi = 0;
4451 dmae->len = sizeof(struct host_port_stats) >> 2;
4452 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
4453 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
4454 dmae->comp_val = DMAE_COMP_VAL;
4457 bnx2x_hw_stats_post(bp);
4458 bnx2x_stats_comp(bp);
4459 }
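/* The DMAE pattern used above (and throughout the stats code): build a
 * dmae_command in the slowpath buffer, let bnx2x_hw_stats_post() hand it to
 * the copy engine, then spin in bnx2x_stats_comp() until the engine writes
 * DMAE_COMP_VAL to the stats_comp word named by comp_addr_hi/lo.
 */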
4461 static void bnx2x_func_stats_base_init(struct bnx2x *bp)
4462 {
4463 int vn, vn_max = IS_E1HMF(bp) ? E1HVN_MAX : E1VN_MAX;
4464 int port = BP_PORT(bp);
4465 int func;
4466 u32 func_stx;
4468 /* sanity */
4469 if (!bp->port.pmf || !bp->func_stx) {
4470 BNX2X_ERR("BUG!\n");
4471 return;
4472 }
4474 /* save our func_stx */
4475 func_stx = bp->func_stx;
4477 for (vn = VN_0; vn < vn_max; vn++) {
4478 func = 2*vn + port;
4480 bp->func_stx = SHMEM_RD(bp, func_mb[func].fw_mb_param);
4481 bnx2x_func_stats_init(bp);
4482 bnx2x_hw_stats_post(bp);
4483 bnx2x_stats_comp(bp);
4484 }
4486 /* restore our func_stx */
4487 bp->func_stx = func_stx;
4488 }
4490 static void bnx2x_func_stats_base_update(struct bnx2x *bp)
4491 {
4492 struct dmae_command *dmae = &bp->stats_dmae;
4493 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
4495 /* sanity */
4496 if (!bp->func_stx) {
4497 BNX2X_ERR("BUG!\n");
4498 return;
4499 }
4501 bp->executer_idx = 0;
4502 memset(dmae, 0, sizeof(struct dmae_command));
4504 dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
4505 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
4506 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
4508 DMAE_CMD_ENDIANITY_B_DW_SWAP |
4510 DMAE_CMD_ENDIANITY_DW_SWAP |
4512 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
4513 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
4514 dmae->src_addr_lo = bp->func_stx >> 2;
4515 dmae->src_addr_hi = 0;
4516 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats_base));
4517 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats_base));
4518 dmae->len = sizeof(struct host_func_stats) >> 2;
4519 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
4520 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
4521 dmae->comp_val = DMAE_COMP_VAL;
4524 bnx2x_hw_stats_post(bp);
4525 bnx2x_stats_comp(bp);
4526 }
4528 static void bnx2x_stats_init(struct bnx2x *bp)
4529 {
4530 int port = BP_PORT(bp);
4531 int func = BP_FUNC(bp);
4532 int i;
4534 bp->stats_pending = 0;
4535 bp->executer_idx = 0;
4536 bp->stats_counter = 0;
4538 /* port and func stats for management */
4539 if (!BP_NOMCP(bp)) {
4540 bp->port.port_stx = SHMEM_RD(bp, port_mb[port].port_stx);
4541 bp->func_stx = SHMEM_RD(bp, func_mb[func].fw_mb_param);
4543 } else {
4544 bp->port.port_stx = 0;
4545 bp->func_stx = 0;
4546 }
4547 DP(BNX2X_MSG_STATS, "port_stx 0x%x func_stx 0x%x\n",
4548 bp->port.port_stx, bp->func_stx);
4551 memset(&(bp->port.old_nig_stats), 0, sizeof(struct nig_stats));
4552 bp->port.old_nig_stats.brb_discard =
4553 REG_RD(bp, NIG_REG_STAT0_BRB_DISCARD + port*0x38);
4554 bp->port.old_nig_stats.brb_truncate =
4555 REG_RD(bp, NIG_REG_STAT0_BRB_TRUNCATE + port*0x38);
4556 REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT0 + port*0x50,
4557 &(bp->port.old_nig_stats.egress_mac_pkt0_lo), 2);
4558 REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT1 + port*0x50,
4559 &(bp->port.old_nig_stats.egress_mac_pkt1_lo), 2);
4561 /* function stats */
4562 for_each_queue(bp, i) {
4563 struct bnx2x_fastpath *fp = &bp->fp[i];
4565 memset(&fp->old_tclient, 0,
4566 sizeof(struct tstorm_per_client_stats));
4567 memset(&fp->old_uclient, 0,
4568 sizeof(struct ustorm_per_client_stats));
4569 memset(&fp->old_xclient, 0,
4570 sizeof(struct xstorm_per_client_stats));
4571 memset(&fp->eth_q_stats, 0, sizeof(struct bnx2x_eth_q_stats));
4572 }
4574 memset(&bp->dev->stats, 0, sizeof(struct net_device_stats));
4575 memset(&bp->eth_stats, 0, sizeof(struct bnx2x_eth_stats));
4577 bp->stats_state = STATS_STATE_DISABLED;
4579 if (bp->port.pmf) {
4580 if (bp->port.port_stx)
4581 bnx2x_port_stats_base_init(bp);
4583 if (bp->func_stx)
4584 bnx2x_func_stats_base_init(bp);
4586 } else if (bp->func_stx)
4587 bnx2x_func_stats_base_update(bp);
4588 }
4590 static void bnx2x_timer(unsigned long data)
4591 {
4592 struct bnx2x *bp = (struct bnx2x *) data;
4594 if (!netif_running(bp->dev))
4595 return;
4597 if (atomic_read(&bp->intr_sem) != 0)
4598 goto timer_restart;
4600 if (poll) {
4601 struct bnx2x_fastpath *fp = &bp->fp[0];
4602 int rc;
4604 bnx2x_tx_int(fp);
4605 rc = bnx2x_rx_int(fp, 1000);
4606 }
4608 if (!BP_NOMCP(bp)) {
4609 int func = BP_FUNC(bp);
4610 u32 drv_pulse;
4611 u32 mcp_pulse;
4613 ++bp->fw_drv_pulse_wr_seq;
4614 bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK;
4615 /* TBD - add SYSTEM_TIME */
4616 drv_pulse = bp->fw_drv_pulse_wr_seq;
4617 SHMEM_WR(bp, func_mb[func].drv_pulse_mb, drv_pulse);
4619 mcp_pulse = (SHMEM_RD(bp, func_mb[func].mcp_pulse_mb) &
4620 MCP_PULSE_SEQ_MASK);
4621 /* The delta between driver pulse and mcp response
4622 * should be 1 (before mcp response) or 0 (after mcp response)
4623 */
4624 if ((drv_pulse != mcp_pulse) &&
4625 (drv_pulse != ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK))) {
4626 /* someone lost a heartbeat... */
4627 BNX2X_ERR("drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
4628 drv_pulse, mcp_pulse);
4629 }
4630 }
4632 if ((bp->state == BNX2X_STATE_OPEN) ||
4633 (bp->state == BNX2X_STATE_DISABLED))
4634 bnx2x_stats_handle(bp, STATS_EVENT_UPDATE);
4636 timer_restart:
4637 mod_timer(&bp->timer, jiffies + bp->current_interval);
4638 }
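/* The pulse exchange above is a simple sequence-number heartbeat: the
 * driver bumps drv_pulse in shmem once per timer tick and the MCP firmware
 * echoes it back (modulo MCP_PULSE_SEQ_MASK), so a delta other than 0 or 1
 * means driver or management firmware stopped responding.
 */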
4640 /* end of Statistics */
4645 * nic init service functions
4648 static void bnx2x_zero_sb(struct bnx2x *bp, int sb_id)
4649 {
4650 int port = BP_PORT(bp);
4653 bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
4654 CSTORM_SB_HOST_STATUS_BLOCK_U_OFFSET(port, sb_id), 0,
4655 CSTORM_SB_STATUS_BLOCK_U_SIZE / 4);
4656 bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
4657 CSTORM_SB_HOST_STATUS_BLOCK_C_OFFSET(port, sb_id), 0,
4658 CSTORM_SB_STATUS_BLOCK_C_SIZE / 4);
4659 }
4661 static void bnx2x_init_sb(struct bnx2x *bp, struct host_status_block *sb,
4662 dma_addr_t mapping, int sb_id)
4663 {
4664 int port = BP_PORT(bp);
4665 int func = BP_FUNC(bp);
4666 int index;
4667 u64 section;
4669 /* USTORM */
4670 section = ((u64)mapping) + offsetof(struct host_status_block,
4671 u_status_block);
4672 sb->u_status_block.status_block_id = sb_id;
4674 REG_WR(bp, BAR_CSTRORM_INTMEM +
4675 CSTORM_SB_HOST_SB_ADDR_U_OFFSET(port, sb_id), U64_LO(section));
4676 REG_WR(bp, BAR_CSTRORM_INTMEM +
4677 ((CSTORM_SB_HOST_SB_ADDR_U_OFFSET(port, sb_id)) + 4),
4678 U64_HI(section));
4679 REG_WR8(bp, BAR_CSTRORM_INTMEM + FP_USB_FUNC_OFF +
4680 CSTORM_SB_HOST_STATUS_BLOCK_U_OFFSET(port, sb_id), func);
4682 for (index = 0; index < HC_USTORM_SB_NUM_INDICES; index++)
4683 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4684 CSTORM_SB_HC_DISABLE_U_OFFSET(port, sb_id, index), 1);
4686 /* CSTORM */
4687 section = ((u64)mapping) + offsetof(struct host_status_block,
4688 c_status_block);
4689 sb->c_status_block.status_block_id = sb_id;
4691 REG_WR(bp, BAR_CSTRORM_INTMEM +
4692 CSTORM_SB_HOST_SB_ADDR_C_OFFSET(port, sb_id), U64_LO(section));
4693 REG_WR(bp, BAR_CSTRORM_INTMEM +
4694 ((CSTORM_SB_HOST_SB_ADDR_C_OFFSET(port, sb_id)) + 4),
4695 U64_HI(section));
4696 REG_WR8(bp, BAR_CSTRORM_INTMEM + FP_CSB_FUNC_OFF +
4697 CSTORM_SB_HOST_STATUS_BLOCK_C_OFFSET(port, sb_id), func);
4699 for (index = 0; index < HC_CSTORM_SB_NUM_INDICES; index++)
4700 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4701 CSTORM_SB_HC_DISABLE_C_OFFSET(port, sb_id, index), 1);
4703 bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
4704 }
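/* Each fastpath status block has a USTORM half (rx indices) and a CSTORM
 * half (tx indices); both live in the same DMA buffer, are tagged with the
 * owning function, and have all HC indices disabled here until
 * bnx2x_update_coalesce() programs real timeouts for the indices in use.
 */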
4706 static void bnx2x_zero_def_sb(struct bnx2x *bp)
4708 int func = BP_FUNC(bp);
4710 bnx2x_init_fill(bp, TSEM_REG_FAST_MEMORY +
4711 TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4712 sizeof(struct tstorm_def_status_block)/4);
4713 bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
4714 CSTORM_DEF_SB_HOST_STATUS_BLOCK_U_OFFSET(func), 0,
4715 sizeof(struct cstorm_def_status_block_u)/4);
4716 bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
4717 CSTORM_DEF_SB_HOST_STATUS_BLOCK_C_OFFSET(func), 0,
4718 sizeof(struct cstorm_def_status_block_c)/4);
4719 bnx2x_init_fill(bp, XSEM_REG_FAST_MEMORY +
4720 XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4721 sizeof(struct xstorm_def_status_block)/4);
4724 static void bnx2x_init_def_sb(struct bnx2x *bp,
4725 struct host_def_status_block *def_sb,
4726 dma_addr_t mapping, int sb_id)
4727 {
4728 int port = BP_PORT(bp);
4729 int func = BP_FUNC(bp);
4730 int index, val, reg_offset;
4731 u64 section;
4733 /* ATTN */
4734 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4735 atten_status_block);
4736 def_sb->atten_status_block.status_block_id = sb_id;
4738 bp->attn_state = 0;
4740 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
4741 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
4743 for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
4744 bp->attn_group[index].sig[0] = REG_RD(bp,
4745 reg_offset + 0x10*index);
4746 bp->attn_group[index].sig[1] = REG_RD(bp,
4747 reg_offset + 0x4 + 0x10*index);
4748 bp->attn_group[index].sig[2] = REG_RD(bp,
4749 reg_offset + 0x8 + 0x10*index);
4750 bp->attn_group[index].sig[3] = REG_RD(bp,
4751 reg_offset + 0xc + 0x10*index);
4752 }
4754 reg_offset = (port ? HC_REG_ATTN_MSG1_ADDR_L :
4755 HC_REG_ATTN_MSG0_ADDR_L);
4757 REG_WR(bp, reg_offset, U64_LO(section));
4758 REG_WR(bp, reg_offset + 4, U64_HI(section));
4760 reg_offset = (port ? HC_REG_ATTN_NUM_P1 : HC_REG_ATTN_NUM_P0);
4762 val = REG_RD(bp, reg_offset);
4763 val |= sb_id;
4764 REG_WR(bp, reg_offset, val);
4767 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4768 u_def_status_block);
4769 def_sb->u_def_status_block.status_block_id = sb_id;
4771 REG_WR(bp, BAR_CSTRORM_INTMEM +
4772 CSTORM_DEF_SB_HOST_SB_ADDR_U_OFFSET(func), U64_LO(section));
4773 REG_WR(bp, BAR_CSTRORM_INTMEM +
4774 ((CSTORM_DEF_SB_HOST_SB_ADDR_U_OFFSET(func)) + 4),
4775 U64_HI(section));
4776 REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_USB_FUNC_OFF +
4777 CSTORM_DEF_SB_HOST_STATUS_BLOCK_U_OFFSET(func), func);
4779 for (index = 0; index < HC_USTORM_DEF_SB_NUM_INDICES; index++)
4780 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4781 CSTORM_DEF_SB_HC_DISABLE_U_OFFSET(func, index), 1);
4784 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4785 c_def_status_block);
4786 def_sb->c_def_status_block.status_block_id = sb_id;
4788 REG_WR(bp, BAR_CSTRORM_INTMEM +
4789 CSTORM_DEF_SB_HOST_SB_ADDR_C_OFFSET(func), U64_LO(section));
4790 REG_WR(bp, BAR_CSTRORM_INTMEM +
4791 ((CSTORM_DEF_SB_HOST_SB_ADDR_C_OFFSET(func)) + 4),
4792 U64_HI(section));
4793 REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_CSB_FUNC_OFF +
4794 CSTORM_DEF_SB_HOST_STATUS_BLOCK_C_OFFSET(func), func);
4796 for (index = 0; index < HC_CSTORM_DEF_SB_NUM_INDICES; index++)
4797 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4798 CSTORM_DEF_SB_HC_DISABLE_C_OFFSET(func, index), 1);
4801 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4802 t_def_status_block);
4803 def_sb->t_def_status_block.status_block_id = sb_id;
4805 REG_WR(bp, BAR_TSTRORM_INTMEM +
4806 TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
4807 REG_WR(bp, BAR_TSTRORM_INTMEM +
4808 ((TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
4809 U64_HI(section));
4810 REG_WR8(bp, BAR_TSTRORM_INTMEM + DEF_TSB_FUNC_OFF +
4811 TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4813 for (index = 0; index < HC_TSTORM_DEF_SB_NUM_INDICES; index++)
4814 REG_WR16(bp, BAR_TSTRORM_INTMEM +
4815 TSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
4818 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4819 x_def_status_block);
4820 def_sb->x_def_status_block.status_block_id = sb_id;
4822 REG_WR(bp, BAR_XSTRORM_INTMEM +
4823 XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
4824 REG_WR(bp, BAR_XSTRORM_INTMEM +
4825 ((XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
4826 U64_HI(section));
4827 REG_WR8(bp, BAR_XSTRORM_INTMEM + DEF_XSB_FUNC_OFF +
4828 XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4830 for (index = 0; index < HC_XSTORM_DEF_SB_NUM_INDICES; index++)
4831 REG_WR16(bp, BAR_XSTRORM_INTMEM +
4832 XSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
4834 bp->stats_pending = 0;
4835 bp->set_mac_pending = 0;
4837 bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
4838 }
4840 static void bnx2x_update_coalesce(struct bnx2x *bp)
4841 {
4842 int port = BP_PORT(bp);
4843 int i;
4845 for_each_queue(bp, i) {
4846 int sb_id = bp->fp[i].sb_id;
4848 /* HC_INDEX_U_ETH_RX_CQ_CONS */
4849 REG_WR8(bp, BAR_CSTRORM_INTMEM +
4850 CSTORM_SB_HC_TIMEOUT_U_OFFSET(port, sb_id,
4851 U_SB_ETH_RX_CQ_INDEX),
4852 bp->rx_ticks/12);
4853 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4854 CSTORM_SB_HC_DISABLE_U_OFFSET(port, sb_id,
4855 U_SB_ETH_RX_CQ_INDEX),
4856 (bp->rx_ticks/12) ? 0 : 1);
4858 /* HC_INDEX_C_ETH_TX_CQ_CONS */
4859 REG_WR8(bp, BAR_CSTRORM_INTMEM +
4860 CSTORM_SB_HC_TIMEOUT_C_OFFSET(port, sb_id,
4861 C_SB_ETH_TX_CQ_INDEX),
4862 bp->tx_ticks/12);
4863 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4864 CSTORM_SB_HC_DISABLE_C_OFFSET(port, sb_id,
4865 C_SB_ETH_TX_CQ_INDEX),
4866 (bp->tx_ticks/12) ? 0 : 1);
4867 }
4868 }
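/* The HC timeout registers count in 12-usec units, hence rx_ticks/12 and
 * tx_ticks/12 for the microsecond values set via ethtool -C; a quotient of
 * zero additionally sets the HC_DISABLE flag so that the index interrupts
 * on every completion instead of waiting out a timer.
 */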
4870 static inline void bnx2x_free_tpa_pool(struct bnx2x *bp,
4871 struct bnx2x_fastpath *fp, int last)
4872 {
4873 int i;
4875 for (i = 0; i < last; i++) {
4876 struct sw_rx_bd *rx_buf = &(fp->tpa_pool[i]);
4877 struct sk_buff *skb = rx_buf->skb;
4879 if (skb == NULL) {
4880 DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i);
4881 continue;
4882 }
4884 if (fp->tpa_state[i] == BNX2X_TPA_START)
4885 pci_unmap_single(bp->pdev,
4886 pci_unmap_addr(rx_buf, mapping),
4887 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
4889 dev_kfree_skb(skb);
4890 rx_buf->skb = NULL;
4891 }
4892 }
4894 static void bnx2x_init_rx_rings(struct bnx2x *bp)
4895 {
4896 int func = BP_FUNC(bp);
4897 int max_agg_queues = CHIP_IS_E1(bp) ? ETH_MAX_AGGREGATION_QUEUES_E1 :
4898 ETH_MAX_AGGREGATION_QUEUES_E1H;
4899 u16 ring_prod, cqe_ring_prod;
4900 int i, j;
4902 bp->rx_buf_size = bp->dev->mtu + ETH_OVREHEAD + BNX2X_RX_ALIGN;
4903 DP(NETIF_MSG_IFUP,
4904 "mtu %d rx_buf_size %d\n", bp->dev->mtu, bp->rx_buf_size);
4906 if (bp->flags & TPA_ENABLE_FLAG) {
4908 for_each_rx_queue(bp, j) {
4909 struct bnx2x_fastpath *fp = &bp->fp[j];
4911 for (i = 0; i < max_agg_queues; i++) {
4912 fp->tpa_pool[i].skb =
4913 netdev_alloc_skb(bp->dev, bp->rx_buf_size);
4914 if (!fp->tpa_pool[i].skb) {
4915 BNX2X_ERR("Failed to allocate TPA "
4916 "skb pool for queue[%d] - "
4917 "disabling TPA on this "
4919 bnx2x_free_tpa_pool(bp, fp, i);
4920 fp->disable_tpa = 1;
4923 pci_unmap_addr_set((struct sw_rx_bd *)
4924 &bp->fp->tpa_pool[i],
4926 fp->tpa_state[i] = BNX2X_TPA_STOP;
4931 for_each_rx_queue(bp, j) {
4932 struct bnx2x_fastpath *fp = &bp->fp[j];
4935 fp->rx_cons_sb = BNX2X_RX_SB_INDEX;
4936 fp->rx_bd_cons_sb = BNX2X_RX_SB_BD_INDEX;
4938 /* Mark queue as Rx */
4939 fp->is_rx_queue = 1;
4941 /* "next page" elements initialization */
4943 for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
4944 struct eth_rx_sge *sge;
4946 sge = &fp->rx_sge_ring[RX_SGE_CNT * i - 2];
4947 sge->addr_hi =
4948 cpu_to_le32(U64_HI(fp->rx_sge_mapping +
4949 BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
4950 sge->addr_lo =
4951 cpu_to_le32(U64_LO(fp->rx_sge_mapping +
4952 BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
4953 }
4955 bnx2x_init_sge_ring_bit_mask(fp);
4958 for (i = 1; i <= NUM_RX_RINGS; i++) {
4959 struct eth_rx_bd *rx_bd;
4961 rx_bd = &fp->rx_desc_ring[RX_DESC_CNT * i - 2];
4962 rx_bd->addr_hi =
4963 cpu_to_le32(U64_HI(fp->rx_desc_mapping +
4964 BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
4965 rx_bd->addr_lo =
4966 cpu_to_le32(U64_LO(fp->rx_desc_mapping +
4967 BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
4968 }
4971 for (i = 1; i <= NUM_RCQ_RINGS; i++) {
4972 struct eth_rx_cqe_next_page *nextpg;
4974 nextpg = (struct eth_rx_cqe_next_page *)
4975 &fp->rx_comp_ring[RCQ_DESC_CNT * i - 1];
4976 nextpg->addr_hi =
4977 cpu_to_le32(U64_HI(fp->rx_comp_mapping +
4978 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
4979 nextpg->addr_lo =
4980 cpu_to_le32(U64_LO(fp->rx_comp_mapping +
4981 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
4982 }
4984 /* Allocate SGEs and initialize the ring elements */
4985 for (i = 0, ring_prod = 0;
4986 i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {
4988 if (bnx2x_alloc_rx_sge(bp, fp, ring_prod) < 0) {
4989 BNX2X_ERR("was only able to allocate "
4991 BNX2X_ERR("disabling TPA for queue[%d]\n", j);
4992 /* Cleanup already allocated elements */
4993 bnx2x_free_rx_sge_range(bp, fp, ring_prod);
4994 bnx2x_free_tpa_pool(bp, fp, max_agg_queues);
4995 fp->disable_tpa = 1;
4999 ring_prod = NEXT_SGE_IDX(ring_prod);
5001 fp->rx_sge_prod = ring_prod;
5003 /* Allocate BDs and initialize BD ring */
5004 fp->rx_comp_cons = 0;
5005 cqe_ring_prod = ring_prod = 0;
5006 for (i = 0; i < bp->rx_ring_size; i++) {
5007 if (bnx2x_alloc_rx_skb(bp, fp, ring_prod) < 0) {
5008 BNX2X_ERR("was only able to allocate "
5009 "%d rx skbs on queue[%d]\n", i, j);
5010 fp->eth_q_stats.rx_skb_alloc_failed++;
5011 break;
5012 }
5013 ring_prod = NEXT_RX_IDX(ring_prod);
5014 cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
5015 WARN_ON(ring_prod <= i);
5018 fp->rx_bd_prod = ring_prod;
5019 /* must not have more available CQEs than BDs */
5020 fp->rx_comp_prod = min((u16)(NUM_RCQ_RINGS*RCQ_DESC_CNT),
5021 cqe_ring_prod);
5022 fp->rx_pkt = fp->rx_calls = 0;
5024 /* Warning!
5025 * this will generate an interrupt (to the TSTORM)
5026 * must only be done after chip is initialized
5027 */
5028 bnx2x_update_rx_prod(bp, fp, ring_prod, fp->rx_comp_prod,
5029 fp->rx_sge_prod);
5030 if (j != 0)
5031 continue;
5033 REG_WR(bp, BAR_USTRORM_INTMEM +
5034 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
5035 U64_LO(fp->rx_comp_mapping));
5036 REG_WR(bp, BAR_USTRORM_INTMEM +
5037 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
5038 U64_HI(fp->rx_comp_mapping));
5039 }
5040 }
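/* Ring layout recap: the rx BD, SGE and completion rings are page chains
 * whose last element(s) on every page point at the next page, which is why
 * the init loops above stop short of the page end (RX_DESC_CNT*i - 2, or
 * RCQ_DESC_CNT*i - 1 for the CQ), and bnx2x_update_rx_prod() hands the
 * initial producer values to the chip.
 */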
5042 static void bnx2x_init_tx_ring(struct bnx2x *bp)
5043 {
5044 int i, j;
5046 for_each_tx_queue(bp, j) {
5047 struct bnx2x_fastpath *fp = &bp->fp[j];
5049 for (i = 1; i <= NUM_TX_RINGS; i++) {
5050 struct eth_tx_next_bd *tx_next_bd =
5051 &fp->tx_desc_ring[TX_DESC_CNT * i - 1].next_bd;
5053 tx_next_bd->addr_hi =
5054 cpu_to_le32(U64_HI(fp->tx_desc_mapping +
5055 BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
5056 tx_next_bd->addr_lo =
5057 cpu_to_le32(U64_LO(fp->tx_desc_mapping +
5058 BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
5061 fp->tx_db.data.header.header = DOORBELL_HDR_DB_TYPE;
5062 fp->tx_db.data.zero_fill1 = 0;
5063 fp->tx_db.data.prod = 0;
5065 fp->tx_pkt_prod = 0;
5066 fp->tx_pkt_cons = 0;
5067 fp->tx_bd_prod = 0;
5068 fp->tx_bd_cons = 0;
5069 fp->tx_cons_sb = BNX2X_TX_SB_INDEX;
5071 }
5073 /* clean tx statistics */
5074 for_each_rx_queue(bp, i)
5075 bnx2x_fp(bp, i, tx_pkt) = 0;
5078 static void bnx2x_init_sp_ring(struct bnx2x *bp)
5079 {
5080 int func = BP_FUNC(bp);
5082 spin_lock_init(&bp->spq_lock);
5084 bp->spq_left = MAX_SPQ_PENDING;
5085 bp->spq_prod_idx = 0;
5086 bp->dsb_sp_prod = BNX2X_SP_DSB_INDEX;
5087 bp->spq_prod_bd = bp->spq;
5088 bp->spq_last_bd = bp->spq_prod_bd + MAX_SP_DESC_CNT;
5090 REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func),
5091 U64_LO(bp->spq_mapping));
5092 REG_WR(bp,
5093 XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func) + 4,
5094 U64_HI(bp->spq_mapping));
5096 REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PROD_OFFSET(func),
5097 bp->spq_prod_idx);
5098 }
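/* The slowpath queue (SPQ) is one page of ramrod descriptors: spq_prod_bd
 * walks the page and wraps at spq_last_bd, spq_left (MAX_SPQ_PENDING)
 * bounds the ramrods in flight, and the writes above tell the XSTORM where
 * the page lives and where the producer starts.
 */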
5100 static void bnx2x_init_context(struct bnx2x *bp)
5101 {
5102 int i;
5104 for_each_rx_queue(bp, i) {
5105 struct eth_context *context = bnx2x_sp(bp, context[i].eth);
5106 struct bnx2x_fastpath *fp = &bp->fp[i];
5107 u8 cl_id = fp->cl_id;
5109 context->ustorm_st_context.common.sb_index_numbers =
5110 BNX2X_RX_SB_INDEX_NUM;
5111 context->ustorm_st_context.common.clientId = cl_id;
5112 context->ustorm_st_context.common.status_block_id = fp->sb_id;
5113 context->ustorm_st_context.common.flags =
5114 (USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_MC_ALIGNMENT |
5115 USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_STATISTICS);
5116 context->ustorm_st_context.common.statistics_counter_id =
5117 cl_id;
5118 context->ustorm_st_context.common.mc_alignment_log_size =
5119 BNX2X_RX_ALIGN_SHIFT;
5120 context->ustorm_st_context.common.bd_buff_size =
5121 bp->rx_buf_size;
5122 context->ustorm_st_context.common.bd_page_base_hi =
5123 U64_HI(fp->rx_desc_mapping);
5124 context->ustorm_st_context.common.bd_page_base_lo =
5125 U64_LO(fp->rx_desc_mapping);
5126 if (!fp->disable_tpa) {
5127 context->ustorm_st_context.common.flags |=
5128 USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_TPA;
5129 context->ustorm_st_context.common.sge_buff_size =
5130 (u16)min((u32)SGE_PAGE_SIZE*PAGES_PER_SGE,
5131 (u32)0xffff);
5132 context->ustorm_st_context.common.sge_page_base_hi =
5133 U64_HI(fp->rx_sge_mapping);
5134 context->ustorm_st_context.common.sge_page_base_lo =
5135 U64_LO(fp->rx_sge_mapping);
5137 context->ustorm_st_context.common.max_sges_for_packet =
5138 SGE_PAGE_ALIGN(bp->dev->mtu) >> SGE_PAGE_SHIFT;
5139 context->ustorm_st_context.common.max_sges_for_packet =
5140 ((context->ustorm_st_context.common.
5141 max_sges_for_packet + PAGES_PER_SGE - 1) &
5142 (~(PAGES_PER_SGE - 1))) >> PAGES_PER_SGE_SHIFT;
5143 }
5145 context->ustorm_ag_context.cdu_usage =
5146 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
5147 CDU_REGION_NUMBER_UCM_AG,
5148 ETH_CONNECTION_TYPE);
5150 context->xstorm_ag_context.cdu_reserved =
5151 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
5152 CDU_REGION_NUMBER_XCM_AG,
5153 ETH_CONNECTION_TYPE);
5154 }
5156 for_each_tx_queue(bp, i) {
5157 struct bnx2x_fastpath *fp = &bp->fp[i];
5158 struct eth_context *context =
5159 bnx2x_sp(bp, context[i - bp->num_rx_queues].eth);
5161 context->cstorm_st_context.sb_index_number =
5162 C_SB_ETH_TX_CQ_INDEX;
5163 context->cstorm_st_context.status_block_id = fp->sb_id;
5165 context->xstorm_st_context.tx_bd_page_base_hi =
5166 U64_HI(fp->tx_desc_mapping);
5167 context->xstorm_st_context.tx_bd_page_base_lo =
5168 U64_LO(fp->tx_desc_mapping);
5169 context->xstorm_st_context.statistics_data = (fp->cl_id |
5170 XSTORM_ETH_ST_CONTEXT_STATISTICS_ENABLE);
5171 }
5172 }
5174 static void bnx2x_init_ind_table(struct bnx2x *bp)
5175 {
5176 int func = BP_FUNC(bp);
5177 int i;
5179 if (bp->multi_mode == ETH_RSS_MODE_DISABLED)
5180 return;
5182 DP(NETIF_MSG_IFUP,
5183 "Initializing indirection table multi_mode %d\n", bp->multi_mode);
5184 for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++)
5185 REG_WR8(bp, BAR_TSTRORM_INTMEM +
5186 TSTORM_INDIRECTION_TABLE_OFFSET(func) + i,
5187 bp->fp->cl_id + (i % bp->num_rx_queues));
5188 }
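/* RSS in this scheme is just the table written above: the TSTORM hashes a
 * flow, masks the hash with rss_result_mask (MULTI_MASK) to index the
 * indirection table, and the stored byte picks the client/queue; writing
 * cl_id + (i % num_rx_queues) round-robins the entries over all rx queues.
 */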
5190 static void bnx2x_set_client_config(struct bnx2x *bp)
5191 {
5192 struct tstorm_eth_client_config tstorm_client = {0};
5193 int port = BP_PORT(bp);
5194 int i;
5196 tstorm_client.mtu = bp->dev->mtu;
5196 tstorm_client.mtu = bp->dev->mtu;
5197 tstorm_client.config_flags =
5198 (TSTORM_ETH_CLIENT_CONFIG_STATSITICS_ENABLE |
5199 TSTORM_ETH_CLIENT_CONFIG_E1HOV_REM_ENABLE);
5200 #ifdef BCM_VLAN
5201 if (bp->rx_mode && bp->vlgrp && (bp->flags & HW_VLAN_RX_FLAG)) {
5202 tstorm_client.config_flags |=
5203 TSTORM_ETH_CLIENT_CONFIG_VLAN_REM_ENABLE;
5204 DP(NETIF_MSG_IFUP, "vlan removal enabled\n");
5205 }
5206 #endif
5208 for_each_queue(bp, i) {
5209 tstorm_client.statistics_counter_id = bp->fp[i].cl_id;
5211 REG_WR(bp, BAR_TSTRORM_INTMEM +
5212 TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id),
5213 ((u32 *)&tstorm_client)[0]);
5214 REG_WR(bp, BAR_TSTRORM_INTMEM +
5215 TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id) + 4,
5216 ((u32 *)&tstorm_client)[1]);
5217 }
5219 DP(BNX2X_MSG_OFF, "tstorm_client: 0x%08x 0x%08x\n",
5220 ((u32 *)&tstorm_client)[0], ((u32 *)&tstorm_client)[1]);
5221 }
5223 static void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
5224 {
5225 struct tstorm_eth_mac_filter_config tstorm_mac_filter = {0};
5226 int mode = bp->rx_mode;
5227 int mask = (1 << BP_L_ID(bp));
5228 int func = BP_FUNC(bp);
5229 int port = BP_PORT(bp);
5230 int i;
5231 /* All but management unicast packets should pass to the host as well */
5232 u32 llh_mask =
5233 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_BRCST |
5234 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_MLCST |
5235 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_VLAN |
5236 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_NO_VLAN;
5238 DP(NETIF_MSG_IFUP, "rx mode %d mask 0x%x\n", mode, mask);
5241 case BNX2X_RX_MODE_NONE: /* no Rx */
5242 tstorm_mac_filter.ucast_drop_all = mask;
5243 tstorm_mac_filter.mcast_drop_all = mask;
5244 tstorm_mac_filter.bcast_drop_all = mask;
5247 case BNX2X_RX_MODE_NORMAL:
5248 tstorm_mac_filter.bcast_accept_all = mask;
5249 break;
5251 case BNX2X_RX_MODE_ALLMULTI:
5252 tstorm_mac_filter.mcast_accept_all = mask;
5253 tstorm_mac_filter.bcast_accept_all = mask;
5254 break;
5256 case BNX2X_RX_MODE_PROMISC:
5257 tstorm_mac_filter.ucast_accept_all = mask;
5258 tstorm_mac_filter.mcast_accept_all = mask;
5259 tstorm_mac_filter.bcast_accept_all = mask;
5260 /* pass management unicast packets as well */
5261 llh_mask |= NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_UNCST;
5262 break;
5264 default:
5265 BNX2X_ERR("BAD rx mode (%d)\n", mode);
5266 break;
5267 }
5269 REG_WR(bp,
5270 (port ? NIG_REG_LLH1_BRB1_DRV_MASK : NIG_REG_LLH0_BRB1_DRV_MASK),
5271 llh_mask);
5273 for (i = 0; i < sizeof(struct tstorm_eth_mac_filter_config)/4; i++) {
5274 REG_WR(bp, BAR_TSTRORM_INTMEM +
5275 TSTORM_MAC_FILTER_CONFIG_OFFSET(func) + i * 4,
5276 ((u32 *)&tstorm_mac_filter)[i]);
5278 /* DP(NETIF_MSG_IFUP, "tstorm_mac_filter[%d]: 0x%08x\n", i,
5279 ((u32 *)&tstorm_mac_filter)[i]); */
5280 }
5282 if (mode != BNX2X_RX_MODE_NONE)
5283 bnx2x_set_client_config(bp);
5284 }
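/* Rx filtering is thus two cooperating levels: the per-function tstorm
 * drop_all/accept_all masks programmed above decide what the storms keep,
 * while the NIG LLH drv_mask decides which frame classes (bcast, mcast,
 * VLAN-tagged, untagged, plus ucast only in promisc) reach the BRB at all.
 */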
5286 static void bnx2x_init_internal_common(struct bnx2x *bp)
5287 {
5288 int i;
5290 /* Zero this manually as its initialization is
5291 currently missing in the initTool */
5292 for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++)
5293 REG_WR(bp, BAR_USTRORM_INTMEM +
5294 USTORM_AGG_DATA_OFFSET + i * 4, 0);
5295 }
5297 static void bnx2x_init_internal_port(struct bnx2x *bp)
5298 {
5299 int port = BP_PORT(bp);
5301 REG_WR(bp,
5302 BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_U_OFFSET(port), BNX2X_BTR);
5303 REG_WR(bp,
5304 BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_C_OFFSET(port), BNX2X_BTR);
5305 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
5306 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
5307 }
5309 static void bnx2x_init_internal_func(struct bnx2x *bp)
5310 {
5311 struct tstorm_eth_function_common_config tstorm_config = {0};
5312 struct stats_indication_flags stats_flags = {0};
5313 int port = BP_PORT(bp);
5314 int func = BP_FUNC(bp);
5315 int i, j;
5316 u32 offset;
5317 u16 max_agg_size;
5319 if (is_multi(bp)) {
5320 tstorm_config.config_flags = MULTI_FLAGS(bp);
5321 tstorm_config.rss_result_mask = MULTI_MASK;
5322 }
5324 /* Enable TPA if needed */
5325 if (bp->flags & TPA_ENABLE_FLAG)
5326 tstorm_config.config_flags |=
5327 TSTORM_ETH_FUNCTION_COMMON_CONFIG_ENABLE_TPA;
5329 if (IS_E1HMF(bp))
5330 tstorm_config.config_flags |=
5331 TSTORM_ETH_FUNCTION_COMMON_CONFIG_E1HOV_IN_CAM;
5333 tstorm_config.leading_client_id = BP_L_ID(bp);
5335 REG_WR(bp, BAR_TSTRORM_INTMEM +
5336 TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(func),
5337 (*(u32 *)&tstorm_config));
5339 bp->rx_mode = BNX2X_RX_MODE_NONE; /* no rx until link is up */
5340 bnx2x_set_storm_rx_mode(bp);
5342 for_each_queue(bp, i) {
5343 u8 cl_id = bp->fp[i].cl_id;
5345 /* reset xstorm per client statistics */
5346 offset = BAR_XSTRORM_INTMEM +
5347 XSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
5348 for (j = 0;
5349 j < sizeof(struct xstorm_per_client_stats) / 4; j++)
5350 REG_WR(bp, offset + j*4, 0);
5352 /* reset tstorm per client statistics */
5353 offset = BAR_TSTRORM_INTMEM +
5354 TSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
5355 for (j = 0;
5356 j < sizeof(struct tstorm_per_client_stats) / 4; j++)
5357 REG_WR(bp, offset + j*4, 0);
5359 /* reset ustorm per client statistics */
5360 offset = BAR_USTRORM_INTMEM +
5361 USTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
5362 for (j = 0;
5363 j < sizeof(struct ustorm_per_client_stats) / 4; j++)
5364 REG_WR(bp, offset + j*4, 0);
5365 }
5367 /* Init statistics related context */
5368 stats_flags.collect_eth = 1;
5370 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func),
5371 ((u32 *)&stats_flags)[0]);
5372 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func) + 4,
5373 ((u32 *)&stats_flags)[1]);
5375 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func),
5376 ((u32 *)&stats_flags)[0]);
5377 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func) + 4,
5378 ((u32 *)&stats_flags)[1]);
5380 REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(func),
5381 ((u32 *)&stats_flags)[0]);
5382 REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(func) + 4,
5383 ((u32 *)&stats_flags)[1]);
5385 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func),
5386 ((u32 *)&stats_flags)[0]);
5387 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func) + 4,
5388 ((u32 *)&stats_flags)[1]);
5390 REG_WR(bp, BAR_XSTRORM_INTMEM +
5391 XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
5392 U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
5393 REG_WR(bp, BAR_XSTRORM_INTMEM +
5394 XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
5395 U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
5397 REG_WR(bp, BAR_TSTRORM_INTMEM +
5398 TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
5399 U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
5400 REG_WR(bp, BAR_TSTRORM_INTMEM +
5401 TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
5402 U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
5404 REG_WR(bp, BAR_USTRORM_INTMEM +
5405 USTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
5406 U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
5407 REG_WR(bp, BAR_USTRORM_INTMEM +
5408 USTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
5409 U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
5411 if (CHIP_IS_E1H(bp)) {
5412 REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNCTION_MODE_OFFSET,
5413 IS_E1HMF(bp));
5414 REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNCTION_MODE_OFFSET,
5415 IS_E1HMF(bp));
5416 REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNCTION_MODE_OFFSET,
5417 IS_E1HMF(bp));
5418 REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNCTION_MODE_OFFSET,
5419 IS_E1HMF(bp));
5421 REG_WR16(bp, BAR_XSTRORM_INTMEM + XSTORM_E1HOV_OFFSET(func),
5422 bp->e1hov);
5423 }
5425 /* Init CQ ring mapping and aggregation size, the FW limit is 8 frags */
5426 max_agg_size =
5427 min((u32)(min((u32)8, (u32)MAX_SKB_FRAGS) *
5428 SGE_PAGE_SIZE * PAGES_PER_SGE),
5429 (u32)0xffff);
5430 for_each_rx_queue(bp, i) {
5431 struct bnx2x_fastpath *fp = &bp->fp[i];
5433 REG_WR(bp, BAR_USTRORM_INTMEM +
5434 USTORM_CQE_PAGE_BASE_OFFSET(port, fp->cl_id),
5435 U64_LO(fp->rx_comp_mapping));
5436 REG_WR(bp, BAR_USTRORM_INTMEM +
5437 USTORM_CQE_PAGE_BASE_OFFSET(port, fp->cl_id) + 4,
5438 U64_HI(fp->rx_comp_mapping));
5441 REG_WR(bp, BAR_USTRORM_INTMEM +
5442 USTORM_CQE_PAGE_NEXT_OFFSET(port, fp->cl_id),
5443 U64_LO(fp->rx_comp_mapping + BCM_PAGE_SIZE));
5444 REG_WR(bp, BAR_USTRORM_INTMEM +
5445 USTORM_CQE_PAGE_NEXT_OFFSET(port, fp->cl_id) + 4,
5446 U64_HI(fp->rx_comp_mapping + BCM_PAGE_SIZE));
5448 REG_WR16(bp, BAR_USTRORM_INTMEM +
5449 USTORM_MAX_AGG_SIZE_OFFSET(port, fp->cl_id),
5450 max_agg_size);
5451 }
5453 /* dropless flow control */
5454 if (CHIP_IS_E1H(bp)) {
5455 struct ustorm_eth_rx_pause_data_e1h rx_pause = {0};
5457 rx_pause.bd_thr_low = 250;
5458 rx_pause.cqe_thr_low = 250;
5460 rx_pause.sge_thr_low = 0;
5461 rx_pause.bd_thr_high = 350;
5462 rx_pause.cqe_thr_high = 350;
5463 rx_pause.sge_thr_high = 0;
5465 for_each_rx_queue(bp, i) {
5466 struct bnx2x_fastpath *fp = &bp->fp[i];
5468 if (!fp->disable_tpa) {
5469 rx_pause.sge_thr_low = 150;
5470 rx_pause.sge_thr_high = 250;
5471 }
5474 offset = BAR_USTRORM_INTMEM +
5475 USTORM_ETH_RING_PAUSE_DATA_OFFSET(port,
5476 fp->cl_id);
5477 for (j = 0;
5478 j < sizeof(struct ustorm_eth_rx_pause_data_e1h)/4;
5479 j++)
5480 REG_WR(bp, offset + j*4,
5481 ((u32 *)&rx_pause)[j]);
5482 }
5483 }
5485 memset(&(bp->cmng), 0, sizeof(struct cmng_struct_per_port));
5487 /* Init rate shaping and fairness contexts */
5488 if (IS_E1HMF(bp)) {
5489 int vn;
5491 /* During init there is no active link
5492 Until link is up, set link rate to 10Gbps */
5493 bp->link_vars.line_speed = SPEED_10000;
5494 bnx2x_init_port_minmax(bp);
5496 bnx2x_calc_vn_weight_sum(bp);
5498 for (vn = VN_0; vn < E1HVN_MAX; vn++)
5499 bnx2x_init_vn_minmax(bp, 2*vn + port);
5501 /* Enable rate shaping and fairness */
5502 bp->cmng.flags.cmng_enables =
5503 CMNG_FLAGS_PER_PORT_RATE_SHAPING_VN;
5504 if (bp->vn_weight_sum)
5505 bp->cmng.flags.cmng_enables |=
5506 CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
5507 else
5508 DP(NETIF_MSG_IFUP, "All MIN values are zeroes"
5509 " fairness will be disabled\n");
5510 } else {
5511 /* rate shaping and fairness are disabled */
5512 DP(NETIF_MSG_IFUP,
5513 "single function mode minmax will be disabled\n");
5514 }
5517 /* Store it to internal memory */
5518 if (bp->port.pmf)
5519 for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++)
5520 REG_WR(bp, BAR_XSTRORM_INTMEM +
5521 XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i * 4,
5522 ((u32 *)(&bp->cmng))[i]);
5523 }
5525 static void bnx2x_init_internal(struct bnx2x *bp, u32 load_code)
5526 {
5527 switch (load_code) {
5528 case FW_MSG_CODE_DRV_LOAD_COMMON:
5529 bnx2x_init_internal_common(bp);
5530 /* no break */
5532 case FW_MSG_CODE_DRV_LOAD_PORT:
5533 bnx2x_init_internal_port(bp);
5534 /* no break */
5536 case FW_MSG_CODE_DRV_LOAD_FUNCTION:
5537 bnx2x_init_internal_func(bp);
5538 break;
5540 default:
5541 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
5542 break;
5543 }
5544 }
5546 static void bnx2x_nic_init(struct bnx2x *bp, u32 load_code)
5547 {
5548 int i;
5550 for_each_queue(bp, i) {
5551 struct bnx2x_fastpath *fp = &bp->fp[i];
5554 fp->state = BNX2X_FP_STATE_CLOSED;
5556 fp->cl_id = BP_L_ID(bp) + i;
5557 fp->sb_id = fp->cl_id;
5558 /* Suitable Rx and Tx SBs are served by the same client */
5559 if (i >= bp->num_rx_queues)
5560 fp->cl_id -= bp->num_rx_queues;
5561 DP(NETIF_MSG_IFUP,
5562 "queue[%d]: bnx2x_init_sb(%p,%p) cl_id %d sb %d\n",
5563 i, bp, fp->status_blk, fp->cl_id, fp->sb_id);
5564 bnx2x_init_sb(bp, fp->status_blk, fp->status_blk_mapping,
5565 fp->sb_id);
5566 bnx2x_update_fpsb_idx(fp);
5567 }
5569 /* ensure status block indices were read */
5570 rmb();
5573 bnx2x_init_def_sb(bp, bp->def_status_blk, bp->def_status_blk_mapping,
5574 DEF_SB_ID);
5575 bnx2x_update_dsb_idx(bp);
5576 bnx2x_update_coalesce(bp);
5577 bnx2x_init_rx_rings(bp);
5578 bnx2x_init_tx_ring(bp);
5579 bnx2x_init_sp_ring(bp);
5580 bnx2x_init_context(bp);
5581 bnx2x_init_internal(bp, load_code);
5582 bnx2x_init_ind_table(bp);
5583 bnx2x_stats_init(bp);
5585 /* At this point, we are ready for interrupts */
5586 atomic_set(&bp->intr_sem, 0);
5588 /* flush all before enabling interrupts */
5589 mb();
5590 mmiowb();
5592 bnx2x_int_enable(bp);
5594 /* Check for SPIO5 */
5595 bnx2x_attn_int_deasserted0(bp,
5596 REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + BP_PORT(bp)*4) &
5597 AEU_INPUTS_ATTN_BITS_SPIO5);
5598 }
5600 /* end of nic init */
5603 * gzip service functions
5606 static int bnx2x_gunzip_init(struct bnx2x *bp)
5607 {
5608 bp->gunzip_buf = pci_alloc_consistent(bp->pdev, FW_BUF_SIZE,
5609 &bp->gunzip_mapping);
5610 if (bp->gunzip_buf == NULL)
5611 goto gunzip_nomem1;
5613 bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL);
5614 if (bp->strm == NULL)
5615 goto gunzip_nomem2;
5617 bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(),
5618 GFP_KERNEL);
5619 if (bp->strm->workspace == NULL)
5620 goto gunzip_nomem3;
5622 return 0;
5624 gunzip_nomem3:
5625 kfree(bp->strm);
5626 bp->strm = NULL;
5628 gunzip_nomem2:
5629 pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
5630 bp->gunzip_mapping);
5631 bp->gunzip_buf = NULL;
5633 gunzip_nomem1:
5634 printk(KERN_ERR PFX "%s: Cannot allocate firmware buffer for"
5635 " un-compression\n", bp->dev->name);
5636 return -ENOMEM;
5637 }
5639 static void bnx2x_gunzip_end(struct bnx2x *bp)
5640 {
5641 kfree(bp->strm->workspace);
5643 kfree(bp->strm);
5644 bp->strm = NULL;
5646 if (bp->gunzip_buf) {
5647 pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
5648 bp->gunzip_mapping);
5649 bp->gunzip_buf = NULL;
5650 }
5651 }
5653 static int bnx2x_gunzip(struct bnx2x *bp, const u8 *zbuf, int len)
5654 {
5655 int n, rc;
5657 /* check gzip header */
5658 if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED)) {
5659 BNX2X_ERR("Bad gzip header\n");
5660 return -EINVAL;
5661 }
5663 n = 10;
5665 #define FNAME 0x8
5667 if (zbuf[3] & FNAME)
5668 while ((zbuf[n++] != 0) && (n < len));
5670 bp->strm->next_in = (typeof(bp->strm->next_in))zbuf + n;
5671 bp->strm->avail_in = len - n;
5672 bp->strm->next_out = bp->gunzip_buf;
5673 bp->strm->avail_out = FW_BUF_SIZE;
5675 rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
5676 if (rc != Z_OK)
5677 return rc;
5679 rc = zlib_inflate(bp->strm, Z_FINISH);
5680 if ((rc != Z_OK) && (rc != Z_STREAM_END))
5681 printk(KERN_ERR PFX "%s: Firmware decompression error: %s\n",
5682 bp->dev->name, bp->strm->msg);
5684 bp->gunzip_outlen = (FW_BUF_SIZE - bp->strm->avail_out);
5685 if (bp->gunzip_outlen & 0x3)
5686 printk(KERN_ERR PFX "%s: Firmware decompression error:"
5687 " gunzip_outlen (%d) not aligned\n",
5688 bp->dev->name, bp->gunzip_outlen);
5689 bp->gunzip_outlen >>= 2;
5691 zlib_inflateEnd(bp->strm);
5693 if (rc == Z_STREAM_END)
5694 return 0;
5695 else
5696 return rc;
5697 }
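/* What the header check above relies on: a gzip stream begins with
 * 0x1f 0x8b <method>, where method 8 (Z_DEFLATED) is the only one zlib
 * inflates; byte 3 holds flags, and when FNAME (0x8) is set a
 * NUL-terminated file name precedes the deflate data, hence the skip loop
 * before inflateInit2 with -MAX_WBITS (raw deflate, no zlib wrapper).
 */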
5699 /* nic load/unload */
5702 * General service functions
5705 /* send a NIG loopback debug packet */
5706 static void bnx2x_lb_pckt(struct bnx2x *bp)
5707 {
5708 u32 wb_write[3];
5710 /* Ethernet source and destination addresses */
5711 wb_write[0] = 0x55555555;
5712 wb_write[1] = 0x55555555;
5713 wb_write[2] = 0x20; /* SOP */
5714 REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
5716 /* NON-IP protocol */
5717 wb_write[0] = 0x09000000;
5718 wb_write[1] = 0x55555555;
5719 wb_write[2] = 0x10; /* EOP, eop_bvalid = 0 */
5720 REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
5721 }
5723 /* some of the internal memories
5724 * are not directly readable from the driver
5725 * to test them we send debug packets
5727 static int bnx2x_int_mem_test(struct bnx2x *bp)
5728 {
5729 int factor;
5730 int count, i;
5731 u32 val = 0;
5733 if (CHIP_REV_IS_FPGA(bp))
5734 factor = 120;
5735 else if (CHIP_REV_IS_EMUL(bp))
5736 factor = 200;
5737 else
5738 factor = 1;
5740 DP(NETIF_MSG_HW, "start part1\n");
5742 /* Disable inputs of parser neighbor blocks */
5743 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
5744 REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
5745 REG_WR(bp, CFC_REG_DEBUG0, 0x1);
5746 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
5748 /* Write 0 to parser credits for CFC search request */
5749 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
5751 /* send Ethernet packet */
5752 bnx2x_lb_pckt(bp);
5754 /* TODO do i reset NIG statistic? */
5755 /* Wait until NIG register shows 1 packet of size 0x10 */
5756 count = 1000 * factor;
5757 while (count) {
5759 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5760 val = *bnx2x_sp(bp, wb_data[0]);
5761 if (val == 0x10)
5762 break;
5764 msleep(10);
5765 count--;
5766 }
5767 if (val != 0x10) {
5768 BNX2X_ERR("NIG timeout val = 0x%x\n", val);
5769 return -1;
5770 }
5772 /* Wait until PRS register shows 1 packet */
5773 count = 1000 * factor;
5774 while (count) {
5775 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5776 if (val == 1)
5777 break;
5779 msleep(10);
5780 count--;
5781 }
5782 if (val != 0x1) {
5783 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
5784 return -2;
5785 }
5787 /* Reset and init BRB, PRS */
5788 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
5789 msleep(50);
5790 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
5791 msleep(50);
5792 bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
5793 bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
5795 DP(NETIF_MSG_HW, "part2\n");
5797 /* Disable inputs of parser neighbor blocks */
5798 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
5799 REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
5800 REG_WR(bp, CFC_REG_DEBUG0, 0x1);
5801 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
5803 /* Write 0 to parser credits for CFC search request */
5804 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
5806 /* send 10 Ethernet packets */
5807 for (i = 0; i < 10; i++)
5808 bnx2x_lb_pckt(bp);
5810 /* Wait until NIG register shows 10 + 1
5811 packets of size 11*0x10 = 0xb0 */
5812 count = 1000 * factor;
5813 while (count) {
5815 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5816 val = *bnx2x_sp(bp, wb_data[0]);
5817 if (val == 0xb0)
5818 break;
5820 msleep(10);
5821 count--;
5822 }
5823 if (val != 0xb0) {
5824 BNX2X_ERR("NIG timeout val = 0x%x\n", val);
5825 return -3;
5826 }
5828 /* Wait until PRS register shows 2 packets */
5829 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5831 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
5833 /* Write 1 to parser credits for CFC search request */
5834 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x1);
5836 /* Wait until PRS register shows 3 packets */
5837 msleep(10 * factor);
5838 /* Wait until NIG register shows 1 packet of size 0x10 */
5839 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5841 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
5843 /* clear NIG EOP FIFO */
5844 for (i = 0; i < 11; i++)
5845 REG_RD(bp, NIG_REG_INGRESS_EOP_LB_FIFO);
5846 val = REG_RD(bp, NIG_REG_INGRESS_EOP_LB_EMPTY);
5847 if (val != 1) {
5848 BNX2X_ERR("clear of NIG failed\n");
5849 return -4;
5850 }
5852 /* Reset and init BRB, PRS, NIG */
5853 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
5854 msleep(50);
5855 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
5856 msleep(50);
5857 bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
5858 bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
5861 REG_WR(bp, PRS_REG_NIC_MODE, 1);
5864 /* Enable inputs of parser neighbor blocks */
5865 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x7fffffff);
5866 REG_WR(bp, TCM_REG_PRS_IFEN, 0x1);
5867 REG_WR(bp, CFC_REG_DEBUG0, 0x0);
5868 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x1);
5870 DP(NETIF_MSG_HW, "done\n");
5875 static void enable_blocks_attention(struct bnx2x *bp)
5876 {
5877 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
5878 REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0);
5879 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
5880 REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
5881 REG_WR(bp, QM_REG_QM_INT_MASK, 0);
5882 REG_WR(bp, TM_REG_TM_INT_MASK, 0);
5883 REG_WR(bp, XSDM_REG_XSDM_INT_MASK_0, 0);
5884 REG_WR(bp, XSDM_REG_XSDM_INT_MASK_1, 0);
5885 REG_WR(bp, XCM_REG_XCM_INT_MASK, 0);
5886 /* REG_WR(bp, XSEM_REG_XSEM_INT_MASK_0, 0); */
5887 /* REG_WR(bp, XSEM_REG_XSEM_INT_MASK_1, 0); */
5888 REG_WR(bp, USDM_REG_USDM_INT_MASK_0, 0);
5889 REG_WR(bp, USDM_REG_USDM_INT_MASK_1, 0);
5890 REG_WR(bp, UCM_REG_UCM_INT_MASK, 0);
5891 /* REG_WR(bp, USEM_REG_USEM_INT_MASK_0, 0); */
5892 /* REG_WR(bp, USEM_REG_USEM_INT_MASK_1, 0); */
5893 REG_WR(bp, GRCBASE_UPB + PB_REG_PB_INT_MASK, 0);
5894 REG_WR(bp, CSDM_REG_CSDM_INT_MASK_0, 0);
5895 REG_WR(bp, CSDM_REG_CSDM_INT_MASK_1, 0);
5896 REG_WR(bp, CCM_REG_CCM_INT_MASK, 0);
5897 /* REG_WR(bp, CSEM_REG_CSEM_INT_MASK_0, 0); */
5898 /* REG_WR(bp, CSEM_REG_CSEM_INT_MASK_1, 0); */
5899 if (CHIP_REV_IS_FPGA(bp))
5900 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x580000);
5902 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x480000);
5903 REG_WR(bp, TSDM_REG_TSDM_INT_MASK_0, 0);
5904 REG_WR(bp, TSDM_REG_TSDM_INT_MASK_1, 0);
5905 REG_WR(bp, TCM_REG_TCM_INT_MASK, 0);
5906 /* REG_WR(bp, TSEM_REG_TSEM_INT_MASK_0, 0); */
5907 /* REG_WR(bp, TSEM_REG_TSEM_INT_MASK_1, 0); */
5908 REG_WR(bp, CDU_REG_CDU_INT_MASK, 0);
5909 REG_WR(bp, DMAE_REG_DMAE_INT_MASK, 0);
5910 /* REG_WR(bp, MISC_REG_MISC_INT_MASK, 0); */
5911 REG_WR(bp, PBF_REG_PBF_INT_MASK, 0X18); /* bit 3,4 masked */
5912 }
5915 static void bnx2x_reset_common(struct bnx2x *bp)
5916 {
5917 /* reset_common */
5918 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
5919 0xd3ffff7f);
5920 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, 0x1403);
5921 }
5924 static void bnx2x_setup_fan_failure_detection(struct bnx2x *bp)
5925 {
5926 int is_required;
5927 u32 val;
5928 int port;
5930 val = SHMEM_RD(bp, dev_info.shared_hw_config.config2) &
5931 SHARED_HW_CFG_FAN_FAILURE_MASK;
5933 if (val == SHARED_HW_CFG_FAN_FAILURE_ENABLED)
5934 is_required = 1;
5936 /*
5937 * The fan failure mechanism is usually related to the PHY type since
5938 * the power consumption of the board is affected by the PHY. Currently,
5939 * fan is required for most designs with SFX7101, BCM8727 and BCM8481.
5940 */
5941 else if (val == SHARED_HW_CFG_FAN_FAILURE_PHY_TYPE)
5942 for (port = PORT_0; port < PORT_MAX; port++) {
5943 u32 phy_type =
5944 SHMEM_RD(bp, dev_info.port_hw_config[port].
5945 external_phy_config) &
5946 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
5947 is_required |=
5948 ((phy_type ==
5949 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101) ||
5950 (phy_type ==
5951 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727) ||
5952 (phy_type ==
5953 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481));
5954 }
5956 DP(NETIF_MSG_HW, "fan detection setting: %d\n", is_required);
5958 if (is_required == 0)
5959 return;
5961 /* Fan failure is indicated by SPIO 5 */
5962 bnx2x_set_spio(bp, MISC_REGISTERS_SPIO_5,
5963 MISC_REGISTERS_SPIO_INPUT_HI_Z);
5965 /* set to active low mode */
5966 val = REG_RD(bp, MISC_REG_SPIO_INT);
5967 val |= ((1 << MISC_REGISTERS_SPIO_5) <<
5968 MISC_REGISTERS_SPIO_INT_OLD_SET_POS);
5969 REG_WR(bp, MISC_REG_SPIO_INT, val);
5971 /* enable interrupt to signal the IGU */
5972 val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN);
5973 val |= (1 << MISC_REGISTERS_SPIO_5);
5974 REG_WR(bp, MISC_REG_SPIO_EVENT_EN, val);
5975 }
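/* Fan failure wiring on these boards: SPIO5 is left as a floating input,
 * latched active-low via MISC_REG_SPIO_INT and routed through the IGU by
 * MISC_REG_SPIO_EVENT_EN, so a failing fan raises the same attention that
 * bnx2x_attn_int_deasserted0() checks for during nic init.
 */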
5977 static int bnx2x_init_common(struct bnx2x *bp)
5978 {
5979 u32 val, i;
5981 DP(BNX2X_MSG_MCP, "starting common init func %d\n", BP_FUNC(bp));
5983 bnx2x_reset_common(bp);
5984 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0xffffffff);
5985 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, 0xfffc);
5987 bnx2x_init_block(bp, MISC_BLOCK, COMMON_STAGE);
5988 if (CHIP_IS_E1H(bp))
5989 REG_WR(bp, MISC_REG_E1HMF_MODE, IS_E1HMF(bp));
5991 REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x100);
5992 msleep(30);
5993 REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x0);
5995 bnx2x_init_block(bp, PXP_BLOCK, COMMON_STAGE);
5996 if (CHIP_IS_E1(bp)) {
5997 /* enable HW interrupt from PXP on USDM overflow
5998 bit 16 on INT_MASK_0 */
5999 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
6000 }
6002 bnx2x_init_block(bp, PXP2_BLOCK, COMMON_STAGE);
6004 #ifdef __BIG_ENDIAN
6006 REG_WR(bp, PXP2_REG_RQ_QM_ENDIAN_M, 1);
6007 REG_WR(bp, PXP2_REG_RQ_TM_ENDIAN_M, 1);
6008 REG_WR(bp, PXP2_REG_RQ_SRC_ENDIAN_M, 1);
6009 REG_WR(bp, PXP2_REG_RQ_CDU_ENDIAN_M, 1);
6010 REG_WR(bp, PXP2_REG_RQ_DBG_ENDIAN_M, 1);
6011 /* make sure this value is 0 */
6012 REG_WR(bp, PXP2_REG_RQ_HC_ENDIAN_M, 0);
6014 /* REG_WR(bp, PXP2_REG_RD_PBF_SWAP_MODE, 1); */
6015 REG_WR(bp, PXP2_REG_RD_QM_SWAP_MODE, 1);
6016 REG_WR(bp, PXP2_REG_RD_TM_SWAP_MODE, 1);
6017 REG_WR(bp, PXP2_REG_RD_SRC_SWAP_MODE, 1);
6018 REG_WR(bp, PXP2_REG_RD_CDURD_SWAP_MODE, 1);
6019 #endif
6021 REG_WR(bp, PXP2_REG_RQ_CDU_P_SIZE, 2);
6022 #ifdef BCM_ISCSI
6023 REG_WR(bp, PXP2_REG_RQ_TM_P_SIZE, 5);
6024 REG_WR(bp, PXP2_REG_RQ_QM_P_SIZE, 5);
6025 REG_WR(bp, PXP2_REG_RQ_SRC_P_SIZE, 5);
6026 #endif
6028 if (CHIP_REV_IS_FPGA(bp) && CHIP_IS_E1H(bp))
6029 REG_WR(bp, PXP2_REG_PGL_TAGS_LIMIT, 0x1);
6031 /* let the HW do it's magic ... */
6032 msleep(100);
6033 /* finish PXP init */
6034 val = REG_RD(bp, PXP2_REG_RQ_CFG_DONE);
6035 if (val != 1) {
6036 BNX2X_ERR("PXP2 CFG failed\n");
6037 return -EBUSY;
6038 }
6039 val = REG_RD(bp, PXP2_REG_RD_INIT_DONE);
6040 if (val != 1) {
6041 BNX2X_ERR("PXP2 RD_INIT failed\n");
6042 return -EBUSY;
6043 }
6045 REG_WR(bp, PXP2_REG_RQ_DISABLE_INPUTS, 0);
6046 REG_WR(bp, PXP2_REG_RD_DISABLE_INPUTS, 0);
6048 bnx2x_init_block(bp, DMAE_BLOCK, COMMON_STAGE);
6050 /* clean the DMAE memory */
6051 bp->dmae_ready = 1;
6052 bnx2x_init_fill(bp, TSEM_REG_PRAM, 0, 8);
6054 bnx2x_init_block(bp, TCM_BLOCK, COMMON_STAGE);
6055 bnx2x_init_block(bp, UCM_BLOCK, COMMON_STAGE);
6056 bnx2x_init_block(bp, CCM_BLOCK, COMMON_STAGE);
6057 bnx2x_init_block(bp, XCM_BLOCK, COMMON_STAGE);
6059 bnx2x_read_dmae(bp, XSEM_REG_PASSIVE_BUFFER, 3);
6060 bnx2x_read_dmae(bp, CSEM_REG_PASSIVE_BUFFER, 3);
6061 bnx2x_read_dmae(bp, TSEM_REG_PASSIVE_BUFFER, 3);
6062 bnx2x_read_dmae(bp, USEM_REG_PASSIVE_BUFFER, 3);
6064 bnx2x_init_block(bp, QM_BLOCK, COMMON_STAGE);
6065 /* soft reset pulse */
6066 REG_WR(bp, QM_REG_SOFT_RESET, 1);
6067 REG_WR(bp, QM_REG_SOFT_RESET, 0);
6069 #ifdef BCM_ISCSI
6070 bnx2x_init_block(bp, TIMERS_BLOCK, COMMON_STAGE);
6071 #endif
6073 bnx2x_init_block(bp, DQ_BLOCK, COMMON_STAGE);
6074 REG_WR(bp, DORQ_REG_DPM_CID_OFST, BCM_PAGE_SHIFT);
6075 if (!CHIP_REV_IS_SLOW(bp)) {
6076 /* enable hw interrupt from doorbell Q */
6077 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
6078 }
6080 bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
6081 bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
6082 REG_WR(bp, PRS_REG_A_PRSU_20, 0xf);
6083 /* set NIC mode */
6084 REG_WR(bp, PRS_REG_NIC_MODE, 1);
6085 if (CHIP_IS_E1H(bp))
6086 REG_WR(bp, PRS_REG_E1HOV_MODE, IS_E1HMF(bp));
6088 bnx2x_init_block(bp, TSDM_BLOCK, COMMON_STAGE);
6089 bnx2x_init_block(bp, CSDM_BLOCK, COMMON_STAGE);
6090 bnx2x_init_block(bp, USDM_BLOCK, COMMON_STAGE);
6091 bnx2x_init_block(bp, XSDM_BLOCK, COMMON_STAGE);
6093 bnx2x_init_fill(bp, TSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
6094 bnx2x_init_fill(bp, USEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
6095 bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
6096 bnx2x_init_fill(bp, XSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
6098 bnx2x_init_block(bp, TSEM_BLOCK, COMMON_STAGE);
6099 bnx2x_init_block(bp, USEM_BLOCK, COMMON_STAGE);
6100 bnx2x_init_block(bp, CSEM_BLOCK, COMMON_STAGE);
6101 bnx2x_init_block(bp, XSEM_BLOCK, COMMON_STAGE);
6103 /* sync semi rtc */
6104 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
6105 0x80000000);
6106 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
6107 0x80000000);
6109 bnx2x_init_block(bp, UPB_BLOCK, COMMON_STAGE);
6110 bnx2x_init_block(bp, XPB_BLOCK, COMMON_STAGE);
6111 bnx2x_init_block(bp, PBF_BLOCK, COMMON_STAGE);
6113 REG_WR(bp, SRC_REG_SOFT_RST, 1);
6114 for (i = SRC_REG_KEYRSS0_0; i <= SRC_REG_KEYRSS1_9; i += 4) {
6115 REG_WR(bp, i, 0xc0cac01a);
6116 /* TODO: replace with something meaningful */
6117 }
6118 bnx2x_init_block(bp, SRCH_BLOCK, COMMON_STAGE);
6119 REG_WR(bp, SRC_REG_SOFT_RST, 0);
6121 if (sizeof(union cdu_context) != 1024)
6122 /* we currently assume that a context is 1024 bytes */
6123 printk(KERN_ALERT PFX "please adjust the size of"
6124 " cdu_context(%ld)\n", (long)sizeof(union cdu_context));
6126 bnx2x_init_block(bp, CDU_BLOCK, COMMON_STAGE);
6127 val = (4 << 24) + (0 << 12) + 1024;
6128 REG_WR(bp, CDU_REG_CDU_GLOBAL_PARAMS, val);
6130 bnx2x_init_block(bp, CFC_BLOCK, COMMON_STAGE);
6131 REG_WR(bp, CFC_REG_INIT_REG, 0x7FF);
6132 /* enable context validation interrupt from CFC */
6133 REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
6135 /* set the thresholds to prevent CFC/CDU race */
6136 REG_WR(bp, CFC_REG_DEBUG0, 0x20020000);
6138 bnx2x_init_block(bp, HC_BLOCK, COMMON_STAGE);
6139 bnx2x_init_block(bp, MISC_AEU_BLOCK, COMMON_STAGE);
6141 bnx2x_init_block(bp, PXPCS_BLOCK, COMMON_STAGE);
6142 /* Reset PCIE errors for debug */
6143 REG_WR(bp, 0x2814, 0xffffffff);
6144 REG_WR(bp, 0x3820, 0xffffffff);
6146 bnx2x_init_block(bp, EMAC0_BLOCK, COMMON_STAGE);
6147 bnx2x_init_block(bp, EMAC1_BLOCK, COMMON_STAGE);
6148 bnx2x_init_block(bp, DBU_BLOCK, COMMON_STAGE);
6149 bnx2x_init_block(bp, DBG_BLOCK, COMMON_STAGE);
6151 bnx2x_init_block(bp, NIG_BLOCK, COMMON_STAGE);
6152 if (CHIP_IS_E1H(bp)) {
6153 REG_WR(bp, NIG_REG_LLH_MF_MODE, IS_E1HMF(bp));
6154 REG_WR(bp, NIG_REG_LLH_E1HOV_MODE, IS_E1HMF(bp));
6157 if (CHIP_REV_IS_SLOW(bp))
6158 msleep(200);
6160 /* finish CFC init */
6161 val = reg_poll(bp, CFC_REG_LL_INIT_DONE, 1, 100, 10);
6162 if (val != 1) {
6163 BNX2X_ERR("CFC LL_INIT failed\n");
6164 return -EBUSY;
6165 }
6166 val = reg_poll(bp, CFC_REG_AC_INIT_DONE, 1, 100, 10);
6167 if (val != 1) {
6168 BNX2X_ERR("CFC AC_INIT failed\n");
6169 return -EBUSY;
6170 }
6171 val = reg_poll(bp, CFC_REG_CAM_INIT_DONE, 1, 100, 10);
6172 if (val != 1) {
6173 BNX2X_ERR("CFC CAM_INIT failed\n");
6174 return -EBUSY;
6175 }
6176 REG_WR(bp, CFC_REG_DEBUG0, 0);
6178 /* read NIG statistic
6179 to see if this is our first up since powerup */
6180 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
6181 val = *bnx2x_sp(bp, wb_data[0]);
6183 /* do internal memory self test */
6184 if ((CHIP_IS_E1(bp)) && (val == 0) && bnx2x_int_mem_test(bp)) {
6185 BNX2X_ERR("internal mem self test failed\n");
6189 switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
6190 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
6191 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
6192 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
6193 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
6194 bp->port.need_hw_lock = 1;
6195 break;
6197 default:
6198 break;
6199 }
6201 bnx2x_setup_fan_failure_detection(bp);
6203 /* clear PXP2 attentions */
6204 REG_RD(bp, PXP2_REG_PXP2_INT_STS_CLR_0);
6206 enable_blocks_attention(bp);
6208 if (!BP_NOMCP(bp)) {
6209 bnx2x_acquire_phy_lock(bp);
6210 bnx2x_common_init_phy(bp, bp->common.shmem_base);
6211 bnx2x_release_phy_lock(bp);
6213 BNX2X_ERR("Bootcode is missing - can not initialize link\n");
6218 static int bnx2x_init_port(struct bnx2x *bp)
6219 {
6220 int port = BP_PORT(bp);
6221 int init_stage = port ? PORT1_STAGE : PORT0_STAGE;
6222 u32 low, high;
6223 u32 val;
6225 DP(BNX2X_MSG_MCP, "starting port init port %x\n", port);
6227 REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
6229 bnx2x_init_block(bp, PXP_BLOCK, init_stage);
6230 bnx2x_init_block(bp, PXP2_BLOCK, init_stage);
6232 bnx2x_init_block(bp, TCM_BLOCK, init_stage);
6233 bnx2x_init_block(bp, UCM_BLOCK, init_stage);
6234 bnx2x_init_block(bp, CCM_BLOCK, init_stage);
6235 #ifdef BCM_ISCSI
6239 wb_write[0] = ONCHIP_ADDR1(bp->timers_mapping);
6240 wb_write[1] = ONCHIP_ADDR2(bp->timers_mapping);
6241 REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
6242 REG_WR(bp, PXP2_REG_PSWRQ_TM0_L2P + func*4, PXP_ONE_ILT(i));
6247 wb_write[0] = ONCHIP_ADDR1(bp->qm_mapping);
6248 wb_write[1] = ONCHIP_ADDR2(bp->qm_mapping);
6249 REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
6250 REG_WR(bp, PXP2_REG_PSWRQ_QM0_L2P + func*4, PXP_ONE_ILT(i));
6255 wb_write[0] = ONCHIP_ADDR1(bp->t1_mapping);
6256 wb_write[1] = ONCHIP_ADDR2(bp->t1_mapping);
6257 REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
6258 REG_WR(bp, PXP2_REG_PSWRQ_SRC0_L2P + func*4, PXP_ONE_ILT(i));
6259 #endif
6260 bnx2x_init_block(bp, XCM_BLOCK, init_stage);
6263 REG_WR(bp, TM_REG_LIN0_SCAN_TIME + func*4, 1024/64*20);
6264 REG_WR(bp, TM_REG_LIN0_MAX_ACTIVE_CID + func*4, 31);
6266 bnx2x_init_block(bp, TIMERS_BLOCK, init_stage);
6268 bnx2x_init_block(bp, DQ_BLOCK, init_stage);
6270 bnx2x_init_block(bp, BRB1_BLOCK, init_stage);
	if (CHIP_REV_IS_SLOW(bp) && !CHIP_IS_E1H(bp)) {
		/* no pause for emulation and FPGA */
		low = 0;
		high = 513;
	} else {
		if (IS_E1HMF(bp))
			low = ((bp->flags & ONE_PORT_FLAG) ? 160 : 246);
		else if (bp->dev->mtu > 4096) {
			if (bp->flags & ONE_PORT_FLAG)
				low = 160;
			else {
				val = bp->dev->mtu;
				/* (24*1024 + val*4)/256 */
				low = 96 + (val/64) + ((val % 64) ? 1 : 0);
			}
		} else
			low = ((bp->flags & ONE_PORT_FLAG) ? 80 : 160);
		high = low + 56;	/* 14*1024/256 */
	}
	REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_0 + port*4, low);
	REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_0 + port*4, high);
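	/* per the formulas above, the thresholds are counted in 256-byte
	   BRB blocks: e.g. for MTU 9000 on a two-port device,
	   low = 96 + 9000/64 + 1 = 237 blocks and high = 237 + 56 = 293 */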
6294 bnx2x_init_block(bp, PRS_BLOCK, init_stage);
6296 bnx2x_init_block(bp, TSDM_BLOCK, init_stage);
6297 bnx2x_init_block(bp, CSDM_BLOCK, init_stage);
6298 bnx2x_init_block(bp, USDM_BLOCK, init_stage);
6299 bnx2x_init_block(bp, XSDM_BLOCK, init_stage);
6301 bnx2x_init_block(bp, TSEM_BLOCK, init_stage);
6302 bnx2x_init_block(bp, USEM_BLOCK, init_stage);
6303 bnx2x_init_block(bp, CSEM_BLOCK, init_stage);
6304 bnx2x_init_block(bp, XSEM_BLOCK, init_stage);
6306 bnx2x_init_block(bp, UPB_BLOCK, init_stage);
6307 bnx2x_init_block(bp, XPB_BLOCK, init_stage);
6309 bnx2x_init_block(bp, PBF_BLOCK, init_stage);
	/* configure PBF to work without PAUSE (MTU 9000) */
	REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 0);

	/* update threshold */
	REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, (9040/16));
	/* update init credit */
	REG_WR(bp, PBF_REG_P0_INIT_CRD + port*4, (9040/16) + 553 - 22);

	/* probe changes */
	REG_WR(bp, PBF_REG_INIT_P0 + port*4, 1);
	msleep(5);
	REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0);
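	/* the threshold and credit above are in 16-byte units (9040/16 =
	   565, enough for a 9000-byte frame plus header room); the
	   "+ 553 - 22" credit adjustment is inherited tuning and is not
	   derived here */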
6325 /* tell the searcher where the T2 table is */
6326 REG_WR(bp, SRC_REG_COUNTFREE0 + func*4, 16*1024/64);
6328 wb_write[0] = U64_LO(bp->t2_mapping);
6329 wb_write[1] = U64_HI(bp->t2_mapping);
6330 REG_WR_DMAE(bp, SRC_REG_FIRSTFREE0 + func*4, wb_write, 2);
6331 wb_write[0] = U64_LO((u64)bp->t2_mapping + 16*1024 - 64);
6332 wb_write[1] = U64_HI((u64)bp->t2_mapping + 16*1024 - 64);
6333 REG_WR_DMAE(bp, SRC_REG_LASTFREE0 + func*4, wb_write, 2);
6335 REG_WR(bp, SRC_REG_NUMBER_HASH_BITS0 + func*4, 10);
6337 bnx2x_init_block(bp, CDU_BLOCK, init_stage);
6338 bnx2x_init_block(bp, CFC_BLOCK, init_stage);
6340 if (CHIP_IS_E1(bp)) {
6341 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
		REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
	}
6344 bnx2x_init_block(bp, HC_BLOCK, init_stage);
6346 bnx2x_init_block(bp, MISC_AEU_BLOCK, init_stage);
6347 /* init aeu_mask_attn_func_0/1:
6348 * - SF mode: bits 3-7 are masked. only bits 0-2 are in use
6349 * - MF mode: bit 3 is masked. bits 0-2 are in use as in SF
6350 * bits 4-7 are used for "per vn group attention" */
6351 REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4,
6352 (IS_E1HMF(bp) ? 0xF7 : 0x7));
6354 bnx2x_init_block(bp, PXPCS_BLOCK, init_stage);
6355 bnx2x_init_block(bp, EMAC0_BLOCK, init_stage);
6356 bnx2x_init_block(bp, EMAC1_BLOCK, init_stage);
6357 bnx2x_init_block(bp, DBU_BLOCK, init_stage);
6358 bnx2x_init_block(bp, DBG_BLOCK, init_stage);
6360 bnx2x_init_block(bp, NIG_BLOCK, init_stage);
6362 REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1);
6364 if (CHIP_IS_E1H(bp)) {
6365 /* 0x2 disable e1hov, 0x1 enable */
6366 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK_MF + port*4,
6367 (IS_E1HMF(bp) ? 0x1 : 0x2));
6370 REG_WR(bp, NIG_REG_LLFC_ENABLE_0 + port*4, 0);
6371 REG_WR(bp, NIG_REG_LLFC_OUT_EN_0 + port*4, 0);
6372 REG_WR(bp, NIG_REG_PAUSE_ENABLE_0 + port*4, 1);
6376 bnx2x_init_block(bp, MCP_BLOCK, init_stage);
6377 bnx2x_init_block(bp, DMAE_BLOCK, init_stage);
6379 switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
		{
		u32 swap_val, swap_override, aeu_gpio_mask, offset;
6384 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_3,
6385 MISC_REGISTERS_GPIO_INPUT_HI_Z, port);
		/* The GPIO should be swapped if the swap register is
		   set and active */
6389 swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
6390 swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
		/* Select function upon port-swap configuration */
		if (port == 0) {
			offset = MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0;
			aeu_gpio_mask = (swap_val && swap_override) ?
				AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1 :
				AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0;
		} else {
			offset = MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0;
			aeu_gpio_mask = (swap_val && swap_override) ?
				AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 :
				AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1;
		}
6404 val = REG_RD(bp, offset);
6405 /* add GPIO3 to group */
6406 val |= aeu_gpio_mask;
		REG_WR(bp, offset, val);
		}
		break;
6411 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
6412 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
		/* add SPIO 5 to group 0 */
		{
		u32 reg_addr = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
				       MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
6417 val = REG_RD(bp, reg_addr);
6418 val |= AEU_INPUTS_ATTN_BITS_SPIO5;
		REG_WR(bp, reg_addr, val);
		}
		break;

	default:
		break;
	}
	bnx2x__link_reset(bp);

	return 0;
}
6432 #define ILT_PER_FUNC (768/2)
6433 #define FUNC_ILT_BASE(func) (func * ILT_PER_FUNC)
/* the phys address is shifted right 12 bits and a 1=valid bit is
   added at the 53rd bit;
   then, since this is a wide register(TM),
   we split it into two 32 bit writes
 */
6439 #define ONCHIP_ADDR1(x) ((u32)(((u64)x >> 12) & 0xFFFFFFFF))
6440 #define ONCHIP_ADDR2(x) ((u32)((1 << 20) | ((u64)x >> 44)))
6441 #define PXP_ONE_ILT(x) (((x) << 10) | x)
6442 #define PXP_ILT_RANGE(f, l) (((l) << 10) | f)
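/* illustrative expansion (example values, not from the original sources):
 * with x = 0x00123456789000, ONCHIP_ADDR1(x) = 0x23456789 (address bits
 * 12..43) and ONCHIP_ADDR2(x) = 0x100001 (valid bit 20 | address bits 44+);
 * PXP_ONE_ILT(5) = (5 << 10) | 5 = 0x1405, i.e. first == last == line 5.
 */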
6444 #define CNIC_ILT_LINES 0
static void bnx2x_ilt_wr(struct bnx2x *bp, u32 index, dma_addr_t addr)
{
	int reg;

	if (CHIP_IS_E1H(bp))
		reg = PXP2_REG_RQ_ONCHIP_AT_B0 + index*8;
	else /* E1 */
		reg = PXP2_REG_RQ_ONCHIP_AT + index*8;

	bnx2x_wb_wr(bp, reg, ONCHIP_ADDR1(addr), ONCHIP_ADDR2(addr));
}
static int bnx2x_init_func(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int func = BP_FUNC(bp);
	u32 addr, val;
	int i;

	DP(BNX2X_MSG_MCP, "starting func init func %x\n", func);
6467 /* set MSI reconfigure capability */
6468 addr = (port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0);
6469 val = REG_RD(bp, addr);
6470 val |= HC_CONFIG_0_REG_MSI_ATTN_EN_0;
6471 REG_WR(bp, addr, val);
6473 i = FUNC_ILT_BASE(func);
6475 bnx2x_ilt_wr(bp, i, bnx2x_sp_mapping(bp, context));
6476 if (CHIP_IS_E1H(bp)) {
6477 REG_WR(bp, PXP2_REG_RQ_CDU_FIRST_ILT, i);
6478 REG_WR(bp, PXP2_REG_RQ_CDU_LAST_ILT, i + CNIC_ILT_LINES);
6480 REG_WR(bp, PXP2_REG_PSWRQ_CDU0_L2P + func*4,
6481 PXP_ILT_RANGE(i, i + CNIC_ILT_LINES));
6484 if (CHIP_IS_E1H(bp)) {
6485 for (i = 0; i < 9; i++)
6486 bnx2x_init_block(bp,
6487 cm_blocks[i], FUNC0_STAGE + func);
6489 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
		REG_WR(bp, NIG_REG_LLH0_FUNC_VLAN_ID + port*8, bp->e1hov);
	}
6493 /* HC init per function */
6494 if (CHIP_IS_E1H(bp)) {
6495 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
6497 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
		REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
	}
6500 bnx2x_init_block(bp, HC_BLOCK, FUNC0_STAGE + func);
6502 /* Reset PCIE errors for debug */
6503 REG_WR(bp, 0x2114, 0xffffffff);
	REG_WR(bp, 0x2120, 0xffffffff);

	return 0;
}
static int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
{
	int i, rc = 0;

	DP(BNX2X_MSG_MCP, "function %d load_code %x\n",
	   BP_FUNC(bp), load_code);

	bp->dmae_ready = 0;
	mutex_init(&bp->dmae_mutex);
	rc = bnx2x_gunzip_init(bp);
	if (rc)
		return rc;
	switch (load_code) {
	case FW_MSG_CODE_DRV_LOAD_COMMON:
		rc = bnx2x_init_common(bp);
		if (rc)
			goto init_hw_err;
		/* no break */

	case FW_MSG_CODE_DRV_LOAD_PORT:
		bp->dmae_ready = 1;
		rc = bnx2x_init_port(bp);
		if (rc)
			goto init_hw_err;
		/* no break */

	case FW_MSG_CODE_DRV_LOAD_FUNCTION:
		bp->dmae_ready = 1;
		rc = bnx2x_init_func(bp);
		if (rc)
			goto init_hw_err;
		break;

	default:
		BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
		break;
	}
6548 if (!BP_NOMCP(bp)) {
6549 int func = BP_FUNC(bp);
6551 bp->fw_drv_pulse_wr_seq =
6552 (SHMEM_RD(bp, func_mb[func].drv_pulse_mb) &
6553 DRV_PULSE_SEQ_MASK);
		DP(BNX2X_MSG_MCP, "drv_pulse 0x%x\n", bp->fw_drv_pulse_wr_seq);
	}
6557 /* this needs to be done before gunzip end */
6558 bnx2x_zero_def_sb(bp);
6559 for_each_queue(bp, i)
6560 bnx2x_zero_sb(bp, BP_L_ID(bp) + i);
init_hw_err:
	bnx2x_gunzip_end(bp);

	return rc;
}
static void bnx2x_free_mem(struct bnx2x *bp)
{

#define BNX2X_PCI_FREE(x, y, size) \
	do { \
		if (x) { \
			pci_free_consistent(bp->pdev, size, x, y); \
			x = NULL; \
			y = 0; \
		} \
	} while (0)

#define BNX2X_FREE(x) \
	do { \
		if (x) { \
			vfree(x); \
			x = NULL; \
		} \
	} while (0)

	int i;

	/* fastpath */
	/* Common */
6592 for_each_queue(bp, i) {
6595 BNX2X_PCI_FREE(bnx2x_fp(bp, i, status_blk),
6596 bnx2x_fp(bp, i, status_blk_mapping),
6597 sizeof(struct host_status_block));
6600 for_each_rx_queue(bp, i) {
6602 /* fastpath rx rings: rx_buf rx_desc rx_comp */
6603 BNX2X_FREE(bnx2x_fp(bp, i, rx_buf_ring));
6604 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_desc_ring),
6605 bnx2x_fp(bp, i, rx_desc_mapping),
6606 sizeof(struct eth_rx_bd) * NUM_RX_BD);
6608 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_comp_ring),
6609 bnx2x_fp(bp, i, rx_comp_mapping),
6610 sizeof(struct eth_fast_path_rx_cqe) *
6614 BNX2X_FREE(bnx2x_fp(bp, i, rx_page_ring));
6615 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_sge_ring),
6616 bnx2x_fp(bp, i, rx_sge_mapping),
6617 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
6620 for_each_tx_queue(bp, i) {
6622 /* fastpath tx rings: tx_buf tx_desc */
6623 BNX2X_FREE(bnx2x_fp(bp, i, tx_buf_ring));
6624 BNX2X_PCI_FREE(bnx2x_fp(bp, i, tx_desc_ring),
6625 bnx2x_fp(bp, i, tx_desc_mapping),
6626 sizeof(union eth_tx_bd_types) * NUM_TX_BD);
6628 /* end of fastpath */
6630 BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping,
6631 sizeof(struct host_def_status_block));
6633 BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping,
6634 sizeof(struct bnx2x_slowpath));
6637 BNX2X_PCI_FREE(bp->t1, bp->t1_mapping, 64*1024);
6638 BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, 16*1024);
6639 BNX2X_PCI_FREE(bp->timers, bp->timers_mapping, 8*1024);
6640 BNX2X_PCI_FREE(bp->qm, bp->qm_mapping, 128*1024);
6642 BNX2X_PCI_FREE(bp->spq, bp->spq_mapping, BCM_PAGE_SIZE);
6644 #undef BNX2X_PCI_FREE
static int bnx2x_alloc_mem(struct bnx2x *bp)
{

#define BNX2X_PCI_ALLOC(x, y, size) \
	do { \
		x = pci_alloc_consistent(bp->pdev, size, y); \
		if (x == NULL) \
			goto alloc_mem_err; \
		memset(x, 0, size); \
	} while (0)

#define BNX2X_ALLOC(x, size) \
	do { \
		x = vmalloc(size); \
		if (x == NULL) \
			goto alloc_mem_err; \
		memset(x, 0, size); \
	} while (0)

	int i;

	/* fastpath */
	/* Common */
6671 for_each_queue(bp, i) {
6672 bnx2x_fp(bp, i, bp) = bp;
6675 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, status_blk),
6676 &bnx2x_fp(bp, i, status_blk_mapping),
6677 sizeof(struct host_status_block));
6680 for_each_rx_queue(bp, i) {
6682 /* fastpath rx rings: rx_buf rx_desc rx_comp */
6683 BNX2X_ALLOC(bnx2x_fp(bp, i, rx_buf_ring),
6684 sizeof(struct sw_rx_bd) * NUM_RX_BD);
6685 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_desc_ring),
6686 &bnx2x_fp(bp, i, rx_desc_mapping),
6687 sizeof(struct eth_rx_bd) * NUM_RX_BD);
6689 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_comp_ring),
6690 &bnx2x_fp(bp, i, rx_comp_mapping),
6691 sizeof(struct eth_fast_path_rx_cqe) *
6695 BNX2X_ALLOC(bnx2x_fp(bp, i, rx_page_ring),
6696 sizeof(struct sw_rx_page) * NUM_RX_SGE);
6697 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_sge_ring),
6698 &bnx2x_fp(bp, i, rx_sge_mapping),
6699 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
6702 for_each_tx_queue(bp, i) {
6704 /* fastpath tx rings: tx_buf tx_desc */
6705 BNX2X_ALLOC(bnx2x_fp(bp, i, tx_buf_ring),
6706 sizeof(struct sw_tx_bd) * NUM_TX_BD);
6707 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, tx_desc_ring),
6708 &bnx2x_fp(bp, i, tx_desc_mapping),
6709 sizeof(union eth_tx_bd_types) * NUM_TX_BD);
6711 /* end of fastpath */
6713 BNX2X_PCI_ALLOC(bp->def_status_blk, &bp->def_status_blk_mapping,
6714 sizeof(struct host_def_status_block));
6716 BNX2X_PCI_ALLOC(bp->slowpath, &bp->slowpath_mapping,
6717 sizeof(struct bnx2x_slowpath));
6720 BNX2X_PCI_ALLOC(bp->t1, &bp->t1_mapping, 64*1024);
6723 for (i = 0; i < 64*1024; i += 64) {
6724 *(u64 *)((char *)bp->t1 + i + 56) = 0x0UL;
6725 *(u64 *)((char *)bp->t1 + i + 3) = 0x0UL;
6728 /* allocate searcher T2 table
6729 we allocate 1/4 of alloc num for T2
6730 (which is not entered into the ILT) */
6731 BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, 16*1024);
	for (i = 0; i < 16*1024; i += 64)
		*(u64 *)((char *)bp->t2 + i + 56) = bp->t2_mapping + i + 64;
6737 /* now fixup the last line in the block to point to the next block */
6738 *(u64 *)((char *)bp->t2 + 1024*16-8) = bp->t2_mapping;
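	/* T2 thus forms a circular free list: bytes 56..63 of each 64-byte
	   entry hold the physical address of the next entry, and the last
	   entry wraps back to the base of the table */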
6740 /* Timer block array (MAX_CONN*8) phys uncached for now 1024 conns */
6741 BNX2X_PCI_ALLOC(bp->timers, &bp->timers_mapping, 8*1024);
6743 /* QM queues (128*MAX_CONN) */
6744 BNX2X_PCI_ALLOC(bp->qm, &bp->qm_mapping, 128*1024);
6747 /* Slow path ring */
6748 BNX2X_PCI_ALLOC(bp->spq, &bp->spq_mapping, BCM_PAGE_SIZE);
	return 0;

alloc_mem_err:
	bnx2x_free_mem(bp);
	return -ENOMEM;

#undef BNX2X_PCI_ALLOC
#undef BNX2X_ALLOC
}
static void bnx2x_free_tx_skbs(struct bnx2x *bp)
{
	int i;

	for_each_tx_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];
6767 u16 bd_cons = fp->tx_bd_cons;
6768 u16 sw_prod = fp->tx_pkt_prod;
6769 u16 sw_cons = fp->tx_pkt_cons;
6771 while (sw_cons != sw_prod) {
			bd_cons = bnx2x_free_tx_pkt(bp, fp, TX_BD(sw_cons));
			sw_cons++;
		}
	}
}
static void bnx2x_free_rx_skbs(struct bnx2x *bp)
{
	int i, j;

	for_each_rx_queue(bp, j) {
		struct bnx2x_fastpath *fp = &bp->fp[j];
6785 for (i = 0; i < NUM_RX_BD; i++) {
6786 struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
			struct sk_buff *skb = rx_buf->skb;

			if (skb == NULL)
				continue;

			pci_unmap_single(bp->pdev,
					 pci_unmap_addr(rx_buf, mapping),
					 bp->rx_buf_size, PCI_DMA_FROMDEVICE);

			rx_buf->skb = NULL;
			dev_kfree_skb(skb);
		}
6799 if (!fp->disable_tpa)
6800 bnx2x_free_tpa_pool(bp, fp, CHIP_IS_E1(bp) ?
6801 ETH_MAX_AGGREGATION_QUEUES_E1 :
					    ETH_MAX_AGGREGATION_QUEUES_E1H);
	}
}
static void bnx2x_free_skbs(struct bnx2x *bp)
{
	bnx2x_free_tx_skbs(bp);
	bnx2x_free_rx_skbs(bp);
}
static void bnx2x_free_msix_irqs(struct bnx2x *bp)
{
	int i, offset = 1;
6816 free_irq(bp->msix_table[0].vector, bp->dev);
6817 DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
6818 bp->msix_table[0].vector);
6820 for_each_queue(bp, i) {
6821 DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq "
6822 "state %x\n", i, bp->msix_table[i + offset].vector,
6823 bnx2x_fp(bp, i, state));
		free_irq(bp->msix_table[i + offset].vector, &bp->fp[i]);
	}
}
static void bnx2x_free_irq(struct bnx2x *bp)
{
	if (bp->flags & USING_MSIX_FLAG) {
6832 bnx2x_free_msix_irqs(bp);
6833 pci_disable_msix(bp->pdev);
6834 bp->flags &= ~USING_MSIX_FLAG;
6836 } else if (bp->flags & USING_MSI_FLAG) {
6837 free_irq(bp->pdev->irq, bp->dev);
6838 pci_disable_msi(bp->pdev);
6839 bp->flags &= ~USING_MSI_FLAG;
	} else
		free_irq(bp->pdev->irq, bp->dev);
}
6845 static int bnx2x_enable_msix(struct bnx2x *bp)
{
	int i, rc, offset = 1;
	int igu_vec = 0;

	bp->msix_table[0].entry = igu_vec;
6851 DP(NETIF_MSG_IFUP, "msix_table[0].entry = %d (slowpath)\n", igu_vec);
6853 for_each_queue(bp, i) {
6854 igu_vec = BP_L_ID(bp) + offset + i;
6855 bp->msix_table[i + offset].entry = igu_vec;
6856 DP(NETIF_MSG_IFUP, "msix_table[%d].entry = %d "
6857 "(fastpath #%u)\n", i + offset, igu_vec, i);
6860 rc = pci_enable_msix(bp->pdev, &bp->msix_table[0],
6861 BNX2X_NUM_QUEUES(bp) + offset);
	if (rc) {
		DP(NETIF_MSG_IFUP, "MSI-X is not attainable rc %d\n", rc);
		return rc;
	}

	bp->flags |= USING_MSIX_FLAG;

	return 0;
}
6872 static int bnx2x_req_msix_irqs(struct bnx2x *bp)
6874 int i, rc, offset = 1;
6876 rc = request_irq(bp->msix_table[0].vector, bnx2x_msix_sp_int, 0,
			 bp->dev->name, bp->dev);
	if (rc) {
		BNX2X_ERR("request sp irq failed\n");
		return -EBUSY;
	}
6883 for_each_queue(bp, i) {
6884 struct bnx2x_fastpath *fp = &bp->fp[i];
6886 if (i < bp->num_rx_queues)
6887 sprintf(fp->name, "%s-rx-%d", bp->dev->name, i);
6889 sprintf(fp->name, "%s-tx-%d",
6890 bp->dev->name, i - bp->num_rx_queues);
6892 rc = request_irq(bp->msix_table[i + offset].vector,
				 bnx2x_msix_fp_int, 0, fp->name, fp);
		if (rc) {
			BNX2X_ERR("request fp #%d irq failed rc %d\n", i, rc);
			bnx2x_free_msix_irqs(bp);
			return -EBUSY;
		}

		fp->state = BNX2X_FP_STATE_IRQ;
	}
6903 i = BNX2X_NUM_QUEUES(bp);
	printk(KERN_INFO PFX "%s: using MSI-X IRQs: sp %d fp[%d] %d"
	       " ... fp[%d] %d\n",
	       bp->dev->name, bp->msix_table[0].vector,
	       0, bp->msix_table[offset].vector,
	       i - 1, bp->msix_table[offset + i - 1].vector);

	return 0;
}
static int bnx2x_enable_msi(struct bnx2x *bp)
{
	int rc;

	rc = pci_enable_msi(bp->pdev);
	if (rc) {
		DP(NETIF_MSG_IFUP, "MSI is not attainable\n");
		return -1;
	}
	bp->flags |= USING_MSI_FLAG;

	return 0;
}
static int bnx2x_req_irq(struct bnx2x *bp)
{
	unsigned long flags;
	int rc;

	if (bp->flags & USING_MSI_FLAG)
		/* IRQ is not shared */
		flags = 0;
	else
		flags = IRQF_SHARED;
6937 rc = request_irq(bp->pdev->irq, bnx2x_interrupt, flags,
6938 bp->dev->name, bp->dev);
	if (!rc)
		bnx2x_fp(bp, 0, state) = BNX2X_FP_STATE_IRQ;

	return rc;
}
6945 static void bnx2x_napi_enable(struct bnx2x *bp)
6949 for_each_rx_queue(bp, i)
6950 napi_enable(&bnx2x_fp(bp, i, napi));
6953 static void bnx2x_napi_disable(struct bnx2x *bp)
6957 for_each_rx_queue(bp, i)
6958 napi_disable(&bnx2x_fp(bp, i, napi));
static void bnx2x_netif_start(struct bnx2x *bp)
{
	int intr_sem;

	intr_sem = atomic_dec_and_test(&bp->intr_sem);
	smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */

	if (intr_sem) {
		if (netif_running(bp->dev)) {
6970 bnx2x_napi_enable(bp);
6971 bnx2x_int_enable(bp);
6972 if (bp->state == BNX2X_STATE_OPEN)
				netif_tx_wake_all_queues(bp->dev);
		}
	}
}
6978 static void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
6980 bnx2x_int_disable_sync(bp, disable_hw);
6981 bnx2x_napi_disable(bp);
6982 netif_tx_disable(bp->dev);
6983 bp->dev->trans_start = jiffies; /* prevent tx timeout */
6987 * Init service functions
6990 static void bnx2x_set_mac_addr_e1(struct bnx2x *bp, int set)
6992 struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
6993 int port = BP_PORT(bp);
	/* CAM allocation
	 * unicasts 0-31:port0 32-63:port1
	 * multicast 64-127:port0 128-191:port1
	 */
6999 config->hdr.length = 2;
7000 config->hdr.offset = port ? 32 : 0;
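	/* e.g. on port 1 this header covers CAM entries 32 and 33: the
	   unicast MAC programmed below and the broadcast entry after it */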
7001 config->hdr.client_id = bp->fp->cl_id;
7002 config->hdr.reserved1 = 0;
7005 config->config_table[0].cam_entry.msb_mac_addr =
7006 swab16(*(u16 *)&bp->dev->dev_addr[0]);
7007 config->config_table[0].cam_entry.middle_mac_addr =
7008 swab16(*(u16 *)&bp->dev->dev_addr[2]);
7009 config->config_table[0].cam_entry.lsb_mac_addr =
7010 swab16(*(u16 *)&bp->dev->dev_addr[4]);
7011 config->config_table[0].cam_entry.flags = cpu_to_le16(port);
	if (set)
		config->config_table[0].target_table_entry.flags = 0;
	else
		CAM_INVALIDATE(config->config_table[0]);
7016 config->config_table[0].target_table_entry.clients_bit_vector =
7017 cpu_to_le32(1 << BP_L_ID(bp));
7018 config->config_table[0].target_table_entry.vlan_id = 0;
7020 DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x)\n",
7021 (set ? "setting" : "clearing"),
7022 config->config_table[0].cam_entry.msb_mac_addr,
7023 config->config_table[0].cam_entry.middle_mac_addr,
7024 config->config_table[0].cam_entry.lsb_mac_addr);
7027 config->config_table[1].cam_entry.msb_mac_addr = cpu_to_le16(0xffff);
7028 config->config_table[1].cam_entry.middle_mac_addr = cpu_to_le16(0xffff);
7029 config->config_table[1].cam_entry.lsb_mac_addr = cpu_to_le16(0xffff);
7030 config->config_table[1].cam_entry.flags = cpu_to_le16(port);
	if (set)
		config->config_table[1].target_table_entry.flags =
				TSTORM_CAM_TARGET_TABLE_ENTRY_BROADCAST;
	else
		CAM_INVALIDATE(config->config_table[1]);
7036 config->config_table[1].target_table_entry.clients_bit_vector =
7037 cpu_to_le32(1 << BP_L_ID(bp));
7038 config->config_table[1].target_table_entry.vlan_id = 0;
7040 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
7041 U64_HI(bnx2x_sp_mapping(bp, mac_config)),
7042 U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
7045 static void bnx2x_set_mac_addr_e1h(struct bnx2x *bp, int set)
7047 struct mac_configuration_cmd_e1h *config =
7048 (struct mac_configuration_cmd_e1h *)bnx2x_sp(bp, mac_config);
7050 /* CAM allocation for E1H
7051 * unicasts: by func number
7052 * multicast: 20+FUNC*20, 20 each
7054 config->hdr.length = 1;
7055 config->hdr.offset = BP_FUNC(bp);
7056 config->hdr.client_id = bp->fp->cl_id;
7057 config->hdr.reserved1 = 0;
7060 config->config_table[0].msb_mac_addr =
7061 swab16(*(u16 *)&bp->dev->dev_addr[0]);
7062 config->config_table[0].middle_mac_addr =
7063 swab16(*(u16 *)&bp->dev->dev_addr[2]);
7064 config->config_table[0].lsb_mac_addr =
7065 swab16(*(u16 *)&bp->dev->dev_addr[4]);
7066 config->config_table[0].clients_bit_vector =
7067 cpu_to_le32(1 << BP_L_ID(bp));
7068 config->config_table[0].vlan_id = 0;
7069 config->config_table[0].e1hov_id = cpu_to_le16(bp->e1hov);
	if (set)
		config->config_table[0].flags = BP_PORT(bp);
	else
		config->config_table[0].flags =
				MAC_CONFIGURATION_ENTRY_E1H_ACTION_TYPE;
7076 DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x) E1HOV %d CLID %d\n",
7077 (set ? "setting" : "clearing"),
7078 config->config_table[0].msb_mac_addr,
7079 config->config_table[0].middle_mac_addr,
7080 config->config_table[0].lsb_mac_addr, bp->e1hov, BP_L_ID(bp));
7082 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
7083 U64_HI(bnx2x_sp_mapping(bp, mac_config)),
7084 U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
7087 static int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
7088 int *state_p, int poll)
{
	/* can take a while if any port is running */
	int cnt = 5000;
7093 DP(NETIF_MSG_IFUP, "%s for state to become %x on IDX [%d]\n",
7094 poll ? "polling" : "waiting", state, idx);
	might_sleep();
	while (cnt--) {
		if (poll) {
			bnx2x_rx_int(bp->fp, 10);
			/* if index is different from 0
			 * the reply for some commands will
			 * be on the non default queue
			 */
			if (idx)
				bnx2x_rx_int(&bp->fp[idx], 10);
		}
7108 mb(); /* state is changed by bnx2x_sp_event() */
7109 if (*state_p == state) {
7110 #ifdef BNX2X_STOP_ON_ERROR
			DP(NETIF_MSG_IFUP, "exit (cnt %d)\n", 5000 - cnt);
#endif
			return 0;
		}

		msleep(1);

		if (bp->panic)
			return -EIO;
	}
7123 BNX2X_ERR("timeout %s for state %x on IDX [%d]\n",
7124 poll ? "polling" : "waiting", state, idx);
#ifdef BNX2X_STOP_ON_ERROR
	bnx2x_panic();
#endif

	return -EBUSY;
}
7132 static int bnx2x_setup_leading(struct bnx2x *bp)
7136 /* reset IGU state */
7137 bnx2x_ack_sb(bp, bp->fp[0].sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
7140 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_SETUP, 0, 0, 0, 0);
7142 /* Wait for completion */
7143 rc = bnx2x_wait_ramrod(bp, BNX2X_STATE_OPEN, 0, &(bp->state), 0);
7148 static int bnx2x_setup_multi(struct bnx2x *bp, int index)
7150 struct bnx2x_fastpath *fp = &bp->fp[index];
7152 /* reset IGU state */
7153 bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
7156 fp->state = BNX2X_FP_STATE_OPENING;
7157 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CLIENT_SETUP, index, 0,
7160 /* Wait for completion */
7161 return bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_OPEN, index,
7165 static int bnx2x_poll(struct napi_struct *napi, int budget);
7167 static void bnx2x_set_int_mode_msix(struct bnx2x *bp, int *num_rx_queues_out,
				    int *num_tx_queues_out)
{
	int _num_rx_queues = 0, _num_tx_queues = 0;
7172 switch (bp->multi_mode) {
7173 case ETH_RSS_MODE_DISABLED:
7178 case ETH_RSS_MODE_REGULAR:
7180 _num_rx_queues = min_t(u32, num_rx_queues,
7181 BNX2X_MAX_QUEUES(bp));
7183 _num_rx_queues = min_t(u32, num_online_cpus(),
7184 BNX2X_MAX_QUEUES(bp));
7187 _num_tx_queues = min_t(u32, num_tx_queues,
7188 BNX2X_MAX_QUEUES(bp));
7190 _num_tx_queues = min_t(u32, num_online_cpus(),
7191 BNX2X_MAX_QUEUES(bp));
		/* There must not be more Tx queues than Rx queues */
7194 if (_num_tx_queues > _num_rx_queues) {
7195 BNX2X_ERR("number of tx queues (%d) > "
7196 "number of rx queues (%d)"
7197 " defaulting to %d\n",
				  _num_tx_queues, _num_rx_queues,
				  _num_rx_queues);
			_num_tx_queues = _num_rx_queues;
		}
		break;
	}
7211 *num_rx_queues_out = _num_rx_queues;
	*num_tx_queues_out = _num_tx_queues;
}
static int bnx2x_set_int_mode(struct bnx2x *bp)
{
	int rc = 0;

	switch (int_mode) {
	case INT_MODE_INTx:
	case INT_MODE_MSI:
		bp->num_rx_queues = 1;
		bp->num_tx_queues = 1;
		DP(NETIF_MSG_IFUP, "set number of queues to 1\n");
		break;

	default:
7229 /* Set interrupt mode according to bp->multi_mode value */
7230 bnx2x_set_int_mode_msix(bp, &bp->num_rx_queues,
7231 &bp->num_tx_queues);
7233 DP(NETIF_MSG_IFUP, "set number of queues to: rx %d tx %d\n",
7234 bp->num_rx_queues, bp->num_tx_queues);
7236 /* if we can't use MSI-X we only need one fp,
7237 * so try to enable MSI-X with the requested number of fp's
7238 * and fallback to MSI or legacy INTx with one fp
		rc = bnx2x_enable_msix(bp);
		if (rc) {
			/* failed to enable MSI-X */
			if (bp->multi_mode)
				BNX2X_ERR("Multi requested but failed to "
					  "enable MSI-X (rx %d tx %d), "
					  "set number of queues to 1\n",
					  bp->num_rx_queues, bp->num_tx_queues);
			bp->num_rx_queues = 1;
			bp->num_tx_queues = 1;
		}
		break;
	}

	bp->dev->real_num_tx_queues = bp->num_tx_queues;

	return rc;
}
7258 /* must be called with rtnl_lock */
7259 static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
7264 #ifdef BNX2X_STOP_ON_ERROR
7265 if (unlikely(bp->panic))
7269 bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;
7271 rc = bnx2x_set_int_mode(bp);
	if (bnx2x_alloc_mem(bp))
		return -ENOMEM;
7276 for_each_rx_queue(bp, i)
7277 bnx2x_fp(bp, i, disable_tpa) =
7278 ((bp->flags & TPA_ENABLE_FLAG) == 0);
7280 for_each_rx_queue(bp, i)
7281 netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
7284 bnx2x_napi_enable(bp);
7286 if (bp->flags & USING_MSIX_FLAG) {
7287 rc = bnx2x_req_msix_irqs(bp);
7289 pci_disable_msix(bp->pdev);
7293 /* Fall to INTx if failed to enable MSI-X due to lack of
7294 memory (in bnx2x_set_int_mode()) */
7295 if ((rc != -ENOMEM) && (int_mode != INT_MODE_INTx))
7296 bnx2x_enable_msi(bp);
		rc = bnx2x_req_irq(bp);
		if (rc) {
			BNX2X_ERR("IRQ request failed rc %d, aborting\n", rc);
			if (bp->flags & USING_MSI_FLAG)
				pci_disable_msi(bp->pdev);
			goto load_error1;
		}
7305 if (bp->flags & USING_MSI_FLAG) {
7306 bp->dev->irq = bp->pdev->irq;
7307 printk(KERN_INFO PFX "%s: using MSI IRQ %d\n",
7308 bp->dev->name, bp->pdev->irq);
7312 /* Send LOAD_REQUEST command to MCP
7313 Returns the type of LOAD command:
7314 if it is the first port to be initialized
7315 common blocks should be initialized, otherwise - not
7317 if (!BP_NOMCP(bp)) {
7318 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ);
7320 BNX2X_ERR("MCP response failure, aborting\n");
7324 if (load_code == FW_MSG_CODE_DRV_LOAD_REFUSED) {
7325 rc = -EBUSY; /* other port in diagnostic mode */
7330 int port = BP_PORT(bp);
7332 DP(NETIF_MSG_IFUP, "NO MCP - load counts %d, %d, %d\n",
7333 load_count[0], load_count[1], load_count[2]);
7335 load_count[1 + port]++;
7336 DP(NETIF_MSG_IFUP, "NO MCP - new load counts %d, %d, %d\n",
7337 load_count[0], load_count[1], load_count[2]);
7338 if (load_count[0] == 1)
7339 load_code = FW_MSG_CODE_DRV_LOAD_COMMON;
7340 else if (load_count[1 + port] == 1)
7341 load_code = FW_MSG_CODE_DRV_LOAD_PORT;
7343 load_code = FW_MSG_CODE_DRV_LOAD_FUNCTION;
7346 if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
7347 (load_code == FW_MSG_CODE_DRV_LOAD_PORT))
7351 DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
7354 rc = bnx2x_init_hw(bp, load_code);
7356 BNX2X_ERR("HW init failed, aborting\n");
7360 /* Setup NIC internals and enable interrupts */
7361 bnx2x_nic_init(bp, load_code);
7363 if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) &&
7364 (bp->common.shmem2_base))
7365 SHMEM2_WR(bp, dcc_support,
7366 (SHMEM_DCC_SUPPORT_DISABLE_ENABLE_PF_TLV |
7367 SHMEM_DCC_SUPPORT_BANDWIDTH_ALLOCATION_TLV));
7369 /* Send LOAD_DONE command to MCP */
7370 if (!BP_NOMCP(bp)) {
7371 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE);
7373 BNX2X_ERR("MCP response failure, aborting\n");
7379 bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;
7381 rc = bnx2x_setup_leading(bp);
7383 BNX2X_ERR("Setup leading failed!\n");
7384 #ifndef BNX2X_STOP_ON_ERROR
7392 if (CHIP_IS_E1H(bp))
7393 if (bp->mf_config & FUNC_MF_CFG_FUNC_DISABLED) {
7394 DP(NETIF_MSG_IFUP, "mf_cfg function disabled\n");
7395 bp->state = BNX2X_STATE_DISABLED;
7398 if (bp->state == BNX2X_STATE_OPEN) {
7399 for_each_nondefault_queue(bp, i) {
7400 rc = bnx2x_setup_multi(bp, i);
7406 bnx2x_set_mac_addr_e1(bp, 1);
7408 bnx2x_set_mac_addr_e1h(bp, 1);
7412 bnx2x_initial_phy_init(bp, load_mode);
7414 /* Start fast path */
7415 switch (load_mode) {
7417 if (bp->state == BNX2X_STATE_OPEN) {
			/* Tx queues should only be re-enabled */
7419 netif_tx_wake_all_queues(bp->dev);
7421 /* Initialize the receive filter. */
7422 bnx2x_set_rx_mode(bp->dev);
7426 netif_tx_start_all_queues(bp->dev);
7427 if (bp->state != BNX2X_STATE_OPEN)
7428 netif_tx_disable(bp->dev);
7429 /* Initialize the receive filter. */
7430 bnx2x_set_rx_mode(bp->dev);
7434 /* Initialize the receive filter. */
7435 bnx2x_set_rx_mode(bp->dev);
7436 bp->state = BNX2X_STATE_DIAG;
7444 bnx2x__link_status_update(bp);
7446 /* start the timer */
7447 mod_timer(&bp->timer, jiffies + bp->current_interval);
load_error3:
	bnx2x_int_disable_sync(bp, 1);
7454 if (!BP_NOMCP(bp)) {
7455 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP);
7456 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
7459 /* Free SKBs, SGEs, TPA pool and driver internals */
7460 bnx2x_free_skbs(bp);
7461 for_each_rx_queue(bp, i)
7462 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
load_error2:
	/* Release IRQs */
	bnx2x_free_irq(bp);

load_error1:
	bnx2x_napi_disable(bp);
7468 for_each_rx_queue(bp, i)
		netif_napi_del(&bnx2x_fp(bp, i, napi));
	bnx2x_free_mem(bp);

	return rc;
}
7475 static int bnx2x_stop_multi(struct bnx2x *bp, int index)
7477 struct bnx2x_fastpath *fp = &bp->fp[index];
7480 /* halt the connection */
7481 fp->state = BNX2X_FP_STATE_HALTING;
7482 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, index, 0, fp->cl_id, 0);
7484 /* Wait for completion */
7485 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, index,
7487 if (rc) /* timeout */
7490 /* delete cfc entry */
7491 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CFC_DEL, index, 0, 0, 1);
7493 /* Wait for completion */
7494 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_CLOSED, index,
7499 static int bnx2x_stop_leading(struct bnx2x *bp)
7501 __le16 dsb_sp_prod_idx;
7502 /* if the other port is handling traffic,
7503 this can take a lot of time */
7509 /* Send HALT ramrod */
7510 bp->fp[0].state = BNX2X_FP_STATE_HALTING;
7511 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, 0, 0, bp->fp->cl_id, 0);
7513 /* Wait for completion */
7514 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, 0,
7515 &(bp->fp[0].state), 1);
7516 if (rc) /* timeout */
7519 dsb_sp_prod_idx = *bp->dsb_sp_prod;
7521 /* Send PORT_DELETE ramrod */
7522 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_DEL, 0, 0, 0, 1);
	/* Wait for completion to arrive on default status block.
	   We are going to reset the chip anyway, so there is not
	   much to do if this times out */
7528 while (dsb_sp_prod_idx == *bp->dsb_sp_prod) {
7530 DP(NETIF_MSG_IFDOWN, "timeout waiting for port del "
7531 "dsb_sp_prod 0x%x != dsb_sp_prod_idx 0x%x\n",
7532 *bp->dsb_sp_prod, dsb_sp_prod_idx);
7533 #ifdef BNX2X_STOP_ON_ERROR
7541 rmb(); /* Refresh the dsb_sp_prod */
7543 bp->state = BNX2X_STATE_CLOSING_WAIT4_UNLOAD;
7544 bp->fp[0].state = BNX2X_FP_STATE_CLOSED;
7549 static void bnx2x_reset_func(struct bnx2x *bp)
7551 int port = BP_PORT(bp);
7552 int func = BP_FUNC(bp);
7556 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
7557 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
7560 base = FUNC_ILT_BASE(func);
7561 for (i = base; i < base + ILT_PER_FUNC; i++)
7562 bnx2x_ilt_wr(bp, i, 0);
7565 static void bnx2x_reset_port(struct bnx2x *bp)
7567 int port = BP_PORT(bp);
7570 REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
7572 /* Do not rcv packets to BRB */
7573 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK + port*4, 0x0);
7574 /* Do not direct rcv packets that are not for MCP to the BRB */
7575 REG_WR(bp, (port ? NIG_REG_LLH1_BRB1_NOT_MCP :
7576 NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
7579 REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, 0);
7582 /* Check for BRB port occupancy */
7583 val = REG_RD(bp, BRB1_REG_PORT_NUM_OCC_BLOCKS_0 + port*4);
7585 DP(NETIF_MSG_IFDOWN,
7586 "BRB1 is not empty %d blocks are occupied\n", val);
7588 /* TODO: Close Doorbell port? */
7591 static void bnx2x_reset_chip(struct bnx2x *bp, u32 reset_code)
7593 DP(BNX2X_MSG_MCP, "function %d reset_code %x\n",
7594 BP_FUNC(bp), reset_code);
7596 switch (reset_code) {
7597 case FW_MSG_CODE_DRV_UNLOAD_COMMON:
7598 bnx2x_reset_port(bp);
7599 bnx2x_reset_func(bp);
7600 bnx2x_reset_common(bp);
7603 case FW_MSG_CODE_DRV_UNLOAD_PORT:
7604 bnx2x_reset_port(bp);
7605 bnx2x_reset_func(bp);
7608 case FW_MSG_CODE_DRV_UNLOAD_FUNCTION:
7609 bnx2x_reset_func(bp);
7613 BNX2X_ERR("Unknown reset_code (0x%x) from MCP\n", reset_code);
7618 /* must be called with rtnl_lock */
7619 static int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
7621 int port = BP_PORT(bp);
7625 bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
7627 bp->rx_mode = BNX2X_RX_MODE_NONE;
7628 bnx2x_set_storm_rx_mode(bp);
7630 bnx2x_netif_stop(bp, 1);
7632 del_timer_sync(&bp->timer);
7633 SHMEM_WR(bp, func_mb[BP_FUNC(bp)].drv_pulse_mb,
7634 (DRV_PULSE_ALWAYS_ALIVE | bp->fw_drv_pulse_wr_seq));
7635 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
7640 /* Wait until tx fastpath tasks complete */
7641 for_each_tx_queue(bp, i) {
7642 struct bnx2x_fastpath *fp = &bp->fp[i];
7645 while (bnx2x_has_tx_work_unload(fp)) {
7649 BNX2X_ERR("timeout waiting for queue[%d]\n",
7651 #ifdef BNX2X_STOP_ON_ERROR
7662 /* Give HW time to discard old tx messages */
7665 if (CHIP_IS_E1(bp)) {
7666 struct mac_configuration_cmd *config =
7667 bnx2x_sp(bp, mcast_config);
7669 bnx2x_set_mac_addr_e1(bp, 0);
7671 for (i = 0; i < config->hdr.length; i++)
7672 CAM_INVALIDATE(config->config_table[i]);
7674 config->hdr.length = i;
7675 if (CHIP_REV_IS_SLOW(bp))
7676 config->hdr.offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
7678 config->hdr.offset = BNX2X_MAX_MULTICAST*(1 + port);
7679 config->hdr.client_id = bp->fp->cl_id;
7680 config->hdr.reserved1 = 0;
7682 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
7683 U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
7684 U64_LO(bnx2x_sp_mapping(bp, mcast_config)), 0);
7687 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
7689 bnx2x_set_mac_addr_e1h(bp, 0);
7691 for (i = 0; i < MC_HASH_SIZE; i++)
7692 REG_WR(bp, MC_HASH_OFFSET(bp, i), 0);
7694 REG_WR(bp, MISC_REG_E1HMF_MODE, 0);
7697 if (unload_mode == UNLOAD_NORMAL)
7698 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
7700 else if (bp->flags & NO_WOL_FLAG)
7701 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP;
7704 u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
7705 u8 *mac_addr = bp->dev->dev_addr;
7707 /* The mac address is written to entries 1-4 to
7708 preserve entry 0 which is used by the PMF */
7709 u8 entry = (BP_E1HVN(bp) + 1)*8;
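		/* each MAC_MATCH CAM entry spans two 32-bit registers
		   (8 bytes), so VN n programs entry n+1 at byte offset
		   (n + 1)*8, leaving entry 0 for the PMF as noted above */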
7711 val = (mac_addr[0] << 8) | mac_addr[1];
7712 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry, val);
7714 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
7715 (mac_addr[4] << 8) | mac_addr[5];
7716 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry + 4, val);
		reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN;

	} else
		reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
7723 /* Close multi and leading connections
7724 Completions for ramrods are collected in a synchronous way */
7725 for_each_nondefault_queue(bp, i)
		if (bnx2x_stop_multi(bp, i))
			goto unload_error;
	rc = bnx2x_stop_leading(bp);
	if (rc) {
		BNX2X_ERR("Stop leading failed!\n");
#ifdef BNX2X_STOP_ON_ERROR
		return -EBUSY;
#else
		goto unload_error;
#endif
	}

unload_error:
	if (!BP_NOMCP(bp))
		reset_code = bnx2x_fw_command(bp, reset_code);
	else {
7743 DP(NETIF_MSG_IFDOWN, "NO MCP - load counts %d, %d, %d\n",
7744 load_count[0], load_count[1], load_count[2]);
7746 load_count[1 + port]--;
7747 DP(NETIF_MSG_IFDOWN, "NO MCP - new load counts %d, %d, %d\n",
7748 load_count[0], load_count[1], load_count[2]);
7749 if (load_count[0] == 0)
7750 reset_code = FW_MSG_CODE_DRV_UNLOAD_COMMON;
7751 else if (load_count[1 + port] == 0)
7752 reset_code = FW_MSG_CODE_DRV_UNLOAD_PORT;
7754 reset_code = FW_MSG_CODE_DRV_UNLOAD_FUNCTION;
7757 if ((reset_code == FW_MSG_CODE_DRV_UNLOAD_COMMON) ||
7758 (reset_code == FW_MSG_CODE_DRV_UNLOAD_PORT))
7759 bnx2x__link_reset(bp);
7761 /* Reset the chip */
7762 bnx2x_reset_chip(bp, reset_code);
	/* Report UNLOAD_DONE to MCP */
	if (!BP_NOMCP(bp))
		bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
7770 /* Free SKBs, SGEs, TPA pool and driver internals */
7771 bnx2x_free_skbs(bp);
7772 for_each_rx_queue(bp, i)
7773 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
7774 for_each_rx_queue(bp, i)
		netif_napi_del(&bnx2x_fp(bp, i, napi));
	bnx2x_free_mem(bp);
7778 bp->state = BNX2X_STATE_CLOSED;
	netif_carrier_off(bp->dev);

	return 0;
}
7785 static void bnx2x_reset_task(struct work_struct *work)
7787 struct bnx2x *bp = container_of(work, struct bnx2x, reset_task);
7789 #ifdef BNX2X_STOP_ON_ERROR
	BNX2X_ERR("reset task called but STOP_ON_ERROR defined"
		  " so reset not done to allow debug dump,\n"
		  " you will need to reboot when done\n");
	return;
#endif
	rtnl_lock();

	if (!netif_running(bp->dev))
7799 goto reset_task_exit;
7801 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
	bnx2x_nic_load(bp, LOAD_NORMAL);

reset_task_exit:
	rtnl_unlock();
}
7808 /* end of nic load/unload */
7813 * Init service functions
static inline u32 bnx2x_get_pretend_reg(struct bnx2x *bp, int func)
{
	switch (func) {
	case 0: return PXP2_REG_PGL_PRETEND_FUNC_F0;
7820 case 1: return PXP2_REG_PGL_PRETEND_FUNC_F1;
7821 case 2: return PXP2_REG_PGL_PRETEND_FUNC_F2;
7822 case 3: return PXP2_REG_PGL_PRETEND_FUNC_F3;
7823 case 4: return PXP2_REG_PGL_PRETEND_FUNC_F4;
7824 case 5: return PXP2_REG_PGL_PRETEND_FUNC_F5;
7825 case 6: return PXP2_REG_PGL_PRETEND_FUNC_F6;
7826 case 7: return PXP2_REG_PGL_PRETEND_FUNC_F7;
	default:
		BNX2X_ERR("Unsupported function index: %d\n", func);
		return (u32)-1;
	}
}
static void bnx2x_undi_int_disable_e1h(struct bnx2x *bp, int orig_func)
{
	u32 reg = bnx2x_get_pretend_reg(bp, orig_func), new_val;

	/* Flush all outstanding writes */
	mmiowb();
	/* Pretend to be function 0 */
	REG_WR(bp, reg, 0);
	/* Flush the GRC transaction (in the chip) */
	new_val = REG_RD(bp, reg);
	if (new_val != 0) {
		BNX2X_ERR("Hmmm... Pretend register wasn't updated: (0,%d)!\n",
			  new_val);
		BUG();
	}
7850 /* From now we are in the "like-E1" mode */
7851 bnx2x_int_disable(bp);
	/* Flush all outstanding writes */
	mmiowb();
	/* Restore the original function settings */
7857 REG_WR(bp, reg, orig_func);
7858 new_val = REG_RD(bp, reg);
7859 if (new_val != orig_func) {
7860 BNX2X_ERR("Hmmm... Pretend register wasn't updated: (%d,%d)!\n",
			  orig_func, new_val);
		BUG();
	}
}
7866 static inline void bnx2x_undi_int_disable(struct bnx2x *bp, int func)
7868 if (CHIP_IS_E1H(bp))
7869 bnx2x_undi_int_disable_e1h(bp, func);
7871 bnx2x_int_disable(bp);
7874 static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
7878 /* Check if there is any driver already loaded */
7879 val = REG_RD(bp, MISC_REG_UNPREPARED);
	/* Check if it is the UNDI driver
	 * UNDI driver initializes CID offset for normal bell to 0x7
	 */
	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
7884 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
	val = REG_RD(bp, DORQ_REG_NORM_CID_OFST);
	if (val == 0x7) {
		u32 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
7889 int func = BP_FUNC(bp);
7893 /* clear the UNDI indication */
7894 REG_WR(bp, DORQ_REG_NORM_CID_OFST, 0);
7896 BNX2X_DEV_INFO("UNDI is active! reset device\n");
		/* try unload UNDI on port 0 */
		bp->func = 0;
		bp->fw_seq =
		       (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
7902 DRV_MSG_SEQ_NUMBER_MASK);
7903 reset_code = bnx2x_fw_command(bp, reset_code);
7905 /* if UNDI is loaded on the other port */
7906 if (reset_code != FW_MSG_CODE_DRV_UNLOAD_COMMON) {
7908 /* send "DONE" for previous unload */
7909 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
			/* unload UNDI on port 1 */
			bp->func = 1;
			bp->fw_seq =
			       (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
7915 DRV_MSG_SEQ_NUMBER_MASK);
7916 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
7918 bnx2x_fw_command(bp, reset_code);
7921 /* now it's safe to release the lock */
7922 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
7924 bnx2x_undi_int_disable(bp, func);
		/* close input traffic and wait for it */
		/* Do not rcv packets to BRB */
		REG_WR(bp,
		       (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_DRV_MASK :
				      NIG_REG_LLH0_BRB1_DRV_MASK), 0x0);
		/* Do not direct rcv packets that are not for MCP to
		 * the BRB */
		REG_WR(bp,
		       (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_NOT_MCP :
				      NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
		/* clear AEU */
		REG_WR(bp,
		       (BP_PORT(bp) ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
				      MISC_REG_AEU_MASK_ATTN_FUNC_0), 0);
		msleep(10);
7942 /* save NIG port swap info */
7943 swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
7944 swap_en = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
		/* reset device */
		REG_WR(bp,
		       GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
		       0xd3ffffff);
		REG_WR(bp,
		       GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
		       0x1403);

		/* take the NIG out of reset and restore swap values */
		REG_WR(bp,
		       GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
		       MISC_REGISTERS_RESET_REG_1_RST_NIG);
7956 REG_WR(bp, NIG_REG_PORT_SWAP, swap_val);
7957 REG_WR(bp, NIG_REG_STRAP_OVERRIDE, swap_en);
7959 /* send unload done to the MCP */
7960 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
		/* restore our func and fw_seq */
		bp->func = func;
		bp->fw_seq =
		       (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
			DRV_MSG_SEQ_NUMBER_MASK);
		} else
			bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
	}
}
7973 static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
7975 u32 val, val2, val3, val4, id;
7978 /* Get the chip revision id and number. */
7979 /* chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3 */
7980 val = REG_RD(bp, MISC_REG_CHIP_NUM);
7981 id = ((val & 0xffff) << 16);
7982 val = REG_RD(bp, MISC_REG_CHIP_REV);
7983 id |= ((val & 0xf) << 12);
7984 val = REG_RD(bp, MISC_REG_CHIP_METAL);
7985 id |= ((val & 0xff) << 4);
	val = REG_RD(bp, MISC_REG_BOND_ID);
	id |= (val & 0xf);
7988 bp->common.chip_id = id;
7989 bp->link_params.chip_id = bp->common.chip_id;
7990 BNX2X_DEV_INFO("chip ID is 0x%x\n", id);
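	/* illustrative example (made-up values): chip num 0x164e with
	   rev 0, metal 0 and bond_id 0 assembles to chip_id 0x164e0000
	   (num << 16 | rev << 12 | metal << 4 | bond_id) */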
7992 val = (REG_RD(bp, 0x2874) & 0x55);
7993 if ((bp->common.chip_id & 0x1) ||
7994 (CHIP_IS_E1(bp) && val) || (CHIP_IS_E1H(bp) && (val == 0x55))) {
7995 bp->flags |= ONE_PORT_FLAG;
7996 BNX2X_DEV_INFO("single port device\n");
7999 val = REG_RD(bp, MCP_REG_MCPR_NVM_CFG4);
8000 bp->common.flash_size = (NVRAM_1MB_SIZE <<
8001 (val & MCPR_NVM_CFG4_FLASH_SIZE));
8002 BNX2X_DEV_INFO("flash_size 0x%x (%d)\n",
8003 bp->common.flash_size, bp->common.flash_size);
8005 bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
8006 bp->common.shmem2_base = REG_RD(bp, MISC_REG_GENERIC_CR_0);
8007 bp->link_params.shmem_base = bp->common.shmem_base;
8008 BNX2X_DEV_INFO("shmem offset 0x%x shmem2 offset 0x%x\n",
8009 bp->common.shmem_base, bp->common.shmem2_base);
8011 if (!bp->common.shmem_base ||
8012 (bp->common.shmem_base < 0xA0000) ||
8013 (bp->common.shmem_base >= 0xC0000)) {
8014 BNX2X_DEV_INFO("MCP not active\n");
		bp->flags |= NO_MCP_FLAG;
		return;
	}
8019 val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
8020 if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
8021 != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
8022 BNX2X_ERR("BAD MCP validity signature\n");
8024 bp->common.hw_config = SHMEM_RD(bp, dev_info.shared_hw_config.config);
8025 BNX2X_DEV_INFO("hw_config 0x%08x\n", bp->common.hw_config);
8027 bp->link_params.hw_led_mode = ((bp->common.hw_config &
8028 SHARED_HW_CFG_LED_MODE_MASK) >>
8029 SHARED_HW_CFG_LED_MODE_SHIFT);
8031 bp->link_params.feature_config_flags = 0;
8032 val = SHMEM_RD(bp, dev_info.shared_feature_config.config);
8033 if (val & SHARED_FEAT_CFG_OVERRIDE_PREEMPHASIS_CFG_ENABLED)
8034 bp->link_params.feature_config_flags |=
8035 FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
8037 bp->link_params.feature_config_flags &=
8038 ~FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
8040 val = SHMEM_RD(bp, dev_info.bc_rev) >> 8;
8041 bp->common.bc_ver = val;
8042 BNX2X_DEV_INFO("bc_ver %X\n", val);
8043 if (val < BNX2X_BC_VER) {
8044 /* for now only warn
8045 * later we might need to enforce this */
8046 BNX2X_ERR("This driver needs bc_ver %X but found %X,"
8047 " please upgrade BC\n", BNX2X_BC_VER, val);
8049 bp->link_params.feature_config_flags |=
8050 (val >= REQ_BC_VER_4_VRFY_OPT_MDL) ?
8051 FEATURE_CONFIG_BC_SUPPORTS_OPT_MDL_VRFY : 0;
8053 if (BP_E1HVN(bp) == 0) {
8054 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_PMC, &pmc);
8055 bp->flags |= (pmc & PCI_PM_CAP_PME_D3cold) ? 0 : NO_WOL_FLAG;
8057 /* no WOL capability for E1HVN != 0 */
8058 bp->flags |= NO_WOL_FLAG;
8060 BNX2X_DEV_INFO("%sWoL capable\n",
8061 (bp->flags & NO_WOL_FLAG) ? "not " : "");
8063 val = SHMEM_RD(bp, dev_info.shared_hw_config.part_num);
8064 val2 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[4]);
8065 val3 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[8]);
8066 val4 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[12]);
8068 printk(KERN_INFO PFX "part number %X-%X-%X-%X\n",
8069 val, val2, val3, val4);
8072 static void __devinit bnx2x_link_settings_supported(struct bnx2x *bp,
8075 int port = BP_PORT(bp);
8078 switch (switch_cfg) {
8080 BNX2X_DEV_INFO("switch_cfg 0x%x (1G)\n", switch_cfg);
		ext_phy_type =
			SERDES_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
8084 switch (ext_phy_type) {
8085 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT:
8086 BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
8089 bp->port.supported |= (SUPPORTED_10baseT_Half |
8090 SUPPORTED_10baseT_Full |
8091 SUPPORTED_100baseT_Half |
8092 SUPPORTED_100baseT_Full |
8093 SUPPORTED_1000baseT_Full |
8094 SUPPORTED_2500baseX_Full |
8099 SUPPORTED_Asym_Pause);
8102 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_BCM5482:
8103 BNX2X_DEV_INFO("ext_phy_type 0x%x (5482)\n",
8106 bp->port.supported |= (SUPPORTED_10baseT_Half |
8107 SUPPORTED_10baseT_Full |
8108 SUPPORTED_100baseT_Half |
8109 SUPPORTED_100baseT_Full |
8110 SUPPORTED_1000baseT_Full |
8115 SUPPORTED_Asym_Pause);
8119 BNX2X_ERR("NVRAM config error. "
8120 "BAD SerDes ext_phy_config 0x%x\n",
8121 bp->link_params.ext_phy_config);
8125 bp->port.phy_addr = REG_RD(bp, NIG_REG_SERDES0_CTRL_PHY_ADDR +
8127 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
8130 case SWITCH_CFG_10G:
8131 BNX2X_DEV_INFO("switch_cfg 0x%x (10G)\n", switch_cfg);
		ext_phy_type =
			XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
8135 switch (ext_phy_type) {
8136 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
8137 BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
8140 bp->port.supported |= (SUPPORTED_10baseT_Half |
8141 SUPPORTED_10baseT_Full |
8142 SUPPORTED_100baseT_Half |
8143 SUPPORTED_100baseT_Full |
8144 SUPPORTED_1000baseT_Full |
8145 SUPPORTED_2500baseX_Full |
8146 SUPPORTED_10000baseT_Full |
8151 SUPPORTED_Asym_Pause);
8154 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
8155 BNX2X_DEV_INFO("ext_phy_type 0x%x (8072)\n",
8158 bp->port.supported |= (SUPPORTED_10000baseT_Full |
8159 SUPPORTED_1000baseT_Full |
8163 SUPPORTED_Asym_Pause);
8166 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
8167 BNX2X_DEV_INFO("ext_phy_type 0x%x (8073)\n",
8170 bp->port.supported |= (SUPPORTED_10000baseT_Full |
8171 SUPPORTED_2500baseX_Full |
8172 SUPPORTED_1000baseT_Full |
8176 SUPPORTED_Asym_Pause);
8179 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
8180 BNX2X_DEV_INFO("ext_phy_type 0x%x (8705)\n",
8183 bp->port.supported |= (SUPPORTED_10000baseT_Full |
8186 SUPPORTED_Asym_Pause);
8189 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
8190 BNX2X_DEV_INFO("ext_phy_type 0x%x (8706)\n",
8193 bp->port.supported |= (SUPPORTED_10000baseT_Full |
8194 SUPPORTED_1000baseT_Full |
8197 SUPPORTED_Asym_Pause);
8200 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
8201 BNX2X_DEV_INFO("ext_phy_type 0x%x (8726)\n",
8204 bp->port.supported |= (SUPPORTED_10000baseT_Full |
8205 SUPPORTED_1000baseT_Full |
8209 SUPPORTED_Asym_Pause);
8212 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
8213 BNX2X_DEV_INFO("ext_phy_type 0x%x (8727)\n",
8216 bp->port.supported |= (SUPPORTED_10000baseT_Full |
8217 SUPPORTED_1000baseT_Full |
8221 SUPPORTED_Asym_Pause);
8224 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
8225 BNX2X_DEV_INFO("ext_phy_type 0x%x (SFX7101)\n",
8228 bp->port.supported |= (SUPPORTED_10000baseT_Full |
8232 SUPPORTED_Asym_Pause);
8235 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481:
8236 BNX2X_DEV_INFO("ext_phy_type 0x%x (BCM8481)\n",
8239 bp->port.supported |= (SUPPORTED_10baseT_Half |
8240 SUPPORTED_10baseT_Full |
8241 SUPPORTED_100baseT_Half |
8242 SUPPORTED_100baseT_Full |
8243 SUPPORTED_1000baseT_Full |
8244 SUPPORTED_10000baseT_Full |
8248 SUPPORTED_Asym_Pause);
8251 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
8252 BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
8253 bp->link_params.ext_phy_config);
8257 BNX2X_ERR("NVRAM config error. "
8258 "BAD XGXS ext_phy_config 0x%x\n",
8259 bp->link_params.ext_phy_config);
8263 bp->port.phy_addr = REG_RD(bp, NIG_REG_XGXS0_CTRL_PHY_ADDR +
8265 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
8270 BNX2X_ERR("BAD switch_cfg link_config 0x%x\n",
8271 bp->port.link_config);
8274 bp->link_params.phy_addr = bp->port.phy_addr;
8276 /* mask what we support according to speed_cap_mask */
8277 if (!(bp->link_params.speed_cap_mask &
8278 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF))
8279 bp->port.supported &= ~SUPPORTED_10baseT_Half;
8281 if (!(bp->link_params.speed_cap_mask &
8282 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL))
8283 bp->port.supported &= ~SUPPORTED_10baseT_Full;
8285 if (!(bp->link_params.speed_cap_mask &
8286 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF))
8287 bp->port.supported &= ~SUPPORTED_100baseT_Half;
8289 if (!(bp->link_params.speed_cap_mask &
8290 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL))
8291 bp->port.supported &= ~SUPPORTED_100baseT_Full;
8293 if (!(bp->link_params.speed_cap_mask &
8294 PORT_HW_CFG_SPEED_CAPABILITY_D0_1G))
8295 bp->port.supported &= ~(SUPPORTED_1000baseT_Half |
8296 SUPPORTED_1000baseT_Full);
8298 if (!(bp->link_params.speed_cap_mask &
8299 PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G))
8300 bp->port.supported &= ~SUPPORTED_2500baseX_Full;
8302 if (!(bp->link_params.speed_cap_mask &
8303 PORT_HW_CFG_SPEED_CAPABILITY_D0_10G))
8304 bp->port.supported &= ~SUPPORTED_10000baseT_Full;
8306 BNX2X_DEV_INFO("supported 0x%x\n", bp->port.supported);
8309 static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
8311 bp->link_params.req_duplex = DUPLEX_FULL;
8313 switch (bp->port.link_config & PORT_FEATURE_LINK_SPEED_MASK) {
8314 case PORT_FEATURE_LINK_SPEED_AUTO:
8315 if (bp->port.supported & SUPPORTED_Autoneg) {
8316 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
8317 bp->port.advertising = bp->port.supported;
8320 XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
8322 if ((ext_phy_type ==
8323 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705) ||
8325 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706)) {
8326 /* force 10G, no AN */
8327 bp->link_params.req_line_speed = SPEED_10000;
8328 bp->port.advertising =
8329 (ADVERTISED_10000baseT_Full |
8333 BNX2X_ERR("NVRAM config error. "
8334 "Invalid link_config 0x%x"
8335 " Autoneg not supported\n",
8336 bp->port.link_config);
8341 case PORT_FEATURE_LINK_SPEED_10M_FULL:
8342 if (bp->port.supported & SUPPORTED_10baseT_Full) {
8343 bp->link_params.req_line_speed = SPEED_10;
8344 bp->port.advertising = (ADVERTISED_10baseT_Full |
8347 BNX2X_ERR("NVRAM config error. "
8348 "Invalid link_config 0x%x"
8349 " speed_cap_mask 0x%x\n",
8350 bp->port.link_config,
8351 bp->link_params.speed_cap_mask);
8356 case PORT_FEATURE_LINK_SPEED_10M_HALF:
8357 if (bp->port.supported & SUPPORTED_10baseT_Half) {
8358 bp->link_params.req_line_speed = SPEED_10;
8359 bp->link_params.req_duplex = DUPLEX_HALF;
8360 bp->port.advertising = (ADVERTISED_10baseT_Half |
8363 BNX2X_ERR("NVRAM config error. "
8364 "Invalid link_config 0x%x"
8365 " speed_cap_mask 0x%x\n",
8366 bp->port.link_config,
8367 bp->link_params.speed_cap_mask);
8372 case PORT_FEATURE_LINK_SPEED_100M_FULL:
8373 if (bp->port.supported & SUPPORTED_100baseT_Full) {
8374 bp->link_params.req_line_speed = SPEED_100;
8375 bp->port.advertising = (ADVERTISED_100baseT_Full |
8378 BNX2X_ERR("NVRAM config error. "
8379 "Invalid link_config 0x%x"
8380 " speed_cap_mask 0x%x\n",
8381 bp->port.link_config,
8382 bp->link_params.speed_cap_mask);
8387 case PORT_FEATURE_LINK_SPEED_100M_HALF:
8388 if (bp->port.supported & SUPPORTED_100baseT_Half) {
8389 bp->link_params.req_line_speed = SPEED_100;
8390 bp->link_params.req_duplex = DUPLEX_HALF;
8391 bp->port.advertising = (ADVERTISED_100baseT_Half |
8394 BNX2X_ERR("NVRAM config error. "
8395 "Invalid link_config 0x%x"
8396 " speed_cap_mask 0x%x\n",
8397 bp->port.link_config,
8398 bp->link_params.speed_cap_mask);
8403 case PORT_FEATURE_LINK_SPEED_1G:
8404 if (bp->port.supported & SUPPORTED_1000baseT_Full) {
8405 bp->link_params.req_line_speed = SPEED_1000;
8406 bp->port.advertising = (ADVERTISED_1000baseT_Full |
8409 BNX2X_ERR("NVRAM config error. "
8410 "Invalid link_config 0x%x"
8411 " speed_cap_mask 0x%x\n",
8412 bp->port.link_config,
8413 bp->link_params.speed_cap_mask);
8418 case PORT_FEATURE_LINK_SPEED_2_5G:
8419 if (bp->port.supported & SUPPORTED_2500baseX_Full) {
8420 bp->link_params.req_line_speed = SPEED_2500;
8421 bp->port.advertising = (ADVERTISED_2500baseX_Full |
8424 BNX2X_ERR("NVRAM config error. "
8425 "Invalid link_config 0x%x"
8426 " speed_cap_mask 0x%x\n",
8427 bp->port.link_config,
8428 bp->link_params.speed_cap_mask);
8433 case PORT_FEATURE_LINK_SPEED_10G_CX4:
8434 case PORT_FEATURE_LINK_SPEED_10G_KX4:
8435 case PORT_FEATURE_LINK_SPEED_10G_KR:
8436 if (bp->port.supported & SUPPORTED_10000baseT_Full) {
8437 bp->link_params.req_line_speed = SPEED_10000;
8438 bp->port.advertising = (ADVERTISED_10000baseT_Full |
8441 BNX2X_ERR("NVRAM config error. "
8442 "Invalid link_config 0x%x"
8443 " speed_cap_mask 0x%x\n",
8444 bp->port.link_config,
8445 bp->link_params.speed_cap_mask);
8451 BNX2X_ERR("NVRAM config error. "
8452 "BAD link speed link_config 0x%x\n",
8453 bp->port.link_config);
8454 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
8455 bp->port.advertising = bp->port.supported;
8459 bp->link_params.req_flow_ctrl = (bp->port.link_config &
8460 PORT_FEATURE_FLOW_CONTROL_MASK);
8461 if ((bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO) &&
8462 !(bp->port.supported & SUPPORTED_Autoneg))
8463 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_NONE;
8465 BNX2X_DEV_INFO("req_line_speed %d req_duplex %d req_flow_ctrl 0x%x"
8466 " advertising 0x%x\n",
8467 bp->link_params.req_line_speed,
8468 bp->link_params.req_duplex,
8469 bp->link_params.req_flow_ctrl, bp->port.advertising);
8472 static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp)
8474 int port = BP_PORT(bp);
8480 bp->link_params.bp = bp;
8481 bp->link_params.port = port;
8483 bp->link_params.lane_config =
8484 SHMEM_RD(bp, dev_info.port_hw_config[port].lane_config);
8485 bp->link_params.ext_phy_config =
8487 dev_info.port_hw_config[port].external_phy_config);
8488 /* BCM8727_NOC => BCM8727 no over current */
8489 if (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config) ==
8490 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727_NOC) {
8491 bp->link_params.ext_phy_config &=
8492 ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
8493 bp->link_params.ext_phy_config |=
8494 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727;
8495 bp->link_params.feature_config_flags |=
8496 FEATURE_CONFIG_BCM8727_NOC;
8499 bp->link_params.speed_cap_mask =
8501 dev_info.port_hw_config[port].speed_capability_mask);
8503 bp->port.link_config =
8504 SHMEM_RD(bp, dev_info.port_feature_config[port].link_config);
8506 /* Get the 4 lanes xgxs config rx and tx */
8507 for (i = 0; i < 2; i++) {
8509 dev_info.port_hw_config[port].xgxs_config_rx[i<<1]);
8510 bp->link_params.xgxs_config_rx[i << 1] = ((val>>16) & 0xffff);
8511 bp->link_params.xgxs_config_rx[(i << 1) + 1] = (val & 0xffff);
8514 dev_info.port_hw_config[port].xgxs_config_tx[i<<1]);
8515 bp->link_params.xgxs_config_tx[i << 1] = ((val>>16) & 0xffff);
8516 bp->link_params.xgxs_config_tx[(i << 1) + 1] = (val & 0xffff);
8519 /* If the device is capable of WoL, set the default state according
8522 config = SHMEM_RD(bp, dev_info.port_feature_config[port].config);
8523 bp->wol = (!(bp->flags & NO_WOL_FLAG) &&
8524 (config & PORT_FEATURE_WOL_ENABLED));
8526 BNX2X_DEV_INFO("lane_config 0x%08x ext_phy_config 0x%08x"
8527 " speed_cap_mask 0x%08x link_config 0x%08x\n",
8528 bp->link_params.lane_config,
8529 bp->link_params.ext_phy_config,
8530 bp->link_params.speed_cap_mask, bp->port.link_config);
8532 bp->link_params.switch_cfg |= (bp->port.link_config &
8533 PORT_FEATURE_CONNECTED_SWITCH_MASK);
8534 bnx2x_link_settings_supported(bp, bp->link_params.switch_cfg);
8536 bnx2x_link_settings_requested(bp);
8539 * If connected directly, work with the internal PHY, otherwise, work
8540 * with the external PHY
8542 ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
8543 if (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT)
8544 bp->mdio.prtad = bp->link_params.phy_addr;
8546 else if ((ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE) &&
8547 (ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN))
8549 (bp->link_params.ext_phy_config &
8550 PORT_HW_CFG_XGXS_EXT_PHY_ADDR_MASK) >>
8551 PORT_HW_CFG_XGXS_EXT_PHY_ADDR_SHIFT;
8553 val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper);
8554 val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower);
8555 bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
8556 bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
8557 bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
8558 bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
8559 bp->dev->dev_addr[4] = (u8)(val >> 8 & 0xff);
8560 bp->dev->dev_addr[5] = (u8)(val & 0xff);
8561 memcpy(bp->link_params.mac_addr, bp->dev->dev_addr, ETH_ALEN);
8562 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
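	/* mac_upper carries MAC bytes 0-1 in its low 16 bits and mac_lower
	   carries bytes 2-5, most significant byte first, as unpacked by
	   the shifts above */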
8565 static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
8567 int func = BP_FUNC(bp);
8571 bnx2x_get_common_hwinfo(bp);
8575 if (CHIP_IS_E1H(bp)) {
8577 SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
8579 val = (SHMEM_RD(bp, mf_cfg.func_mf_config[FUNC_0].e1hov_tag) &
8580 FUNC_MF_CFG_E1HOV_TAG_MASK);
8581 if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT)
8583 BNX2X_DEV_INFO("%s function mode\n",
8584 IS_E1HMF(bp) ? "multi" : "single");
8587 val = (SHMEM_RD(bp, mf_cfg.func_mf_config[func].
8589 FUNC_MF_CFG_E1HOV_TAG_MASK);
8590 if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
8591 bp->e1hov = val;
8592 BNX2X_DEV_INFO("E1HOV for func %d is %d "
8593 "(0x%04x)\n",
8594 func, bp->e1hov, bp->e1hov);
8596 BNX2X_ERR("!!! No valid E1HOV for func %d,"
8597 " aborting\n", func);
8602 BNX2X_ERR("!!! VN %d in single function mode,"
8603 " aborting\n", BP_E1HVN(bp));
8609 if (!BP_NOMCP(bp)) {
8610 bnx2x_get_port_hwinfo(bp);
8612 bp->fw_seq = (SHMEM_RD(bp, func_mb[func].drv_mb_header) &
8613 DRV_MSG_SEQ_NUMBER_MASK);
8614 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
8618 val2 = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_upper);
8619 val = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_lower);
8620 if ((val2 != FUNC_MF_CFG_UPPERMAC_DEFAULT) &&
8621 (val != FUNC_MF_CFG_LOWERMAC_DEFAULT)) {
8622 bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
8623 bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
8624 bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
8625 bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
8626 bp->dev->dev_addr[4] = (u8)(val >> 8 & 0xff);
8627 bp->dev->dev_addr[5] = (u8)(val & 0xff);
8628 memcpy(bp->link_params.mac_addr, bp->dev->dev_addr,
8630 memcpy(bp->dev->perm_addr, bp->dev->dev_addr,
8638 /* only supposed to happen on emulation/FPGA */
8639 BNX2X_ERR("warning: random MAC workaround active\n");
8640 random_ether_addr(bp->dev->dev_addr);
8641 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
8647 static int __devinit bnx2x_init_bp(struct bnx2x *bp)
8649 int func = BP_FUNC(bp);
8653 /* Disable interrupt handling until HW is initialized */
8654 atomic_set(&bp->intr_sem, 1);
8655 smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */
8657 mutex_init(&bp->port.phy_mutex);
8659 INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task);
8660 INIT_WORK(&bp->reset_task, bnx2x_reset_task);
8662 rc = bnx2x_get_hwinfo(bp);
8664 /* need to reset chip if undi was active */
8666 bnx2x_undi_unload(bp);
8668 if (CHIP_REV_IS_FPGA(bp))
8669 printk(KERN_ERR PFX "FPGA detected\n");
8671 if (BP_NOMCP(bp) && (func == 0))
8672 printk(KERN_ERR PFX
8673 "MCP disabled, must load devices in order!\n");
8675 /* Set multi queue mode */
8676 if ((multi_mode != ETH_RSS_MODE_DISABLED) &&
8677 ((int_mode == INT_MODE_INTx) || (int_mode == INT_MODE_MSI))) {
8678 printk(KERN_ERR PFX
8679 "Multi disabled since int_mode requested is not MSI-X\n");
8680 multi_mode = ETH_RSS_MODE_DISABLED;
8682 bp->multi_mode = multi_mode;
8686 if (disable_tpa) {
8687 bp->flags &= ~TPA_ENABLE_FLAG;
8688 bp->dev->features &= ~NETIF_F_LRO;
8689 } else {
8690 bp->flags |= TPA_ENABLE_FLAG;
8691 bp->dev->features |= NETIF_F_LRO;
8695 bp->dropless_fc = 0;
8697 bp->dropless_fc = dropless_fc;
8701 bp->tx_ring_size = MAX_TX_AVAIL;
8702 bp->rx_ring_size = MAX_RX_AVAIL;
8709 timer_interval = (CHIP_REV_IS_SLOW(bp) ? 5*HZ : HZ);
8710 bp->current_interval = (poll ? poll : timer_interval);
8712 init_timer(&bp->timer);
8713 bp->timer.expires = jiffies + bp->current_interval;
8714 bp->timer.data = (unsigned long) bp;
8715 bp->timer.function = bnx2x_timer;
8721 * ethtool service functions
8724 /* All ethtool functions called with rtnl_lock */
8726 static int bnx2x_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
8728 struct bnx2x *bp = netdev_priv(dev);
8730 cmd->supported = bp->port.supported;
8731 cmd->advertising = bp->port.advertising;
8733 if (netif_carrier_ok(dev)) {
8734 cmd->speed = bp->link_vars.line_speed;
8735 cmd->duplex = bp->link_vars.duplex;
8737 cmd->speed = bp->link_params.req_line_speed;
8738 cmd->duplex = bp->link_params.req_duplex;
8743 vn_max_rate = ((bp->mf_config & FUNC_MF_CFG_MAX_BW_MASK) >>
8744 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
8745 if (vn_max_rate < cmd->speed)
8746 cmd->speed = vn_max_rate;
8749 if (bp->link_params.switch_cfg == SWITCH_CFG_10G) {
8750 u32 ext_phy_type =
8751 XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
8753 switch (ext_phy_type) {
8754 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
8755 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
8756 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
8757 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
8758 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
8759 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
8760 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
8761 cmd->port = PORT_FIBRE;
8764 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
8765 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481:
8766 cmd->port = PORT_TP;
8769 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
8770 BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
8771 bp->link_params.ext_phy_config);
8775 DP(NETIF_MSG_LINK, "BAD XGXS ext_phy_config 0x%x\n",
8776 bp->link_params.ext_phy_config);
8780 cmd->port = PORT_TP;
8782 cmd->phy_address = bp->mdio.prtad;
8783 cmd->transceiver = XCVR_INTERNAL;
8785 if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
8786 cmd->autoneg = AUTONEG_ENABLE;
8788 cmd->autoneg = AUTONEG_DISABLE;
8793 DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
8794 DP_LEVEL " supported 0x%x advertising 0x%x speed %d\n"
8795 DP_LEVEL " duplex %d port %d phy_address %d transceiver %d\n"
8796 DP_LEVEL " autoneg %d maxtxpkt %d maxrxpkt %d\n",
8797 cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
8798 cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
8799 cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);
8804 static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
8806 struct bnx2x *bp = netdev_priv(dev);
8812 DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
8813 DP_LEVEL " supported 0x%x advertising 0x%x speed %d\n"
8814 DP_LEVEL " duplex %d port %d phy_address %d transceiver %d\n"
8815 DP_LEVEL " autoneg %d maxtxpkt %d maxrxpkt %d\n",
8816 cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
8817 cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
8818 cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);
8820 if (cmd->autoneg == AUTONEG_ENABLE) {
8821 if (!(bp->port.supported & SUPPORTED_Autoneg)) {
8822 DP(NETIF_MSG_LINK, "Autoneg not supported\n");
8826 /* advertise the requested speed and duplex if supported */
8827 cmd->advertising &= bp->port.supported;
8829 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
8830 bp->link_params.req_duplex = DUPLEX_FULL;
8831 bp->port.advertising |= (ADVERTISED_Autoneg |
8834 } else { /* forced speed */
8835 /* advertise the requested speed and duplex if supported */
8836 switch (cmd->speed) {
8838 if (cmd->duplex == DUPLEX_FULL) {
8839 if (!(bp->port.supported &
8840 SUPPORTED_10baseT_Full)) {
8842 "10M full not supported\n");
8846 advertising = (ADVERTISED_10baseT_Full |
8849 if (!(bp->port.supported &
8850 SUPPORTED_10baseT_Half)) {
8852 "10M half not supported\n");
8856 advertising = (ADVERTISED_10baseT_Half |
8862 if (cmd->duplex == DUPLEX_FULL) {
8863 if (!(bp->port.supported &
8864 SUPPORTED_100baseT_Full)) {
8866 "100M full not supported\n");
8870 advertising = (ADVERTISED_100baseT_Full |
8873 if (!(bp->port.supported &
8874 SUPPORTED_100baseT_Half)) {
8876 "100M half not supported\n");
8880 advertising = (ADVERTISED_100baseT_Half |
8886 if (cmd->duplex != DUPLEX_FULL) {
8887 DP(NETIF_MSG_LINK, "1G half not supported\n");
8891 if (!(bp->port.supported & SUPPORTED_1000baseT_Full)) {
8892 DP(NETIF_MSG_LINK, "1G full not supported\n");
8896 advertising = (ADVERTISED_1000baseT_Full |
8901 if (cmd->duplex != DUPLEX_FULL) {
8903 "2.5G half not supported\n");
8907 if (!(bp->port.supported & SUPPORTED_2500baseX_Full)) {
8909 "2.5G full not supported\n");
8913 advertising = (ADVERTISED_2500baseX_Full |
8918 if (cmd->duplex != DUPLEX_FULL) {
8919 DP(NETIF_MSG_LINK, "10G half not supported\n");
8923 if (!(bp->port.supported & SUPPORTED_10000baseT_Full)) {
8924 DP(NETIF_MSG_LINK, "10G full not supported\n");
8928 advertising = (ADVERTISED_10000baseT_Full |
8933 DP(NETIF_MSG_LINK, "Unsupported speed\n");
8937 bp->link_params.req_line_speed = cmd->speed;
8938 bp->link_params.req_duplex = cmd->duplex;
8939 bp->port.advertising = advertising;
8942 DP(NETIF_MSG_LINK, "req_line_speed %d\n"
8943 DP_LEVEL " req_duplex %d advertising 0x%x\n",
8944 bp->link_params.req_line_speed, bp->link_params.req_duplex,
8945 bp->port.advertising);
8947 if (netif_running(dev)) {
8948 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
8955 #define IS_E1_ONLINE(info) (((info) & RI_E1_ONLINE) == RI_E1_ONLINE)
8956 #define IS_E1H_ONLINE(info) (((info) & RI_E1H_ONLINE) == RI_E1H_ONLINE)
8958 static int bnx2x_get_regs_len(struct net_device *dev)
8960 struct bnx2x *bp = netdev_priv(dev);
8961 int regdump_len = 0;
8964 if (CHIP_IS_E1(bp)) {
8965 for (i = 0; i < REGS_COUNT; i++)
8966 if (IS_E1_ONLINE(reg_addrs[i].info))
8967 regdump_len += reg_addrs[i].size;
8969 for (i = 0; i < WREGS_COUNT_E1; i++)
8970 if (IS_E1_ONLINE(wreg_addrs_e1[i].info))
8971 regdump_len += wreg_addrs_e1[i].size *
8972 (1 + wreg_addrs_e1[i].read_regs_count);
8975 for (i = 0; i < REGS_COUNT; i++)
8976 if (IS_E1H_ONLINE(reg_addrs[i].info))
8977 regdump_len += reg_addrs[i].size;
8979 for (i = 0; i < WREGS_COUNT_E1H; i++)
8980 if (IS_E1H_ONLINE(wreg_addrs_e1h[i].info))
8981 regdump_len += wreg_addrs_e1h[i].size *
8982 (1 + wreg_addrs_e1h[i].read_regs_count);
8985 regdump_len += sizeof(struct dump_hdr);
8990 static void bnx2x_get_regs(struct net_device *dev,
8991 struct ethtool_regs *regs, void *_p)
8994 struct bnx2x *bp = netdev_priv(dev);
8995 struct dump_hdr dump_hdr = {0};
8998 memset(p, 0, regs->len);
9000 if (!netif_running(bp->dev))
9003 dump_hdr.hdr_size = (sizeof(struct dump_hdr) / 4) - 1;
9004 dump_hdr.dump_sign = dump_sign_all;
9005 dump_hdr.xstorm_waitp = REG_RD(bp, XSTORM_WAITP_ADDR);
9006 dump_hdr.tstorm_waitp = REG_RD(bp, TSTORM_WAITP_ADDR);
9007 dump_hdr.ustorm_waitp = REG_RD(bp, USTORM_WAITP_ADDR);
9008 dump_hdr.cstorm_waitp = REG_RD(bp, CSTORM_WAITP_ADDR);
9009 dump_hdr.info = CHIP_IS_E1(bp) ? RI_E1_ONLINE : RI_E1H_ONLINE;
9011 memcpy(p, &dump_hdr, sizeof(struct dump_hdr));
9012 p += dump_hdr.hdr_size + 1;
9014 if (CHIP_IS_E1(bp)) {
9015 for (i = 0; i < REGS_COUNT; i++)
9016 if (IS_E1_ONLINE(reg_addrs[i].info))
9017 for (j = 0; j < reg_addrs[i].size; j++)
9018 *p++ = REG_RD(bp,
9019 reg_addrs[i].addr + j*4);
9022 for (i = 0; i < REGS_COUNT; i++)
9023 if (IS_E1H_ONLINE(reg_addrs[i].info))
9024 for (j = 0; j < reg_addrs[i].size; j++)
9025 *p++ = REG_RD(bp,
9026 reg_addrs[i].addr + j*4);
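/*
 * Editor's note (illustrative sketch, not part of the original driver):
 * hdr_size is stored in 32-bit words minus one, and the dump cursor 'p'
 * above is apparently advanced in u32 units, so "p += hdr_size + 1"
 * steps over exactly sizeof(struct dump_hdr) bytes before the raw
 * register words are appended:
 */
static inline u32 *bnx2x_demo_skip_dump_hdr(u32 *p,
					    const struct dump_hdr *hdr)
{
	/* equivalent to (u32 *)((u8 *)p + sizeof(*hdr)) */
	return p + hdr->hdr_size + 1;
}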
9030 #define PHY_FW_VER_LEN 10
9032 static void bnx2x_get_drvinfo(struct net_device *dev,
9033 struct ethtool_drvinfo *info)
9035 struct bnx2x *bp = netdev_priv(dev);
9036 u8 phy_fw_ver[PHY_FW_VER_LEN];
9038 strcpy(info->driver, DRV_MODULE_NAME);
9039 strcpy(info->version, DRV_MODULE_VERSION);
9041 phy_fw_ver[0] = '\0';
9043 bnx2x_acquire_phy_lock(bp);
9044 bnx2x_get_ext_phy_fw_version(&bp->link_params,
9045 (bp->state != BNX2X_STATE_CLOSED),
9046 phy_fw_ver, PHY_FW_VER_LEN);
9047 bnx2x_release_phy_lock(bp);
9050 snprintf(info->fw_version, 32, "BC:%d.%d.%d%s%s",
9051 (bp->common.bc_ver & 0xff0000) >> 16,
9052 (bp->common.bc_ver & 0xff00) >> 8,
9053 (bp->common.bc_ver & 0xff),
9054 ((phy_fw_ver[0] != '\0') ? " PHY:" : ""), phy_fw_ver);
9055 strcpy(info->bus_info, pci_name(bp->pdev));
9056 info->n_stats = BNX2X_NUM_STATS;
9057 info->testinfo_len = BNX2X_NUM_TESTS;
9058 info->eedump_len = bp->common.flash_size;
9059 info->regdump_len = bnx2x_get_regs_len(dev);
9062 static void bnx2x_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
9064 struct bnx2x *bp = netdev_priv(dev);
9066 if (bp->flags & NO_WOL_FLAG) {
9070 wol->supported = WAKE_MAGIC;
9072 wol->wolopts = WAKE_MAGIC;
9076 memset(&wol->sopass, 0, sizeof(wol->sopass));
9079 static int bnx2x_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
9081 struct bnx2x *bp = netdev_priv(dev);
9083 if (wol->wolopts & ~WAKE_MAGIC)
9086 if (wol->wolopts & WAKE_MAGIC) {
9087 if (bp->flags & NO_WOL_FLAG)
9097 static u32 bnx2x_get_msglevel(struct net_device *dev)
9099 struct bnx2x *bp = netdev_priv(dev);
9101 return bp->msglevel;
9104 static void bnx2x_set_msglevel(struct net_device *dev, u32 level)
9106 struct bnx2x *bp = netdev_priv(dev);
9108 if (capable(CAP_NET_ADMIN))
9109 bp->msglevel = level;
9112 static int bnx2x_nway_reset(struct net_device *dev)
9114 struct bnx2x *bp = netdev_priv(dev);
9119 if (netif_running(dev)) {
9120 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
9128 bnx2x_get_link(struct net_device *dev)
9130 struct bnx2x *bp = netdev_priv(dev);
9132 return bp->link_vars.link_up;
9135 static int bnx2x_get_eeprom_len(struct net_device *dev)
9137 struct bnx2x *bp = netdev_priv(dev);
9139 return bp->common.flash_size;
9142 static int bnx2x_acquire_nvram_lock(struct bnx2x *bp)
9144 int port = BP_PORT(bp);
9148 /* adjust timeout for emulation/FPGA */
9149 count = NVRAM_TIMEOUT_COUNT;
9150 if (CHIP_REV_IS_SLOW(bp))
9153 /* request access to nvram interface */
9154 REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
9155 (MCPR_NVM_SW_ARB_ARB_REQ_SET1 << port));
9157 for (i = 0; i < count*10; i++) {
9158 val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
9159 if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))
9165 if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))) {
9166 DP(BNX2X_MSG_NVM, "cannot get access to nvram interface\n");
9173 static int bnx2x_release_nvram_lock(struct bnx2x *bp)
9175 int port = BP_PORT(bp);
9179 /* adjust timeout for emulation/FPGA */
9180 count = NVRAM_TIMEOUT_COUNT;
9181 if (CHIP_REV_IS_SLOW(bp))
9184 /* relinquish nvram interface */
9185 REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
9186 (MCPR_NVM_SW_ARB_ARB_REQ_CLR1 << port));
9188 for (i = 0; i < count*10; i++) {
9189 val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
9190 if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)))
9196 if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)) {
9197 DP(BNX2X_MSG_NVM, "cannot free access to nvram interface\n");
9204 static void bnx2x_enable_nvram_access(struct bnx2x *bp)
9208 val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
9210 /* enable both bits, even on read */
9211 REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
9212 (val | MCPR_NVM_ACCESS_ENABLE_EN |
9213 MCPR_NVM_ACCESS_ENABLE_WR_EN));
9216 static void bnx2x_disable_nvram_access(struct bnx2x *bp)
9220 val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
9222 /* disable both bits, even after read */
9223 REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
9224 (val & ~(MCPR_NVM_ACCESS_ENABLE_EN |
9225 MCPR_NVM_ACCESS_ENABLE_WR_EN)));
9228 static int bnx2x_nvram_read_dword(struct bnx2x *bp, u32 offset, __be32 *ret_val,
9234 /* build the command word */
9235 cmd_flags |= MCPR_NVM_COMMAND_DOIT;
9237 /* need to clear DONE bit separately */
9238 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);
9240 /* address of the NVRAM to read from */
9241 REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
9242 (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));
9244 /* issue a read command */
9245 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);
9247 /* adjust timeout for emulation/FPGA */
9248 count = NVRAM_TIMEOUT_COUNT;
9249 if (CHIP_REV_IS_SLOW(bp))
9252 /* wait for completion */
9255 for (i = 0; i < count; i++) {
9257 val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
9259 if (val & MCPR_NVM_COMMAND_DONE) {
9260 val = REG_RD(bp, MCP_REG_MCPR_NVM_READ);
9261 /* we read nvram data in cpu order,
9262 * but ethtool sees it as an array of bytes;
9263 * converting to big-endian does the work */
9264 *ret_val = cpu_to_be32(val);
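/*
 * Editor's note (illustrative sketch, not part of the original driver):
 * if the flash bytes b0 b1 b2 b3 come back from MCPR_NVM_READ as the
 * CPU-order value (b0 << 24) | (b1 << 16) | (b2 << 8) | b3, then a
 * big-endian store puts b0 first in the ethtool byte buffer again:
 */
static inline void bnx2x_demo_nvram_store(u32 cpu_val, u8 *buf)
{
	__be32 be_val = cpu_to_be32(cpu_val);

	memcpy(buf, &be_val, sizeof(be_val));	/* buf[0] == b0 */
}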
9273 static int bnx2x_nvram_read(struct bnx2x *bp, u32 offset, u8 *ret_buf,
9280 if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
9282 "Invalid parameter: offset 0x%x buf_size 0x%x\n",
9287 if (offset + buf_size > bp->common.flash_size) {
9288 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
9289 " buf_size (0x%x) > flash_size (0x%x)\n",
9290 offset, buf_size, bp->common.flash_size);
9294 /* request access to nvram interface */
9295 rc = bnx2x_acquire_nvram_lock(bp);
9299 /* enable access to nvram interface */
9300 bnx2x_enable_nvram_access(bp);
9302 /* read the first word(s) */
9303 cmd_flags = MCPR_NVM_COMMAND_FIRST;
9304 while ((buf_size > sizeof(u32)) && (rc == 0)) {
9305 rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
9306 memcpy(ret_buf, &val, 4);
9308 /* advance to the next dword */
9309 offset += sizeof(u32);
9310 ret_buf += sizeof(u32);
9311 buf_size -= sizeof(u32);
9316 cmd_flags |= MCPR_NVM_COMMAND_LAST;
9317 rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
9318 memcpy(ret_buf, &val, 4);
9321 /* disable access to nvram interface */
9322 bnx2x_disable_nvram_access(bp);
9323 bnx2x_release_nvram_lock(bp);
9328 static int bnx2x_get_eeprom(struct net_device *dev,
9329 struct ethtool_eeprom *eeprom, u8 *eebuf)
9331 struct bnx2x *bp = netdev_priv(dev);
9334 if (!netif_running(dev))
9337 DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
9338 DP_LEVEL " magic 0x%x offset 0x%x (%d) len 0x%x (%d)\n",
9339 eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
9340 eeprom->len, eeprom->len);
9342 /* parameters already validated in ethtool_get_eeprom */
9344 rc = bnx2x_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
9349 static int bnx2x_nvram_write_dword(struct bnx2x *bp, u32 offset, u32 val,
9354 /* build the command word */
9355 cmd_flags |= MCPR_NVM_COMMAND_DOIT | MCPR_NVM_COMMAND_WR;
9357 /* need to clear DONE bit separately */
9358 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);
9360 /* write the data */
9361 REG_WR(bp, MCP_REG_MCPR_NVM_WRITE, val);
9363 /* address of the NVRAM to write to */
9364 REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
9365 (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));
9367 /* issue the write command */
9368 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);
9370 /* adjust timeout for emulation/FPGA */
9371 count = NVRAM_TIMEOUT_COUNT;
9372 if (CHIP_REV_IS_SLOW(bp))
9375 /* wait for completion */
9377 for (i = 0; i < count; i++) {
9379 val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
9380 if (val & MCPR_NVM_COMMAND_DONE) {
9389 #define BYTE_OFFSET(offset) (8 * (offset & 0x03))
9391 static int bnx2x_nvram_write1(struct bnx2x *bp, u32 offset, u8 *data_buf,
9399 if (offset + buf_size > bp->common.flash_size) {
9400 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
9401 " buf_size (0x%x) > flash_size (0x%x)\n",
9402 offset, buf_size, bp->common.flash_size);
9406 /* request access to nvram interface */
9407 rc = bnx2x_acquire_nvram_lock(bp);
9411 /* enable access to nvram interface */
9412 bnx2x_enable_nvram_access(bp);
9414 cmd_flags = (MCPR_NVM_COMMAND_FIRST | MCPR_NVM_COMMAND_LAST);
9415 align_offset = (offset & ~0x03);
9416 rc = bnx2x_nvram_read_dword(bp, align_offset, &val, cmd_flags);
9419 val &= ~(0xff << BYTE_OFFSET(offset));
9420 val |= (*data_buf << BYTE_OFFSET(offset));
9422 /* nvram data is returned as an array of bytes
9423 * convert it back to cpu order */
9424 val = be32_to_cpu(val);
9426 rc = bnx2x_nvram_write_dword(bp, align_offset, val,
9430 /* disable access to nvram interface */
9431 bnx2x_disable_nvram_access(bp);
9432 bnx2x_release_nvram_lock(bp);
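/*
 * Editor's note (illustrative sketch, not part of the original driver):
 * the single-byte path above is a read-modify-write of the aligned
 * dword.  For offset 0x102 the aligned offset is 0x100 and
 * BYTE_OFFSET() yields 16, so only one byte lane is replaced
 * (byte-order handling of the surrounding code omitted here):
 */
static inline u32 bnx2x_demo_merge_byte(u32 dword, u32 offset, u8 byte)
{
	u32 shift = 8 * (offset & 0x03);	/* same as BYTE_OFFSET() */

	dword &= ~(0xffU << shift);
	dword |= ((u32)byte << shift);
	return dword;
}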
9437 static int bnx2x_nvram_write(struct bnx2x *bp, u32 offset, u8 *data_buf,
9445 if (buf_size == 1) /* ethtool */
9446 return bnx2x_nvram_write1(bp, offset, data_buf, buf_size);
9448 if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
9450 "Invalid parameter: offset 0x%x buf_size 0x%x\n",
9455 if (offset + buf_size > bp->common.flash_size) {
9456 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
9457 " buf_size (0x%x) > flash_size (0x%x)\n",
9458 offset, buf_size, bp->common.flash_size);
9462 /* request access to nvram interface */
9463 rc = bnx2x_acquire_nvram_lock(bp);
9467 /* enable access to nvram interface */
9468 bnx2x_enable_nvram_access(bp);
9471 cmd_flags = MCPR_NVM_COMMAND_FIRST;
9472 while ((written_so_far < buf_size) && (rc == 0)) {
9473 if (written_so_far == (buf_size - sizeof(u32)))
9474 cmd_flags |= MCPR_NVM_COMMAND_LAST;
9475 else if (((offset + 4) % NVRAM_PAGE_SIZE) == 0)
9476 cmd_flags |= MCPR_NVM_COMMAND_LAST;
9477 else if ((offset % NVRAM_PAGE_SIZE) == 0)
9478 cmd_flags |= MCPR_NVM_COMMAND_FIRST;
9480 memcpy(&val, data_buf, 4);
9482 rc = bnx2x_nvram_write_dword(bp, offset, val, cmd_flags);
9484 /* advance to the next dword */
9485 offset += sizeof(u32);
9486 data_buf += sizeof(u32);
9487 written_so_far += sizeof(u32);
9491 /* disable access to nvram interface */
9492 bnx2x_disable_nvram_access(bp);
9493 bnx2x_release_nvram_lock(bp);
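/*
 * Editor's note (illustrative sketch, not part of the original driver):
 * the FIRST/LAST command flags in the loop above frame each NVRAM page.
 * Assuming a page size of, e.g., 256 bytes, a dword gets LAST on the
 * final dword of the buffer or of a page, and FIRST again at the next
 * page start; in stand-alone form (1 = "first", 2 = "last"):
 */
static inline u32 bnx2x_demo_nvram_cmd_flags(u32 offset, u32 written,
					     u32 buf_size, u32 page_size)
{
	u32 flags = 0;

	if (written == 0 || (offset % page_size) == 0)
		flags |= 1;
	if (written == buf_size - 4 || ((offset + 4) % page_size) == 0)
		flags |= 2;
	return flags;
}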
9498 static int bnx2x_set_eeprom(struct net_device *dev,
9499 struct ethtool_eeprom *eeprom, u8 *eebuf)
9501 struct bnx2x *bp = netdev_priv(dev);
9502 int port = BP_PORT(bp);
9505 if (!netif_running(dev))
9508 DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
9509 DP_LEVEL " magic 0x%x offset 0x%x (%d) len 0x%x (%d)\n",
9510 eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
9511 eeprom->len, eeprom->len);
9513 /* parameters already validated in ethtool_set_eeprom */
9515 /* PHY eeprom can be accessed only by the PMF */
9516 if ((eeprom->magic >= 0x50485900) && (eeprom->magic <= 0x504859FF) &&
9520 if (eeprom->magic == 0x50485950) {
9521 /* 'PHYP' (0x50485950): prepare phy for FW upgrade */
9522 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
9524 bnx2x_acquire_phy_lock(bp);
9525 rc |= bnx2x_link_reset(&bp->link_params,
9527 if (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config) ==
9528 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101)
9529 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_0,
9530 MISC_REGISTERS_GPIO_HIGH, port);
9531 bnx2x_release_phy_lock(bp);
9532 bnx2x_link_report(bp);
9534 } else if (eeprom->magic == 0x50485952) {
9535 /* 'PHYR' (0x50485952): re-init link after FW upgrade */
9536 if ((bp->state == BNX2X_STATE_OPEN) ||
9537 (bp->state == BNX2X_STATE_DISABLED)) {
9538 bnx2x_acquire_phy_lock(bp);
9539 rc |= bnx2x_link_reset(&bp->link_params,
9542 rc |= bnx2x_phy_init(&bp->link_params,
9544 bnx2x_release_phy_lock(bp);
9545 bnx2x_calc_fc_adv(bp);
9547 } else if (eeprom->magic == 0x53985943) {
9548 /* 'PHYC' (0x53985943): PHY FW upgrade completed */
9549 if (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config) ==
9550 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101) {
9551 u8 ext_phy_addr =
9552 (bp->link_params.ext_phy_config &
9553 PORT_HW_CFG_XGXS_EXT_PHY_ADDR_MASK) >>
9554 PORT_HW_CFG_XGXS_EXT_PHY_ADDR_SHIFT;
9556 /* DSP Remove Download Mode */
9557 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_0,
9558 MISC_REGISTERS_GPIO_LOW, port);
9560 bnx2x_acquire_phy_lock(bp);
9562 bnx2x_sfx7101_sp_sw_reset(bp, port, ext_phy_addr);
9564 /* wait 0.5 sec to allow it to run */
9565 msleep(500);
9566 bnx2x_ext_phy_hw_reset(bp, port);
9568 bnx2x_release_phy_lock(bp);
9571 rc = bnx2x_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
9576 static int bnx2x_get_coalesce(struct net_device *dev,
9577 struct ethtool_coalesce *coal)
9579 struct bnx2x *bp = netdev_priv(dev);
9581 memset(coal, 0, sizeof(struct ethtool_coalesce));
9583 coal->rx_coalesce_usecs = bp->rx_ticks;
9584 coal->tx_coalesce_usecs = bp->tx_ticks;
9589 #define BNX2X_MAX_COALES_TOUT (0xf0*12) /* Maximal coalescing timeout in us */
9590 static int bnx2x_set_coalesce(struct net_device *dev,
9591 struct ethtool_coalesce *coal)
9593 struct bnx2x *bp = netdev_priv(dev);
9595 bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
9596 if (bp->rx_ticks > BNX2X_MAX_COALES_TOUT)
9597 bp->rx_ticks = BNX2X_MAX_COALES_TOUT;
9599 bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
9600 if (bp->tx_ticks > BNX2X_MAX_COALES_TOUT)
9601 bp->tx_ticks = BNX2X_MAX_COALES_TOUT;
9603 if (netif_running(dev))
9604 bnx2x_update_coalesce(bp);
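/*
 * Editor's note (illustrative sketch, not part of the original driver):
 * 0xf0 * 12 = 2880, which suggests an 8-bit hardware timeout field
 * counted in 12-usec ticks with 0xf0 as the largest programmed value.
 * Under that assumption, converting a clamped usec value to ticks is:
 */
static inline u8 bnx2x_demo_usec_to_hc_ticks(u16 usec)
{
	if (usec > 0xf0 * 12)		/* BNX2X_MAX_COALES_TOUT */
		usec = 0xf0 * 12;
	return (u8)(usec / 12);		/* 2880us -> 0xf0 ticks */
}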
9609 static void bnx2x_get_ringparam(struct net_device *dev,
9610 struct ethtool_ringparam *ering)
9612 struct bnx2x *bp = netdev_priv(dev);
9614 ering->rx_max_pending = MAX_RX_AVAIL;
9615 ering->rx_mini_max_pending = 0;
9616 ering->rx_jumbo_max_pending = 0;
9618 ering->rx_pending = bp->rx_ring_size;
9619 ering->rx_mini_pending = 0;
9620 ering->rx_jumbo_pending = 0;
9622 ering->tx_max_pending = MAX_TX_AVAIL;
9623 ering->tx_pending = bp->tx_ring_size;
9626 static int bnx2x_set_ringparam(struct net_device *dev,
9627 struct ethtool_ringparam *ering)
9629 struct bnx2x *bp = netdev_priv(dev);
9632 if ((ering->rx_pending > MAX_RX_AVAIL) ||
9633 (ering->tx_pending > MAX_TX_AVAIL) ||
9634 (ering->tx_pending <= MAX_SKB_FRAGS + 4))
9637 bp->rx_ring_size = ering->rx_pending;
9638 bp->tx_ring_size = ering->tx_pending;
9640 if (netif_running(dev)) {
9641 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
9642 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
9648 static void bnx2x_get_pauseparam(struct net_device *dev,
9649 struct ethtool_pauseparam *epause)
9651 struct bnx2x *bp = netdev_priv(dev);
9653 epause->autoneg = (bp->link_params.req_flow_ctrl ==
9654 BNX2X_FLOW_CTRL_AUTO) &&
9655 (bp->link_params.req_line_speed == SPEED_AUTO_NEG);
9657 epause->rx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) ==
9658 BNX2X_FLOW_CTRL_RX);
9659 epause->tx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX) ==
9660 BNX2X_FLOW_CTRL_TX);
9662 DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
9663 DP_LEVEL " autoneg %d rx_pause %d tx_pause %d\n",
9664 epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
9667 static int bnx2x_set_pauseparam(struct net_device *dev,
9668 struct ethtool_pauseparam *epause)
9670 struct bnx2x *bp = netdev_priv(dev);
9675 DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
9676 DP_LEVEL " autoneg %d rx_pause %d tx_pause %d\n",
9677 epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
9679 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO;
9681 if (epause->rx_pause)
9682 bp->link_params.req_flow_ctrl |= BNX2X_FLOW_CTRL_RX;
9684 if (epause->tx_pause)
9685 bp->link_params.req_flow_ctrl |= BNX2X_FLOW_CTRL_TX;
9687 if (bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO)
9688 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_NONE;
9690 if (epause->autoneg) {
9691 if (!(bp->port.supported & SUPPORTED_Autoneg)) {
9692 DP(NETIF_MSG_LINK, "autoneg not supported\n");
9696 if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
9697 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO;
9701 "req_flow_ctrl 0x%x\n", bp->link_params.req_flow_ctrl);
9703 if (netif_running(dev)) {
9704 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
9711 static int bnx2x_set_flags(struct net_device *dev, u32 data)
9713 struct bnx2x *bp = netdev_priv(dev);
9717 /* TPA requires Rx CSUM offloading */
9718 if ((data & ETH_FLAG_LRO) && bp->rx_csum) {
9719 if (!(dev->features & NETIF_F_LRO)) {
9720 dev->features |= NETIF_F_LRO;
9721 bp->flags |= TPA_ENABLE_FLAG;
9725 } else if (dev->features & NETIF_F_LRO) {
9726 dev->features &= ~NETIF_F_LRO;
9727 bp->flags &= ~TPA_ENABLE_FLAG;
9731 if (changed && netif_running(dev)) {
9732 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
9733 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
9739 static u32 bnx2x_get_rx_csum(struct net_device *dev)
9741 struct bnx2x *bp = netdev_priv(dev);
9746 static int bnx2x_set_rx_csum(struct net_device *dev, u32 data)
9748 struct bnx2x *bp = netdev_priv(dev);
9753 /* Disable TPA when Rx CSUM is disabled; otherwise all
9754 TPA'ed packets will be discarded due to a wrong TCP CSUM */
9756 u32 flags = ethtool_op_get_flags(dev);
9758 rc = bnx2x_set_flags(dev, (flags & ~ETH_FLAG_LRO));
9764 static int bnx2x_set_tso(struct net_device *dev, u32 data)
9767 dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
9768 dev->features |= NETIF_F_TSO6;
9770 dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO_ECN);
9771 dev->features &= ~NETIF_F_TSO6;
9777 static const struct {
9778 char string[ETH_GSTRING_LEN];
9779 } bnx2x_tests_str_arr[BNX2X_NUM_TESTS] = {
9780 { "register_test (offline)" },
9781 { "memory_test (offline)" },
9782 { "loopback_test (offline)" },
9783 { "nvram_test (online)" },
9784 { "interrupt_test (online)" },
9785 { "link_test (online)" },
9786 { "idle check (online)" }
9789 static int bnx2x_self_test_count(struct net_device *dev)
9791 return BNX2X_NUM_TESTS;
9794 static int bnx2x_test_registers(struct bnx2x *bp)
9796 int idx, i, rc = -ENODEV;
9798 int port = BP_PORT(bp);
9799 static const struct {
9804 /* 0 */ { BRB1_REG_PAUSE_LOW_THRESHOLD_0, 4, 0x000003ff },
9805 { DORQ_REG_DB_ADDR0, 4, 0xffffffff },
9806 { HC_REG_AGG_INT_0, 4, 0x000003ff },
9807 { PBF_REG_MAC_IF0_ENABLE, 4, 0x00000001 },
9808 { PBF_REG_P0_INIT_CRD, 4, 0x000007ff },
9809 { PRS_REG_CID_PORT_0, 4, 0x00ffffff },
9810 { PXP2_REG_PSWRQ_CDU0_L2P, 4, 0x000fffff },
9811 { PXP2_REG_RQ_CDU0_EFIRST_MEM_ADDR, 8, 0x0003ffff },
9812 { PXP2_REG_PSWRQ_TM0_L2P, 4, 0x000fffff },
9813 { PXP2_REG_RQ_USDM0_EFIRST_MEM_ADDR, 8, 0x0003ffff },
9814 /* 10 */ { PXP2_REG_PSWRQ_TSDM0_L2P, 4, 0x000fffff },
9815 { QM_REG_CONNNUM_0, 4, 0x000fffff },
9816 { TM_REG_LIN0_MAX_ACTIVE_CID, 4, 0x0003ffff },
9817 { SRC_REG_KEYRSS0_0, 40, 0xffffffff },
9818 { SRC_REG_KEYRSS0_7, 40, 0xffffffff },
9819 { XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD00, 4, 0x00000001 },
9820 { XCM_REG_WU_DA_CNT_CMD00, 4, 0x00000003 },
9821 { XCM_REG_GLB_DEL_ACK_MAX_CNT_0, 4, 0x000000ff },
9822 { NIG_REG_LLH0_T_BIT, 4, 0x00000001 },
9823 { NIG_REG_EMAC0_IN_EN, 4, 0x00000001 },
9824 /* 20 */ { NIG_REG_BMAC0_IN_EN, 4, 0x00000001 },
9825 { NIG_REG_XCM0_OUT_EN, 4, 0x00000001 },
9826 { NIG_REG_BRB0_OUT_EN, 4, 0x00000001 },
9827 { NIG_REG_LLH0_XCM_MASK, 4, 0x00000007 },
9828 { NIG_REG_LLH0_ACPI_PAT_6_LEN, 68, 0x000000ff },
9829 { NIG_REG_LLH0_ACPI_PAT_0_CRC, 68, 0xffffffff },
9830 { NIG_REG_LLH0_DEST_MAC_0_0, 160, 0xffffffff },
9831 { NIG_REG_LLH0_DEST_IP_0_1, 160, 0xffffffff },
9832 { NIG_REG_LLH0_IPV4_IPV6_0, 160, 0x00000001 },
9833 { NIG_REG_LLH0_DEST_UDP_0, 160, 0x0000ffff },
9834 /* 30 */ { NIG_REG_LLH0_DEST_TCP_0, 160, 0x0000ffff },
9835 { NIG_REG_LLH0_VLAN_ID_0, 160, 0x00000fff },
9836 { NIG_REG_XGXS_SERDES0_MODE_SEL, 4, 0x00000001 },
9837 { NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0, 4, 0x00000001 },
9838 { NIG_REG_STATUS_INTERRUPT_PORT0, 4, 0x07ffffff },
9839 { NIG_REG_XGXS0_CTRL_EXTREMOTEMDIOST, 24, 0x00000001 },
9840 { NIG_REG_SERDES0_CTRL_PHY_ADDR, 16, 0x0000001f },
9842 { 0xffffffff, 0, 0x00000000 }
9845 if (!netif_running(bp->dev))
9848 /* Run the test twice:
9849 first by writing 0x00000000, then by writing 0xffffffff */
9850 for (idx = 0; idx < 2; idx++) {
9857 wr_val = 0xffffffff;
9861 for (i = 0; reg_tbl[i].offset0 != 0xffffffff; i++) {
9862 u32 offset, mask, save_val, val;
9864 offset = reg_tbl[i].offset0 + port*reg_tbl[i].offset1;
9865 mask = reg_tbl[i].mask;
9867 save_val = REG_RD(bp, offset);
9869 REG_WR(bp, offset, wr_val);
9870 val = REG_RD(bp, offset);
9872 /* Restore the original register's value */
9873 REG_WR(bp, offset, save_val);
9875 /* verify that the value reads back as expected */
9876 if ((val & mask) != (wr_val & mask))
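/*
 * Editor's note (illustrative sketch, not part of the original driver):
 * each register is probed with 0x00000000 and then 0xffffffff, and only
 * the writable bits named by the mask must read back.  With mask
 * 0x000003ff, writing 0xffffffff passes iff (val & 0x3ff) == 0x3ff:
 */
static inline int bnx2x_demo_reg_test_ok(u32 readback, u32 wr_val, u32 mask)
{
	return (readback & mask) == (wr_val & mask);
}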
9887 static int bnx2x_test_memory(struct bnx2x *bp)
9889 int i, j, rc = -ENODEV;
9891 static const struct {
9895 { CCM_REG_XX_DESCR_TABLE, CCM_REG_XX_DESCR_TABLE_SIZE },
9896 { CFC_REG_ACTIVITY_COUNTER, CFC_REG_ACTIVITY_COUNTER_SIZE },
9897 { CFC_REG_LINK_LIST, CFC_REG_LINK_LIST_SIZE },
9898 { DMAE_REG_CMD_MEM, DMAE_REG_CMD_MEM_SIZE },
9899 { TCM_REG_XX_DESCR_TABLE, TCM_REG_XX_DESCR_TABLE_SIZE },
9900 { UCM_REG_XX_DESCR_TABLE, UCM_REG_XX_DESCR_TABLE_SIZE },
9901 { XCM_REG_XX_DESCR_TABLE, XCM_REG_XX_DESCR_TABLE_SIZE },
9905 static const struct {
9911 { "CCM_PRTY_STS", CCM_REG_CCM_PRTY_STS, 0x3ffc0, 0 },
9912 { "CFC_PRTY_STS", CFC_REG_CFC_PRTY_STS, 0x2, 0x2 },
9913 { "DMAE_PRTY_STS", DMAE_REG_DMAE_PRTY_STS, 0, 0 },
9914 { "TCM_PRTY_STS", TCM_REG_TCM_PRTY_STS, 0x3ffc0, 0 },
9915 { "UCM_PRTY_STS", UCM_REG_UCM_PRTY_STS, 0x3ffc0, 0 },
9916 { "XCM_PRTY_STS", XCM_REG_XCM_PRTY_STS, 0x3ffc1, 0 },
9918 { NULL, 0xffffffff, 0, 0 }
9921 if (!netif_running(bp->dev))
9924 /* Go through all the memories */
9925 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++)
9926 for (j = 0; j < mem_tbl[i].size; j++)
9927 REG_RD(bp, mem_tbl[i].offset + j*4);
9929 /* Check the parity status */
9930 for (i = 0; prty_tbl[i].offset != 0xffffffff; i++) {
9931 val = REG_RD(bp, prty_tbl[i].offset);
9932 if ((CHIP_IS_E1(bp) && (val & ~(prty_tbl[i].e1_mask))) ||
9933 (CHIP_IS_E1H(bp) && (val & ~(prty_tbl[i].e1h_mask)))) {
9935 "%s is 0x%x\n", prty_tbl[i].name, val);
9946 static void bnx2x_wait_for_link(struct bnx2x *bp, u8 link_up)
9951 while (bnx2x_link_test(bp) && cnt--)
9955 static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode, u8 link_up)
9957 unsigned int pkt_size, num_pkts, i;
9958 struct sk_buff *skb;
9959 unsigned char *packet;
9960 struct bnx2x_fastpath *fp_rx = &bp->fp[0];
9961 struct bnx2x_fastpath *fp_tx = &bp->fp[bp->num_rx_queues];
9962 u16 tx_start_idx, tx_idx;
9963 u16 rx_start_idx, rx_idx;
9964 u16 pkt_prod, bd_prod;
9965 struct sw_tx_bd *tx_buf;
9966 struct eth_tx_start_bd *tx_start_bd;
9967 struct eth_tx_parse_bd *pbd = NULL;
9969 union eth_rx_cqe *cqe;
9971 struct sw_rx_bd *rx_buf;
9975 /* check the loopback mode */
9976 switch (loopback_mode) {
9977 case BNX2X_PHY_LOOPBACK:
9978 if (bp->link_params.loopback_mode != LOOPBACK_XGXS_10)
9981 case BNX2X_MAC_LOOPBACK:
9982 bp->link_params.loopback_mode = LOOPBACK_BMAC;
9983 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
9989 /* prepare the loopback packet */
9990 pkt_size = (((bp->dev->mtu < ETH_MAX_PACKET_SIZE) ?
9991 bp->dev->mtu : ETH_MAX_PACKET_SIZE) + ETH_HLEN);
9992 skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
9995 goto test_loopback_exit;
9997 packet = skb_put(skb, pkt_size);
9998 memcpy(packet, bp->dev->dev_addr, ETH_ALEN);
9999 memset(packet + ETH_ALEN, 0, ETH_ALEN);
10000 memset(packet + 2*ETH_ALEN, 0x77, (ETH_HLEN - 2*ETH_ALEN));
10001 for (i = ETH_HLEN; i < pkt_size; i++)
10002 packet[i] = (unsigned char) (i & 0xff);
10004 /* send the loopback packet */
10006 tx_start_idx = le16_to_cpu(*fp_tx->tx_cons_sb);
10007 rx_start_idx = le16_to_cpu(*fp_rx->rx_cons_sb);
10009 pkt_prod = fp_tx->tx_pkt_prod++;
10010 tx_buf = &fp_tx->tx_buf_ring[TX_BD(pkt_prod)];
10011 tx_buf->first_bd = fp_tx->tx_bd_prod;
10015 bd_prod = TX_BD(fp_tx->tx_bd_prod);
10016 tx_start_bd = &fp_tx->tx_desc_ring[bd_prod].start_bd;
10017 mapping = pci_map_single(bp->pdev, skb->data,
10018 skb_headlen(skb), PCI_DMA_TODEVICE);
10019 tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
10020 tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
10021 tx_start_bd->nbd = cpu_to_le16(2); /* start + pbd */
10022 tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb));
10023 tx_start_bd->vlan = cpu_to_le16(pkt_prod);
10024 tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
10025 tx_start_bd->general_data = ((UNICAST_ADDRESS <<
10026 ETH_TX_START_BD_ETH_ADDR_TYPE_SHIFT) | 1);
10028 /* turn on parsing and get a BD */
10029 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
10030 pbd = &fp_tx->tx_desc_ring[bd_prod].parse_bd;
10032 memset(pbd, 0, sizeof(struct eth_tx_parse_bd));
10036 fp_tx->tx_db.data.prod += 2;
10038 DOORBELL(bp, fp_tx->index - bp->num_rx_queues, fp_tx->tx_db.raw);
10043 fp_tx->tx_bd_prod += 2; /* start + pbd */
10044 bp->dev->trans_start = jiffies;
10048 tx_idx = le16_to_cpu(*fp_tx->tx_cons_sb);
10049 if (tx_idx != tx_start_idx + num_pkts)
10050 goto test_loopback_exit;
10052 rx_idx = le16_to_cpu(*fp_rx->rx_cons_sb);
10053 if (rx_idx != rx_start_idx + num_pkts)
10054 goto test_loopback_exit;
10056 cqe = &fp_rx->rx_comp_ring[RCQ_BD(fp_rx->rx_comp_cons)];
10057 cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
10058 if (CQE_TYPE(cqe_fp_flags) || (cqe_fp_flags & ETH_RX_ERROR_FALGS))
10059 goto test_loopback_rx_exit;
10061 len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
10062 if (len != pkt_size)
10063 goto test_loopback_rx_exit;
10065 rx_buf = &fp_rx->rx_buf_ring[RX_BD(fp_rx->rx_bd_cons)];
10067 skb_reserve(skb, cqe->fast_path_cqe.placement_offset);
10068 for (i = ETH_HLEN; i < pkt_size; i++)
10069 if (*(skb->data + i) != (unsigned char) (i & 0xff))
10070 goto test_loopback_rx_exit;
10074 test_loopback_rx_exit:
10076 fp_rx->rx_bd_cons = NEXT_RX_IDX(fp_rx->rx_bd_cons);
10077 fp_rx->rx_bd_prod = NEXT_RX_IDX(fp_rx->rx_bd_prod);
10078 fp_rx->rx_comp_cons = NEXT_RCQ_IDX(fp_rx->rx_comp_cons);
10079 fp_rx->rx_comp_prod = NEXT_RCQ_IDX(fp_rx->rx_comp_prod);
10081 /* Update producers */
10082 bnx2x_update_rx_prod(bp, fp_rx, fp_rx->rx_bd_prod, fp_rx->rx_comp_prod,
10083 fp_rx->rx_sge_prod);
10085 test_loopback_exit:
10086 bp->link_params.loopback_mode = LOOPBACK_NONE;
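/*
 * Editor's note (illustrative sketch, not part of the original driver):
 * the loopback frame built above ramps its payload as (i & 0xff), and
 * the RX-side loop re-walks the same ramp.  A stand-alone verifier for
 * that pattern is simply:
 */
static inline int bnx2x_demo_ramp_ok(const u8 *pkt, unsigned int pkt_size)
{
	unsigned int i;

	for (i = ETH_HLEN; i < pkt_size; i++)
		if (pkt[i] != (u8)(i & 0xff))
			return 0;	/* payload corrupted in loopback */
	return 1;
}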
10091 static int bnx2x_test_loopback(struct bnx2x *bp, u8 link_up)
10095 if (!netif_running(bp->dev))
10096 return BNX2X_LOOPBACK_FAILED;
10098 bnx2x_netif_stop(bp, 1);
10099 bnx2x_acquire_phy_lock(bp);
10101 res = bnx2x_run_loopback(bp, BNX2X_PHY_LOOPBACK, link_up);
10103 DP(NETIF_MSG_PROBE, " PHY loopback failed (res %d)\n", res);
10104 rc |= BNX2X_PHY_LOOPBACK_FAILED;
10107 res = bnx2x_run_loopback(bp, BNX2X_MAC_LOOPBACK, link_up);
10109 DP(NETIF_MSG_PROBE, " MAC loopback failed (res %d)\n", res);
10110 rc |= BNX2X_MAC_LOOPBACK_FAILED;
10113 bnx2x_release_phy_lock(bp);
10114 bnx2x_netif_start(bp);
10119 #define CRC32_RESIDUAL 0xdebb20e3
10121 static int bnx2x_test_nvram(struct bnx2x *bp)
10123 static const struct {
10127 { 0, 0x14 }, /* bootstrap */
10128 { 0x14, 0xec }, /* dir */
10129 { 0x100, 0x350 }, /* manuf_info */
10130 { 0x450, 0xf0 }, /* feature_info */
10131 { 0x640, 0x64 }, /* upgrade_key_info */
10133 { 0x708, 0x70 }, /* manuf_key_info */
10137 __be32 buf[0x350 / 4];
10138 u8 *data = (u8 *)buf;
10142 rc = bnx2x_nvram_read(bp, 0, data, 4);
10144 DP(NETIF_MSG_PROBE, "magic value read (rc %d)\n", rc);
10145 goto test_nvram_exit;
10148 magic = be32_to_cpu(buf[0]);
10149 if (magic != 0x669955aa) {
10150 DP(NETIF_MSG_PROBE, "magic value (0x%08x)\n", magic);
10152 goto test_nvram_exit;
10155 for (i = 0; nvram_tbl[i].size; i++) {
10157 rc = bnx2x_nvram_read(bp, nvram_tbl[i].offset, data,
10158 nvram_tbl[i].size);
10160 DP(NETIF_MSG_PROBE,
10161 "nvram_tbl[%d] read data (rc %d)\n", i, rc);
10162 goto test_nvram_exit;
10165 csum = ether_crc_le(nvram_tbl[i].size, data);
10166 if (csum != CRC32_RESIDUAL) {
10167 DP(NETIF_MSG_PROBE,
10168 "nvram_tbl[%d] csum value (0x%08x)\n", i, csum);
10170 goto test_nvram_exit;
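/*
 * Editor's note (illustrative sketch, not part of the original driver):
 * each NVRAM region carries its little-endian CRC32 appended to the
 * data, so running ether_crc_le() across data-plus-CRC yields the fixed
 * residual 0xdebb20e3 for any intact region:
 */
static inline int bnx2x_demo_region_intact(const u8 *data, int len_with_crc)
{
	return ether_crc_le(len_with_crc, data) == CRC32_RESIDUAL;
}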
10178 static int bnx2x_test_intr(struct bnx2x *bp)
10180 struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
10183 if (!netif_running(bp->dev))
10186 config->hdr.length = 0;
10187 if (CHIP_IS_E1(bp))
10188 config->hdr.offset = (BP_PORT(bp) ? 32 : 0);
10190 config->hdr.offset = BP_FUNC(bp);
10191 config->hdr.client_id = bp->fp->cl_id;
10192 config->hdr.reserved1 = 0;
10194 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
10195 U64_HI(bnx2x_sp_mapping(bp, mac_config)),
10196 U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
10198 bp->set_mac_pending++;
10199 for (i = 0; i < 10; i++) {
10200 if (!bp->set_mac_pending)
10202 msleep_interruptible(10);
10211 static void bnx2x_self_test(struct net_device *dev,
10212 struct ethtool_test *etest, u64 *buf)
10214 struct bnx2x *bp = netdev_priv(dev);
10216 memset(buf, 0, sizeof(u64) * BNX2X_NUM_TESTS);
10218 if (!netif_running(dev))
10221 /* offline tests are not supported in MF mode */
10223 etest->flags &= ~ETH_TEST_FL_OFFLINE;
10225 if (etest->flags & ETH_TEST_FL_OFFLINE) {
10226 int port = BP_PORT(bp);
10230 /* save current value of input enable for TX port IF */
10231 val = REG_RD(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4);
10232 /* disable input for TX port IF */
10233 REG_WR(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4, 0);
10235 link_up = bp->link_vars.link_up;
10236 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
10237 bnx2x_nic_load(bp, LOAD_DIAG);
10238 /* wait until link state is restored */
10239 bnx2x_wait_for_link(bp, link_up);
10241 if (bnx2x_test_registers(bp) != 0) {
10243 etest->flags |= ETH_TEST_FL_FAILED;
10245 if (bnx2x_test_memory(bp) != 0) {
10247 etest->flags |= ETH_TEST_FL_FAILED;
10249 buf[2] = bnx2x_test_loopback(bp, link_up);
10251 etest->flags |= ETH_TEST_FL_FAILED;
10253 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
10255 /* restore input for TX port IF */
10256 REG_WR(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4, val);
10258 bnx2x_nic_load(bp, LOAD_NORMAL);
10259 /* wait until link state is restored */
10260 bnx2x_wait_for_link(bp, link_up);
10262 if (bnx2x_test_nvram(bp) != 0) {
10264 etest->flags |= ETH_TEST_FL_FAILED;
10266 if (bnx2x_test_intr(bp) != 0) {
10268 etest->flags |= ETH_TEST_FL_FAILED;
10271 if (bnx2x_link_test(bp) != 0) {
10273 etest->flags |= ETH_TEST_FL_FAILED;
10276 #ifdef BNX2X_EXTRA_DEBUG
10277 bnx2x_panic_dump(bp);
10281 static const struct {
10284 u8 string[ETH_GSTRING_LEN];
10285 } bnx2x_q_stats_arr[BNX2X_NUM_Q_STATS] = {
10286 /* 1 */ { Q_STATS_OFFSET32(total_bytes_received_hi), 8, "[%d]: rx_bytes" },
10287 { Q_STATS_OFFSET32(error_bytes_received_hi),
10288 8, "[%d]: rx_error_bytes" },
10289 { Q_STATS_OFFSET32(total_unicast_packets_received_hi),
10290 8, "[%d]: rx_ucast_packets" },
10291 { Q_STATS_OFFSET32(total_multicast_packets_received_hi),
10292 8, "[%d]: rx_mcast_packets" },
10293 { Q_STATS_OFFSET32(total_broadcast_packets_received_hi),
10294 8, "[%d]: rx_bcast_packets" },
10295 { Q_STATS_OFFSET32(no_buff_discard_hi), 8, "[%d]: rx_discards" },
10296 { Q_STATS_OFFSET32(rx_err_discard_pkt),
10297 4, "[%d]: rx_phy_ip_err_discards"},
10298 { Q_STATS_OFFSET32(rx_skb_alloc_failed),
10299 4, "[%d]: rx_skb_alloc_discard" },
10300 { Q_STATS_OFFSET32(hw_csum_err), 4, "[%d]: rx_csum_offload_errors" },
10302 /* 10 */{ Q_STATS_OFFSET32(total_bytes_transmitted_hi), 8, "[%d]: tx_bytes" },
10303 { Q_STATS_OFFSET32(total_unicast_packets_transmitted_hi),
10304 8, "[%d]: tx_packets" }
10307 static const struct {
10311 #define STATS_FLAGS_PORT 1
10312 #define STATS_FLAGS_FUNC 2
10313 #define STATS_FLAGS_BOTH (STATS_FLAGS_FUNC | STATS_FLAGS_PORT)
10314 u8 string[ETH_GSTRING_LEN];
10315 } bnx2x_stats_arr[BNX2X_NUM_STATS] = {
10316 /* 1 */ { STATS_OFFSET32(total_bytes_received_hi),
10317 8, STATS_FLAGS_BOTH, "rx_bytes" },
10318 { STATS_OFFSET32(error_bytes_received_hi),
10319 8, STATS_FLAGS_BOTH, "rx_error_bytes" },
10320 { STATS_OFFSET32(total_unicast_packets_received_hi),
10321 8, STATS_FLAGS_BOTH, "rx_ucast_packets" },
10322 { STATS_OFFSET32(total_multicast_packets_received_hi),
10323 8, STATS_FLAGS_BOTH, "rx_mcast_packets" },
10324 { STATS_OFFSET32(total_broadcast_packets_received_hi),
10325 8, STATS_FLAGS_BOTH, "rx_bcast_packets" },
10326 { STATS_OFFSET32(rx_stat_dot3statsfcserrors_hi),
10327 8, STATS_FLAGS_PORT, "rx_crc_errors" },
10328 { STATS_OFFSET32(rx_stat_dot3statsalignmenterrors_hi),
10329 8, STATS_FLAGS_PORT, "rx_align_errors" },
10330 { STATS_OFFSET32(rx_stat_etherstatsundersizepkts_hi),
10331 8, STATS_FLAGS_PORT, "rx_undersize_packets" },
10332 { STATS_OFFSET32(etherstatsoverrsizepkts_hi),
10333 8, STATS_FLAGS_PORT, "rx_oversize_packets" },
10334 /* 10 */{ STATS_OFFSET32(rx_stat_etherstatsfragments_hi),
10335 8, STATS_FLAGS_PORT, "rx_fragments" },
10336 { STATS_OFFSET32(rx_stat_etherstatsjabbers_hi),
10337 8, STATS_FLAGS_PORT, "rx_jabbers" },
10338 { STATS_OFFSET32(no_buff_discard_hi),
10339 8, STATS_FLAGS_BOTH, "rx_discards" },
10340 { STATS_OFFSET32(mac_filter_discard),
10341 4, STATS_FLAGS_PORT, "rx_filtered_packets" },
10342 { STATS_OFFSET32(xxoverflow_discard),
10343 4, STATS_FLAGS_PORT, "rx_fw_discards" },
10344 { STATS_OFFSET32(brb_drop_hi),
10345 8, STATS_FLAGS_PORT, "rx_brb_discard" },
10346 { STATS_OFFSET32(brb_truncate_hi),
10347 8, STATS_FLAGS_PORT, "rx_brb_truncate" },
10348 { STATS_OFFSET32(pause_frames_received_hi),
10349 8, STATS_FLAGS_PORT, "rx_pause_frames" },
10350 { STATS_OFFSET32(rx_stat_maccontrolframesreceived_hi),
10351 8, STATS_FLAGS_PORT, "rx_mac_ctrl_frames" },
10352 { STATS_OFFSET32(nig_timer_max),
10353 4, STATS_FLAGS_PORT, "rx_constant_pause_events" },
10354 /* 20 */{ STATS_OFFSET32(rx_err_discard_pkt),
10355 4, STATS_FLAGS_BOTH, "rx_phy_ip_err_discards"},
10356 { STATS_OFFSET32(rx_skb_alloc_failed),
10357 4, STATS_FLAGS_BOTH, "rx_skb_alloc_discard" },
10358 { STATS_OFFSET32(hw_csum_err),
10359 4, STATS_FLAGS_BOTH, "rx_csum_offload_errors" },
10361 { STATS_OFFSET32(total_bytes_transmitted_hi),
10362 8, STATS_FLAGS_BOTH, "tx_bytes" },
10363 { STATS_OFFSET32(tx_stat_ifhcoutbadoctets_hi),
10364 8, STATS_FLAGS_PORT, "tx_error_bytes" },
10365 { STATS_OFFSET32(total_unicast_packets_transmitted_hi),
10366 8, STATS_FLAGS_BOTH, "tx_packets" },
10367 { STATS_OFFSET32(tx_stat_dot3statsinternalmactransmiterrors_hi),
10368 8, STATS_FLAGS_PORT, "tx_mac_errors" },
10369 { STATS_OFFSET32(rx_stat_dot3statscarriersenseerrors_hi),
10370 8, STATS_FLAGS_PORT, "tx_carrier_errors" },
10371 { STATS_OFFSET32(tx_stat_dot3statssinglecollisionframes_hi),
10372 8, STATS_FLAGS_PORT, "tx_single_collisions" },
10373 { STATS_OFFSET32(tx_stat_dot3statsmultiplecollisionframes_hi),
10374 8, STATS_FLAGS_PORT, "tx_multi_collisions" },
10375 /* 30 */{ STATS_OFFSET32(tx_stat_dot3statsdeferredtransmissions_hi),
10376 8, STATS_FLAGS_PORT, "tx_deferred" },
10377 { STATS_OFFSET32(tx_stat_dot3statsexcessivecollisions_hi),
10378 8, STATS_FLAGS_PORT, "tx_excess_collisions" },
10379 { STATS_OFFSET32(tx_stat_dot3statslatecollisions_hi),
10380 8, STATS_FLAGS_PORT, "tx_late_collisions" },
10381 { STATS_OFFSET32(tx_stat_etherstatscollisions_hi),
10382 8, STATS_FLAGS_PORT, "tx_total_collisions" },
10383 { STATS_OFFSET32(tx_stat_etherstatspkts64octets_hi),
10384 8, STATS_FLAGS_PORT, "tx_64_byte_packets" },
10385 { STATS_OFFSET32(tx_stat_etherstatspkts65octetsto127octets_hi),
10386 8, STATS_FLAGS_PORT, "tx_65_to_127_byte_packets" },
10387 { STATS_OFFSET32(tx_stat_etherstatspkts128octetsto255octets_hi),
10388 8, STATS_FLAGS_PORT, "tx_128_to_255_byte_packets" },
10389 { STATS_OFFSET32(tx_stat_etherstatspkts256octetsto511octets_hi),
10390 8, STATS_FLAGS_PORT, "tx_256_to_511_byte_packets" },
10391 { STATS_OFFSET32(tx_stat_etherstatspkts512octetsto1023octets_hi),
10392 8, STATS_FLAGS_PORT, "tx_512_to_1023_byte_packets" },
10393 { STATS_OFFSET32(etherstatspkts1024octetsto1522octets_hi),
10394 8, STATS_FLAGS_PORT, "tx_1024_to_1522_byte_packets" },
10395 /* 40 */{ STATS_OFFSET32(etherstatspktsover1522octets_hi),
10396 8, STATS_FLAGS_PORT, "tx_1523_to_9022_byte_packets" },
10397 { STATS_OFFSET32(pause_frames_sent_hi),
10398 8, STATS_FLAGS_PORT, "tx_pause_frames" }
10401 #define IS_PORT_STAT(i) \
10402 ((bnx2x_stats_arr[i].flags & STATS_FLAGS_BOTH) == STATS_FLAGS_PORT)
10403 #define IS_FUNC_STAT(i) (bnx2x_stats_arr[i].flags & STATS_FLAGS_FUNC)
10404 #define IS_E1HMF_MODE_STAT(bp) \
10405 (IS_E1HMF(bp) && !(bp->msglevel & BNX2X_MSG_STATS))
10407 static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
10409 struct bnx2x *bp = netdev_priv(dev);
10412 switch (stringset) {
10414 if (is_multi(bp)) {
10416 for_each_rx_queue(bp, i) {
10417 for (j = 0; j < BNX2X_NUM_Q_STATS; j++)
10418 sprintf(buf + (k + j)*ETH_GSTRING_LEN,
10419 bnx2x_q_stats_arr[j].string, i);
10420 k += BNX2X_NUM_Q_STATS;
10422 if (IS_E1HMF_MODE_STAT(bp))
10424 for (j = 0; j < BNX2X_NUM_STATS; j++)
10425 strcpy(buf + (k + j)*ETH_GSTRING_LEN,
10426 bnx2x_stats_arr[j].string);
10428 for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
10429 if (IS_E1HMF_MODE_STAT(bp) && IS_PORT_STAT(i))
10431 strcpy(buf + j*ETH_GSTRING_LEN,
10432 bnx2x_stats_arr[i].string);
10439 memcpy(buf, bnx2x_tests_str_arr, sizeof(bnx2x_tests_str_arr));
10444 static int bnx2x_get_stats_count(struct net_device *dev)
10446 struct bnx2x *bp = netdev_priv(dev);
10449 if (is_multi(bp)) {
10450 num_stats = BNX2X_NUM_Q_STATS * bp->num_rx_queues;
10451 if (!IS_E1HMF_MODE_STAT(bp))
10452 num_stats += BNX2X_NUM_STATS;
10454 if (IS_E1HMF_MODE_STAT(bp)) {
10456 for (i = 0; i < BNX2X_NUM_STATS; i++)
10457 if (IS_FUNC_STAT(i))
10460 num_stats = BNX2X_NUM_STATS;
10466 static void bnx2x_get_ethtool_stats(struct net_device *dev,
10467 struct ethtool_stats *stats, u64 *buf)
10469 struct bnx2x *bp = netdev_priv(dev);
10470 u32 *hw_stats, *offset;
10473 if (is_multi(bp)) {
10475 for_each_rx_queue(bp, i) {
10476 hw_stats = (u32 *)&bp->fp[i].eth_q_stats;
10477 for (j = 0; j < BNX2X_NUM_Q_STATS; j++) {
10478 if (bnx2x_q_stats_arr[j].size == 0) {
10479 /* skip this counter */
10483 offset = (hw_stats +
10484 bnx2x_q_stats_arr[j].offset);
10485 if (bnx2x_q_stats_arr[j].size == 4) {
10486 /* 4-byte counter */
10487 buf[k + j] = (u64) *offset;
10490 /* 8-byte counter */
10491 buf[k + j] = HILO_U64(*offset, *(offset + 1));
10493 k += BNX2X_NUM_Q_STATS;
10495 if (IS_E1HMF_MODE_STAT(bp))
10497 hw_stats = (u32 *)&bp->eth_stats;
10498 for (j = 0; j < BNX2X_NUM_STATS; j++) {
10499 if (bnx2x_stats_arr[j].size == 0) {
10500 /* skip this counter */
10504 offset = (hw_stats + bnx2x_stats_arr[j].offset);
10505 if (bnx2x_stats_arr[j].size == 4) {
10506 /* 4-byte counter */
10507 buf[k + j] = (u64) *offset;
10510 /* 8-byte counter */
10511 buf[k + j] = HILO_U64(*offset, *(offset + 1));
10514 hw_stats = (u32 *)&bp->eth_stats;
10515 for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
10516 if (IS_E1HMF_MODE_STAT(bp) && IS_PORT_STAT(i))
10518 if (bnx2x_stats_arr[i].size == 0) {
10519 /* skip this counter */
10524 offset = (hw_stats + bnx2x_stats_arr[i].offset);
10525 if (bnx2x_stats_arr[i].size == 4) {
10526 /* 4-byte counter */
10527 buf[j] = (u64) *offset;
10531 /* 8-byte counter */
10532 buf[j] = HILO_U64(*offset, *(offset + 1));
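/*
 * Editor's note (illustrative sketch, not part of the original driver):
 * 64-bit counters are laid out as two u32 words with the high word
 * first, so hi = 0x1 and lo = 0x2 compose to 0x100000002:
 */
static inline u64 bnx2x_demo_hilo(const u32 *counter)
{
	return ((u64)counter[0] << 32) | counter[1];	/* hi, then lo */
}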
10538 static int bnx2x_phys_id(struct net_device *dev, u32 data)
10540 struct bnx2x *bp = netdev_priv(dev);
10541 int port = BP_PORT(bp);
10544 if (!netif_running(dev))
10553 for (i = 0; i < (data * 2); i++) {
10555 bnx2x_set_led(bp, port, LED_MODE_OPER, SPEED_1000,
10556 bp->link_params.hw_led_mode,
10557 bp->link_params.chip_id);
10559 bnx2x_set_led(bp, port, LED_MODE_OFF, 0,
10560 bp->link_params.hw_led_mode,
10561 bp->link_params.chip_id);
10563 msleep_interruptible(500);
10564 if (signal_pending(current))
10568 if (bp->link_vars.link_up)
10569 bnx2x_set_led(bp, port, LED_MODE_OPER,
10570 bp->link_vars.line_speed,
10571 bp->link_params.hw_led_mode,
10572 bp->link_params.chip_id);
10577 static struct ethtool_ops bnx2x_ethtool_ops = {
10578 .get_settings = bnx2x_get_settings,
10579 .set_settings = bnx2x_set_settings,
10580 .get_drvinfo = bnx2x_get_drvinfo,
10581 .get_regs_len = bnx2x_get_regs_len,
10582 .get_regs = bnx2x_get_regs,
10583 .get_wol = bnx2x_get_wol,
10584 .set_wol = bnx2x_set_wol,
10585 .get_msglevel = bnx2x_get_msglevel,
10586 .set_msglevel = bnx2x_set_msglevel,
10587 .nway_reset = bnx2x_nway_reset,
10588 .get_link = bnx2x_get_link,
10589 .get_eeprom_len = bnx2x_get_eeprom_len,
10590 .get_eeprom = bnx2x_get_eeprom,
10591 .set_eeprom = bnx2x_set_eeprom,
10592 .get_coalesce = bnx2x_get_coalesce,
10593 .set_coalesce = bnx2x_set_coalesce,
10594 .get_ringparam = bnx2x_get_ringparam,
10595 .set_ringparam = bnx2x_set_ringparam,
10596 .get_pauseparam = bnx2x_get_pauseparam,
10597 .set_pauseparam = bnx2x_set_pauseparam,
10598 .get_rx_csum = bnx2x_get_rx_csum,
10599 .set_rx_csum = bnx2x_set_rx_csum,
10600 .get_tx_csum = ethtool_op_get_tx_csum,
10601 .set_tx_csum = ethtool_op_set_tx_hw_csum,
10602 .set_flags = bnx2x_set_flags,
10603 .get_flags = ethtool_op_get_flags,
10604 .get_sg = ethtool_op_get_sg,
10605 .set_sg = ethtool_op_set_sg,
10606 .get_tso = ethtool_op_get_tso,
10607 .set_tso = bnx2x_set_tso,
10608 .self_test_count = bnx2x_self_test_count,
10609 .self_test = bnx2x_self_test,
10610 .get_strings = bnx2x_get_strings,
10611 .phys_id = bnx2x_phys_id,
10612 .get_stats_count = bnx2x_get_stats_count,
10613 .get_ethtool_stats = bnx2x_get_ethtool_stats,
10616 /* end of ethtool_ops */
10618 /****************************************************************************
10619 * General service functions
10620 ****************************************************************************/
10622 static int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
10626 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);
10630 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
10631 ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
10632 PCI_PM_CTRL_PME_STATUS));
10634 if (pmcsr & PCI_PM_CTRL_STATE_MASK)
10635 /* delay required during transition out of D3hot */
10640 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
10644 pmcsr |= PCI_PM_CTRL_PME_ENABLE;
10646 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
10649 /* No more memory access after this point until
10650 * device is brought back to D0.
10660 static inline int bnx2x_has_rx_work(struct bnx2x_fastpath *fp)
10664 /* Tell compiler that status block fields can change */
10666 rx_cons_sb = le16_to_cpu(*fp->rx_cons_sb);
10667 if ((rx_cons_sb & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
10668 rx_cons_sb++;
10669 return (fp->rx_comp_cons != rx_cons_sb);
10673 * net_device service functions
10676 static int bnx2x_poll(struct napi_struct *napi, int budget)
10678 struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
10680 struct bnx2x *bp = fp->bp;
10683 #ifdef BNX2X_STOP_ON_ERROR
10684 if (unlikely(bp->panic))
10688 prefetch(fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)].skb);
10689 prefetch((char *)(fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)].skb) + 256);
10691 bnx2x_update_fpsb_idx(fp);
10693 if (bnx2x_has_rx_work(fp)) {
10694 work_done = bnx2x_rx_int(fp, budget);
10696 /* must not complete if we consumed full budget */
10697 if (work_done >= budget)
10701 /* bnx2x_has_rx_work() reads the status block, thus we need to
10702 * ensure that status block indices have been actually read
10703 * (bnx2x_update_fpsb_idx) prior to this check (bnx2x_has_rx_work)
10704 * so that we won't write the "newer" value of the status block to IGU
10705 * (if there was a DMA right after bnx2x_has_rx_work and
10706 * if there is no rmb, the memory reading (bnx2x_update_fpsb_idx)
10707 * may be postponed to right before bnx2x_ack_sb). In this case
10708 * there will never be another interrupt until there is another update
10709 * of the status block, while there is still unhandled work.
10713 if (!bnx2x_has_rx_work(fp)) {
10714 #ifdef BNX2X_STOP_ON_ERROR
10717 napi_complete(napi);
10719 bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID,
10720 le16_to_cpu(fp->fp_u_idx), IGU_INT_NOP, 1);
10721 bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID,
10722 le16_to_cpu(fp->fp_c_idx), IGU_INT_ENABLE, 1);
10730 /* we split the first BD into headers and data BDs
10731 * to ease the pain of our fellow microcode engineers
10732 * we use one mapping for both BDs
10733 * So far this has only been observed to happen
10734 * in Other Operating Systems(TM)
10736 static noinline u16 bnx2x_tx_split(struct bnx2x *bp,
10737 struct bnx2x_fastpath *fp,
10738 struct sw_tx_bd *tx_buf,
10739 struct eth_tx_start_bd **tx_bd, u16 hlen,
10740 u16 bd_prod, int nbd)
10742 struct eth_tx_start_bd *h_tx_bd = *tx_bd;
10743 struct eth_tx_bd *d_tx_bd;
10744 dma_addr_t mapping;
10745 int old_len = le16_to_cpu(h_tx_bd->nbytes);
10747 /* first fix first BD */
10748 h_tx_bd->nbd = cpu_to_le16(nbd);
10749 h_tx_bd->nbytes = cpu_to_le16(hlen);
10751 DP(NETIF_MSG_TX_QUEUED, "TSO split header size is %d "
10752 "(%x:%x) nbd %d\n", h_tx_bd->nbytes, h_tx_bd->addr_hi,
10753 h_tx_bd->addr_lo, h_tx_bd->nbd);
10755 /* now get a new data BD
10756 * (after the pbd) and fill it */
10757 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
10758 d_tx_bd = &fp->tx_desc_ring[bd_prod].reg_bd;
10760 mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi),
10761 le32_to_cpu(h_tx_bd->addr_lo)) + hlen;
10763 d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
10764 d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
10765 d_tx_bd->nbytes = cpu_to_le16(old_len - hlen);
10767 /* this marks the BD as one that has no individual mapping */
10768 tx_buf->flags |= BNX2X_TSO_SPLIT_BD;
10770 DP(NETIF_MSG_TX_QUEUED,
10771 "TSO split data size is %d (%x:%x)\n",
10772 d_tx_bd->nbytes, d_tx_bd->addr_hi, d_tx_bd->addr_lo);
10775 *tx_bd = (struct eth_tx_start_bd *)d_tx_bd;
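/*
 * Editor's note (illustrative sketch, not part of the original driver):
 * the split above reuses a single DMA mapping.  With old_len = 1514 and
 * hlen = 66, the header BD keeps 66 bytes at 'mapping' and the data BD
 * gets 1448 bytes starting at mapping + 66:
 */
static inline void bnx2x_demo_split_bd(dma_addr_t mapping, u16 old_len,
				       u16 hlen, dma_addr_t *d_addr,
				       u16 *d_len)
{
	*d_addr = mapping + hlen;	/* data follows the headers */
	*d_len = old_len - hlen;	/* remainder of the first BD */
}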
10780 static inline u16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
10783 csum = (u16) ~csum_fold(csum_sub(csum,
10784 csum_partial(t_header - fix, fix, 0)));
10787 csum = (u16) ~csum_fold(csum_add(csum,
10788 csum_partial(t_header, -fix, 0)));
10790 return swab16(csum);
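/*
 * Editor's note (illustrative sketch, not part of the original driver):
 * when the checksum start assumed by the stack differs from where the
 * hardware parser begins by 'fix' bytes, the partial sum over those
 * bytes is subtracted out (fix > 0) or folded back in (fix < 0) before
 * folding to 16 bits.  Excluding a 4-byte span from an existing sum:
 */
static inline u16 bnx2x_demo_csum_exclude(unsigned char *span, __wsum sum)
{
	/* remove the 4 bytes at 'span' from 'sum', then fold */
	return (u16)~csum_fold(csum_sub(sum, csum_partial(span, 4, 0)));
}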
10793 static inline u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
10797 if (skb->ip_summed != CHECKSUM_PARTIAL)
10801 if (skb->protocol == htons(ETH_P_IPV6)) {
10803 if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
10804 rc |= XMIT_CSUM_TCP;
10808 if (ip_hdr(skb)->protocol == IPPROTO_TCP)
10809 rc |= XMIT_CSUM_TCP;
10813 if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
10816 else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
10822 #if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
10823 /* check whether the packet requires linearization (i.e. it is too
10824 fragmented); no need to check fragmentation if the page size is
10825 above 8K, since the FW restrictions cannot be violated then */
10826 static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
10831 int first_bd_sz = 0;
10833 /* 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */
10834 if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - 3)) {
10836 if (xmit_type & XMIT_GSO) {
10837 unsigned short lso_mss = skb_shinfo(skb)->gso_size;
10838 /* Check if LSO packet needs to be copied:
10839 3 = 1 (for headers BD) + 2 (for PBD and last BD) */
10840 int wnd_size = MAX_FETCH_BD - 3;
10841 /* Number of windows to check */
10842 int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size;
10847 /* Headers length */
10848 hlen = (int)(skb_transport_header(skb) - skb->data) +
10851 /* Amount of data (w/o headers) on the linear part of the SKB */
10852 first_bd_sz = skb_headlen(skb) - hlen;
10854 wnd_sum = first_bd_sz;
10856 /* Calculate the first sum - it's special */
10857 for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++)
10859 skb_shinfo(skb)->frags[frag_idx].size;
10861 /* If there was data on linear skb data - check it */
10862 if (first_bd_sz > 0) {
10863 if (unlikely(wnd_sum < lso_mss)) {
10868 wnd_sum -= first_bd_sz;
10871 /* Others are easier: run through the frag list and
10872 check all windows */
10873 for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) {
10875 skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1].size;
10877 if (unlikely(wnd_sum < lso_mss)) {
10882 skb_shinfo(skb)->frags[wnd_idx].size;
10885 /* in non-LSO too fragmented packet should always
10892 if (unlikely(to_copy))
10893 DP(NETIF_MSG_TX_QUEUED,
10894 "Linearization IS REQUIRED for %s packet. "
10895 "num_frags %d hlen %d first_bd_sz %d\n",
10896 (xmit_type & XMIT_GSO) ? "LSO" : "non-LSO",
10897 skb_shinfo(skb)->nr_frags, hlen, first_bd_sz);
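
/*
 * A compiled-out sketch of the sliding-window rule enforced above, with
 * hypothetical sizes: if wnd_size were 10 and lso_mss 1460, every 10
 * consecutive BDs would have to carry at least one full MSS, otherwise
 * the FW could need more than MAX_FETCH_BD descriptors to assemble a
 * single segment and the skb must be linearized.
 */
#if 0
static int bnx2x_wnd_check_sketch(const u32 *bd_len, int nbd, int wnd_size,
				  u32 lso_mss)
{
	u32 wnd_sum = 0;
	int i;

	for (i = 0; i < nbd; i++) {
		wnd_sum += bd_len[i];
		if (i >= wnd_size - 1) {
			/* a full window has been gathered - check it */
			if (wnd_sum < lso_mss)
				return 1;	/* linearization required */
			wnd_sum -= bd_len[i - (wnd_size - 1)];
		}
	}
	return 0;
}
#endif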
/* called with netif_tx_lock
 * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
 * netif_wake_queue()
 */
static int bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);
	struct bnx2x_fastpath *fp, *fp_stat;
	struct netdev_queue *txq;
	struct sw_tx_bd *tx_buf;
	struct eth_tx_start_bd *tx_start_bd;
	struct eth_tx_bd *tx_data_bd, *total_pkt_bd = NULL;
	struct eth_tx_parse_bd *pbd = NULL;
	u16 pkt_prod, bd_prod;
	int nbd, fp_index;
	dma_addr_t mapping;
	u32 xmit_type = bnx2x_xmit_type(bp, skb);
	int i;
	u8 hlen = 0;
	__le16 pkt_size = 0;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return NETDEV_TX_BUSY;
#endif

	fp_index = skb_get_queue_mapping(skb);
	txq = netdev_get_tx_queue(dev, fp_index);

	fp = &bp->fp[fp_index + bp->num_rx_queues];
	fp_stat = &bp->fp[fp_index];

	if (unlikely(bnx2x_tx_avail(fp) < (skb_shinfo(skb)->nr_frags + 3))) {
		fp_stat->eth_q_stats.driver_xoff++;
		netif_tx_stop_queue(txq);
		BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
		return NETDEV_TX_BUSY;
	}

	DP(NETIF_MSG_TX_QUEUED, "SKB: summed %x  protocol %x  protocol(%x,%x)"
	   "  gso type %x  xmit_type %x\n",
	   skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
	   ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type);

#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
	/* First, check if we need to linearize the skb (due to FW
	   restrictions). No need to check fragmentation if page size > 8K
	   (there will be no violation to FW restrictions) */
	if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) {
		/* Statistics of linearization */
		bp->lin_cnt++;
		if (skb_linearize(skb) != 0) {
			DP(NETIF_MSG_TX_QUEUED, "SKB linearization failed - "
			   "silently dropping this SKB\n");
			dev_kfree_skb_any(skb);
			return NETDEV_TX_OK;
		}
	}
#endif

	/*
	Please read carefully. First we use one BD which we mark as start,
	then we have a parsing info BD (used for TSO or xsum),
	and only then we have the rest of the TSO BDs.
	(don't forget to mark the last one as last,
	and to unmap only AFTER you write to the BD ...)
	And above all, all pbd sizes are in words - NOT DWORDS!
	*/
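
	/*
	 * So the BD chain for a typical TSO packet built below looks like
	 * (sketch):
	 *
	 *   start BD -> parse BD -> [split data BD] -> frag BD ... frag BD
	 *
	 * nbd counts all of them, and a single doorbell with the final
	 * producer is rung at the end.
	 */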

	pkt_prod = fp->tx_pkt_prod++;
	bd_prod = TX_BD(fp->tx_bd_prod);

	/* get a tx_buf and first BD */
	tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)];
	tx_start_bd = &fp->tx_desc_ring[bd_prod].start_bd;

	tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
	tx_start_bd->general_data = (UNICAST_ADDRESS <<
				     ETH_TX_START_BD_ETH_ADDR_TYPE_SHIFT);
	/* header nbd */
	tx_start_bd->general_data |= (1 << ETH_TX_START_BD_HDR_NBDS_SHIFT);

	/* remember the first BD of the packet */
	tx_buf->first_bd = fp->tx_bd_prod;
	tx_buf->skb = skb;
	tx_buf->flags = 0;

	DP(NETIF_MSG_TX_QUEUED,
	   "sending pkt %u @%p  next_idx %u  bd %u @%p\n",
	   pkt_prod, tx_buf, fp->tx_pkt_prod, bd_prod, tx_start_bd);

#ifdef BCM_VLAN
	if ((bp->vlgrp != NULL) && vlan_tx_tag_present(skb) &&
	    (bp->flags & HW_VLAN_TX_FLAG)) {
		tx_start_bd->vlan = cpu_to_le16(vlan_tx_tag_get(skb));
		tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_VLAN_TAG;
	} else
#endif
		tx_start_bd->vlan = cpu_to_le16(pkt_prod);

	/* turn on parsing and get a BD */
	bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
	pbd = &fp->tx_desc_ring[bd_prod].parse_bd;

	memset(pbd, 0, sizeof(struct eth_tx_parse_bd));

	if (xmit_type & XMIT_CSUM) {
		hlen = (skb_network_header(skb) - skb->data) / 2;

		/* for now NS flag is not used in Linux */
		pbd->global_data =
			(hlen | ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
				 ETH_TX_PARSE_BD_LLC_SNAP_EN_SHIFT));

		pbd->ip_hlen = (skb_transport_header(skb) -
				skb_network_header(skb)) / 2;

		hlen += pbd->ip_hlen + tcp_hdrlen(skb) / 2;

		pbd->total_hlen = cpu_to_le16(hlen);
		hlen = hlen*2;

		tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_L4_CSUM;

		if (xmit_type & XMIT_CSUM_V4)
			tx_start_bd->bd_flags.as_bitfield |=
						ETH_TX_BD_FLAGS_IP_CSUM;
		else
			tx_start_bd->bd_flags.as_bitfield |=
						ETH_TX_BD_FLAGS_IPV6;

		if (xmit_type & XMIT_CSUM_TCP) {
			pbd->tcp_pseudo_csum = swab16(tcp_hdr(skb)->check);

		} else {
			s8 fix = SKB_CS_OFF(skb); /* signed! */

			pbd->global_data |= ETH_TX_PARSE_BD_UDP_CS_FLG;

			DP(NETIF_MSG_TX_QUEUED,
			   "hlen %d  fix %d  csum before fix %x\n",
			   le16_to_cpu(pbd->total_hlen), fix, SKB_CS(skb));

			/* HW bug: fixup the CSUM */
			pbd->tcp_pseudo_csum =
				bnx2x_csum_fix(skb_transport_header(skb),
					       SKB_CS(skb), fix);

			DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n",
			   pbd->tcp_pseudo_csum);
		}
	}

	mapping = pci_map_single(bp->pdev, skb->data,
				 skb_headlen(skb), PCI_DMA_TODEVICE);

	tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
	nbd = skb_shinfo(skb)->nr_frags + 2; /* start_bd + pbd + frags */
	tx_start_bd->nbd = cpu_to_le16(nbd);
	tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb));
	pkt_size = tx_start_bd->nbytes;

	DP(NETIF_MSG_TX_QUEUED, "first bd @%p  addr (%x:%x)  nbd %d"
	   "  nbytes %d  flags %x  vlan %x\n",
	   tx_start_bd, tx_start_bd->addr_hi, tx_start_bd->addr_lo,
	   le16_to_cpu(tx_start_bd->nbd), le16_to_cpu(tx_start_bd->nbytes),
	   tx_start_bd->bd_flags.as_bitfield, le16_to_cpu(tx_start_bd->vlan));

	if (xmit_type & XMIT_GSO) {

		DP(NETIF_MSG_TX_QUEUED,
		   "TSO packet len %d  hlen %d  total len %d  tso size %d\n",
		   skb->len, hlen, skb_headlen(skb),
		   skb_shinfo(skb)->gso_size);

		tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;

		if (unlikely(skb_headlen(skb) > hlen))
			bd_prod = bnx2x_tx_split(bp, fp, tx_buf, &tx_start_bd,
						 hlen, bd_prod, ++nbd);

		pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
		pbd->tcp_send_seq = swab32(tcp_hdr(skb)->seq);
		pbd->tcp_flags = pbd_tcp_flags(skb);

		if (xmit_type & XMIT_GSO_V4) {
			pbd->ip_id = swab16(ip_hdr(skb)->id);
			pbd->tcp_pseudo_csum =
				swab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
							  ip_hdr(skb)->daddr,
							  0, IPPROTO_TCP, 0));

		} else
			pbd->tcp_pseudo_csum =
				swab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
							&ipv6_hdr(skb)->daddr,
							0, IPPROTO_TCP, 0));

		pbd->global_data |= ETH_TX_PARSE_BD_PSEUDO_CS_WITHOUT_LEN;
	}
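
	/*
	 * Note: the pseudo checksums above are deliberately computed with a
	 * zero length field (csum_tcpudp_magic()/csum_ipv6_magic() are
	 * passed len = 0); the FW patches the per-segment length into each
	 * frame it generates, which is what the
	 * ETH_TX_PARSE_BD_PSEUDO_CS_WITHOUT_LEN flag advertises.
	 */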

	tx_data_bd = (struct eth_tx_bd *)tx_start_bd;

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
		tx_data_bd = &fp->tx_desc_ring[bd_prod].reg_bd;
		if (total_pkt_bd == NULL)
			total_pkt_bd = &fp->tx_desc_ring[bd_prod].reg_bd;

		mapping = pci_map_page(bp->pdev, frag->page, frag->page_offset,
				       frag->size, PCI_DMA_TODEVICE);

		tx_data_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
		tx_data_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
		tx_data_bd->nbytes = cpu_to_le16(frag->size);
		le16_add_cpu(&pkt_size, frag->size);

		DP(NETIF_MSG_TX_QUEUED,
		   "frag %d  bd @%p  addr (%x:%x)  nbytes %d\n",
		   i, tx_data_bd, tx_data_bd->addr_hi, tx_data_bd->addr_lo,
		   le16_to_cpu(tx_data_bd->nbytes));
	}

	DP(NETIF_MSG_TX_QUEUED, "last bd @%p\n", tx_data_bd);

	bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));

	/* now send a tx doorbell, counting the next BD
	 * if the packet contains or ends with it
	 */
	if (TX_BD_POFF(bd_prod) < nbd)
		nbd++;

	if (total_pkt_bd != NULL)
		total_pkt_bd->total_pkt_bytes = pkt_size;

	if (pbd)
		DP(NETIF_MSG_TX_QUEUED,
		   "PBD @%p  ip_data %x  ip_hlen %u  ip_id %u  lso_mss %u"
		   "  tcp_flags %x  xsum %x  seq %u  hlen %u\n",
		   pbd, pbd->global_data, pbd->ip_hlen, pbd->ip_id,
		   pbd->lso_mss, pbd->tcp_flags, pbd->tcp_pseudo_csum,
		   pbd->tcp_send_seq, le16_to_cpu(pbd->total_hlen));

	DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d  bd %u\n", nbd, bd_prod);

	/*
	 * Make sure that the BD data is updated before updating the producer
	 * since FW might read the BD right after the producer is updated.
	 * This is only applicable for weak-ordered memory model archs such
	 * as IA-64. The following barrier is also mandatory since FW will
	 * assume packets must have BDs.
	 */
	wmb();

	fp->tx_db.data.prod += nbd;
	barrier();
	DOORBELL(bp, fp->index - bp->num_rx_queues, fp->tx_db.raw);

	mmiowb();

	fp->tx_bd_prod += nbd;

	if (unlikely(bnx2x_tx_avail(fp) < MAX_SKB_FRAGS + 3)) {
		netif_tx_stop_queue(txq);
		/* We want bnx2x_tx_int to "see" the updated tx_bd_prod
		   if we put Tx into XOFF state. */
		smp_mb();
		fp_stat->eth_q_stats.driver_xoff++;
		if (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3)
			netif_tx_wake_queue(txq);
	}
	fp_stat->tx_pkt++;

	return NETDEV_TX_OK;
}
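
/*
 * For reference, the stop/wake pairing at the end of bnx2x_start_xmit()
 * follows the usual lock-free pattern with bnx2x_tx_int() (a sketch of
 * the invariant, not additional driver logic):
 *
 *	producer (start_xmit)		consumer (tx_int)
 *	stop queue			reclaim BDs
 *	smp_mb()			read queue state
 *	re-check ring space		wake queue if stopped with room
 *	wake queue if room appeared
 *
 * The smp_mb() after netif_tx_stop_queue() makes the stopped state and
 * the new producer visible before the final re-check, so a completion
 * racing with the stop cannot leave the queue off while space exists.
 */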

/* called with rtnl_lock */
static int bnx2x_open(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	netif_carrier_off(dev);

	bnx2x_set_power_state(bp, PCI_D0);

	return bnx2x_nic_load(bp, LOAD_OPEN);
}

/* called with rtnl_lock */
static int bnx2x_close(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	/* Unload the driver, release IRQs */
	bnx2x_nic_unload(bp, UNLOAD_CLOSE);
	if (atomic_read(&bp->pdev->enable_cnt) == 1)
		if (!CHIP_REV_IS_SLOW(bp))
			bnx2x_set_power_state(bp, PCI_D3hot);

	return 0;
}

/* called with netif_tx_lock from dev_mcast.c */
static void bnx2x_set_rx_mode(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);
	u32 rx_mode = BNX2X_RX_MODE_NORMAL;
	int port = BP_PORT(bp);

	if (bp->state != BNX2X_STATE_OPEN) {
		DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
		return;
	}

	DP(NETIF_MSG_IFUP, "dev->flags = %x\n", dev->flags);

	if (dev->flags & IFF_PROMISC)
		rx_mode = BNX2X_RX_MODE_PROMISC;

	else if ((dev->flags & IFF_ALLMULTI) ||
		 ((dev->mc_count > BNX2X_MAX_MULTICAST) && CHIP_IS_E1(bp)))
		rx_mode = BNX2X_RX_MODE_ALLMULTI;

	else { /* some multicasts */
		if (CHIP_IS_E1(bp)) {
			int i, old, offset;
			struct dev_mc_list *mclist;
			struct mac_configuration_cmd *config =
						bnx2x_sp(bp, mcast_config);

			for (i = 0, mclist = dev->mc_list;
			     mclist && (i < dev->mc_count);
			     i++, mclist = mclist->next) {

				config->config_table[i].
					cam_entry.msb_mac_addr =
					swab16(*(u16 *)&mclist->dmi_addr[0]);
				config->config_table[i].
					cam_entry.middle_mac_addr =
					swab16(*(u16 *)&mclist->dmi_addr[2]);
				config->config_table[i].
					cam_entry.lsb_mac_addr =
					swab16(*(u16 *)&mclist->dmi_addr[4]);
				config->config_table[i].cam_entry.flags =
							cpu_to_le16(port);
				config->config_table[i].
					target_table_entry.flags = 0;
				config->config_table[i].target_table_entry.
					clients_bit_vector =
						cpu_to_le32(1 << BP_L_ID(bp));
				config->config_table[i].
					target_table_entry.vlan_id = 0;

				DP(NETIF_MSG_IFUP,
				   "setting MCAST[%d] (%04x:%04x:%04x)\n", i,
				   config->config_table[i].
						cam_entry.msb_mac_addr,
				   config->config_table[i].
						cam_entry.middle_mac_addr,
				   config->config_table[i].
						cam_entry.lsb_mac_addr);
			}
			old = config->hdr.length;
			if (old > i) {
				for (; i < old; i++) {
					if (CAM_IS_INVALID(config->
							   config_table[i])) {
						/* already invalidated */
						break;
					}
					/* invalidate */
					CAM_INVALIDATE(config->
						       config_table[i]);
				}
			}

			if (CHIP_REV_IS_SLOW(bp))
				offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
			else
				offset = BNX2X_MAX_MULTICAST*(1 + port);

			config->hdr.length = i;
			config->hdr.offset = offset;
			config->hdr.client_id = bp->fp->cl_id;
			config->hdr.reserved1 = 0;

			bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
				   U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
				   U64_LO(bnx2x_sp_mapping(bp, mcast_config)),
				      0);
		} else { /* E1H */
			/* Accept one or more multicasts */
			struct dev_mc_list *mclist;
			u32 mc_filter[MC_HASH_SIZE];
			u32 crc, bit, regidx;
			int i;

			memset(mc_filter, 0, 4 * MC_HASH_SIZE);

			for (i = 0, mclist = dev->mc_list;
			     mclist && (i < dev->mc_count);
			     i++, mclist = mclist->next) {

				DP(NETIF_MSG_IFUP, "Adding mcast MAC: %pM\n",
				   mclist->dmi_addr);

				crc = crc32c_le(0, mclist->dmi_addr, ETH_ALEN);
				bit = (crc >> 24) & 0xff;
				regidx = bit >> 5;
				bit &= 0x1f;
				mc_filter[regidx] |= (1 << bit);
			}

			for (i = 0; i < MC_HASH_SIZE; i++)
				REG_WR(bp, MC_HASH_OFFSET(bp, i),
				       mc_filter[i]);
		}
	}

	bp->rx_mode = rx_mode;
	bnx2x_set_storm_rx_mode(bp);
}
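
/*
 * Compiled-out sketch of the E1H multicast hash used above: the top byte
 * of the little-endian CRC32C of the MAC address picks one of 256 filter
 * bits spread across the MC_HASH_SIZE 32-bit MC_HASH registers.
 */
#if 0
static void bnx2x_mc_hash_sketch(const u8 *mac, u32 *mc_filter)
{
	u32 crc = crc32c_le(0, mac, ETH_ALEN);
	u32 bit = (crc >> 24) & 0xff;	/* filter bit 0..255 */
	u32 regidx = bit >> 5;		/* which 32-bit register */

	bit &= 0x1f;			/* bit within that register */
	mc_filter[regidx] |= (1 << bit);
}
#endif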

/* called with rtnl_lock */
static int bnx2x_change_mac_addr(struct net_device *dev, void *p)
{
	struct sockaddr *addr = p;
	struct bnx2x *bp = netdev_priv(dev);

	if (!is_valid_ether_addr((u8 *)(addr->sa_data)))
		return -EINVAL;

	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
	if (netif_running(dev)) {
		if (CHIP_IS_E1(bp))
			bnx2x_set_mac_addr_e1(bp, 1);
		else
			bnx2x_set_mac_addr_e1h(bp, 1);
	}

	return 0;
}
11349 /* called with rtnl_lock */
11350 static int bnx2x_mdio_read(struct net_device *netdev, int prtad,
11351 int devad, u16 addr)
11353 struct bnx2x *bp = netdev_priv(netdev);
11356 u32 phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
11358 DP(NETIF_MSG_LINK, "mdio_read: prtad 0x%x, devad 0x%x, addr 0x%x\n",
11359 prtad, devad, addr);
11361 if (prtad != bp->mdio.prtad) {
11362 DP(NETIF_MSG_LINK, "prtad missmatch (cmd:0x%x != bp:0x%x)\n",
11363 prtad, bp->mdio.prtad);
11367 /* The HW expects different devad if CL22 is used */
11368 devad = (devad == MDIO_DEVAD_NONE) ? DEFAULT_PHY_DEV_ADDR : devad;
11370 bnx2x_acquire_phy_lock(bp);
11371 rc = bnx2x_cl45_read(bp, BP_PORT(bp), phy_type, prtad,
11372 devad, addr, &value);
11373 bnx2x_release_phy_lock(bp);
11374 DP(NETIF_MSG_LINK, "mdio_read_val 0x%x rc = 0x%x\n", value, rc);

/* called with rtnl_lock */
static int bnx2x_mdio_write(struct net_device *netdev, int prtad, int devad,
			    u16 addr, u16 value)
{
	struct bnx2x *bp = netdev_priv(netdev);
	u32 ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
	int rc;

	DP(NETIF_MSG_LINK, "mdio_write: prtad 0x%x, devad 0x%x, addr 0x%x,"
	   " value 0x%x\n", prtad, devad, addr, value);

	if (prtad != bp->mdio.prtad) {
		DP(NETIF_MSG_LINK, "prtad mismatch (cmd:0x%x != bp:0x%x)\n",
		   prtad, bp->mdio.prtad);
		return -EINVAL;
	}

	/* The HW expects different devad if CL22 is used */
	devad = (devad == MDIO_DEVAD_NONE) ? DEFAULT_PHY_DEV_ADDR : devad;

	bnx2x_acquire_phy_lock(bp);
	rc = bnx2x_cl45_write(bp, BP_PORT(bp), ext_phy_type, prtad,
			      devad, addr, value);
	bnx2x_release_phy_lock(bp);

	return rc;
}

/* called with rtnl_lock */
static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct bnx2x *bp = netdev_priv(dev);
	struct mii_ioctl_data *mdio = if_mii(ifr);

	DP(NETIF_MSG_LINK, "ioctl: phy id 0x%x, reg 0x%x, val_in 0x%x\n",
	   mdio->phy_id, mdio->reg_num, mdio->val_in);

	if (!netif_running(dev))
		return -EAGAIN;

	return mdio_mii_ioctl(&bp->mdio, mdio, cmd);
}

/* called with rtnl_lock */
static int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
{
	struct bnx2x *bp = netdev_priv(dev);
	int rc = 0;

	if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
	    ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE))
		return -EINVAL;

	/* This does not race with packet allocation
	 * because the actual alloc size is
	 * only updated as part of load
	 */
	dev->mtu = new_mtu;

	if (netif_running(dev)) {
		bnx2x_nic_unload(bp, UNLOAD_NORMAL);
		rc = bnx2x_nic_load(bp, LOAD_NORMAL);
	}

	return rc;
}

static void bnx2x_tx_timeout(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

#ifdef BNX2X_STOP_ON_ERROR
	if (!bp->panic)
		bnx2x_panic();
#endif
	/* This allows the netif to be shutdown gracefully before resetting */
	schedule_work(&bp->reset_task);
}

#ifdef BCM_VLAN
/* called with rtnl_lock */
static void bnx2x_vlan_rx_register(struct net_device *dev,
				   struct vlan_group *vlgrp)
{
	struct bnx2x *bp = netdev_priv(dev);

	bp->vlgrp = vlgrp;

	/* Set flags according to the required capabilities */
	bp->flags &= ~(HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG);

	if (dev->features & NETIF_F_HW_VLAN_TX)
		bp->flags |= HW_VLAN_TX_FLAG;

	if (dev->features & NETIF_F_HW_VLAN_RX)
		bp->flags |= HW_VLAN_RX_FLAG;

	if (netif_running(dev))
		bnx2x_set_client_config(bp);
}
#endif

#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
static void poll_bnx2x(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	disable_irq(bp->pdev->irq);
	bnx2x_interrupt(bp->pdev->irq, dev);
	enable_irq(bp->pdev->irq);
}
#endif

static const struct net_device_ops bnx2x_netdev_ops = {
	.ndo_open		= bnx2x_open,
	.ndo_stop		= bnx2x_close,
	.ndo_start_xmit		= bnx2x_start_xmit,
	.ndo_set_multicast_list	= bnx2x_set_rx_mode,
	.ndo_set_mac_address	= bnx2x_change_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_do_ioctl		= bnx2x_ioctl,
	.ndo_change_mtu		= bnx2x_change_mtu,
	.ndo_tx_timeout		= bnx2x_tx_timeout,
#ifdef BCM_VLAN
	.ndo_vlan_rx_register	= bnx2x_vlan_rx_register,
#endif
#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
	.ndo_poll_controller	= poll_bnx2x,
#endif
};

static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
				    struct net_device *dev)
{
	struct bnx2x *bp;
	int rc;

	SET_NETDEV_DEV(dev, &pdev->dev);
	bp = netdev_priv(dev);

	bp->dev = dev;
	bp->pdev = pdev;
	bp->flags = 0;
	bp->func = PCI_FUNC(pdev->devfn);

	rc = pci_enable_device(pdev);
	if (rc) {
		printk(KERN_ERR PFX "Cannot enable PCI device, aborting\n");
		goto err_out;
	}

	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
		printk(KERN_ERR PFX "Cannot find PCI device base address,"
		       " aborting\n");
		rc = -ENODEV;
		goto err_out_disable;
	}

	if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
		printk(KERN_ERR PFX "Cannot find second PCI device"
		       " base address, aborting\n");
		rc = -ENODEV;
		goto err_out_disable;
	}

	if (atomic_read(&pdev->enable_cnt) == 1) {
		rc = pci_request_regions(pdev, DRV_MODULE_NAME);
		if (rc) {
			printk(KERN_ERR PFX "Cannot obtain PCI resources,"
			       " aborting\n");
			goto err_out_disable;
		}

		pci_set_master(pdev);
		pci_save_state(pdev);
	}

	bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
	if (bp->pm_cap == 0) {
		printk(KERN_ERR PFX "Cannot find power management"
		       " capability, aborting\n");
		rc = -EIO;
		goto err_out_release;
	}

	bp->pcie_cap = pci_find_capability(pdev, PCI_CAP_ID_EXP);
	if (bp->pcie_cap == 0) {
		printk(KERN_ERR PFX "Cannot find PCI Express capability,"
		       " aborting\n");
		rc = -EIO;
		goto err_out_release;
	}

	if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) == 0) {
		bp->flags |= USING_DAC_FLAG;
		if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)) != 0) {
			printk(KERN_ERR PFX "pci_set_consistent_dma_mask"
			       " failed, aborting\n");
			rc = -EIO;
			goto err_out_release;
		}

	} else if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) {
		printk(KERN_ERR PFX "System does not support DMA,"
		       " aborting\n");
		rc = -EIO;
		goto err_out_release;
	}

	dev->mem_start = pci_resource_start(pdev, 0);
	dev->base_addr = dev->mem_start;
	dev->mem_end = pci_resource_end(pdev, 0);

	dev->irq = pdev->irq;

	bp->regview = pci_ioremap_bar(pdev, 0);
	if (!bp->regview) {
		printk(KERN_ERR PFX "Cannot map register space, aborting\n");
		rc = -ENOMEM;
		goto err_out_release;
	}

	bp->doorbells = ioremap_nocache(pci_resource_start(pdev, 2),
					min_t(u64, BNX2X_DB_SIZE,
					      pci_resource_len(pdev, 2)));
	if (!bp->doorbells) {
		printk(KERN_ERR PFX "Cannot map doorbell space, aborting\n");
		rc = -ENOMEM;
		goto err_out_unmap;
	}

	bnx2x_set_power_state(bp, PCI_D0);

	/* clean indirect addresses */
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);
	REG_WR(bp, PXP2_REG_PGL_ADDR_88_F0 + BP_PORT(bp)*16, 0);
	REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F0 + BP_PORT(bp)*16, 0);
	REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0 + BP_PORT(bp)*16, 0);
	REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0 + BP_PORT(bp)*16, 0);

	dev->watchdog_timeo = TX_TIMEOUT;

	dev->netdev_ops = &bnx2x_netdev_ops;
	dev->ethtool_ops = &bnx2x_ethtool_ops;
	dev->features |= NETIF_F_SG;
	dev->features |= NETIF_F_HW_CSUM;
	if (bp->flags & USING_DAC_FLAG)
		dev->features |= NETIF_F_HIGHDMA;
	dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
	dev->features |= NETIF_F_TSO6;
#ifdef BCM_VLAN
	dev->features |= (NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX);
	bp->flags |= (HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG);

	dev->vlan_features |= NETIF_F_SG;
	dev->vlan_features |= NETIF_F_HW_CSUM;
	if (bp->flags & USING_DAC_FLAG)
		dev->vlan_features |= NETIF_F_HIGHDMA;
	dev->vlan_features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
	dev->vlan_features |= NETIF_F_TSO6;
#endif

	/* get_port_hwinfo() will set prtad and mmds properly */
	bp->mdio.prtad = MDIO_PRTAD_NONE;
	bp->mdio.mmds = 0;
	bp->mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22;
	bp->mdio.dev = dev;
	bp->mdio.mdio_read = bnx2x_mdio_read;
	bp->mdio.mdio_write = bnx2x_mdio_write;

	return 0;

err_out_unmap:
	if (bp->regview) {
		iounmap(bp->regview);
		bp->regview = NULL;
	}
	if (bp->doorbells) {
		iounmap(bp->doorbells);
		bp->doorbells = NULL;
	}

err_out_release:
	if (atomic_read(&pdev->enable_cnt) == 1)
		pci_release_regions(pdev);

err_out_disable:
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);

err_out:
	return rc;
}

static void __devinit bnx2x_get_pcie_width_speed(struct bnx2x *bp,
						 int *width, int *speed)
{
	u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);

	*width = (val & PCICFG_LINK_WIDTH) >> PCICFG_LINK_WIDTH_SHIFT;

	/* return value of 1=2.5GHz 2=5GHz */
	*speed = (val & PCICFG_LINK_SPEED) >> PCICFG_LINK_SPEED_SHIFT;
}
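
/*
 * Example decode (illustrative): a link-control value whose speed field
 * reads 2 and whose width field reads 8 is reported by bnx2x_init_one()
 * below as "PCI-E x8 5GHz (Gen2)"; a speed field of 1 means 2.5GHz.
 */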

static int __devinit bnx2x_check_firmware(struct bnx2x *bp)
{
	const struct firmware *firmware = bp->firmware;
	struct bnx2x_fw_file_hdr *fw_hdr;
	struct bnx2x_fw_file_section *sections;
	u32 offset, len, num_ops;
	u16 *ops_offsets;
	int i;
	const u8 *fw_ver;

	if (firmware->size < sizeof(struct bnx2x_fw_file_hdr))
		return -EINVAL;

	fw_hdr = (struct bnx2x_fw_file_hdr *)firmware->data;
	sections = (struct bnx2x_fw_file_section *)fw_hdr;

	/* Make sure none of the offsets and sizes make us read beyond
	 * the end of the firmware data */
	for (i = 0; i < sizeof(*fw_hdr) / sizeof(*sections); i++) {
		offset = be32_to_cpu(sections[i].offset);
		len = be32_to_cpu(sections[i].len);
		if (offset + len > firmware->size) {
			printk(KERN_ERR PFX "Section %d length is out of "
					    "bounds\n", i);
			return -EINVAL;
		}
	}

	/* Likewise for the init_ops offsets */
	offset = be32_to_cpu(fw_hdr->init_ops_offsets.offset);
	ops_offsets = (u16 *)(firmware->data + offset);
	num_ops = be32_to_cpu(fw_hdr->init_ops.len) / sizeof(struct raw_op);

	for (i = 0; i < be32_to_cpu(fw_hdr->init_ops_offsets.len) / 2; i++) {
		if (be16_to_cpu(ops_offsets[i]) > num_ops) {
			printk(KERN_ERR PFX "Section offset %d is out of "
					    "bounds\n", i);
			return -EINVAL;
		}
	}

	/* Check FW version */
	offset = be32_to_cpu(fw_hdr->fw_version.offset);
	fw_ver = firmware->data + offset;
	if ((fw_ver[0] != BCM_5710_FW_MAJOR_VERSION) ||
	    (fw_ver[1] != BCM_5710_FW_MINOR_VERSION) ||
	    (fw_ver[2] != BCM_5710_FW_REVISION_VERSION) ||
	    (fw_ver[3] != BCM_5710_FW_ENGINEERING_VERSION)) {
		printk(KERN_ERR PFX "Bad FW version:%d.%d.%d.%d."
				    " Should be %d.%d.%d.%d\n",
		       fw_ver[0], fw_ver[1], fw_ver[2],
		       fw_ver[3], BCM_5710_FW_MAJOR_VERSION,
		       BCM_5710_FW_MINOR_VERSION,
		       BCM_5710_FW_REVISION_VERSION,
		       BCM_5710_FW_ENGINEERING_VERSION);
		return -EINVAL;
	}

	return 0;
}

static inline void be32_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
{
	u32 i;
	const __be32 *source = (const __be32 *)_source;
	u32 *target = (u32 *)_target;

	for (i = 0; i < n/4; i++)
		target[i] = be32_to_cpu(source[i]);
}

/*
   Ops array is stored in the following format:
   {op(8bit), offset(24bit, big endian), data(32bit, big endian)}
 */
static inline void bnx2x_prep_ops(const u8 *_source, u8 *_target, u32 n)
{
	u32 i, j, tmp;
	const __be32 *source = (const __be32 *)_source;
	struct raw_op *target = (struct raw_op *)_target;

	for (i = 0, j = 0; i < n/8; i++, j += 2) {
		tmp = be32_to_cpu(source[j]);
		target[i].op = (tmp >> 24) & 0xff;
		target[i].offset = tmp & 0xffffff;
		target[i].raw_data = be32_to_cpu(source[j+1]);
	}
}
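
/*
 * Worked example for the record decode above (made-up bytes): a raw
 * big-endian record of 0x12 0x01 0x02 0x10  0x00 0x00 0xbe 0xef unpacks
 * to op = 0x12, offset = 0x010210 and raw_data = 0x0000beef, regardless
 * of host endianness.
 */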

static inline void be16_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
{
	u32 i;
	u16 *target = (u16 *)_target;
	const __be16 *source = (const __be16 *)_source;

	for (i = 0; i < n/2; i++)
		target[i] = be16_to_cpu(source[i]);
}

#define BNX2X_ALLOC_AND_SET(arr, lbl, func) \
	do { \
		u32 len = be32_to_cpu(fw_hdr->arr.len); \
		bp->arr = kmalloc(len, GFP_KERNEL); \
		if (!bp->arr) { \
			printk(KERN_ERR PFX "Failed to allocate %d bytes for "#arr"\n", len); \
			goto lbl; \
		} \
		func(bp->firmware->data + \
		     be32_to_cpu(fw_hdr->arr.offset), \
		     (u8 *)bp->arr, len); \
	} while (0)
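
/*
 * For reference, BNX2X_ALLOC_AND_SET(init_data, request_firmware_exit,
 * be32_to_cpu_n) as used below expands to (sketch): read init_data.len
 * from the file header, kmalloc() a buffer of that size into
 * bp->init_data (jumping to request_firmware_exit on failure), then
 * byte-swap the init_data section of the firmware image into the buffer
 * with be32_to_cpu_n().
 */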

static int __devinit bnx2x_init_firmware(struct bnx2x *bp, struct device *dev)
{
	char fw_file_name[40] = {0};
	int rc, offset;
	struct bnx2x_fw_file_hdr *fw_hdr;

	/* Create a FW file name */
	if (CHIP_IS_E1(bp))
		offset = sprintf(fw_file_name, FW_FILE_PREFIX_E1);
	else
		offset = sprintf(fw_file_name, FW_FILE_PREFIX_E1H);

	sprintf(fw_file_name + offset, "%d.%d.%d.%d.fw",
		BCM_5710_FW_MAJOR_VERSION,
		BCM_5710_FW_MINOR_VERSION,
		BCM_5710_FW_REVISION_VERSION,
		BCM_5710_FW_ENGINEERING_VERSION);

	printk(KERN_INFO PFX "Loading %s\n", fw_file_name);

	rc = request_firmware(&bp->firmware, fw_file_name, dev);
	if (rc) {
		printk(KERN_ERR PFX "Can't load firmware file %s\n",
		       fw_file_name);
		goto request_firmware_exit;
	}

	rc = bnx2x_check_firmware(bp);
	if (rc) {
		printk(KERN_ERR PFX "Corrupt firmware file %s\n",
		       fw_file_name);
		goto request_firmware_exit;
	}

	fw_hdr = (struct bnx2x_fw_file_hdr *)bp->firmware->data;

	/* Initialize the pointers to the init arrays */
	/* Blob */
	BNX2X_ALLOC_AND_SET(init_data, request_firmware_exit, be32_to_cpu_n);

	/* Opcodes */
	BNX2X_ALLOC_AND_SET(init_ops, init_ops_alloc_err, bnx2x_prep_ops);

	/* Offsets */
	BNX2X_ALLOC_AND_SET(init_ops_offsets, init_offsets_alloc_err,
			    be16_to_cpu_n);

	/* STORMs firmware */
	bp->tsem_int_table_data = bp->firmware->data +
		be32_to_cpu(fw_hdr->tsem_int_table_data.offset);
	bp->tsem_pram_data = bp->firmware->data +
		be32_to_cpu(fw_hdr->tsem_pram_data.offset);
	bp->usem_int_table_data = bp->firmware->data +
		be32_to_cpu(fw_hdr->usem_int_table_data.offset);
	bp->usem_pram_data = bp->firmware->data +
		be32_to_cpu(fw_hdr->usem_pram_data.offset);
	bp->xsem_int_table_data = bp->firmware->data +
		be32_to_cpu(fw_hdr->xsem_int_table_data.offset);
	bp->xsem_pram_data = bp->firmware->data +
		be32_to_cpu(fw_hdr->xsem_pram_data.offset);
	bp->csem_int_table_data = bp->firmware->data +
		be32_to_cpu(fw_hdr->csem_int_table_data.offset);
	bp->csem_pram_data = bp->firmware->data +
		be32_to_cpu(fw_hdr->csem_pram_data.offset);

	return 0;

init_offsets_alloc_err:
	kfree(bp->init_ops);
init_ops_alloc_err:
	kfree(bp->init_data);
request_firmware_exit:
	release_firmware(bp->firmware);

	return rc;
}

static int __devinit bnx2x_init_one(struct pci_dev *pdev,
				    const struct pci_device_id *ent)
{
	struct net_device *dev = NULL;
	struct bnx2x *bp;
	int pcie_width, pcie_speed;
	int rc;

	/* dev zeroed in init_etherdev */
	dev = alloc_etherdev_mq(sizeof(*bp), MAX_CONTEXT);
	if (!dev) {
		printk(KERN_ERR PFX "Cannot allocate net device\n");
		return -ENOMEM;
	}

	bp = netdev_priv(dev);
	bp->msglevel = debug;

	pci_set_drvdata(pdev, dev);

	rc = bnx2x_init_dev(pdev, dev);
	if (rc < 0) {
		free_netdev(dev);
		return rc;
	}

	rc = bnx2x_init_bp(bp);
	if (rc)
		goto init_one_exit;

	/* Set init arrays */
	rc = bnx2x_init_firmware(bp, &pdev->dev);
	if (rc) {
		printk(KERN_ERR PFX "Error loading firmware\n");
		goto init_one_exit;
	}

	rc = register_netdev(dev);
	if (rc) {
		dev_err(&pdev->dev, "Cannot register net device\n");
		goto init_one_exit;
	}

	bnx2x_get_pcie_width_speed(bp, &pcie_width, &pcie_speed);
	printk(KERN_INFO "%s: %s (%c%d) PCI-E x%d %s found at mem %lx,"
	       " IRQ %d, ", dev->name, board_info[ent->driver_data].name,
	       (CHIP_REV(bp) >> 12) + 'A', (CHIP_METAL(bp) >> 4),
	       pcie_width, (pcie_speed == 2) ? "5GHz (Gen2)" : "2.5GHz",
	       dev->base_addr, bp->pdev->irq);
	printk(KERN_CONT "node addr %pM\n", dev->dev_addr);

	return 0;

init_one_exit:
	if (bp->regview)
		iounmap(bp->regview);

	if (bp->doorbells)
		iounmap(bp->doorbells);

	free_netdev(dev);

	if (atomic_read(&pdev->enable_cnt) == 1)
		pci_release_regions(pdev);

	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);

	return rc;
}

static void __devexit bnx2x_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp;

	if (!dev) {
		printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
		return;
	}
	bp = netdev_priv(dev);

	unregister_netdev(dev);

	kfree(bp->init_ops_offsets);
	kfree(bp->init_ops);
	kfree(bp->init_data);
	release_firmware(bp->firmware);

	if (bp->regview)
		iounmap(bp->regview);

	if (bp->doorbells)
		iounmap(bp->doorbells);

	free_netdev(dev);

	if (atomic_read(&pdev->enable_cnt) == 1)
		pci_release_regions(pdev);

	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
}

static int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp;

	if (!dev) {
		printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
		return -ENODEV;
	}
	bp = netdev_priv(dev);

	rtnl_lock();

	pci_save_state(pdev);

	if (!netif_running(dev)) {
		rtnl_unlock();
		return 0;
	}

	netif_device_detach(dev);

	bnx2x_nic_unload(bp, UNLOAD_CLOSE);

	bnx2x_set_power_state(bp, pci_choose_state(pdev, state));

	rtnl_unlock();

	return 0;
}

static int bnx2x_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp;
	int rc;

	if (!dev) {
		printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
		return -ENODEV;
	}
	bp = netdev_priv(dev);

	rtnl_lock();

	pci_restore_state(pdev);

	if (!netif_running(dev)) {
		rtnl_unlock();
		return 0;
	}

	bnx2x_set_power_state(bp, PCI_D0);
	netif_device_attach(dev);

	rc = bnx2x_nic_load(bp, LOAD_OPEN);

	rtnl_unlock();

	return rc;
}

static int bnx2x_eeh_nic_unload(struct bnx2x *bp)
{
	int i;

	bp->state = BNX2X_STATE_ERROR;

	bp->rx_mode = BNX2X_RX_MODE_NONE;

	bnx2x_netif_stop(bp, 0);

	del_timer_sync(&bp->timer);
	bp->stats_state = STATS_STATE_DISABLED;
	DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");

	/* Release IRQs */
	bnx2x_free_irq(bp);

	if (CHIP_IS_E1(bp)) {
		struct mac_configuration_cmd *config =
						bnx2x_sp(bp, mcast_config);

		for (i = 0; i < config->hdr.length; i++)
			CAM_INVALIDATE(config->config_table[i]);
	}

	/* Free SKBs, SGEs, TPA pool and driver internals */
	bnx2x_free_skbs(bp);
	for_each_rx_queue(bp, i)
		bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
	for_each_rx_queue(bp, i)
		netif_napi_del(&bnx2x_fp(bp, i, napi));
	bnx2x_free_mem(bp);

	bp->state = BNX2X_STATE_CLOSED;

	netif_carrier_off(bp->dev);

	return 0;
}

static void bnx2x_eeh_recover(struct bnx2x *bp)
{
	u32 val;

	mutex_init(&bp->port.phy_mutex);

	bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
	bp->link_params.shmem_base = bp->common.shmem_base;
	BNX2X_DEV_INFO("shmem offset is 0x%x\n", bp->common.shmem_base);

	if (!bp->common.shmem_base ||
	    (bp->common.shmem_base < 0xA0000) ||
	    (bp->common.shmem_base >= 0xC0000)) {
		BNX2X_DEV_INFO("MCP not active\n");
		bp->flags |= NO_MCP_FLAG;
		return;
	}

	val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
	if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
		!= (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
		BNX2X_ERR("BAD MCP validity signature\n");

	if (!BP_NOMCP(bp)) {
		bp->fw_seq = (SHMEM_RD(bp, func_mb[BP_FUNC(bp)].drv_mb_header)
			      & DRV_MSG_SEQ_NUMBER_MASK);
		BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
	}
}

/**
 * bnx2x_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t bnx2x_io_error_detected(struct pci_dev *pdev,
						pci_channel_state_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp = netdev_priv(dev);

	rtnl_lock();

	netif_device_detach(dev);

	if (state == pci_channel_io_perm_failure) {
		rtnl_unlock();
		return PCI_ERS_RESULT_DISCONNECT;
	}

	if (netif_running(dev))
		bnx2x_eeh_nic_unload(bp);

	pci_disable_device(pdev);

	rtnl_unlock();

	/* Request a slot reset */
	return PCI_ERS_RESULT_NEED_RESET;
}

/**
 * bnx2x_io_slot_reset - called after the PCI bus has been reset
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot.
 */
static pci_ers_result_t bnx2x_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp = netdev_priv(dev);

	rtnl_lock();

	if (pci_enable_device(pdev)) {
		dev_err(&pdev->dev,
			"Cannot re-enable PCI device after reset\n");
		rtnl_unlock();
		return PCI_ERS_RESULT_DISCONNECT;
	}

	pci_set_master(pdev);
	pci_restore_state(pdev);

	if (netif_running(dev))
		bnx2x_set_power_state(bp, PCI_D0);

	rtnl_unlock();

	return PCI_ERS_RESULT_RECOVERED;
}

/**
 * bnx2x_io_resume - called when traffic can start flowing again
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells us that
 * it's OK to resume normal operation.
 */
static void bnx2x_io_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp = netdev_priv(dev);

	rtnl_lock();

	bnx2x_eeh_recover(bp);

	if (netif_running(dev))
		bnx2x_nic_load(bp, LOAD_NORMAL);

	netif_device_attach(dev);

	rtnl_unlock();
}

static struct pci_error_handlers bnx2x_err_handler = {
	.error_detected = bnx2x_io_error_detected,
	.slot_reset     = bnx2x_io_slot_reset,
	.resume         = bnx2x_io_resume,
};

static struct pci_driver bnx2x_pci_driver = {
	.name        = DRV_MODULE_NAME,
	.id_table    = bnx2x_pci_tbl,
	.probe       = bnx2x_init_one,
	.remove      = __devexit_p(bnx2x_remove_one),
	.suspend     = bnx2x_suspend,
	.resume      = bnx2x_resume,
	.err_handler = &bnx2x_err_handler,
};

static int __init bnx2x_init(void)
{
	int ret;

	printk(KERN_INFO "%s", version);

	bnx2x_wq = create_singlethread_workqueue("bnx2x");
	if (bnx2x_wq == NULL) {
		printk(KERN_ERR PFX "Cannot create workqueue\n");
		return -ENOMEM;
	}

	ret = pci_register_driver(&bnx2x_pci_driver);
	if (ret) {
		printk(KERN_ERR PFX "Cannot register driver\n");
		destroy_workqueue(bnx2x_wq);
	}
	return ret;
}

static void __exit bnx2x_cleanup(void)
{
	pci_unregister_driver(&bnx2x_pci_driver);

	destroy_workqueue(bnx2x_wq);
}

module_init(bnx2x_init);
module_exit(bnx2x_cleanup);