1 /* bnx2x_main.c: Broadcom Everest network driver.
3 * Copyright (c) 2007-2009 Broadcom Corporation
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation.
9 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
10 * Written by: Eliezer Tamir
11 * Based on code from Michael Chan's bnx2 driver
12 * UDP CSUM errata workaround by Arik Gendelman
13 * Slowpath and fastpath rework by Vladislav Zolotarov
14 * Statistics and Link management by Yitchak Gertner
18 #include <linux/module.h>
19 #include <linux/moduleparam.h>
20 #include <linux/kernel.h>
21 #include <linux/device.h> /* for dev_info() */
22 #include <linux/timer.h>
23 #include <linux/errno.h>
24 #include <linux/ioport.h>
25 #include <linux/slab.h>
26 #include <linux/vmalloc.h>
27 #include <linux/interrupt.h>
28 #include <linux/pci.h>
29 #include <linux/init.h>
30 #include <linux/netdevice.h>
31 #include <linux/etherdevice.h>
32 #include <linux/skbuff.h>
33 #include <linux/dma-mapping.h>
34 #include <linux/bitops.h>
35 #include <linux/irq.h>
36 #include <linux/delay.h>
37 #include <asm/byteorder.h>
38 #include <linux/time.h>
39 #include <linux/ethtool.h>
40 #include <linux/mii.h>
41 #include <linux/if_vlan.h>
44 #include <net/checksum.h>
45 #include <net/ip6_checksum.h>
46 #include <linux/workqueue.h>
47 #include <linux/crc32.h>
48 #include <linux/crc32c.h>
49 #include <linux/prefetch.h>
50 #include <linux/zlib.h>
55 #include "bnx2x_init.h"
56 #include "bnx2x_init_ops.h"
57 #include "bnx2x_dump.h"
59 #define DRV_MODULE_VERSION "1.52.1"
60 #define DRV_MODULE_RELDATE "2009/08/12"
61 #define BNX2X_BC_VER 0x040200
63 #include <linux/firmware.h>
64 #include "bnx2x_fw_file_hdr.h"
66 #define FW_FILE_PREFIX_E1 "bnx2x-e1-"
67 #define FW_FILE_PREFIX_E1H "bnx2x-e1h-"
69 /* Time in jiffies before concluding the transmitter is hung */
70 #define TX_TIMEOUT (5*HZ)
72 static char version[] __devinitdata =
73 "Broadcom NetXtreme II 5771x 10Gigabit Ethernet Driver "
74 DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
76 MODULE_AUTHOR("Eliezer Tamir");
77 MODULE_DESCRIPTION("Broadcom NetXtreme II BCM57710/57711/57711E Driver");
78 MODULE_LICENSE("GPL");
79 MODULE_VERSION(DRV_MODULE_VERSION);
81 static int multi_mode = 1;
82 module_param(multi_mode, int, 0);
83 MODULE_PARM_DESC(multi_mode, " Multi queue mode "
84 "(0 Disable; 1 Enable (default))");
86 static int num_rx_queues;
87 module_param(num_rx_queues, int, 0);
88 MODULE_PARM_DESC(num_rx_queues, " Number of Rx queues for multi_mode=1"
89 " (default is half number of CPUs)");
91 static int num_tx_queues;
92 module_param(num_tx_queues, int, 0);
93 MODULE_PARM_DESC(num_tx_queues, " Number of Tx queues for multi_mode=1"
94 " (default is half number of CPUs)");
96 static int disable_tpa;
97 module_param(disable_tpa, int, 0);
98 MODULE_PARM_DESC(disable_tpa, " Disable the TPA (LRO) feature");
100 static int int_mode;
101 module_param(int_mode, int, 0);
102 MODULE_PARM_DESC(int_mode, " Force interrupt mode (1 INT#x; 2 MSI)");
104 static int dropless_fc;
105 module_param(dropless_fc, int, 0);
106 MODULE_PARM_DESC(dropless_fc, " Pause on exhausted host ring");
108 static int poll;
109 module_param(poll, int, 0);
110 MODULE_PARM_DESC(poll, " Use polling (for debug)");
112 static int mrrs = -1;
113 module_param(mrrs, int, 0);
114 MODULE_PARM_DESC(mrrs, " Force Max Read Req Size (0..3) (for debug)");
116 static int debug;
117 module_param(debug, int, 0);
118 MODULE_PARM_DESC(debug, " Default debug msglevel");
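/* Example (hypothetical values): a typical multi-queue load of this
 * module, forcing MSI interrupts and four Rx/Tx queues:
 *
 *   modprobe bnx2x multi_mode=1 num_rx_queues=4 num_tx_queues=4 int_mode=2
 */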
120 static int load_count[3]; /* 0-common, 1-port0, 2-port1 */
122 static struct workqueue_struct *bnx2x_wq;
124 enum bnx2x_board_type {
125 BCM57710 = 0,
126 BCM57711,
127 BCM57711E,
128 };
130 /* indexed by board_type, above */
131 static struct {
132 char *name;
133 } board_info[] __devinitdata = {
134 { "Broadcom NetXtreme II BCM57710 XGb" },
135 { "Broadcom NetXtreme II BCM57711 XGb" },
136 { "Broadcom NetXtreme II BCM57711E XGb" }
140 static const struct pci_device_id bnx2x_pci_tbl[] = {
141 { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57710), BCM57710 },
142 { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711), BCM57711 },
143 { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711E), BCM57711E },
144 { 0 }
145 };
147 MODULE_DEVICE_TABLE(pci, bnx2x_pci_tbl);
149 /****************************************************************************
150 * General service functions
151 ****************************************************************************/
154 /* used only at init; locking is done by the MCP */
156 void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val)
158 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
159 pci_write_config_dword(bp->pdev, PCICFG_GRC_DATA, val);
160 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
161 PCICFG_VENDOR_ID_OFFSET);
164 static u32 bnx2x_reg_rd_ind(struct bnx2x *bp, u32 addr)
168 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
169 pci_read_config_dword(bp->pdev, PCICFG_GRC_DATA, &val);
170 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
171 PCICFG_VENDOR_ID_OFFSET);
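/* The two helpers above implement indirect GRC access through the PCI
 * config space window: the target address goes into PCICFG_GRC_ADDRESS,
 * the data moves through PCICFG_GRC_DATA, and the window is parked back
 * at PCICFG_VENDOR_ID_OFFSET. A minimal usage sketch (hypothetical
 * register offset):
 *
 *   bnx2x_reg_wr_ind(bp, 0x2000, 0x1);
 *   val = bnx2x_reg_rd_ind(bp, 0x2000);
 */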
176 static const u32 dmae_reg_go_c[] = {
177 DMAE_REG_GO_C0, DMAE_REG_GO_C1, DMAE_REG_GO_C2, DMAE_REG_GO_C3,
178 DMAE_REG_GO_C4, DMAE_REG_GO_C5, DMAE_REG_GO_C6, DMAE_REG_GO_C7,
179 DMAE_REG_GO_C8, DMAE_REG_GO_C9, DMAE_REG_GO_C10, DMAE_REG_GO_C11,
180 DMAE_REG_GO_C12, DMAE_REG_GO_C13, DMAE_REG_GO_C14, DMAE_REG_GO_C15
183 /* copy command into DMAE command memory and set DMAE command go */
184 static void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae,
190 cmd_offset = (DMAE_REG_CMD_MEM + sizeof(struct dmae_command) * idx);
191 for (i = 0; i < (sizeof(struct dmae_command)/4); i++) {
192 REG_WR(bp, cmd_offset + i*4, *(((u32 *)dmae) + i));
194 DP(BNX2X_MSG_OFF, "DMAE cmd[%d].%d (0x%08x) : 0x%08x\n",
195 idx, i, cmd_offset + i*4, *(((u32 *)dmae) + i));
197 REG_WR(bp, dmae_reg_go_c[idx], 1);
200 void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
203 struct dmae_command dmae;
204 u32 *wb_comp = bnx2x_sp(bp, wb_comp);
207 if (!bp->dmae_ready) {
208 u32 *data = bnx2x_sp(bp, wb_data[0]);
210 DP(BNX2X_MSG_OFF, "DMAE is not ready (dst_addr %08x len32 %d)"
211 " using indirect\n", dst_addr, len32);
212 bnx2x_init_ind_wr(bp, dst_addr, data, len32);
216 memset(&dmae, 0, sizeof(struct dmae_command));
218 dmae.opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
219 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
220 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
222 DMAE_CMD_ENDIANITY_B_DW_SWAP |
224 DMAE_CMD_ENDIANITY_DW_SWAP |
226 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
227 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
228 dmae.src_addr_lo = U64_LO(dma_addr);
229 dmae.src_addr_hi = U64_HI(dma_addr);
230 dmae.dst_addr_lo = dst_addr >> 2;
231 dmae.dst_addr_hi = 0;
233 dmae.comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
234 dmae.comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
235 dmae.comp_val = DMAE_COMP_VAL;
237 DP(BNX2X_MSG_OFF, "DMAE: opcode 0x%08x\n"
238 DP_LEVEL "src_addr [%x:%08x] len [%d *4] "
239 "dst_addr [%x:%08x (%08x)]\n"
240 DP_LEVEL "comp_addr [%x:%08x] comp_val 0x%08x\n",
241 dmae.opcode, dmae.src_addr_hi, dmae.src_addr_lo,
242 dmae.len, dmae.dst_addr_hi, dmae.dst_addr_lo, dst_addr,
243 dmae.comp_addr_hi, dmae.comp_addr_lo, dmae.comp_val);
244 DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
245 bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
246 bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);
248 mutex_lock(&bp->dmae_mutex);
252 bnx2x_post_dmae(bp, &dmae, INIT_DMAE_C(bp));
256 while (*wb_comp != DMAE_COMP_VAL) {
257 DP(BNX2X_MSG_OFF, "wb_comp 0x%08x\n", *wb_comp);
260 BNX2X_ERR("DMAE timeout!\n");
264 /* adjust delay for emulation/FPGA */
265 if (CHIP_REV_IS_SLOW(bp))
271 mutex_unlock(&bp->dmae_mutex);
274 void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
276 struct dmae_command dmae;
277 u32 *wb_comp = bnx2x_sp(bp, wb_comp);
280 if (!bp->dmae_ready) {
281 u32 *data = bnx2x_sp(bp, wb_data[0]);
284 DP(BNX2X_MSG_OFF, "DMAE is not ready (src_addr %08x len32 %d)"
285 " using indirect\n", src_addr, len32);
286 for (i = 0; i < len32; i++)
287 data[i] = bnx2x_reg_rd_ind(bp, src_addr + i*4);
291 memset(&dmae, 0, sizeof(struct dmae_command));
293 dmae.opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
294 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
295 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
297 DMAE_CMD_ENDIANITY_B_DW_SWAP |
299 DMAE_CMD_ENDIANITY_DW_SWAP |
301 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
302 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
303 dmae.src_addr_lo = src_addr >> 2;
304 dmae.src_addr_hi = 0;
305 dmae.dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_data));
306 dmae.dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_data));
308 dmae.comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
309 dmae.comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
310 dmae.comp_val = DMAE_COMP_VAL;
312 DP(BNX2X_MSG_OFF, "DMAE: opcode 0x%08x\n"
313 DP_LEVEL "src_addr [%x:%08x] len [%d *4] "
314 "dst_addr [%x:%08x (%08x)]\n"
315 DP_LEVEL "comp_addr [%x:%08x] comp_val 0x%08x\n",
316 dmae.opcode, dmae.src_addr_hi, dmae.src_addr_lo,
317 dmae.len, dmae.dst_addr_hi, dmae.dst_addr_lo, src_addr,
318 dmae.comp_addr_hi, dmae.comp_addr_lo, dmae.comp_val);
320 mutex_lock(&bp->dmae_mutex);
322 memset(bnx2x_sp(bp, wb_data[0]), 0, sizeof(u32) * 4);
325 bnx2x_post_dmae(bp, &dmae, INIT_DMAE_C(bp));
329 while (*wb_comp != DMAE_COMP_VAL) {
332 BNX2X_ERR("DMAE timeout!\n");
336 /* adjust delay for emulation/FPGA */
337 if (CHIP_REV_IS_SLOW(bp))
342 DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
343 bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
344 bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);
346 mutex_unlock(&bp->dmae_mutex);
349 void bnx2x_write_dmae_phys_len(struct bnx2x *bp, dma_addr_t phys_addr,
354 while (len > DMAE_LEN32_WR_MAX) {
355 bnx2x_write_dmae(bp, phys_addr + offset,
356 addr + offset, DMAE_LEN32_WR_MAX);
357 offset += DMAE_LEN32_WR_MAX * 4;
358 len -= DMAE_LEN32_WR_MAX;
361 bnx2x_write_dmae(bp, phys_addr + offset, addr + offset, len);
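/* Usage sketch (hypothetical arguments): copying a buffer larger than
 * DMAE_LEN32_WR_MAX dwords is transparently split into chunks by the
 * helper above; the caller only supplies the DMA mapping and the total
 * length in dwords:
 *
 *   bnx2x_write_dmae_phys_len(bp, mapping, grc_addr, len32);
 */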
364 /* used only for slowpath so not inlined */
365 static void bnx2x_wb_wr(struct bnx2x *bp, int reg, u32 val_hi, u32 val_lo)
369 wb_write[0] = val_hi;
370 wb_write[1] = val_lo;
371 REG_WR_DMAE(bp, reg, wb_write, 2);
375 static u64 bnx2x_wb_rd(struct bnx2x *bp, int reg)
379 REG_RD_DMAE(bp, reg, wb_data, 2);
381 return HILO_U64(wb_data[0], wb_data[1]);
385 static int bnx2x_mc_assert(struct bnx2x *bp)
389 u32 row0, row1, row2, row3;
392 last_idx = REG_RD8(bp, BAR_XSTRORM_INTMEM +
393 XSTORM_ASSERT_LIST_INDEX_OFFSET);
395 BNX2X_ERR("XSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
397 /* print the asserts */
398 for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
400 row0 = REG_RD(bp, BAR_XSTRORM_INTMEM +
401 XSTORM_ASSERT_LIST_OFFSET(i));
402 row1 = REG_RD(bp, BAR_XSTRORM_INTMEM +
403 XSTORM_ASSERT_LIST_OFFSET(i) + 4);
404 row2 = REG_RD(bp, BAR_XSTRORM_INTMEM +
405 XSTORM_ASSERT_LIST_OFFSET(i) + 8);
406 row3 = REG_RD(bp, BAR_XSTRORM_INTMEM +
407 XSTORM_ASSERT_LIST_OFFSET(i) + 12);
409 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
410 BNX2X_ERR("XSTORM_ASSERT_INDEX 0x%x = 0x%08x"
411 " 0x%08x 0x%08x 0x%08x\n",
412 i, row3, row2, row1, row0);
420 last_idx = REG_RD8(bp, BAR_TSTRORM_INTMEM +
421 TSTORM_ASSERT_LIST_INDEX_OFFSET);
423 BNX2X_ERR("TSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
425 /* print the asserts */
426 for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
428 row0 = REG_RD(bp, BAR_TSTRORM_INTMEM +
429 TSTORM_ASSERT_LIST_OFFSET(i));
430 row1 = REG_RD(bp, BAR_TSTRORM_INTMEM +
431 TSTORM_ASSERT_LIST_OFFSET(i) + 4);
432 row2 = REG_RD(bp, BAR_TSTRORM_INTMEM +
433 TSTORM_ASSERT_LIST_OFFSET(i) + 8);
434 row3 = REG_RD(bp, BAR_TSTRORM_INTMEM +
435 TSTORM_ASSERT_LIST_OFFSET(i) + 12);
437 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
438 BNX2X_ERR("TSTORM_ASSERT_INDEX 0x%x = 0x%08x"
439 " 0x%08x 0x%08x 0x%08x\n",
440 i, row3, row2, row1, row0);
448 last_idx = REG_RD8(bp, BAR_CSTRORM_INTMEM +
449 CSTORM_ASSERT_LIST_INDEX_OFFSET);
451 BNX2X_ERR("CSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
453 /* print the asserts */
454 for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
456 row0 = REG_RD(bp, BAR_CSTRORM_INTMEM +
457 CSTORM_ASSERT_LIST_OFFSET(i));
458 row1 = REG_RD(bp, BAR_CSTRORM_INTMEM +
459 CSTORM_ASSERT_LIST_OFFSET(i) + 4);
460 row2 = REG_RD(bp, BAR_CSTRORM_INTMEM +
461 CSTORM_ASSERT_LIST_OFFSET(i) + 8);
462 row3 = REG_RD(bp, BAR_CSTRORM_INTMEM +
463 CSTORM_ASSERT_LIST_OFFSET(i) + 12);
465 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
466 BNX2X_ERR("CSTORM_ASSERT_INDEX 0x%x = 0x%08x"
467 " 0x%08x 0x%08x 0x%08x\n",
468 i, row3, row2, row1, row0);
476 last_idx = REG_RD8(bp, BAR_USTRORM_INTMEM +
477 USTORM_ASSERT_LIST_INDEX_OFFSET);
479 BNX2X_ERR("USTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
481 /* print the asserts */
482 for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
484 row0 = REG_RD(bp, BAR_USTRORM_INTMEM +
485 USTORM_ASSERT_LIST_OFFSET(i));
486 row1 = REG_RD(bp, BAR_USTRORM_INTMEM +
487 USTORM_ASSERT_LIST_OFFSET(i) + 4);
488 row2 = REG_RD(bp, BAR_USTRORM_INTMEM +
489 USTORM_ASSERT_LIST_OFFSET(i) + 8);
490 row3 = REG_RD(bp, BAR_USTRORM_INTMEM +
491 USTORM_ASSERT_LIST_OFFSET(i) + 12);
493 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
494 BNX2X_ERR("USTORM_ASSERT_INDEX 0x%x = 0x%08x"
495 " 0x%08x 0x%08x 0x%08x\n",
496 i, row3, row2, row1, row0);
506 static void bnx2x_fw_dump(struct bnx2x *bp)
512 mark = REG_RD(bp, MCP_REG_MCPR_SCRATCH + 0xf104);
513 mark = ((mark + 0x3) & ~0x3);
514 printk(KERN_ERR PFX "begin fw dump (mark 0x%x)\n", mark);
516 printk(KERN_ERR PFX);
517 for (offset = mark - 0x08000000; offset <= 0xF900; offset += 0x8*4) {
518 for (word = 0; word < 8; word++)
519 data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
522 printk(KERN_CONT "%s", (char *)data);
524 for (offset = 0xF108; offset <= mark - 0x08000000; offset += 0x8*4) {
525 for (word = 0; word < 8; word++)
526 data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
529 printk(KERN_CONT "%s", (char *)data);
531 printk(KERN_ERR PFX "end of fw dump\n");
534 static void bnx2x_panic_dump(struct bnx2x *bp)
539 bp->stats_state = STATS_STATE_DISABLED;
540 DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");
542 BNX2X_ERR("begin crash dump -----------------\n");
546 BNX2X_ERR("def_c_idx(%u) def_u_idx(%u) def_x_idx(%u)"
547 " def_t_idx(%u) def_att_idx(%u) attn_state(%u)"
548 " spq_prod_idx(%u)\n",
549 bp->def_c_idx, bp->def_u_idx, bp->def_x_idx, bp->def_t_idx,
550 bp->def_att_idx, bp->attn_state, bp->spq_prod_idx);
553 for_each_rx_queue(bp, i) {
554 struct bnx2x_fastpath *fp = &bp->fp[i];
556 BNX2X_ERR("fp%d: rx_bd_prod(%x) rx_bd_cons(%x)"
557 " *rx_bd_cons_sb(%x) rx_comp_prod(%x)"
558 " rx_comp_cons(%x) *rx_cons_sb(%x)\n",
559 i, fp->rx_bd_prod, fp->rx_bd_cons,
560 le16_to_cpu(*fp->rx_bd_cons_sb), fp->rx_comp_prod,
561 fp->rx_comp_cons, le16_to_cpu(*fp->rx_cons_sb));
562 BNX2X_ERR(" rx_sge_prod(%x) last_max_sge(%x)"
563 " fp_u_idx(%x) *sb_u_idx(%x)\n",
564 fp->rx_sge_prod, fp->last_max_sge,
565 le16_to_cpu(fp->fp_u_idx),
566 fp->status_blk->u_status_block.status_block_index);
570 for_each_tx_queue(bp, i) {
571 struct bnx2x_fastpath *fp = &bp->fp[i];
573 BNX2X_ERR("fp%d: tx_pkt_prod(%x) tx_pkt_cons(%x)"
574 " tx_bd_prod(%x) tx_bd_cons(%x) *tx_cons_sb(%x)\n",
575 i, fp->tx_pkt_prod, fp->tx_pkt_cons, fp->tx_bd_prod,
576 fp->tx_bd_cons, le16_to_cpu(*fp->tx_cons_sb));
577 BNX2X_ERR(" fp_c_idx(%x) *sb_c_idx(%x)"
578 " tx_db_prod(%x)\n", le16_to_cpu(fp->fp_c_idx),
579 fp->status_blk->c_status_block.status_block_index,
580 fp->tx_db.data.prod);
585 for_each_rx_queue(bp, i) {
586 struct bnx2x_fastpath *fp = &bp->fp[i];
588 start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10);
589 end = RX_BD(le16_to_cpu(*fp->rx_cons_sb) + 503);
590 for (j = start; j != end; j = RX_BD(j + 1)) {
591 u32 *rx_bd = (u32 *)&fp->rx_desc_ring[j];
592 struct sw_rx_bd *sw_bd = &fp->rx_buf_ring[j];
594 BNX2X_ERR("fp%d: rx_bd[%x]=[%x:%x] sw_bd=[%p]\n",
595 i, j, rx_bd[1], rx_bd[0], sw_bd->skb);
598 start = RX_SGE(fp->rx_sge_prod);
599 end = RX_SGE(fp->last_max_sge);
600 for (j = start; j != end; j = RX_SGE(j + 1)) {
601 u32 *rx_sge = (u32 *)&fp->rx_sge_ring[j];
602 struct sw_rx_page *sw_page = &fp->rx_page_ring[j];
604 BNX2X_ERR("fp%d: rx_sge[%x]=[%x:%x] sw_page=[%p]\n",
605 i, j, rx_sge[1], rx_sge[0], sw_page->page);
608 start = RCQ_BD(fp->rx_comp_cons - 10);
609 end = RCQ_BD(fp->rx_comp_cons + 503);
610 for (j = start; j != end; j = RCQ_BD(j + 1)) {
611 u32 *cqe = (u32 *)&fp->rx_comp_ring[j];
613 BNX2X_ERR("fp%d: cqe[%x]=[%x:%x:%x:%x]\n",
614 i, j, cqe[0], cqe[1], cqe[2], cqe[3]);
619 for_each_tx_queue(bp, i) {
620 struct bnx2x_fastpath *fp = &bp->fp[i];
622 start = TX_BD(le16_to_cpu(*fp->tx_cons_sb) - 10);
623 end = TX_BD(le16_to_cpu(*fp->tx_cons_sb) + 245);
624 for (j = start; j != end; j = TX_BD(j + 1)) {
625 struct sw_tx_bd *sw_bd = &fp->tx_buf_ring[j];
627 BNX2X_ERR("fp%d: packet[%x]=[%p,%x]\n",
628 i, j, sw_bd->skb, sw_bd->first_bd);
631 start = TX_BD(fp->tx_bd_cons - 10);
632 end = TX_BD(fp->tx_bd_cons + 254);
633 for (j = start; j != end; j = TX_BD(j + 1)) {
634 u32 *tx_bd = (u32 *)&fp->tx_desc_ring[j];
636 BNX2X_ERR("fp%d: tx_bd[%x]=[%x:%x:%x:%x]\n",
637 i, j, tx_bd[0], tx_bd[1], tx_bd[2], tx_bd[3]);
643 BNX2X_ERR("end crash dump -----------------\n");
646 static void bnx2x_int_enable(struct bnx2x *bp)
648 int port = BP_PORT(bp);
649 u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
650 u32 val = REG_RD(bp, addr);
651 int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
652 int msi = (bp->flags & USING_MSI_FLAG) ? 1 : 0;
655 val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
656 HC_CONFIG_0_REG_INT_LINE_EN_0);
657 val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
658 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
660 val &= ~HC_CONFIG_0_REG_INT_LINE_EN_0;
661 val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
662 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
663 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
665 val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
666 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
667 HC_CONFIG_0_REG_INT_LINE_EN_0 |
668 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
670 DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
673 REG_WR(bp, addr, val);
675 val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0;
678 DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x) mode %s\n",
679 val, port, addr, (msix ? "MSI-X" : (msi ? "MSI" : "INTx")));
681 REG_WR(bp, addr, val);
683 /* Ensure that HC_CONFIG is written before leading/trailing edge config */
688 if (CHIP_IS_E1H(bp)) {
689 /* init leading/trailing edge */
691 val = (0xee0f | (1 << (BP_E1HVN(bp) + 4)));
693 /* enable nig and gpio3 attention */
698 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
699 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
702 /* Make sure that interrupts are indeed enabled from here on */
706 static void bnx2x_int_disable(struct bnx2x *bp)
708 int port = BP_PORT(bp);
709 u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
710 u32 val = REG_RD(bp, addr);
712 val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
713 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
714 HC_CONFIG_0_REG_INT_LINE_EN_0 |
715 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
717 DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
720 /* flush all outstanding writes */
723 REG_WR(bp, addr, val);
724 if (REG_RD(bp, addr) != val)
725 BNX2X_ERR("BUG! proper val not read from IGU!\n");
728 static void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw)
730 int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
733 /* disable interrupt handling */
734 atomic_inc(&bp->intr_sem);
735 smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */
738 /* prevent the HW from sending interrupts */
739 bnx2x_int_disable(bp);
741 /* make sure all ISRs are done */
743 synchronize_irq(bp->msix_table[0].vector);
748 for_each_queue(bp, i)
749 synchronize_irq(bp->msix_table[i + offset].vector);
751 synchronize_irq(bp->pdev->irq);
753 /* make sure sp_task is not running */
754 cancel_delayed_work(&bp->sp_task);
755 flush_workqueue(bnx2x_wq);
761 /* General service functions */
764 static inline void bnx2x_ack_sb(struct bnx2x *bp, u8 sb_id,
765 u8 storm, u16 index, u8 op, u8 update)
767 u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
768 COMMAND_REG_INT_ACK);
769 struct igu_ack_register igu_ack;
771 igu_ack.status_block_index = index;
772 igu_ack.sb_id_and_flags =
773 ((sb_id << IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT) |
774 (storm << IGU_ACK_REGISTER_STORM_ID_SHIFT) |
775 (update << IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT) |
776 (op << IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT));
778 DP(BNX2X_MSG_OFF, "write 0x%08x to HC addr 0x%x\n",
779 (*(u32 *)&igu_ack), hc_addr);
780 REG_WR(bp, hc_addr, (*(u32 *)&igu_ack));
782 /* Make sure that ACK is written */
787 static inline u16 bnx2x_update_fpsb_idx(struct bnx2x_fastpath *fp)
789 struct host_status_block *fpsb = fp->status_blk;
792 barrier(); /* status block is written to by the chip */
793 if (fp->fp_c_idx != fpsb->c_status_block.status_block_index) {
794 fp->fp_c_idx = fpsb->c_status_block.status_block_index;
797 if (fp->fp_u_idx != fpsb->u_status_block.status_block_index) {
798 fp->fp_u_idx = fpsb->u_status_block.status_block_index;
804 static u16 bnx2x_ack_int(struct bnx2x *bp)
806 u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
807 COMMAND_REG_SIMD_MASK);
808 u32 result = REG_RD(bp, hc_addr);
810 DP(BNX2X_MSG_OFF, "read 0x%08x from HC addr 0x%x\n",
818 /* fast path service functions */
821 static inline int bnx2x_has_tx_work_unload(struct bnx2x_fastpath *fp)
823 /* Tell compiler that consumer and producer can change */
825 return (fp->tx_pkt_prod != fp->tx_pkt_cons);
828 /* free skb in the packet ring at pos idx
829 * return idx of last bd freed */
831 static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fastpath *fp,
834 struct sw_tx_bd *tx_buf = &fp->tx_buf_ring[idx];
835 struct eth_tx_start_bd *tx_start_bd;
836 struct eth_tx_bd *tx_data_bd;
837 struct sk_buff *skb = tx_buf->skb;
838 u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
841 DP(BNX2X_MSG_OFF, "pkt_idx %d buff @(%p)->skb %p\n",
845 DP(BNX2X_MSG_OFF, "free bd_idx %d\n", bd_idx);
846 tx_start_bd = &fp->tx_desc_ring[bd_idx].start_bd;
847 pci_unmap_single(bp->pdev, BD_UNMAP_ADDR(tx_start_bd),
848 BD_UNMAP_LEN(tx_start_bd), PCI_DMA_TODEVICE);
850 nbd = le16_to_cpu(tx_start_bd->nbd) - 1;
851 #ifdef BNX2X_STOP_ON_ERROR
852 if ((nbd - 1) > (MAX_SKB_FRAGS + 2)) {
853 BNX2X_ERR("BAD nbd!\n");
857 new_cons = nbd + tx_buf->first_bd;
859 /* Get the next bd */
860 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
862 /* Skip a parse bd... */
864 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
866 /* ...and the TSO split header bd since they have no mapping */
867 if (tx_buf->flags & BNX2X_TSO_SPLIT_BD) {
869 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
875 DP(BNX2X_MSG_OFF, "free frag bd_idx %d\n", bd_idx);
876 tx_data_bd = &fp->tx_desc_ring[bd_idx].reg_bd;
877 pci_unmap_page(bp->pdev, BD_UNMAP_ADDR(tx_data_bd),
878 BD_UNMAP_LEN(tx_data_bd), PCI_DMA_TODEVICE);
880 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
885 dev_kfree_skb_any(skb);
886 tx_buf->first_bd = 0;
892 static inline u16 bnx2x_tx_avail(struct bnx2x_fastpath *fp)
898 barrier(); /* Tell compiler that prod and cons can change */
899 prod = fp->tx_bd_prod;
900 cons = fp->tx_bd_cons;
902 /* NUM_TX_RINGS = number of "next-page" entries
903 It will be used as a threshold */
904 used = SUB_S16(prod, cons) + (s16)NUM_TX_RINGS;
906 #ifdef BNX2X_STOP_ON_ERROR
908 WARN_ON(used > fp->bp->tx_ring_size);
909 WARN_ON((fp->bp->tx_ring_size - used) > MAX_TX_AVAIL);
912 return (s16)(fp->bp->tx_ring_size) - used;
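/* Worked example (hypothetical numbers): with tx_ring_size = 4096,
 * prod = 110, cons = 100 and NUM_TX_RINGS = 16, used = 10 + 16 = 26 and
 * 4070 BDs remain available -- the "next-page" BDs are always counted
 * as used, so they can never be handed out to the caller. */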
915 static void bnx2x_tx_int(struct bnx2x_fastpath *fp)
917 struct bnx2x *bp = fp->bp;
918 struct netdev_queue *txq;
919 u16 hw_cons, sw_cons, bd_cons = fp->tx_bd_cons;
922 #ifdef BNX2X_STOP_ON_ERROR
923 if (unlikely(bp->panic))
927 txq = netdev_get_tx_queue(bp->dev, fp->index - bp->num_rx_queues);
928 hw_cons = le16_to_cpu(*fp->tx_cons_sb);
929 sw_cons = fp->tx_pkt_cons;
931 while (sw_cons != hw_cons) {
934 pkt_cons = TX_BD(sw_cons);
936 /* prefetch(bp->tx_buf_ring[pkt_cons].skb); */
938 DP(NETIF_MSG_TX_DONE, "hw_cons %u sw_cons %u pkt_cons %u\n",
939 hw_cons, sw_cons, pkt_cons);
941 /* if (NEXT_TX_IDX(sw_cons) != hw_cons) {
943 prefetch(fp->tx_buf_ring[NEXT_TX_IDX(sw_cons)].skb);
946 bd_cons = bnx2x_free_tx_pkt(bp, fp, pkt_cons);
951 fp->tx_pkt_cons = sw_cons;
952 fp->tx_bd_cons = bd_cons;
954 /* TBD need a thresh? */
955 if (unlikely(netif_tx_queue_stopped(txq))) {
957 /* Need to make the tx_bd_cons update visible to start_xmit()
958 * before checking for netif_tx_queue_stopped(). Without the
959 * memory barrier, there is a small possibility that
960 * start_xmit() will miss it and cause the queue to be stopped forever. */
965 if ((netif_tx_queue_stopped(txq)) &&
966 (bp->state == BNX2X_STATE_OPEN) &&
967 (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3))
968 netif_tx_wake_queue(txq);
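/* The stopped/available re-check above is the classic lost-wakeup
 * guard: the memory barrier described in the comment pairs with one in
 * start_xmit(), so either this side observes the stopped queue or
 * start_xmit() observes the freed BDs (a sketch of the ordering
 * contract, not an extra code path). */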
973 static void bnx2x_sp_event(struct bnx2x_fastpath *fp,
974 union eth_rx_cqe *rr_cqe)
976 struct bnx2x *bp = fp->bp;
977 int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data);
978 int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data);
981 "fp %d cid %d got ramrod #%d state is %x type is %d\n",
982 fp->index, cid, command, bp->state,
983 rr_cqe->ramrod_cqe.ramrod_type);
988 switch (command | fp->state) {
989 case (RAMROD_CMD_ID_ETH_CLIENT_SETUP |
990 BNX2X_FP_STATE_OPENING):
991 DP(NETIF_MSG_IFUP, "got MULTI[%d] setup ramrod\n",
993 fp->state = BNX2X_FP_STATE_OPEN;
996 case (RAMROD_CMD_ID_ETH_HALT | BNX2X_FP_STATE_HALTING):
997 DP(NETIF_MSG_IFDOWN, "got MULTI[%d] halt ramrod\n",
999 fp->state = BNX2X_FP_STATE_HALTED;
1003 BNX2X_ERR("unexpected MC reply (%d) "
1004 "fp->state is %x\n", command, fp->state);
1007 mb(); /* force bnx2x_wait_ramrod() to see the change */
1011 switch (command | bp->state) {
1012 case (RAMROD_CMD_ID_ETH_PORT_SETUP | BNX2X_STATE_OPENING_WAIT4_PORT):
1013 DP(NETIF_MSG_IFUP, "got setup ramrod\n");
1014 bp->state = BNX2X_STATE_OPEN;
1017 case (RAMROD_CMD_ID_ETH_HALT | BNX2X_STATE_CLOSING_WAIT4_HALT):
1018 DP(NETIF_MSG_IFDOWN, "got halt ramrod\n");
1019 bp->state = BNX2X_STATE_CLOSING_WAIT4_DELETE;
1020 fp->state = BNX2X_FP_STATE_HALTED;
1023 case (RAMROD_CMD_ID_ETH_CFC_DEL | BNX2X_STATE_CLOSING_WAIT4_HALT):
1024 DP(NETIF_MSG_IFDOWN, "got delete ramrod for MULTI[%d]\n", cid);
1025 bnx2x_fp(bp, cid, state) = BNX2X_FP_STATE_CLOSED;
1029 case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_OPEN):
1030 case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_DIAG):
1031 DP(NETIF_MSG_IFUP, "got set mac ramrod\n");
1032 bp->set_mac_pending--;
1036 case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_CLOSING_WAIT4_HALT):
1037 case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_DISABLED):
1038 DP(NETIF_MSG_IFDOWN, "got (un)set mac ramrod\n");
1039 bp->set_mac_pending--;
1044 BNX2X_ERR("unexpected MC reply (%d) bp->state is %x\n",
1045 command, bp->state);
1048 mb(); /* force bnx2x_wait_ramrod() to see the change */
1051 static inline void bnx2x_free_rx_sge(struct bnx2x *bp,
1052 struct bnx2x_fastpath *fp, u16 index)
1054 struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
1055 struct page *page = sw_buf->page;
1056 struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
1058 /* Skip "next page" elements */
1062 pci_unmap_page(bp->pdev, pci_unmap_addr(sw_buf, mapping),
1063 SGE_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);
1064 __free_pages(page, PAGES_PER_SGE_SHIFT);
1066 sw_buf->page = NULL;
1071 static inline void bnx2x_free_rx_sge_range(struct bnx2x *bp,
1072 struct bnx2x_fastpath *fp, int last)
1076 for (i = 0; i < last; i++)
1077 bnx2x_free_rx_sge(bp, fp, i);
1080 static inline int bnx2x_alloc_rx_sge(struct bnx2x *bp,
1081 struct bnx2x_fastpath *fp, u16 index)
1083 struct page *page = alloc_pages(GFP_ATOMIC, PAGES_PER_SGE_SHIFT);
1084 struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
1085 struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
1088 if (unlikely(page == NULL))
1091 mapping = pci_map_page(bp->pdev, page, 0, SGE_PAGE_SIZE*PAGES_PER_SGE,
1092 PCI_DMA_FROMDEVICE);
1093 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
1094 __free_pages(page, PAGES_PER_SGE_SHIFT);
1098 sw_buf->page = page;
1099 pci_unmap_addr_set(sw_buf, mapping, mapping);
1101 sge->addr_hi = cpu_to_le32(U64_HI(mapping));
1102 sge->addr_lo = cpu_to_le32(U64_LO(mapping));
1107 static inline int bnx2x_alloc_rx_skb(struct bnx2x *bp,
1108 struct bnx2x_fastpath *fp, u16 index)
1110 struct sk_buff *skb;
1111 struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
1112 struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
1115 skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
1116 if (unlikely(skb == NULL))
1119 mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_size,
1120 PCI_DMA_FROMDEVICE);
1121 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
1127 pci_unmap_addr_set(rx_buf, mapping, mapping);
1129 rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
1130 rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
1135 /* note that we are not allocating a new skb,
1136 * we are just moving one from cons to prod;
1137 * we are not creating a new mapping,
1138 * so there is no need to check for dma_mapping_error(). */
1140 static void bnx2x_reuse_rx_skb(struct bnx2x_fastpath *fp,
1141 struct sk_buff *skb, u16 cons, u16 prod)
1143 struct bnx2x *bp = fp->bp;
1144 struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
1145 struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
1146 struct eth_rx_bd *cons_bd = &fp->rx_desc_ring[cons];
1147 struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
1149 pci_dma_sync_single_for_device(bp->pdev,
1150 pci_unmap_addr(cons_rx_buf, mapping),
1151 RX_COPY_THRESH, PCI_DMA_FROMDEVICE);
1153 prod_rx_buf->skb = cons_rx_buf->skb;
1154 pci_unmap_addr_set(prod_rx_buf, mapping,
1155 pci_unmap_addr(cons_rx_buf, mapping));
1156 *prod_bd = *cons_bd;
1159 static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
1162 u16 last_max = fp->last_max_sge;
1164 if (SUB_S16(idx, last_max) > 0)
1165 fp->last_max_sge = idx;
1168 static void bnx2x_clear_sge_mask_next_elems(struct bnx2x_fastpath *fp)
1172 for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
1173 int idx = RX_SGE_CNT * i - 1;
1175 for (j = 0; j < 2; j++) {
1176 SGE_MASK_CLEAR_BIT(fp, idx);
1182 static void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
1183 struct eth_fast_path_rx_cqe *fp_cqe)
1185 struct bnx2x *bp = fp->bp;
1186 u16 sge_len = SGE_PAGE_ALIGN(le16_to_cpu(fp_cqe->pkt_len) -
1187 le16_to_cpu(fp_cqe->len_on_bd)) >>
1189 u16 last_max, last_elem, first_elem;
1196 /* First mark all used pages */
1197 for (i = 0; i < sge_len; i++)
1198 SGE_MASK_CLEAR_BIT(fp, RX_SGE(le16_to_cpu(fp_cqe->sgl[i])));
1200 DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
1201 sge_len - 1, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));
1203 /* Here we assume that the last SGE index is the biggest */
1204 prefetch((void *)(fp->sge_mask));
1205 bnx2x_update_last_max_sge(fp, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));
1207 last_max = RX_SGE(fp->last_max_sge);
1208 last_elem = last_max >> RX_SGE_MASK_ELEM_SHIFT;
1209 first_elem = RX_SGE(fp->rx_sge_prod) >> RX_SGE_MASK_ELEM_SHIFT;
1211 /* If ring is not full */
1212 if (last_elem + 1 != first_elem)
1215 /* Now update the prod */
1216 for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
1217 if (likely(fp->sge_mask[i]))
1220 fp->sge_mask[i] = RX_SGE_MASK_ELEM_ONE_MASK;
1221 delta += RX_SGE_MASK_ELEM_SZ;
1225 fp->rx_sge_prod += delta;
1226 /* clear page-end entries */
1227 bnx2x_clear_sge_mask_next_elems(fp);
1230 DP(NETIF_MSG_RX_STATUS,
1231 "fp->last_max_sge = %d fp->rx_sge_prod = %d\n",
1232 fp->last_max_sge, fp->rx_sge_prod);
1235 static inline void bnx2x_init_sge_ring_bit_mask(struct bnx2x_fastpath *fp)
1237 /* Set the mask to all 1-s: it's faster to compare to 0 than to 0xf-s */
1238 memset(fp->sge_mask, 0xff,
1239 (NUM_RX_SGE >> RX_SGE_MASK_ELEM_SHIFT)*sizeof(u64));
1241 /* Clear the two last indices in the page to 1:
1242 these are the indices that correspond to the "next" element,
1243 hence will never be indicated and should be removed from
1244 the calculations. */
1245 bnx2x_clear_sge_mask_next_elems(fp);
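/* Sketch of the mask convention (assuming 64-bit mask elements): a set
 * bit means the SGE entry may be re-produced to the FW; the bits that
 * correspond to "next page" entries are kept permanently cleared by
 * bnx2x_clear_sge_mask_next_elems() so they never enter the producer
 * arithmetic in bnx2x_update_sge_prod(). */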
1248 static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
1249 struct sk_buff *skb, u16 cons, u16 prod)
1251 struct bnx2x *bp = fp->bp;
1252 struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
1253 struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
1254 struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
1257 /* move empty skb from pool to prod and map it */
1258 prod_rx_buf->skb = fp->tpa_pool[queue].skb;
1259 mapping = pci_map_single(bp->pdev, fp->tpa_pool[queue].skb->data,
1260 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
1261 pci_unmap_addr_set(prod_rx_buf, mapping, mapping);
1263 /* move partial skb from cons to pool (don't unmap yet) */
1264 fp->tpa_pool[queue] = *cons_rx_buf;
1266 /* mark bin state as start - print error if current state != stop */
1267 if (fp->tpa_state[queue] != BNX2X_TPA_STOP)
1268 BNX2X_ERR("start of bin not in stop [%d]\n", queue);
1270 fp->tpa_state[queue] = BNX2X_TPA_START;
1272 /* point prod_bd to new skb */
1273 prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
1274 prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
1276 #ifdef BNX2X_STOP_ON_ERROR
1277 fp->tpa_queue_used |= (1 << queue);
1278 #ifdef __powerpc64__
1279 DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n",
1281 DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
1283 fp->tpa_queue_used);
1287 static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
1288 struct sk_buff *skb,
1289 struct eth_fast_path_rx_cqe *fp_cqe,
1292 struct sw_rx_page *rx_pg, old_rx_pg;
1293 u16 len_on_bd = le16_to_cpu(fp_cqe->len_on_bd);
1294 u32 i, frag_len, frag_size, pages;
1298 frag_size = le16_to_cpu(fp_cqe->pkt_len) - len_on_bd;
1299 pages = SGE_PAGE_ALIGN(frag_size) >> SGE_PAGE_SHIFT;
1301 /* This is needed in order to enable forwarding support */
1303 skb_shinfo(skb)->gso_size = min((u32)SGE_PAGE_SIZE,
1304 max(frag_size, (u32)len_on_bd));
1306 #ifdef BNX2X_STOP_ON_ERROR
1308 min((u32)8, (u32)MAX_SKB_FRAGS) * SGE_PAGE_SIZE * PAGES_PER_SGE) {
1309 BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
1311 BNX2X_ERR("fp_cqe->pkt_len = %d fp_cqe->len_on_bd = %d\n",
1312 fp_cqe->pkt_len, len_on_bd);
1318 /* Run through the SGL and compose the fragmented skb */
1319 for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
1320 u16 sge_idx = RX_SGE(le16_to_cpu(fp_cqe->sgl[j]));
1322 /* FW gives the indices of the SGE as if the ring is an array
1323 (meaning that "next" element will consume 2 indices) */
1324 frag_len = min(frag_size, (u32)(SGE_PAGE_SIZE*PAGES_PER_SGE));
1325 rx_pg = &fp->rx_page_ring[sge_idx];
1328 /* If we fail to allocate a substitute page, we simply stop
1329 where we are and drop the whole packet */
1330 err = bnx2x_alloc_rx_sge(bp, fp, sge_idx);
1331 if (unlikely(err)) {
1332 fp->eth_q_stats.rx_skb_alloc_failed++;
1336 /* Unmap the page as we are going to pass it to the stack */
1337 pci_unmap_page(bp->pdev, pci_unmap_addr(&old_rx_pg, mapping),
1338 SGE_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);
1340 /* Add one frag and update the appropriate fields in the skb */
1341 skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);
1343 skb->data_len += frag_len;
1344 skb->truesize += frag_len;
1345 skb->len += frag_len;
1347 frag_size -= frag_len;
1353 static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
1354 u16 queue, int pad, int len, union eth_rx_cqe *cqe,
1357 struct sw_rx_bd *rx_buf = &fp->tpa_pool[queue];
1358 struct sk_buff *skb = rx_buf->skb;
1360 struct sk_buff *new_skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
1362 /* Unmap skb in the pool anyway, as we are going to change
1363 pool entry status to BNX2X_TPA_STOP even if new skb allocation fails. */
1365 pci_unmap_single(bp->pdev, pci_unmap_addr(rx_buf, mapping),
1366 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
1368 if (likely(new_skb)) {
1369 /* fix ip xsum and give it to the stack */
1370 /* (no need to map the new skb) */
1373 (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
1374 PARSING_FLAGS_VLAN);
1375 int is_not_hwaccel_vlan_cqe =
1376 (is_vlan_cqe && (!(bp->flags & HW_VLAN_RX_FLAG)));
1380 prefetch(((char *)(skb)) + 128);
1382 #ifdef BNX2X_STOP_ON_ERROR
1383 if (pad + len > bp->rx_buf_size) {
1384 BNX2X_ERR("skb_put is about to fail... "
1385 "pad %d len %d rx_buf_size %d\n",
1386 pad, len, bp->rx_buf_size);
1392 skb_reserve(skb, pad);
1395 skb->protocol = eth_type_trans(skb, bp->dev);
1396 skb->ip_summed = CHECKSUM_UNNECESSARY;
1401 iph = (struct iphdr *)skb->data;
1403 /* If there is no Rx VLAN offloading -
1404 take the VLAN tag into account */
1405 if (unlikely(is_not_hwaccel_vlan_cqe))
1406 iph = (struct iphdr *)((u8 *)iph + VLAN_HLEN);
1409 iph->check = ip_fast_csum((u8 *)iph, iph->ihl);
1412 if (!bnx2x_fill_frag_skb(bp, fp, skb,
1413 &cqe->fast_path_cqe, cqe_idx)) {
1415 if ((bp->vlgrp != NULL) && is_vlan_cqe &&
1416 (!is_not_hwaccel_vlan_cqe))
1417 vlan_hwaccel_receive_skb(skb, bp->vlgrp,
1418 le16_to_cpu(cqe->fast_path_cqe.
1422 netif_receive_skb(skb);
1424 DP(NETIF_MSG_RX_STATUS, "Failed to allocate new pages"
1425 " - dropping packet!\n");
1430 /* put new skb in bin */
1431 fp->tpa_pool[queue].skb = new_skb;
1434 /* else drop the packet and keep the buffer in the bin */
1435 DP(NETIF_MSG_RX_STATUS,
1436 "Failed to allocate new skb - dropping packet!\n");
1437 fp->eth_q_stats.rx_skb_alloc_failed++;
1440 fp->tpa_state[queue] = BNX2X_TPA_STOP;
1443 static inline void bnx2x_update_rx_prod(struct bnx2x *bp,
1444 struct bnx2x_fastpath *fp,
1445 u16 bd_prod, u16 rx_comp_prod,
1448 struct ustorm_eth_rx_producers rx_prods = {0};
1451 /* Update producers */
1452 rx_prods.bd_prod = bd_prod;
1453 rx_prods.cqe_prod = rx_comp_prod;
1454 rx_prods.sge_prod = rx_sge_prod;
1457 * Make sure that the BD and SGE data is updated before updating the
1458 * producers since FW might read the BD/SGE right after the producer
1460 * This is only applicable for weak-ordered memory model archs such
1461 * as IA-64. The following barrier is also mandatory since the FW
1462 * assumes BDs must have buffers. */
1466 for (i = 0; i < sizeof(struct ustorm_eth_rx_producers)/4; i++)
1467 REG_WR(bp, BAR_USTRORM_INTMEM +
1468 USTORM_RX_PRODS_OFFSET(BP_PORT(bp), fp->cl_id) + i*4,
1469 ((u32 *)&rx_prods)[i]);
1471 mmiowb(); /* keep prod updates ordered */
1473 DP(NETIF_MSG_RX_STATUS,
1474 "queue[%d]: wrote bd_prod %u cqe_prod %u sge_prod %u\n",
1475 fp->index, bd_prod, rx_comp_prod, rx_sge_prod);
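/* Ordering contract (per the comment above): the wmb() makes the BD and
 * SGE payloads globally visible before the producer values land in
 * USTORM internal memory, and mmiowb() keeps the producer writes
 * themselves ordered on weakly-ordered platforms. */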
1478 static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
1480 struct bnx2x *bp = fp->bp;
1481 u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
1482 u16 hw_comp_cons, sw_comp_cons, sw_comp_prod;
1485 #ifdef BNX2X_STOP_ON_ERROR
1486 if (unlikely(bp->panic))
1490 /* the CQ "next element" is the same size as a regular element,
1491 that's why it's OK here */
1492 hw_comp_cons = le16_to_cpu(*fp->rx_cons_sb);
1493 if ((hw_comp_cons & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
1496 bd_cons = fp->rx_bd_cons;
1497 bd_prod = fp->rx_bd_prod;
1498 bd_prod_fw = bd_prod;
1499 sw_comp_cons = fp->rx_comp_cons;
1500 sw_comp_prod = fp->rx_comp_prod;
1502 /* Memory barrier necessary as speculative reads of the rx
1503 * buffer can be ahead of the index in the status block
1507 DP(NETIF_MSG_RX_STATUS,
1508 "queue[%d]: hw_comp_cons %u sw_comp_cons %u\n",
1509 fp->index, hw_comp_cons, sw_comp_cons);
1511 while (sw_comp_cons != hw_comp_cons) {
1512 struct sw_rx_bd *rx_buf = NULL;
1513 struct sk_buff *skb;
1514 union eth_rx_cqe *cqe;
1518 comp_ring_cons = RCQ_BD(sw_comp_cons);
1519 bd_prod = RX_BD(bd_prod);
1520 bd_cons = RX_BD(bd_cons);
1522 /* Prefetch the page containing the BD descriptor at the
1523 producer's index; it will be needed when a new skb is allocated */
1525 prefetch((void *)(PAGE_ALIGN((unsigned long)
1526 (&fp->rx_desc_ring[bd_prod])) -
1529 cqe = &fp->rx_comp_ring[comp_ring_cons];
1530 cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
1532 DP(NETIF_MSG_RX_STATUS, "CQE type %x err %x status %x"
1533 " queue %x vlan %x len %u\n", CQE_TYPE(cqe_fp_flags),
1534 cqe_fp_flags, cqe->fast_path_cqe.status_flags,
1535 le32_to_cpu(cqe->fast_path_cqe.rss_hash_result),
1536 le16_to_cpu(cqe->fast_path_cqe.vlan_tag),
1537 le16_to_cpu(cqe->fast_path_cqe.pkt_len));
1539 /* is this a slowpath msg? */
1540 if (unlikely(CQE_TYPE(cqe_fp_flags))) {
1541 bnx2x_sp_event(fp, cqe);
1544 /* this is an rx packet */
1546 rx_buf = &fp->rx_buf_ring[bd_cons];
1548 len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
1549 pad = cqe->fast_path_cqe.placement_offset;
1551 /* If CQE is marked both TPA_START and TPA_END
1552 it is a non-TPA CQE */
1553 if ((!fp->disable_tpa) &&
1554 (TPA_TYPE(cqe_fp_flags) !=
1555 (TPA_TYPE_START | TPA_TYPE_END))) {
1556 u16 queue = cqe->fast_path_cqe.queue_index;
1558 if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_START) {
1559 DP(NETIF_MSG_RX_STATUS,
1560 "calling tpa_start on queue %d\n",
1563 bnx2x_tpa_start(fp, queue, skb,
1568 if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_END) {
1569 DP(NETIF_MSG_RX_STATUS,
1570 "calling tpa_stop on queue %d\n",
1573 if (!BNX2X_RX_SUM_FIX(cqe))
1574 BNX2X_ERR("STOP on none TCP "
1577 /* This is the size of the linear data
1579 len = le16_to_cpu(cqe->fast_path_cqe.
1581 bnx2x_tpa_stop(bp, fp, queue, pad,
1582 len, cqe, comp_ring_cons);
1583 #ifdef BNX2X_STOP_ON_ERROR
1588 bnx2x_update_sge_prod(fp,
1589 &cqe->fast_path_cqe);
1594 pci_dma_sync_single_for_device(bp->pdev,
1595 pci_unmap_addr(rx_buf, mapping),
1596 pad + RX_COPY_THRESH,
1597 PCI_DMA_FROMDEVICE);
1599 prefetch(((char *)(skb)) + 128);
1601 /* is this an error packet? */
1602 if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
1603 DP(NETIF_MSG_RX_ERR,
1604 "ERROR flags %x rx packet %u\n",
1605 cqe_fp_flags, sw_comp_cons);
1606 fp->eth_q_stats.rx_err_discard_pkt++;
1610 /* Since we don't have a jumbo ring
1611 * copy small packets if mtu > 1500 */
1613 if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
1614 (len <= RX_COPY_THRESH)) {
1615 struct sk_buff *new_skb;
1617 new_skb = netdev_alloc_skb(bp->dev,
1619 if (new_skb == NULL) {
1620 DP(NETIF_MSG_RX_ERR,
1621 "ERROR packet dropped "
1622 "because of alloc failure\n");
1623 fp->eth_q_stats.rx_skb_alloc_failed++;
1628 skb_copy_from_linear_data_offset(skb, pad,
1629 new_skb->data + pad, len);
1630 skb_reserve(new_skb, pad);
1631 skb_put(new_skb, len);
1633 bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
1638 if (likely(bnx2x_alloc_rx_skb(bp, fp, bd_prod) == 0)) {
1639 pci_unmap_single(bp->pdev,
1640 pci_unmap_addr(rx_buf, mapping),
1642 PCI_DMA_FROMDEVICE);
1643 skb_reserve(skb, pad);
1647 DP(NETIF_MSG_RX_ERR,
1648 "ERROR packet dropped because "
1649 "of alloc failure\n");
1650 fp->eth_q_stats.rx_skb_alloc_failed++;
1652 bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
1656 skb->protocol = eth_type_trans(skb, bp->dev);
1658 skb->ip_summed = CHECKSUM_NONE;
1660 if (likely(BNX2X_RX_CSUM_OK(cqe)))
1661 skb->ip_summed = CHECKSUM_UNNECESSARY;
1663 fp->eth_q_stats.hw_csum_err++;
1667 skb_record_rx_queue(skb, fp->index);
1670 if ((bp->vlgrp != NULL) && (bp->flags & HW_VLAN_RX_FLAG) &&
1671 (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
1672 PARSING_FLAGS_VLAN))
1673 vlan_hwaccel_receive_skb(skb, bp->vlgrp,
1674 le16_to_cpu(cqe->fast_path_cqe.vlan_tag));
1677 netif_receive_skb(skb);
1683 bd_cons = NEXT_RX_IDX(bd_cons);
1684 bd_prod = NEXT_RX_IDX(bd_prod);
1685 bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
1688 sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
1689 sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);
1691 if (rx_pkt == budget)
1695 fp->rx_bd_cons = bd_cons;
1696 fp->rx_bd_prod = bd_prod_fw;
1697 fp->rx_comp_cons = sw_comp_cons;
1698 fp->rx_comp_prod = sw_comp_prod;
1700 /* Update producers */
1701 bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
1704 fp->rx_pkt += rx_pkt;
1710 static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
1712 struct bnx2x_fastpath *fp = fp_cookie;
1713 struct bnx2x *bp = fp->bp;
1715 /* Return here if interrupt is disabled */
1716 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
1717 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
1721 DP(BNX2X_MSG_FP, "got an MSI-X interrupt on IDX:SB [%d:%d]\n",
1722 fp->index, fp->sb_id);
1723 bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);
1725 #ifdef BNX2X_STOP_ON_ERROR
1726 if (unlikely(bp->panic))
1729 /* Handle Rx or Tx according to MSI-X vector */
1730 if (fp->is_rx_queue) {
1731 prefetch(fp->rx_cons_sb);
1732 prefetch(&fp->status_blk->u_status_block.status_block_index);
1734 napi_schedule(&bnx2x_fp(bp, fp->index, napi));
1737 prefetch(fp->tx_cons_sb);
1738 prefetch(&fp->status_blk->c_status_block.status_block_index);
1740 bnx2x_update_fpsb_idx(fp);
1744 /* Re-enable interrupts */
1745 bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID,
1746 le16_to_cpu(fp->fp_u_idx), IGU_INT_NOP, 1);
1747 bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID,
1748 le16_to_cpu(fp->fp_c_idx), IGU_INT_ENABLE, 1);
1754 static irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
1756 struct bnx2x *bp = netdev_priv(dev_instance);
1757 u16 status = bnx2x_ack_int(bp);
1761 /* Return here if interrupt is shared and it's not for us */
1762 if (unlikely(status == 0)) {
1763 DP(NETIF_MSG_INTR, "not our interrupt!\n");
1766 DP(NETIF_MSG_INTR, "got an interrupt status 0x%x\n", status);
1768 /* Return here if interrupt is disabled */
1769 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
1770 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
1774 #ifdef BNX2X_STOP_ON_ERROR
1775 if (unlikely(bp->panic))
1779 for (i = 0; i < BNX2X_NUM_QUEUES(bp); i++) {
1780 struct bnx2x_fastpath *fp = &bp->fp[i];
1782 mask = 0x2 << fp->sb_id;
1783 if (status & mask) {
1784 /* Handle Rx or Tx according to SB id */
1785 if (fp->is_rx_queue) {
1786 prefetch(fp->rx_cons_sb);
1787 prefetch(&fp->status_blk->u_status_block.
1788 status_block_index);
1790 napi_schedule(&bnx2x_fp(bp, fp->index, napi));
1793 prefetch(fp->tx_cons_sb);
1794 prefetch(&fp->status_blk->c_status_block.
1795 status_block_index);
1797 bnx2x_update_fpsb_idx(fp);
1801 /* Re-enable interrupts */
1802 bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID,
1803 le16_to_cpu(fp->fp_u_idx),
1805 bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID,
1806 le16_to_cpu(fp->fp_c_idx),
1814 if (unlikely(status & 0x1)) {
1815 queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
1823 DP(NETIF_MSG_INTR, "got an unknown interrupt! (status %u)\n",
1829 /* end of fast path */
1831 static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event);
1836 /* General service functions */
1839 static int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource)
1842 u32 resource_bit = (1 << resource);
1843 int func = BP_FUNC(bp);
1844 u32 hw_lock_control_reg;
1847 /* Validating that the resource is within range */
1848 if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1850 "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1851 resource, HW_LOCK_MAX_RESOURCE_VALUE);
1856 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1858 hw_lock_control_reg =
1859 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1862 /* Validating that the resource is not already taken */
1863 lock_status = REG_RD(bp, hw_lock_control_reg);
1864 if (lock_status & resource_bit) {
1865 DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n",
1866 lock_status, resource_bit);
1870 /* Try for 5 seconds, every 5 ms */
1871 for (cnt = 0; cnt < 1000; cnt++) {
1872 /* Try to acquire the lock */
1873 REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
1874 lock_status = REG_RD(bp, hw_lock_control_reg);
1875 if (lock_status & resource_bit)
1880 DP(NETIF_MSG_HW, "Timeout\n");
1884 static int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource)
1887 u32 resource_bit = (1 << resource);
1888 int func = BP_FUNC(bp);
1889 u32 hw_lock_control_reg;
1891 /* Validating that the resource is within range */
1892 if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1894 "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1895 resource, HW_LOCK_MAX_RESOURCE_VALUE);
1900 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1902 hw_lock_control_reg =
1903 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1906 /* Validating that the resource is currently taken */
1907 lock_status = REG_RD(bp, hw_lock_control_reg);
1908 if (!(lock_status & resource_bit)) {
1909 DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n",
1910 lock_status, resource_bit);
1914 REG_WR(bp, hw_lock_control_reg, resource_bit);
1918 /* HW Lock for shared dual port PHYs */
1919 static void bnx2x_acquire_phy_lock(struct bnx2x *bp)
1921 mutex_lock(&bp->port.phy_mutex);
1923 if (bp->port.need_hw_lock)
1924 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
1927 static void bnx2x_release_phy_lock(struct bnx2x *bp)
1929 if (bp->port.need_hw_lock)
1930 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
1932 mutex_unlock(&bp->port.phy_mutex);
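/* Typical pairing (sketch): PHY/MDIO accesses elsewhere in this file
 * are bracketed by these helpers so the MDIO HW lock is only taken on
 * boards that need it:
 *
 *   bnx2x_acquire_phy_lock(bp);
 *   rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars);
 *   bnx2x_release_phy_lock(bp);
 */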
1935 int bnx2x_get_gpio(struct bnx2x *bp, int gpio_num, u8 port)
1937 /* The GPIO should be swapped if swap register is set and active */
1938 int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
1939 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
1940 int gpio_shift = gpio_num +
1941 (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
1942 u32 gpio_mask = (1 << gpio_shift);
1946 if (gpio_num > MISC_REGISTERS_GPIO_3) {
1947 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
1951 /* read GPIO value */
1952 gpio_reg = REG_RD(bp, MISC_REG_GPIO);
1954 /* get the requested pin value */
1955 if ((gpio_reg & gpio_mask) == gpio_mask)
1960 DP(NETIF_MSG_LINK, "pin %d value 0x%x\n", gpio_num, value);
1965 int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
1967 /* The GPIO should be swapped if swap register is set and active */
1968 int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
1969 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
1970 int gpio_shift = gpio_num +
1971 (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
1972 u32 gpio_mask = (1 << gpio_shift);
1975 if (gpio_num > MISC_REGISTERS_GPIO_3) {
1976 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
1980 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
1981 /* read GPIO and mask except the float bits */
1982 gpio_reg = (REG_RD(bp, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT);
1985 case MISC_REGISTERS_GPIO_OUTPUT_LOW:
1986 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output low\n",
1987 gpio_num, gpio_shift);
1988 /* clear FLOAT and set CLR */
1989 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1990 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS);
1993 case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
1994 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output high\n",
1995 gpio_num, gpio_shift);
1996 /* clear FLOAT and set SET */
1997 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1998 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_SET_POS);
2001 case MISC_REGISTERS_GPIO_INPUT_HI_Z:
2002 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> input\n",
2003 gpio_num, gpio_shift);
2005 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
2012 REG_WR(bp, MISC_REG_GPIO, gpio_reg);
2013 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
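/* Usage sketch (hypothetical pin): drive GPIO 2 low on the given port,
 * then float it again:
 *
 *   bnx2x_set_gpio(bp, 2, MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
 *   bnx2x_set_gpio(bp, 2, MISC_REGISTERS_GPIO_INPUT_HI_Z, port);
 */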
2018 int bnx2x_set_gpio_int(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
2020 /* The GPIO should be swapped if swap register is set and active */
2021 int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
2022 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
2023 int gpio_shift = gpio_num +
2024 (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
2025 u32 gpio_mask = (1 << gpio_shift);
2028 if (gpio_num > MISC_REGISTERS_GPIO_3) {
2029 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
2033 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
2035 gpio_reg = REG_RD(bp, MISC_REG_GPIO_INT);
2038 case MISC_REGISTERS_GPIO_INT_OUTPUT_CLR:
2039 DP(NETIF_MSG_LINK, "Clear GPIO INT %d (shift %d) -> "
2040 "output low\n", gpio_num, gpio_shift);
2041 /* clear SET and set CLR */
2042 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
2043 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
2046 case MISC_REGISTERS_GPIO_INT_OUTPUT_SET:
2047 DP(NETIF_MSG_LINK, "Set GPIO INT %d (shift %d) -> "
2048 "output high\n", gpio_num, gpio_shift);
2049 /* clear CLR and set SET */
2050 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
2051 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
2058 REG_WR(bp, MISC_REG_GPIO_INT, gpio_reg);
2059 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
2064 static int bnx2x_set_spio(struct bnx2x *bp, int spio_num, u32 mode)
2066 u32 spio_mask = (1 << spio_num);
2069 if ((spio_num < MISC_REGISTERS_SPIO_4) ||
2070 (spio_num > MISC_REGISTERS_SPIO_7)) {
2071 BNX2X_ERR("Invalid SPIO %d\n", spio_num);
2075 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
2076 /* read SPIO and mask except the float bits */
2077 spio_reg = (REG_RD(bp, MISC_REG_SPIO) & MISC_REGISTERS_SPIO_FLOAT);
2080 case MISC_REGISTERS_SPIO_OUTPUT_LOW:
2081 DP(NETIF_MSG_LINK, "Set SPIO %d -> output low\n", spio_num);
2082 /* clear FLOAT and set CLR */
2083 spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
2084 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_CLR_POS);
2087 case MISC_REGISTERS_SPIO_OUTPUT_HIGH:
2088 DP(NETIF_MSG_LINK, "Set SPIO %d -> output high\n", spio_num);
2089 /* clear FLOAT and set SET */
2090 spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
2091 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_SET_POS);
2094 case MISC_REGISTERS_SPIO_INPUT_HI_Z:
2095 DP(NETIF_MSG_LINK, "Set SPIO %d -> input\n", spio_num);
2097 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
2104 REG_WR(bp, MISC_REG_SPIO, spio_reg);
2105 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
2110 static void bnx2x_calc_fc_adv(struct bnx2x *bp)
2112 switch (bp->link_vars.ieee_fc &
2113 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK) {
2114 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE:
2115 bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
2119 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH:
2120 bp->port.advertising |= (ADVERTISED_Asym_Pause |
2124 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC:
2125 bp->port.advertising |= ADVERTISED_Asym_Pause;
2129 bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
2135 static void bnx2x_link_report(struct bnx2x *bp)
2137 if (bp->state == BNX2X_STATE_DISABLED) {
2138 netif_carrier_off(bp->dev);
2139 printk(KERN_ERR PFX "%s NIC Link is Down\n", bp->dev->name);
2143 if (bp->link_vars.link_up) {
2144 if (bp->state == BNX2X_STATE_OPEN)
2145 netif_carrier_on(bp->dev);
2146 printk(KERN_INFO PFX "%s NIC Link is Up, ", bp->dev->name);
2148 printk("%d Mbps ", bp->link_vars.line_speed);
2150 if (bp->link_vars.duplex == DUPLEX_FULL)
2151 printk("full duplex");
2153 printk("half duplex");
2155 if (bp->link_vars.flow_ctrl != BNX2X_FLOW_CTRL_NONE) {
2156 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) {
2157 printk(", receive ");
2158 if (bp->link_vars.flow_ctrl &
2160 printk("& transmit ");
2162 printk(", transmit ");
2164 printk("flow control ON");
2168 } else { /* link_down */
2169 netif_carrier_off(bp->dev);
2170 printk(KERN_ERR PFX "%s NIC Link is Down\n", bp->dev->name);
2174 static u8 bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode)
2176 if (!BP_NOMCP(bp)) {
2179 /* Initialize link parameters structure variables */
2180 /* It is recommended to turn off RX FC for jumbo frames
2181 for better performance */
2182 if (bp->dev->mtu > 5000)
2183 bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_TX;
2185 bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;
2187 bnx2x_acquire_phy_lock(bp);
2189 if (load_mode == LOAD_DIAG)
2190 bp->link_params.loopback_mode = LOOPBACK_XGXS_10;
2192 rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars);
2194 bnx2x_release_phy_lock(bp);
2196 bnx2x_calc_fc_adv(bp);
2198 if (CHIP_REV_IS_SLOW(bp) && bp->link_vars.link_up) {
2199 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2200 bnx2x_link_report(bp);
2205 BNX2X_ERR("Bootcode is missing - can not initialize link\n");
2209 static void bnx2x_link_set(struct bnx2x *bp)
2211 if (!BP_NOMCP(bp)) {
2212 bnx2x_acquire_phy_lock(bp);
2213 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
2214 bnx2x_release_phy_lock(bp);
2216 bnx2x_calc_fc_adv(bp);
2218 BNX2X_ERR("Bootcode is missing - can not set link\n");
2221 static void bnx2x__link_reset(struct bnx2x *bp)
2223 if (!BP_NOMCP(bp)) {
2224 bnx2x_acquire_phy_lock(bp);
2225 bnx2x_link_reset(&bp->link_params, &bp->link_vars, 1);
2226 bnx2x_release_phy_lock(bp);
2228 BNX2X_ERR("Bootcode is missing - can not reset link\n");
2231 static u8 bnx2x_link_test(struct bnx2x *bp)
2235 bnx2x_acquire_phy_lock(bp);
2236 rc = bnx2x_test_link(&bp->link_params, &bp->link_vars);
2237 bnx2x_release_phy_lock(bp);
2242 static void bnx2x_init_port_minmax(struct bnx2x *bp)
2244 u32 r_param = bp->link_vars.line_speed / 8;
2245 u32 fair_periodic_timeout_usec;
2248 memset(&(bp->cmng.rs_vars), 0,
2249 sizeof(struct rate_shaping_vars_per_port));
2250 memset(&(bp->cmng.fair_vars), 0, sizeof(struct fairness_vars_per_port));
2252 /* 100 usec in SDM ticks = 25 since each tick is 4 usec */
2253 bp->cmng.rs_vars.rs_periodic_timeout = RS_PERIODIC_TIMEOUT_USEC / 4;
2255 /* this is the threshold below which no timer arming will occur:
2256 the 1.25 coefficient makes the threshold a little bigger than
2257 the real time, to compensate for timer inaccuracy */
2258 bp->cmng.rs_vars.rs_threshold =
2259 (RS_PERIODIC_TIMEOUT_USEC * r_param * 5) / 4;
2261 /* resolution of fairness timer */
2262 fair_periodic_timeout_usec = QM_ARB_BYTES / r_param;
2263 /* for 10G it is 1000usec. for 1G it is 10000usec. */
2264 t_fair = T_FAIR_COEF / bp->link_vars.line_speed;
2266 /* this is the threshold below which we won't arm the timer anymore */
2267 bp->cmng.fair_vars.fair_threshold = QM_ARB_BYTES;
2269 /* we multiply by 1e3/8 to get bytes/msec.
2270 We don't want the credits to exceed
2271 t_fair*FAIR_MEM (the algorithm resolution) */
2272 bp->cmng.fair_vars.upper_bound = r_param * t_fair * FAIR_MEM;
2273 /* since each tick is 4 usec */
2274 bp->cmng.fair_vars.fairness_timeout = fair_periodic_timeout_usec / 4;
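/* Worked example for the arithmetic above (a sketch assuming a 10G link,
 * i.e. line_speed = 10000 Mbps, and the 100 usec rate-shaping period the
 * comments imply):
 *	r_param      = 10000 / 8 = 1250 bytes/usec
 *	rs_threshold = 100 * 1250 * 5/4 = 156250 (1.25 periods at line rate)
 *	t_fair       = 1000 usec, per the 10G case noted above
 * fair_periodic_timeout_usec is thus the time to move QM_ARB_BYTES at line
 * rate, converted to 4 usec SDM ticks just like the rate-shaping timeout.
 */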
2277 /* Calculates the sum of vn_min_rates.
2278 It's needed for further normalizing of the min_rates.
2280 Returns the sum of vn_min_rates, or
2282 0 - if all the min_rates are 0.
2283 In the latter case the fairness algorithm should be deactivated.
2284 If not all min_rates are zero then those that are zeroes will be set to 1.
2286 static void bnx2x_calc_vn_weight_sum(struct bnx2x *bp)
2289 int port = BP_PORT(bp);
2292 bp->vn_weight_sum = 0;
2293 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
2294 int func = 2*vn + port;
2295 u32 vn_cfg = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
2296 u32 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
2297 FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
2299 /* Skip hidden vns */
2300 if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE)
2303 /* If min rate is zero - set it to 1 */
2305 vn_min_rate = DEF_MIN_RATE;
2309 bp->vn_weight_sum += vn_min_rate;
2312 /* ... only if all min rates are zeros - disable fairness */
2314 bp->vn_weight_sum = 0;
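/* Example with hypothetical values: two visible VNs whose min BW fields are
 * 30 and 70 yield vn_weight_sum = 3000 + 7000 = 10000, so each VN later gets
 * a fairness credit proportional to its share. Only when every min rate is
 * zero is the sum forced back to 0 and fairness left disabled.
 */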
2317 static void bnx2x_init_vn_minmax(struct bnx2x *bp, int func)
2319 struct rate_shaping_vars_per_vn m_rs_vn;
2320 struct fairness_vars_per_vn m_fair_vn;
2321 u32 vn_cfg = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
2322 u16 vn_min_rate, vn_max_rate;
2325 /* If function is hidden - set min and max to zeroes */
2326 if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) {
2331 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
2332 FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
2333 /* If fairness is enabled (not all min rates are zeroes) and
2334 if current min rate is zero - set it to 1.
2335 This is a requirement of the algorithm. */
2336 if (bp->vn_weight_sum && (vn_min_rate == 0))
2337 vn_min_rate = DEF_MIN_RATE;
2338 vn_max_rate = ((vn_cfg & FUNC_MF_CFG_MAX_BW_MASK) >>
2339 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
2343 "func %d: vn_min_rate=%d vn_max_rate=%d vn_weight_sum=%d\n",
2344 func, vn_min_rate, vn_max_rate, bp->vn_weight_sum);
2346 memset(&m_rs_vn, 0, sizeof(struct rate_shaping_vars_per_vn));
2347 memset(&m_fair_vn, 0, sizeof(struct fairness_vars_per_vn));
2349 /* global vn counter - maximal Mbps for this vn */
2350 m_rs_vn.vn_counter.rate = vn_max_rate;
2352 /* quota - number of bytes transmitted in this period */
2353 m_rs_vn.vn_counter.quota =
2354 (vn_max_rate * RS_PERIODIC_TIMEOUT_USEC) / 8;
2356 if (bp->vn_weight_sum) {
2357 /* credit for each period of the fairness algorithm:
2358 number of bytes in T_FAIR (the vn shares the port rate).
2359 vn_weight_sum should not be larger than 10000, thus
2360 T_FAIR_COEF / (8 * vn_weight_sum) will always be greater than zero */
2362 m_fair_vn.vn_credit_delta =
2363 max((u32)(vn_min_rate * (T_FAIR_COEF /
2364 (8 * bp->vn_weight_sum))),
2365 (u32)(bp->cmng.fair_vars.fair_threshold * 2));
2366 DP(NETIF_MSG_IFUP, "m_fair_vn.vn_credit_delta=%d\n",
2367 m_fair_vn.vn_credit_delta);
2370 /* Store it to internal memory */
2371 for (i = 0; i < sizeof(struct rate_shaping_vars_per_vn)/4; i++)
2372 REG_WR(bp, BAR_XSTRORM_INTMEM +
2373 XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func) + i * 4,
2374 ((u32 *)(&m_rs_vn))[i]);
2376 for (i = 0; i < sizeof(struct fairness_vars_per_vn)/4; i++)
2377 REG_WR(bp, BAR_XSTRORM_INTMEM +
2378 XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func) + i * 4,
2379 ((u32 *)(&m_fair_vn))[i]);
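/* Example of the quota above (hypothetical values): a VN capped at
 * vn_max_rate = 10000 Mbps gets quota = 10000 * 100 / 8 = 125000 bytes per
 * 100 usec rate-shaping period, i.e. exactly 10 Gbps sustained.
 */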
2383 /* This function is called upon link interrupt */
2384 static void bnx2x_link_attn(struct bnx2x *bp)
2386 /* Make sure that we are synced with the current statistics */
2387 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2389 bnx2x_link_update(&bp->link_params, &bp->link_vars);
2391 if (bp->link_vars.link_up) {
2393 /* dropless flow control */
2394 if (CHIP_IS_E1H(bp) && bp->dropless_fc) {
2395 int port = BP_PORT(bp);
2396 u32 pause_enabled = 0;
2398 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
2401 REG_WR(bp, BAR_USTRORM_INTMEM +
2402 USTORM_ETH_PAUSE_ENABLED_OFFSET(port),
2406 if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
2407 struct host_port_stats *pstats;
2409 pstats = bnx2x_sp(bp, port_stats);
2410 /* reset old bmac stats */
2411 memset(&(pstats->mac_stx[0]), 0,
2412 sizeof(struct mac_stx));
2414 if ((bp->state == BNX2X_STATE_OPEN) ||
2415 (bp->state == BNX2X_STATE_DISABLED))
2416 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2419 /* indicate link status */
2420 bnx2x_link_report(bp);
2423 int port = BP_PORT(bp);
2427 /* Set the attention towards other drivers on the same port */
2428 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
2429 if (vn == BP_E1HVN(bp))
2432 func = ((vn << 1) | port);
2433 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
2434 (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
2437 if (bp->link_vars.link_up) {
2440 /* Init rate shaping and fairness contexts */
2441 bnx2x_init_port_minmax(bp);
2443 for (vn = VN_0; vn < E1HVN_MAX; vn++)
2444 bnx2x_init_vn_minmax(bp, 2*vn + port);
2446 /* Store it to internal memory */
2448 i < sizeof(struct cmng_struct_per_port) / 4; i++)
2449 REG_WR(bp, BAR_XSTRORM_INTMEM +
2450 XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i*4,
2451 ((u32 *)(&bp->cmng))[i]);
2456 static void bnx2x__link_status_update(struct bnx2x *bp)
2458 int func = BP_FUNC(bp);
2460 if (bp->state != BNX2X_STATE_OPEN)
2463 bnx2x_link_status_update(&bp->link_params, &bp->link_vars);
2465 if (bp->link_vars.link_up)
2466 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2468 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2470 bp->mf_config = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
2471 bnx2x_calc_vn_weight_sum(bp);
2473 /* indicate link status */
2474 bnx2x_link_report(bp);
2477 static void bnx2x_pmf_update(struct bnx2x *bp)
2479 int port = BP_PORT(bp);
2483 DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
2485 /* enable nig attention */
2486 val = (0xff0f | (1 << (BP_E1HVN(bp) + 4)));
2487 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
2488 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
2490 bnx2x_stats_handle(bp, STATS_EVENT_PMF);
2498 * General service functions
2501 /* send the MCP a request, block until there is a reply */
2502 u32 bnx2x_fw_command(struct bnx2x *bp, u32 command)
2504 int func = BP_FUNC(bp);
2505 u32 seq = ++bp->fw_seq;
2508 u8 delay = CHIP_REV_IS_SLOW(bp) ? 100 : 10;
2510 SHMEM_WR(bp, func_mb[func].drv_mb_header, (command | seq));
2511 DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB\n", (command | seq));
2514 /* let the FW do its magic ... */
2517 rc = SHMEM_RD(bp, func_mb[func].fw_mb_header);
2519 /* Give the FW up to 2 seconds (200*10ms) */
2520 } while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 200));
2522 DP(BNX2X_MSG_MCP, "[after %d ms] read (%x) seq is (%x) from FW MB\n",
2523 cnt*delay, rc, seq);
2525 /* is this a reply to our command? */
2526 if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK))
2527 rc &= FW_MSG_CODE_MASK;
2530 BNX2X_ERR("FW failed to respond!\n");
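/* The mailbox handshake above in short: write (command | seq) to the
 * function's driver mailbox, then poll fw_mb_header until the firmware
 * echoes the same sequence number back (up to 200 polls, ~2s total on
 * real silicon). Callers use it as, e.g.:
 *
 *	rc = bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_OK);
 *
 * and on success rc holds only the FW_MSG_CODE_MASK bits of the reply.
 */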
2538 static void bnx2x_set_storm_rx_mode(struct bnx2x *bp);
2539 static void bnx2x_set_eth_mac_addr_e1h(struct bnx2x *bp, int set);
2540 static void bnx2x_set_rx_mode(struct net_device *dev);
2542 static void bnx2x_e1h_disable(struct bnx2x *bp)
2544 int port = BP_PORT(bp);
2547 bp->rx_mode = BNX2X_RX_MODE_NONE;
2548 bnx2x_set_storm_rx_mode(bp);
2550 netif_tx_disable(bp->dev);
2551 bp->dev->trans_start = jiffies; /* prevent tx timeout */
2553 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
2555 bnx2x_set_eth_mac_addr_e1h(bp, 0);
2557 for (i = 0; i < MC_HASH_SIZE; i++)
2558 REG_WR(bp, MC_HASH_OFFSET(bp, i), 0);
2560 netif_carrier_off(bp->dev);
2563 static void bnx2x_e1h_enable(struct bnx2x *bp)
2565 int port = BP_PORT(bp);
2567 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
2569 bnx2x_set_eth_mac_addr_e1h(bp, 1);
2571 /* Tx queues should only be re-enabled */
2572 netif_tx_wake_all_queues(bp->dev);
2574 /* Initialize the receive filter. */
2575 bnx2x_set_rx_mode(bp->dev);
2578 static void bnx2x_update_min_max(struct bnx2x *bp)
2580 int port = BP_PORT(bp);
2583 /* Init rate shaping and fairness contexts */
2584 bnx2x_init_port_minmax(bp);
2586 bnx2x_calc_vn_weight_sum(bp);
2588 for (vn = VN_0; vn < E1HVN_MAX; vn++)
2589 bnx2x_init_vn_minmax(bp, 2*vn + port);
2594 /* Set the attention towards other drivers on the same port */
2595 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
2596 if (vn == BP_E1HVN(bp))
2599 func = ((vn << 1) | port);
2600 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
2601 (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
2604 /* Store it to internal memory */
2605 for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++)
2606 REG_WR(bp, BAR_XSTRORM_INTMEM +
2607 XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i*4,
2608 ((u32 *)(&bp->cmng))[i]);
2612 static void bnx2x_dcc_event(struct bnx2x *bp, u32 dcc_event)
2614 int func = BP_FUNC(bp);
2616 DP(BNX2X_MSG_MCP, "dcc_event 0x%x\n", dcc_event);
2617 bp->mf_config = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
2619 if (dcc_event & DRV_STATUS_DCC_DISABLE_ENABLE_PF) {
2621 if (bp->mf_config & FUNC_MF_CFG_FUNC_DISABLED) {
2622 DP(NETIF_MSG_IFDOWN, "mf_cfg function disabled\n");
2623 bp->state = BNX2X_STATE_DISABLED;
2625 bnx2x_e1h_disable(bp);
2627 DP(NETIF_MSG_IFUP, "mf_cfg function enabled\n");
2628 bp->state = BNX2X_STATE_OPEN;
2630 bnx2x_e1h_enable(bp);
2632 dcc_event &= ~DRV_STATUS_DCC_DISABLE_ENABLE_PF;
2634 if (dcc_event & DRV_STATUS_DCC_BANDWIDTH_ALLOCATION) {
2636 bnx2x_update_min_max(bp);
2637 dcc_event &= ~DRV_STATUS_DCC_BANDWIDTH_ALLOCATION;
2640 /* Report results to MCP */
2642 bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_FAILURE);
2644 bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_OK);
2647 /* must be called under the spq lock */
2648 static inline struct eth_spe *bnx2x_sp_get_next(struct bnx2x *bp)
2650 struct eth_spe *next_spe = bp->spq_prod_bd;
2652 if (bp->spq_prod_bd == bp->spq_last_bd) {
2653 bp->spq_prod_bd = bp->spq;
2654 bp->spq_prod_idx = 0;
2655 DP(NETIF_MSG_TIMER, "end of spq\n");
2663 /* must be called under the spq lock */
2664 static inline void bnx2x_sp_prod_update(struct bnx2x *bp)
2666 int func = BP_FUNC(bp);
2668 /* Make sure that BD data is updated before writing the producer */
2671 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func),
2676 /* the slow path queue is odd since completions arrive on the fastpath ring */
2677 static int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
2678 u32 data_hi, u32 data_lo, int common)
2680 struct eth_spe *spe;
2682 DP(BNX2X_MSG_SP/*NETIF_MSG_TIMER*/,
2683 "SPQE (%x:%x) command %d hw_cid %x data (%x:%x) left %x\n",
2684 (u32)U64_HI(bp->spq_mapping), (u32)(U64_LO(bp->spq_mapping) +
2685 (void *)bp->spq_prod_bd - (void *)bp->spq), command,
2686 HW_CID(bp, cid), data_hi, data_lo, bp->spq_left);
2688 #ifdef BNX2X_STOP_ON_ERROR
2689 if (unlikely(bp->panic))
2693 spin_lock_bh(&bp->spq_lock);
2695 if (!bp->spq_left) {
2696 BNX2X_ERR("BUG! SPQ ring full!\n");
2697 spin_unlock_bh(&bp->spq_lock);
2702 spe = bnx2x_sp_get_next(bp);
2704 /* CID needs port number to be encoded in it */
2705 spe->hdr.conn_and_cmd_data =
2706 cpu_to_le32(((command << SPE_HDR_CMD_ID_SHIFT) |
2708 spe->hdr.type = cpu_to_le16(ETH_CONNECTION_TYPE);
2711 cpu_to_le16((1 << SPE_HDR_COMMON_RAMROD_SHIFT));
2713 spe->data.mac_config_addr.hi = cpu_to_le32(data_hi);
2714 spe->data.mac_config_addr.lo = cpu_to_le32(data_lo);
2718 bnx2x_sp_prod_update(bp);
2719 spin_unlock_bh(&bp->spq_lock);
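/* Posting flow in bnx2x_sp_post() in short: take spq_lock, bail out if no
 * spq_left credits remain, grab the next SPE slot (wrapping from spq_last_bd
 * back to spq), fill in the command/CID header and the data address, then
 * publish the new producer index to XSTORM. Completions arrive on the
 * fastpath ring, which is what makes this queue "odd".
 */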
2723 /* acquire split MCP access lock register */
2724 static int bnx2x_acquire_alr(struct bnx2x *bp)
2731 for (j = 0; j < i*10; j++) {
2733 REG_WR(bp, GRCBASE_MCP + 0x9c, val);
2734 val = REG_RD(bp, GRCBASE_MCP + 0x9c);
2735 if (val & (1L << 31))
2740 if (!(val & (1L << 31))) {
2741 BNX2X_ERR("Cannot acquire MCP access lock register\n");
2748 /* release split MCP access lock register */
2749 static void bnx2x_release_alr(struct bnx2x *bp)
2753 REG_WR(bp, GRCBASE_MCP + 0x9c, val);
2756 static inline u16 bnx2x_update_dsb_idx(struct bnx2x *bp)
2758 struct host_def_status_block *def_sb = bp->def_status_blk;
2761 barrier(); /* status block is written to by the chip */
2762 if (bp->def_att_idx != def_sb->atten_status_block.attn_bits_index) {
2763 bp->def_att_idx = def_sb->atten_status_block.attn_bits_index;
2766 if (bp->def_c_idx != def_sb->c_def_status_block.status_block_index) {
2767 bp->def_c_idx = def_sb->c_def_status_block.status_block_index;
2770 if (bp->def_u_idx != def_sb->u_def_status_block.status_block_index) {
2771 bp->def_u_idx = def_sb->u_def_status_block.status_block_index;
2774 if (bp->def_x_idx != def_sb->x_def_status_block.status_block_index) {
2775 bp->def_x_idx = def_sb->x_def_status_block.status_block_index;
2778 if (bp->def_t_idx != def_sb->t_def_status_block.status_block_index) {
2779 bp->def_t_idx = def_sb->t_def_status_block.status_block_index;
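/* Each comparison above latches the newest index the chip wrote into the
 * default status block; any attention or storm index that moved tells the
 * slowpath task which parts of the block still need to be acknowledged.
 */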
2786 * slow path service functions
2789 static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
2791 int port = BP_PORT(bp);
2792 u32 hc_addr = (HC_REG_COMMAND_REG + port*32 +
2793 COMMAND_REG_ATTN_BITS_SET);
2794 u32 aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
2795 MISC_REG_AEU_MASK_ATTN_FUNC_0;
2796 u32 nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 :
2797 NIG_REG_MASK_INTERRUPT_PORT0;
2801 if (bp->attn_state & asserted)
2802 BNX2X_ERR("IGU ERROR\n");
2804 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2805 aeu_mask = REG_RD(bp, aeu_addr);
2807 DP(NETIF_MSG_HW, "aeu_mask %x newly asserted %x\n",
2808 aeu_mask, asserted);
2809 aeu_mask &= ~(asserted & 0xff);
2810 DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
2812 REG_WR(bp, aeu_addr, aeu_mask);
2813 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2815 DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
2816 bp->attn_state |= asserted;
2817 DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
2819 if (asserted & ATTN_HARD_WIRED_MASK) {
2820 if (asserted & ATTN_NIG_FOR_FUNC) {
2822 bnx2x_acquire_phy_lock(bp);
2824 /* save nig interrupt mask */
2825 nig_mask = REG_RD(bp, nig_int_mask_addr);
2826 REG_WR(bp, nig_int_mask_addr, 0);
2828 bnx2x_link_attn(bp);
2830 /* handle unicore attn? */
2832 if (asserted & ATTN_SW_TIMER_4_FUNC)
2833 DP(NETIF_MSG_HW, "ATTN_SW_TIMER_4_FUNC!\n");
2835 if (asserted & GPIO_2_FUNC)
2836 DP(NETIF_MSG_HW, "GPIO_2_FUNC!\n");
2838 if (asserted & GPIO_3_FUNC)
2839 DP(NETIF_MSG_HW, "GPIO_3_FUNC!\n");
2841 if (asserted & GPIO_4_FUNC)
2842 DP(NETIF_MSG_HW, "GPIO_4_FUNC!\n");
2845 if (asserted & ATTN_GENERAL_ATTN_1) {
2846 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_1!\n");
2847 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_1, 0x0);
2849 if (asserted & ATTN_GENERAL_ATTN_2) {
2850 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_2!\n");
2851 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_2, 0x0);
2853 if (asserted & ATTN_GENERAL_ATTN_3) {
2854 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_3!\n");
2855 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_3, 0x0);
2858 if (asserted & ATTN_GENERAL_ATTN_4) {
2859 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_4!\n");
2860 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_4, 0x0);
2862 if (asserted & ATTN_GENERAL_ATTN_5) {
2863 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_5!\n");
2864 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_5, 0x0);
2866 if (asserted & ATTN_GENERAL_ATTN_6) {
2867 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_6!\n");
2868 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_6, 0x0);
2872 } /* if hardwired */
2874 DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
2876 REG_WR(bp, hc_addr, asserted);
2878 /* now set back the mask */
2879 if (asserted & ATTN_NIG_FOR_FUNC) {
2880 REG_WR(bp, nig_int_mask_addr, nig_mask);
2881 bnx2x_release_phy_lock(bp);
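/* Summary of the asserted path: the AEU mask bits are cleared under the HW
 * lock so the line cannot fire again, the new state is recorded in
 * attn_state, the hard-wired sources (NIG link, GPIOs, general attentions)
 * are serviced, the bits are acknowledged at the HC, and the NIG mask is
 * restored only after the link attention has been handled.
 */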
2885 static inline void bnx2x_fan_failure(struct bnx2x *bp)
2887 int port = BP_PORT(bp);
2889 /* mark the failure */
2890 bp->link_params.ext_phy_config &= ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
2891 bp->link_params.ext_phy_config |= PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE;
2892 SHMEM_WR(bp, dev_info.port_hw_config[port].external_phy_config,
2893 bp->link_params.ext_phy_config);
2895 /* log the failure */
2896 printk(KERN_ERR PFX "Fan Failure on Network Controller %s has caused"
2897 " the driver to shutdown the card to prevent permanent"
2898 " damage. Please contact Dell Support for assistance\n",
2902 static inline void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
2904 int port = BP_PORT(bp);
2906 u32 val, swap_val, swap_override;
2908 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
2909 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
2911 if (attn & AEU_INPUTS_ATTN_BITS_SPIO5) {
2913 val = REG_RD(bp, reg_offset);
2914 val &= ~AEU_INPUTS_ATTN_BITS_SPIO5;
2915 REG_WR(bp, reg_offset, val);
2917 BNX2X_ERR("SPIO5 hw attention\n");
2919 /* Fan failure attention */
2920 switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
2921 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
2922 /* Low power mode is controlled by GPIO 2 */
2923 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
2924 MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
2925 /* The PHY reset is controlled by GPIO 1 */
2926 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
2927 MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
2930 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
2931 /* The PHY reset is controlled by GPIO 1 */
2932 /* fake the port number to cancel the swap done in set_gpio() */
2934 swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
2935 swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
2936 port = (swap_val && swap_override) ^ 1;
2937 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
2938 MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
2944 bnx2x_fan_failure(bp);
2947 if (attn & (AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 |
2948 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1)) {
2949 bnx2x_acquire_phy_lock(bp);
2950 bnx2x_handle_module_detect_int(&bp->link_params);
2951 bnx2x_release_phy_lock(bp);
2954 if (attn & HW_INTERRUT_ASSERT_SET_0) {
2956 val = REG_RD(bp, reg_offset);
2957 val &= ~(attn & HW_INTERRUT_ASSERT_SET_0);
2958 REG_WR(bp, reg_offset, val);
2960 BNX2X_ERR("FATAL HW block attention set0 0x%x\n",
2961 (u32)(attn & HW_INTERRUT_ASSERT_SET_0));
2966 static inline void bnx2x_attn_int_deasserted1(struct bnx2x *bp, u32 attn)
2970 if (attn & AEU_INPUTS_ATTN_BITS_DOORBELLQ_HW_INTERRUPT) {
2972 val = REG_RD(bp, DORQ_REG_DORQ_INT_STS_CLR);
2973 BNX2X_ERR("DB hw attention 0x%x\n", val);
2974 /* DORQ discard attention */
2976 BNX2X_ERR("FATAL error from DORQ\n");
2979 if (attn & HW_INTERRUT_ASSERT_SET_1) {
2981 int port = BP_PORT(bp);
2984 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 :
2985 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1);
2987 val = REG_RD(bp, reg_offset);
2988 val &= ~(attn & HW_INTERRUT_ASSERT_SET_1);
2989 REG_WR(bp, reg_offset, val);
2991 BNX2X_ERR("FATAL HW block attention set1 0x%x\n",
2992 (u32)(attn & HW_INTERRUT_ASSERT_SET_1));
2997 static inline void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn)
3001 if (attn & AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) {
3003 val = REG_RD(bp, CFC_REG_CFC_INT_STS_CLR);
3004 BNX2X_ERR("CFC hw attention 0x%x\n", val);
3005 /* CFC error attention */
3007 BNX2X_ERR("FATAL error from CFC\n");
3010 if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) {
3012 val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_0);
3013 BNX2X_ERR("PXP hw attention 0x%x\n", val);
3014 /* RQ_USDMDP_FIFO_OVERFLOW */
3016 BNX2X_ERR("FATAL error from PXP\n");
3019 if (attn & HW_INTERRUT_ASSERT_SET_2) {
3021 int port = BP_PORT(bp);
3024 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 :
3025 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2);
3027 val = REG_RD(bp, reg_offset);
3028 val &= ~(attn & HW_INTERRUT_ASSERT_SET_2);
3029 REG_WR(bp, reg_offset, val);
3031 BNX2X_ERR("FATAL HW block attention set2 0x%x\n",
3032 (u32)(attn & HW_INTERRUT_ASSERT_SET_2));
3037 static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
3041 if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) {
3043 if (attn & BNX2X_PMF_LINK_ASSERT) {
3044 int func = BP_FUNC(bp);
3046 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
3047 val = SHMEM_RD(bp, func_mb[func].drv_status);
3048 if (val & DRV_STATUS_DCC_EVENT_MASK)
3050 (val & DRV_STATUS_DCC_EVENT_MASK));
3051 bnx2x__link_status_update(bp);
3052 if ((bp->port.pmf == 0) && (val & DRV_STATUS_PMF))
3053 bnx2x_pmf_update(bp);
3055 } else if (attn & BNX2X_MC_ASSERT_BITS) {
3057 BNX2X_ERR("MC assert!\n");
3058 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_10, 0);
3059 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_9, 0);
3060 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_8, 0);
3061 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_7, 0);
3064 } else if (attn & BNX2X_MCP_ASSERT) {
3066 BNX2X_ERR("MCP assert!\n");
3067 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_11, 0);
3071 BNX2X_ERR("Unknown HW assert! (attn 0x%x)\n", attn);
3074 if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) {
3075 BNX2X_ERR("LATCHED attention 0x%08x (masked)\n", attn);
3076 if (attn & BNX2X_GRC_TIMEOUT) {
3077 val = CHIP_IS_E1H(bp) ?
3078 REG_RD(bp, MISC_REG_GRC_TIMEOUT_ATTN) : 0;
3079 BNX2X_ERR("GRC time-out 0x%08x\n", val);
3081 if (attn & BNX2X_GRC_RSV) {
3082 val = CHIP_IS_E1H(bp) ?
3083 REG_RD(bp, MISC_REG_GRC_RSV_ATTN) : 0;
3084 BNX2X_ERR("GRC reserved 0x%08x\n", val);
3086 REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff);
3090 static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
3092 struct attn_route attn;
3093 struct attn_route group_mask;
3094 int port = BP_PORT(bp);
3100 /* need to take HW lock because MCP or other port might also
3101 try to handle this event */
3102 bnx2x_acquire_alr(bp);
3104 attn.sig[0] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
3105 attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
3106 attn.sig[2] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
3107 attn.sig[3] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
3108 DP(NETIF_MSG_HW, "attn: %08x %08x %08x %08x\n",
3109 attn.sig[0], attn.sig[1], attn.sig[2], attn.sig[3]);
3111 for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
3112 if (deasserted & (1 << index)) {
3113 group_mask = bp->attn_group[index];
3115 DP(NETIF_MSG_HW, "group[%d]: %08x %08x %08x %08x\n",
3116 index, group_mask.sig[0], group_mask.sig[1],
3117 group_mask.sig[2], group_mask.sig[3]);
3119 bnx2x_attn_int_deasserted3(bp,
3120 attn.sig[3] & group_mask.sig[3]);
3121 bnx2x_attn_int_deasserted1(bp,
3122 attn.sig[1] & group_mask.sig[1]);
3123 bnx2x_attn_int_deasserted2(bp,
3124 attn.sig[2] & group_mask.sig[2]);
3125 bnx2x_attn_int_deasserted0(bp,
3126 attn.sig[0] & group_mask.sig[0]);
3128 if ((attn.sig[0] & group_mask.sig[0] &
3129 HW_PRTY_ASSERT_SET_0) ||
3130 (attn.sig[1] & group_mask.sig[1] &
3131 HW_PRTY_ASSERT_SET_1) ||
3132 (attn.sig[2] & group_mask.sig[2] &
3133 HW_PRTY_ASSERT_SET_2))
3134 BNX2X_ERR("FATAL HW block parity attention\n");
3138 bnx2x_release_alr(bp);
3140 reg_addr = (HC_REG_COMMAND_REG + port*32 + COMMAND_REG_ATTN_BITS_CLR);
3143 DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
3145 REG_WR(bp, reg_addr, val);
3147 if (~bp->attn_state & deasserted)
3148 BNX2X_ERR("IGU ERROR\n");
3150 reg_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
3151 MISC_REG_AEU_MASK_ATTN_FUNC_0;
3153 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
3154 aeu_mask = REG_RD(bp, reg_addr);
3156 DP(NETIF_MSG_HW, "aeu_mask %x newly deasserted %x\n",
3157 aeu_mask, deasserted);
3158 aeu_mask |= (deasserted & 0xff);
3159 DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
3161 REG_WR(bp, reg_addr, aeu_mask);
3162 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
3164 DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
3165 bp->attn_state &= ~deasserted;
3166 DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
3169 static void bnx2x_attn_int(struct bnx2x *bp)
3171 /* read local copy of bits */
3172 u32 attn_bits = le32_to_cpu(bp->def_status_blk->atten_status_block.
3174 u32 attn_ack = le32_to_cpu(bp->def_status_blk->atten_status_block.
3176 u32 attn_state = bp->attn_state;
3178 /* look for changed bits */
3179 u32 asserted = attn_bits & ~attn_ack & ~attn_state;
3180 u32 deasserted = ~attn_bits & attn_ack & attn_state;
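/* Example: attn_bits = 0x2, attn_ack = 0x0, attn_state = 0x0 gives
 * asserted = 0x2, deasserted = 0x0; once the bit is acked and later drops
 * (attn_bits = 0x0, attn_ack = 0x2, attn_state = 0x2) the same formulas
 * yield deasserted = 0x2.
 */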
3183 "attn_bits %x attn_ack %x asserted %x deasserted %x\n",
3184 attn_bits, attn_ack, asserted, deasserted);
3186 if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state))
3187 BNX2X_ERR("BAD attention state\n");
3189 /* handle bits that were raised */
3191 bnx2x_attn_int_asserted(bp, asserted);
3194 bnx2x_attn_int_deasserted(bp, deasserted);
3197 static void bnx2x_sp_task(struct work_struct *work)
3199 struct bnx2x *bp = container_of(work, struct bnx2x, sp_task.work);
3203 /* Return here if interrupt is disabled */
3204 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
3205 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
3209 status = bnx2x_update_dsb_idx(bp);
3210 /* if (status == 0) */
3211 /* BNX2X_ERR("spurious slowpath interrupt!\n"); */
3213 DP(NETIF_MSG_INTR, "got a slowpath interrupt (updated %x)\n", status);
3219 bnx2x_ack_sb(bp, DEF_SB_ID, ATTENTION_ID, le16_to_cpu(bp->def_att_idx),
3221 bnx2x_ack_sb(bp, DEF_SB_ID, USTORM_ID, le16_to_cpu(bp->def_u_idx),
3223 bnx2x_ack_sb(bp, DEF_SB_ID, CSTORM_ID, le16_to_cpu(bp->def_c_idx),
3225 bnx2x_ack_sb(bp, DEF_SB_ID, XSTORM_ID, le16_to_cpu(bp->def_x_idx),
3227 bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, le16_to_cpu(bp->def_t_idx),
3232 static irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
3234 struct net_device *dev = dev_instance;
3235 struct bnx2x *bp = netdev_priv(dev);
3237 /* Return here if interrupt is disabled */
3238 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
3239 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
3243 bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, 0, IGU_INT_DISABLE, 0);
3245 #ifdef BNX2X_STOP_ON_ERROR
3246 if (unlikely(bp->panic))
3250 queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
3255 /* end of slow path */
3259 /****************************************************************************
3261 ****************************************************************************/
3263 /* sum[hi:lo] += add[hi:lo] */
3264 #define ADD_64(s_hi, a_hi, s_lo, a_lo) \
3267 s_hi += a_hi + ((s_lo < a_lo) ? 1 : 0); \
3270 /* difference = minuend - subtrahend */
3271 #define DIFF_64(d_hi, m_hi, s_hi, d_lo, m_lo, s_lo) \
3273 if (m_lo < s_lo) { \
3275 d_hi = m_hi - s_hi; \
3277 /* we can 'loan' 1 */ \
3279 d_lo = m_lo + (UINT_MAX - s_lo) + 1; \
3281 /* m_hi <= s_hi */ \
3286 /* m_lo >= s_lo */ \
3287 if (m_hi < s_hi) { \
3291 /* m_hi >= s_hi */ \
3292 d_hi = m_hi - s_hi; \
3293 d_lo = m_lo - s_lo; \
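/* DIFF_64 borrow example: minuend 0x1_00000000 minus subtrahend 0x1 takes
 * the (m_lo < s_lo) branch and "loans" 1 from the high word, giving
 * d_lo = 0 + (UINT_MAX - 1) + 1 = 0xffffffff and d_hi = 0, i.e. the
 * correct 64-bit difference 0xffffffff.
 */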
3298 #define UPDATE_STAT64(s, t) \
3300 DIFF_64(diff.hi, new->s##_hi, pstats->mac_stx[0].t##_hi, \
3301 diff.lo, new->s##_lo, pstats->mac_stx[0].t##_lo); \
3302 pstats->mac_stx[0].t##_hi = new->s##_hi; \
3303 pstats->mac_stx[0].t##_lo = new->s##_lo; \
3304 ADD_64(pstats->mac_stx[1].t##_hi, diff.hi, \
3305 pstats->mac_stx[1].t##_lo, diff.lo); \
3308 #define UPDATE_STAT64_NIG(s, t) \
3310 DIFF_64(diff.hi, new->s##_hi, old->s##_hi, \
3311 diff.lo, new->s##_lo, old->s##_lo); \
3312 ADD_64(estats->t##_hi, diff.hi, \
3313 estats->t##_lo, diff.lo); \
3316 /* sum[hi:lo] += add */
3317 #define ADD_EXTEND_64(s_hi, s_lo, a) \
3320 s_hi += (s_lo < a) ? 1 : 0; \
3323 #define UPDATE_EXTEND_STAT(s) \
3325 ADD_EXTEND_64(pstats->mac_stx[1].s##_hi, \
3326 pstats->mac_stx[1].s##_lo, \
3330 #define UPDATE_EXTEND_TSTAT(s, t) \
3332 diff = le32_to_cpu(tclient->s) - le32_to_cpu(old_tclient->s); \
3333 old_tclient->s = tclient->s; \
3334 ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3337 #define UPDATE_EXTEND_USTAT(s, t) \
3339 diff = le32_to_cpu(uclient->s) - le32_to_cpu(old_uclient->s); \
3340 old_uclient->s = uclient->s; \
3341 ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3344 #define UPDATE_EXTEND_XSTAT(s, t) \
3346 diff = le32_to_cpu(xclient->s) - le32_to_cpu(old_xclient->s); \
3347 old_xclient->s = xclient->s; \
3348 ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3351 /* minuend -= subtrahend */
3352 #define SUB_64(m_hi, s_hi, m_lo, s_lo) \
3354 DIFF_64(m_hi, m_hi, s_hi, m_lo, m_lo, s_lo); \
3357 /* minuend[hi:lo] -= subtrahend */
3358 #define SUB_EXTEND_64(m_hi, m_lo, s) \
3360 SUB_64(m_hi, 0, m_lo, s); \
3363 #define SUB_EXTEND_USTAT(s, t) \
3365 diff = le32_to_cpu(uclient->s) - le32_to_cpu(old_uclient->s); \
3366 SUB_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
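/* The UPDATE_EXTEND_* family above all follow one pattern: diff the 32-bit
 * storm counter against its cached old copy (unsigned subtraction handles
 * counter wrap), remember the new value, and fold the delta into the 64-bit
 * hi/lo pair via ADD_EXTEND_64; SUB_EXTEND_USTAT removes such a delta
 * instead of adding it.
 */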
3370 * General service functions
3373 static inline long bnx2x_hilo(u32 *hiref)
3375 u32 lo = *(hiref + 1);
3376 #if (BITS_PER_LONG == 64)
3379 return HILO_U64(hi, lo);
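/* bnx2x_hilo() folds an adjacent {hi, lo} pair into one long; on 64-bit
 * builds both halves survive via HILO_U64, while on 32-bit builds only the
 * low word can fit in the return type (an assumption consistent with the
 * BITS_PER_LONG guard above).
 */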
3386 * Init service functions
3389 static void bnx2x_storm_stats_post(struct bnx2x *bp)
3391 if (!bp->stats_pending) {
3392 struct eth_query_ramrod_data ramrod_data = {0};
3395 ramrod_data.drv_counter = bp->stats_counter++;
3396 ramrod_data.collect_port = bp->port.pmf ? 1 : 0;
3397 for_each_queue(bp, i)
3398 ramrod_data.ctr_id_vector |= (1 << bp->fp[i].cl_id);
3400 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_STAT_QUERY, 0,
3401 ((u32 *)&ramrod_data)[1],
3402 ((u32 *)&ramrod_data)[0], 0);
3404 /* stats ramrod has its own slot on the spq */
3406 bp->stats_pending = 1;
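/* The statistics query is itself a slowpath ramrod: ctr_id_vector selects
 * which client IDs the storms should dump, drv_counter tags the snapshot so
 * bnx2x_storm_stats_update() can tell stale data from fresh, and
 * stats_pending blocks a second query until this one completes.
 */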
3411 static void bnx2x_hw_stats_post(struct bnx2x *bp)
3413 struct dmae_command *dmae = &bp->stats_dmae;
3414 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3416 *stats_comp = DMAE_COMP_VAL;
3417 if (CHIP_REV_IS_SLOW(bp))
3421 if (bp->executer_idx) {
3422 int loader_idx = PMF_DMAE_C(bp);
3424 memset(dmae, 0, sizeof(struct dmae_command));
3426 dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3427 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3428 DMAE_CMD_DST_RESET |
3430 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3432 DMAE_CMD_ENDIANITY_DW_SWAP |
3434 (BP_PORT(bp) ? DMAE_CMD_PORT_1 :
3436 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3437 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, dmae[0]));
3438 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, dmae[0]));
3439 dmae->dst_addr_lo = (DMAE_REG_CMD_MEM +
3440 sizeof(struct dmae_command) *
3441 (loader_idx + 1)) >> 2;
3442 dmae->dst_addr_hi = 0;
3443 dmae->len = sizeof(struct dmae_command) >> 2;
3446 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx + 1] >> 2;
3447 dmae->comp_addr_hi = 0;
3451 bnx2x_post_dmae(bp, dmae, loader_idx);
3453 } else if (bp->func_stx) {
3455 bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
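/* A sketch of the scheme above: when several DMAE commands were prepared
 * (executer_idx != 0), stats_dmae is turned into a "loader" that copies the
 * prepared command block into the DMAE command memory and chains execution
 * through the GO registers; with only the single function-stats command
 * pending it is posted directly.
 */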
3459 static int bnx2x_stats_comp(struct bnx2x *bp)
3461 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3465 while (*stats_comp != DMAE_COMP_VAL) {
3467 BNX2X_ERR("timeout waiting for stats finished\n");
3477 * Statistics service functions
3480 static void bnx2x_stats_pmf_update(struct bnx2x *bp)
3482 struct dmae_command *dmae;
3484 int loader_idx = PMF_DMAE_C(bp);
3485 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3488 if (!IS_E1HMF(bp) || !bp->port.pmf || !bp->port.port_stx) {
3489 BNX2X_ERR("BUG!\n");
3493 bp->executer_idx = 0;
3495 opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3497 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3499 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3501 DMAE_CMD_ENDIANITY_DW_SWAP |
3503 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3504 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3506 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3507 dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
3508 dmae->src_addr_lo = bp->port.port_stx >> 2;
3509 dmae->src_addr_hi = 0;
3510 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3511 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3512 dmae->len = DMAE_LEN32_RD_MAX;
3513 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3514 dmae->comp_addr_hi = 0;
3517 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3518 dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
3519 dmae->src_addr_lo = (bp->port.port_stx >> 2) + DMAE_LEN32_RD_MAX;
3520 dmae->src_addr_hi = 0;
3521 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats) +
3522 DMAE_LEN32_RD_MAX * 4);
3523 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats) +
3524 DMAE_LEN32_RD_MAX * 4);
3525 dmae->len = (sizeof(struct host_port_stats) >> 2) - DMAE_LEN32_RD_MAX;
3526 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3527 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3528 dmae->comp_val = DMAE_COMP_VAL;
3531 bnx2x_hw_stats_post(bp);
3532 bnx2x_stats_comp(bp);
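/* The PMF update is read in two chunks because one DMAE transfer is capped
 * at DMAE_LEN32_RD_MAX dwords; only the second command raises DMAE_COMP_VAL
 * in stats_comp, which bnx2x_stats_comp() then polls (with a timeout)
 * before the refreshed port stats are used.
 */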
3535 static void bnx2x_port_stats_init(struct bnx2x *bp)
3537 struct dmae_command *dmae;
3538 int port = BP_PORT(bp);
3539 int vn = BP_E1HVN(bp);
3541 int loader_idx = PMF_DMAE_C(bp);
3543 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3546 if (!bp->link_vars.link_up || !bp->port.pmf) {
3547 BNX2X_ERR("BUG!\n");
3551 bp->executer_idx = 0;
3554 opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3555 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3556 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3558 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3560 DMAE_CMD_ENDIANITY_DW_SWAP |
3562 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3563 (vn << DMAE_CMD_E1HVN_SHIFT));
3565 if (bp->port.port_stx) {
3567 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3568 dmae->opcode = opcode;
3569 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3570 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3571 dmae->dst_addr_lo = bp->port.port_stx >> 2;
3572 dmae->dst_addr_hi = 0;
3573 dmae->len = sizeof(struct host_port_stats) >> 2;
3574 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3575 dmae->comp_addr_hi = 0;
3581 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3582 dmae->opcode = opcode;
3583 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
3584 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
3585 dmae->dst_addr_lo = bp->func_stx >> 2;
3586 dmae->dst_addr_hi = 0;
3587 dmae->len = sizeof(struct host_func_stats) >> 2;
3588 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3589 dmae->comp_addr_hi = 0;
3594 opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3595 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3596 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3598 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3600 DMAE_CMD_ENDIANITY_DW_SWAP |
3602 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3603 (vn << DMAE_CMD_E1HVN_SHIFT));
3605 if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
3607 mac_addr = (port ? NIG_REG_INGRESS_BMAC1_MEM :
3608 NIG_REG_INGRESS_BMAC0_MEM);
3610 /* BIGMAC_REGISTER_TX_STAT_GTPKT ..
3611 BIGMAC_REGISTER_TX_STAT_GTBYT */
3612 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3613 dmae->opcode = opcode;
3614 dmae->src_addr_lo = (mac_addr +
3615 BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
3616 dmae->src_addr_hi = 0;
3617 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
3618 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
3619 dmae->len = (8 + BIGMAC_REGISTER_TX_STAT_GTBYT -
3620 BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
3621 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3622 dmae->comp_addr_hi = 0;
3625 /* BIGMAC_REGISTER_RX_STAT_GR64 ..
3626 BIGMAC_REGISTER_RX_STAT_GRIPJ */
3627 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3628 dmae->opcode = opcode;
3629 dmae->src_addr_lo = (mac_addr +
3630 BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
3631 dmae->src_addr_hi = 0;
3632 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
3633 offsetof(struct bmac_stats, rx_stat_gr64_lo));
3634 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
3635 offsetof(struct bmac_stats, rx_stat_gr64_lo));
3636 dmae->len = (8 + BIGMAC_REGISTER_RX_STAT_GRIPJ -
3637 BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
3638 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3639 dmae->comp_addr_hi = 0;
3642 } else if (bp->link_vars.mac_type == MAC_TYPE_EMAC) {
3644 mac_addr = (port ? GRCBASE_EMAC1 : GRCBASE_EMAC0);
3646 /* EMAC_REG_EMAC_RX_STAT_AC (EMAC_REG_EMAC_RX_STAT_AC_COUNT)*/
3647 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3648 dmae->opcode = opcode;
3649 dmae->src_addr_lo = (mac_addr +
3650 EMAC_REG_EMAC_RX_STAT_AC) >> 2;
3651 dmae->src_addr_hi = 0;
3652 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
3653 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
3654 dmae->len = EMAC_REG_EMAC_RX_STAT_AC_COUNT;
3655 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3656 dmae->comp_addr_hi = 0;
3659 /* EMAC_REG_EMAC_RX_STAT_AC_28 */
3660 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3661 dmae->opcode = opcode;
3662 dmae->src_addr_lo = (mac_addr +
3663 EMAC_REG_EMAC_RX_STAT_AC_28) >> 2;
3664 dmae->src_addr_hi = 0;
3665 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
3666 offsetof(struct emac_stats, rx_stat_falsecarriererrors));
3667 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
3668 offsetof(struct emac_stats, rx_stat_falsecarriererrors));
3670 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3671 dmae->comp_addr_hi = 0;
3674 /* EMAC_REG_EMAC_TX_STAT_AC (EMAC_REG_EMAC_TX_STAT_AC_COUNT)*/
3675 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3676 dmae->opcode = opcode;
3677 dmae->src_addr_lo = (mac_addr +
3678 EMAC_REG_EMAC_TX_STAT_AC) >> 2;
3679 dmae->src_addr_hi = 0;
3680 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
3681 offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
3682 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
3683 offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
3684 dmae->len = EMAC_REG_EMAC_TX_STAT_AC_COUNT;
3685 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3686 dmae->comp_addr_hi = 0;
3691 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3692 dmae->opcode = opcode;
3693 dmae->src_addr_lo = (port ? NIG_REG_STAT1_BRB_DISCARD :
3694 NIG_REG_STAT0_BRB_DISCARD) >> 2;
3695 dmae->src_addr_hi = 0;
3696 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats));
3697 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats));
3698 dmae->len = (sizeof(struct nig_stats) - 4*sizeof(u32)) >> 2;
3699 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3700 dmae->comp_addr_hi = 0;
3703 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3704 dmae->opcode = opcode;
3705 dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT0 :
3706 NIG_REG_STAT0_EGRESS_MAC_PKT0) >> 2;
3707 dmae->src_addr_hi = 0;
3708 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
3709 offsetof(struct nig_stats, egress_mac_pkt0_lo));
3710 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
3711 offsetof(struct nig_stats, egress_mac_pkt0_lo));
3712 dmae->len = (2*sizeof(u32)) >> 2;
3713 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3714 dmae->comp_addr_hi = 0;
3717 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3718 dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3719 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
3720 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3722 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3724 DMAE_CMD_ENDIANITY_DW_SWAP |
3726 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3727 (vn << DMAE_CMD_E1HVN_SHIFT));
3728 dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT1 :
3729 NIG_REG_STAT0_EGRESS_MAC_PKT1) >> 2;
3730 dmae->src_addr_hi = 0;
3731 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
3732 offsetof(struct nig_stats, egress_mac_pkt1_lo));
3733 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
3734 offsetof(struct nig_stats, egress_mac_pkt1_lo));
3735 dmae->len = (2*sizeof(u32)) >> 2;
3736 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3737 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3738 dmae->comp_val = DMAE_COMP_VAL;
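/* Everything above builds one DMAE program per statistics source: host
 * port/function stats out to shared memory, then the active MAC (BMAC or
 * EMAC) and NIG counters back into host memory. Each intermediate command
 * completes into a DMAE GO register; only the last raises DMAE_COMP_VAL for
 * bnx2x_stats_comp().
 */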
3743 static void bnx2x_func_stats_init(struct bnx2x *bp)
3745 struct dmae_command *dmae = &bp->stats_dmae;
3746 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3749 if (!bp->func_stx) {
3750 BNX2X_ERR("BUG!\n");
3754 bp->executer_idx = 0;
3755 memset(dmae, 0, sizeof(struct dmae_command));
3757 dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3758 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
3759 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3761 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3763 DMAE_CMD_ENDIANITY_DW_SWAP |
3765 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3766 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3767 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
3768 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
3769 dmae->dst_addr_lo = bp->func_stx >> 2;
3770 dmae->dst_addr_hi = 0;
3771 dmae->len = sizeof(struct host_func_stats) >> 2;
3772 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3773 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3774 dmae->comp_val = DMAE_COMP_VAL;
3779 static void bnx2x_stats_start(struct bnx2x *bp)
3782 bnx2x_port_stats_init(bp);
3784 else if (bp->func_stx)
3785 bnx2x_func_stats_init(bp);
3787 bnx2x_hw_stats_post(bp);
3788 bnx2x_storm_stats_post(bp);
3791 static void bnx2x_stats_pmf_start(struct bnx2x *bp)
3793 bnx2x_stats_comp(bp);
3794 bnx2x_stats_pmf_update(bp);
3795 bnx2x_stats_start(bp);
3798 static void bnx2x_stats_restart(struct bnx2x *bp)
3800 bnx2x_stats_comp(bp);
3801 bnx2x_stats_start(bp);
3804 static void bnx2x_bmac_stats_update(struct bnx2x *bp)
3806 struct bmac_stats *new = bnx2x_sp(bp, mac_stats.bmac_stats);
3807 struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3808 struct bnx2x_eth_stats *estats = &bp->eth_stats;
3814 UPDATE_STAT64(rx_stat_grerb, rx_stat_ifhcinbadoctets);
3815 UPDATE_STAT64(rx_stat_grfcs, rx_stat_dot3statsfcserrors);
3816 UPDATE_STAT64(rx_stat_grund, rx_stat_etherstatsundersizepkts);
3817 UPDATE_STAT64(rx_stat_grovr, rx_stat_dot3statsframestoolong);
3818 UPDATE_STAT64(rx_stat_grfrg, rx_stat_etherstatsfragments);
3819 UPDATE_STAT64(rx_stat_grjbr, rx_stat_etherstatsjabbers);
3820 UPDATE_STAT64(rx_stat_grxcf, rx_stat_maccontrolframesreceived);
3821 UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffstateentered);
3822 UPDATE_STAT64(rx_stat_grxpf, rx_stat_bmac_xpf);
3823 UPDATE_STAT64(tx_stat_gtxpf, tx_stat_outxoffsent);
3824 UPDATE_STAT64(tx_stat_gtxpf, tx_stat_flowcontroldone);
3825 UPDATE_STAT64(tx_stat_gt64, tx_stat_etherstatspkts64octets);
3826 UPDATE_STAT64(tx_stat_gt127,
3827 tx_stat_etherstatspkts65octetsto127octets);
3828 UPDATE_STAT64(tx_stat_gt255,
3829 tx_stat_etherstatspkts128octetsto255octets);
3830 UPDATE_STAT64(tx_stat_gt511,
3831 tx_stat_etherstatspkts256octetsto511octets);
3832 UPDATE_STAT64(tx_stat_gt1023,
3833 tx_stat_etherstatspkts512octetsto1023octets);
3834 UPDATE_STAT64(tx_stat_gt1518,
3835 tx_stat_etherstatspkts1024octetsto1522octets);
3836 UPDATE_STAT64(tx_stat_gt2047, tx_stat_bmac_2047);
3837 UPDATE_STAT64(tx_stat_gt4095, tx_stat_bmac_4095);
3838 UPDATE_STAT64(tx_stat_gt9216, tx_stat_bmac_9216);
3839 UPDATE_STAT64(tx_stat_gt16383, tx_stat_bmac_16383);
3840 UPDATE_STAT64(tx_stat_gterr,
3841 tx_stat_dot3statsinternalmactransmiterrors);
3842 UPDATE_STAT64(tx_stat_gtufl, tx_stat_bmac_ufl);
3844 estats->pause_frames_received_hi =
3845 pstats->mac_stx[1].rx_stat_bmac_xpf_hi;
3846 estats->pause_frames_received_lo =
3847 pstats->mac_stx[1].rx_stat_bmac_xpf_lo;
3849 estats->pause_frames_sent_hi =
3850 pstats->mac_stx[1].tx_stat_outxoffsent_hi;
3851 estats->pause_frames_sent_lo =
3852 pstats->mac_stx[1].tx_stat_outxoffsent_lo;
3855 static void bnx2x_emac_stats_update(struct bnx2x *bp)
3857 struct emac_stats *new = bnx2x_sp(bp, mac_stats.emac_stats);
3858 struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3859 struct bnx2x_eth_stats *estats = &bp->eth_stats;
3861 UPDATE_EXTEND_STAT(rx_stat_ifhcinbadoctets);
3862 UPDATE_EXTEND_STAT(tx_stat_ifhcoutbadoctets);
3863 UPDATE_EXTEND_STAT(rx_stat_dot3statsfcserrors);
3864 UPDATE_EXTEND_STAT(rx_stat_dot3statsalignmenterrors);
3865 UPDATE_EXTEND_STAT(rx_stat_dot3statscarriersenseerrors);
3866 UPDATE_EXTEND_STAT(rx_stat_falsecarriererrors);
3867 UPDATE_EXTEND_STAT(rx_stat_etherstatsundersizepkts);
3868 UPDATE_EXTEND_STAT(rx_stat_dot3statsframestoolong);
3869 UPDATE_EXTEND_STAT(rx_stat_etherstatsfragments);
3870 UPDATE_EXTEND_STAT(rx_stat_etherstatsjabbers);
3871 UPDATE_EXTEND_STAT(rx_stat_maccontrolframesreceived);
3872 UPDATE_EXTEND_STAT(rx_stat_xoffstateentered);
3873 UPDATE_EXTEND_STAT(rx_stat_xonpauseframesreceived);
3874 UPDATE_EXTEND_STAT(rx_stat_xoffpauseframesreceived);
3875 UPDATE_EXTEND_STAT(tx_stat_outxonsent);
3876 UPDATE_EXTEND_STAT(tx_stat_outxoffsent);
3877 UPDATE_EXTEND_STAT(tx_stat_flowcontroldone);
3878 UPDATE_EXTEND_STAT(tx_stat_etherstatscollisions);
3879 UPDATE_EXTEND_STAT(tx_stat_dot3statssinglecollisionframes);
3880 UPDATE_EXTEND_STAT(tx_stat_dot3statsmultiplecollisionframes);
3881 UPDATE_EXTEND_STAT(tx_stat_dot3statsdeferredtransmissions);
3882 UPDATE_EXTEND_STAT(tx_stat_dot3statsexcessivecollisions);
3883 UPDATE_EXTEND_STAT(tx_stat_dot3statslatecollisions);
3884 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts64octets);
3885 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts65octetsto127octets);
3886 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts128octetsto255octets);
3887 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts256octetsto511octets);
3888 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts512octetsto1023octets);
3889 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts1024octetsto1522octets);
3890 UPDATE_EXTEND_STAT(tx_stat_etherstatspktsover1522octets);
3891 UPDATE_EXTEND_STAT(tx_stat_dot3statsinternalmactransmiterrors);
3893 estats->pause_frames_received_hi =
3894 pstats->mac_stx[1].rx_stat_xonpauseframesreceived_hi;
3895 estats->pause_frames_received_lo =
3896 pstats->mac_stx[1].rx_stat_xonpauseframesreceived_lo;
3897 ADD_64(estats->pause_frames_received_hi,
3898 pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_hi,
3899 estats->pause_frames_received_lo,
3900 pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_lo);
3902 estats->pause_frames_sent_hi =
3903 pstats->mac_stx[1].tx_stat_outxonsent_hi;
3904 estats->pause_frames_sent_lo =
3905 pstats->mac_stx[1].tx_stat_outxonsent_lo;
3906 ADD_64(estats->pause_frames_sent_hi,
3907 pstats->mac_stx[1].tx_stat_outxoffsent_hi,
3908 estats->pause_frames_sent_lo,
3909 pstats->mac_stx[1].tx_stat_outxoffsent_lo);
3912 static int bnx2x_hw_stats_update(struct bnx2x *bp)
3914 struct nig_stats *new = bnx2x_sp(bp, nig_stats);
3915 struct nig_stats *old = &(bp->port.old_nig_stats);
3916 struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3917 struct bnx2x_eth_stats *estats = &bp->eth_stats;
3924 if (bp->link_vars.mac_type == MAC_TYPE_BMAC)
3925 bnx2x_bmac_stats_update(bp);
3927 else if (bp->link_vars.mac_type == MAC_TYPE_EMAC)
3928 bnx2x_emac_stats_update(bp);
3930 else { /* unreached */
3931 BNX2X_ERR("stats updated by DMAE but no MAC active\n");
3935 ADD_EXTEND_64(pstats->brb_drop_hi, pstats->brb_drop_lo,
3936 new->brb_discard - old->brb_discard);
3937 ADD_EXTEND_64(estats->brb_truncate_hi, estats->brb_truncate_lo,
3938 new->brb_truncate - old->brb_truncate);
3940 UPDATE_STAT64_NIG(egress_mac_pkt0,
3941 etherstatspkts1024octetsto1522octets);
3942 UPDATE_STAT64_NIG(egress_mac_pkt1, etherstatspktsover1522octets);
3944 memcpy(old, new, sizeof(struct nig_stats));
3946 memcpy(&(estats->rx_stat_ifhcinbadoctets_hi), &(pstats->mac_stx[1]),
3947 sizeof(struct mac_stx));
3948 estats->brb_drop_hi = pstats->brb_drop_hi;
3949 estats->brb_drop_lo = pstats->brb_drop_lo;
3951 pstats->host_port_stats_start = ++pstats->host_port_stats_end;
3953 nig_timer_max = SHMEM_RD(bp, port_mb[BP_PORT(bp)].stat_nig_timer);
3954 if (nig_timer_max != estats->nig_timer_max) {
3955 estats->nig_timer_max = nig_timer_max;
3956 BNX2X_ERR("NIG timer max (%u)\n", estats->nig_timer_max);
3962 static int bnx2x_storm_stats_update(struct bnx2x *bp)
3964 struct eth_stats_query *stats = bnx2x_sp(bp, fw_stats);
3965 struct tstorm_per_port_stats *tport =
3966 &stats->tstorm_common.port_statistics;
3967 struct host_func_stats *fstats = bnx2x_sp(bp, func_stats);
3968 struct bnx2x_eth_stats *estats = &bp->eth_stats;
3971 memcpy(&(fstats->total_bytes_received_hi),
3972 &(bnx2x_sp(bp, func_stats_base)->total_bytes_received_hi),
3973 sizeof(struct host_func_stats) - 2*sizeof(u32));
3974 estats->error_bytes_received_hi = 0;
3975 estats->error_bytes_received_lo = 0;
3976 estats->etherstatsoverrsizepkts_hi = 0;
3977 estats->etherstatsoverrsizepkts_lo = 0;
3978 estats->no_buff_discard_hi = 0;
3979 estats->no_buff_discard_lo = 0;
3981 for_each_rx_queue(bp, i) {
3982 struct bnx2x_fastpath *fp = &bp->fp[i];
3983 int cl_id = fp->cl_id;
3984 struct tstorm_per_client_stats *tclient =
3985 &stats->tstorm_common.client_statistics[cl_id];
3986 struct tstorm_per_client_stats *old_tclient = &fp->old_tclient;
3987 struct ustorm_per_client_stats *uclient =
3988 &stats->ustorm_common.client_statistics[cl_id];
3989 struct ustorm_per_client_stats *old_uclient = &fp->old_uclient;
3990 struct xstorm_per_client_stats *xclient =
3991 &stats->xstorm_common.client_statistics[cl_id];
3992 struct xstorm_per_client_stats *old_xclient = &fp->old_xclient;
3993 struct bnx2x_eth_q_stats *qstats = &fp->eth_q_stats;
3996 /* are storm stats valid? */
3997 if ((u16)(le16_to_cpu(xclient->stats_counter) + 1) !=
3998 bp->stats_counter) {
3999 DP(BNX2X_MSG_STATS, "[%d] stats not updated by xstorm"
4000 " xstorm counter (%d) != stats_counter (%d)\n",
4001 i, xclient->stats_counter, bp->stats_counter);
4004 if ((u16)(le16_to_cpu(tclient->stats_counter) + 1) !=
4005 bp->stats_counter) {
4006 DP(BNX2X_MSG_STATS, "[%d] stats not updated by tstorm"
4007 " tstorm counter (%d) != stats_counter (%d)\n",
4008 i, tclient->stats_counter, bp->stats_counter);
4011 if ((u16)(le16_to_cpu(uclient->stats_counter) + 1) !=
4012 bp->stats_counter) {
4013 DP(BNX2X_MSG_STATS, "[%d] stats not updated by ustorm"
4014 " ustorm counter (%d) != stats_counter (%d)\n",
4015 i, uclient->stats_counter, bp->stats_counter);
4019 qstats->total_bytes_received_hi =
4020 le32_to_cpu(tclient->rcv_broadcast_bytes.hi);
4021 qstats->total_bytes_received_lo =
4022 le32_to_cpu(tclient->rcv_broadcast_bytes.lo);
4024 ADD_64(qstats->total_bytes_received_hi,
4025 le32_to_cpu(tclient->rcv_multicast_bytes.hi),
4026 qstats->total_bytes_received_lo,
4027 le32_to_cpu(tclient->rcv_multicast_bytes.lo));
4029 ADD_64(qstats->total_bytes_received_hi,
4030 le32_to_cpu(tclient->rcv_unicast_bytes.hi),
4031 qstats->total_bytes_received_lo,
4032 le32_to_cpu(tclient->rcv_unicast_bytes.lo));
4034 qstats->valid_bytes_received_hi =
4035 qstats->total_bytes_received_hi;
4036 qstats->valid_bytes_received_lo =
4037 qstats->total_bytes_received_lo;
4039 qstats->error_bytes_received_hi =
4040 le32_to_cpu(tclient->rcv_error_bytes.hi);
4041 qstats->error_bytes_received_lo =
4042 le32_to_cpu(tclient->rcv_error_bytes.lo);
4044 ADD_64(qstats->total_bytes_received_hi,
4045 qstats->error_bytes_received_hi,
4046 qstats->total_bytes_received_lo,
4047 qstats->error_bytes_received_lo);
4049 UPDATE_EXTEND_TSTAT(rcv_unicast_pkts,
4050 total_unicast_packets_received);
4051 UPDATE_EXTEND_TSTAT(rcv_multicast_pkts,
4052 total_multicast_packets_received);
4053 UPDATE_EXTEND_TSTAT(rcv_broadcast_pkts,
4054 total_broadcast_packets_received);
4055 UPDATE_EXTEND_TSTAT(packets_too_big_discard,
4056 etherstatsoverrsizepkts);
4057 UPDATE_EXTEND_TSTAT(no_buff_discard, no_buff_discard);
4059 SUB_EXTEND_USTAT(ucast_no_buff_pkts,
4060 total_unicast_packets_received);
4061 SUB_EXTEND_USTAT(mcast_no_buff_pkts,
4062 total_multicast_packets_received);
4063 SUB_EXTEND_USTAT(bcast_no_buff_pkts,
4064 total_broadcast_packets_received);
4065 UPDATE_EXTEND_USTAT(ucast_no_buff_pkts, no_buff_discard);
4066 UPDATE_EXTEND_USTAT(mcast_no_buff_pkts, no_buff_discard);
4067 UPDATE_EXTEND_USTAT(bcast_no_buff_pkts, no_buff_discard);
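/* Packets that ustorm dropped for lack of buffers were already counted as
 * received by tstorm, so the no-buff counts are subtracted from the
 * received totals and accumulated into no_buff_discard instead.
 */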
4069 qstats->total_bytes_transmitted_hi =
4070 le32_to_cpu(xclient->unicast_bytes_sent.hi);
4071 qstats->total_bytes_transmitted_lo =
4072 le32_to_cpu(xclient->unicast_bytes_sent.lo);
4074 ADD_64(qstats->total_bytes_transmitted_hi,
4075 le32_to_cpu(xclient->multicast_bytes_sent.hi),
4076 qstats->total_bytes_transmitted_lo,
4077 le32_to_cpu(xclient->multicast_bytes_sent.lo));
4079 ADD_64(qstats->total_bytes_transmitted_hi,
4080 le32_to_cpu(xclient->broadcast_bytes_sent.hi),
4081 qstats->total_bytes_transmitted_lo,
4082 le32_to_cpu(xclient->broadcast_bytes_sent.lo));
4084 UPDATE_EXTEND_XSTAT(unicast_pkts_sent,
4085 total_unicast_packets_transmitted);
4086 UPDATE_EXTEND_XSTAT(multicast_pkts_sent,
4087 total_multicast_packets_transmitted);
4088 UPDATE_EXTEND_XSTAT(broadcast_pkts_sent,
4089 total_broadcast_packets_transmitted);
4091 old_tclient->checksum_discard = tclient->checksum_discard;
4092 old_tclient->ttl0_discard = tclient->ttl0_discard;
4094 ADD_64(fstats->total_bytes_received_hi,
4095 qstats->total_bytes_received_hi,
4096 fstats->total_bytes_received_lo,
4097 qstats->total_bytes_received_lo);
4098 ADD_64(fstats->total_bytes_transmitted_hi,
4099 qstats->total_bytes_transmitted_hi,
4100 fstats->total_bytes_transmitted_lo,
4101 qstats->total_bytes_transmitted_lo);
4102 ADD_64(fstats->total_unicast_packets_received_hi,
4103 qstats->total_unicast_packets_received_hi,
4104 fstats->total_unicast_packets_received_lo,
4105 qstats->total_unicast_packets_received_lo);
4106 ADD_64(fstats->total_multicast_packets_received_hi,
4107 qstats->total_multicast_packets_received_hi,
4108 fstats->total_multicast_packets_received_lo,
4109 qstats->total_multicast_packets_received_lo);
4110 ADD_64(fstats->total_broadcast_packets_received_hi,
4111 qstats->total_broadcast_packets_received_hi,
4112 fstats->total_broadcast_packets_received_lo,
4113 qstats->total_broadcast_packets_received_lo);
4114 ADD_64(fstats->total_unicast_packets_transmitted_hi,
4115 qstats->total_unicast_packets_transmitted_hi,
4116 fstats->total_unicast_packets_transmitted_lo,
4117 qstats->total_unicast_packets_transmitted_lo);
4118 ADD_64(fstats->total_multicast_packets_transmitted_hi,
4119 qstats->total_multicast_packets_transmitted_hi,
4120 fstats->total_multicast_packets_transmitted_lo,
4121 qstats->total_multicast_packets_transmitted_lo);
4122 ADD_64(fstats->total_broadcast_packets_transmitted_hi,
4123 qstats->total_broadcast_packets_transmitted_hi,
4124 fstats->total_broadcast_packets_transmitted_lo,
4125 qstats->total_broadcast_packets_transmitted_lo);
4126 ADD_64(fstats->valid_bytes_received_hi,
4127 qstats->valid_bytes_received_hi,
4128 fstats->valid_bytes_received_lo,
4129 qstats->valid_bytes_received_lo);
4131 ADD_64(estats->error_bytes_received_hi,
4132 qstats->error_bytes_received_hi,
4133 estats->error_bytes_received_lo,
4134 qstats->error_bytes_received_lo);
4135 ADD_64(estats->etherstatsoverrsizepkts_hi,
4136 qstats->etherstatsoverrsizepkts_hi,
4137 estats->etherstatsoverrsizepkts_lo,
4138 qstats->etherstatsoverrsizepkts_lo);
4139 ADD_64(estats->no_buff_discard_hi, qstats->no_buff_discard_hi,
4140 estats->no_buff_discard_lo, qstats->no_buff_discard_lo);
4143 ADD_64(fstats->total_bytes_received_hi,
4144 estats->rx_stat_ifhcinbadoctets_hi,
4145 fstats->total_bytes_received_lo,
4146 estats->rx_stat_ifhcinbadoctets_lo);
4148 memcpy(estats, &(fstats->total_bytes_received_hi),
4149 sizeof(struct host_func_stats) - 2*sizeof(u32));
4151 ADD_64(estats->etherstatsoverrsizepkts_hi,
4152 estats->rx_stat_dot3statsframestoolong_hi,
4153 estats->etherstatsoverrsizepkts_lo,
4154 estats->rx_stat_dot3statsframestoolong_lo);
4155 ADD_64(estats->error_bytes_received_hi,
4156 estats->rx_stat_ifhcinbadoctets_hi,
4157 estats->error_bytes_received_lo,
4158 estats->rx_stat_ifhcinbadoctets_lo);
4161 estats->mac_filter_discard =
4162 le32_to_cpu(tport->mac_filter_discard);
4163 estats->xxoverflow_discard =
4164 le32_to_cpu(tport->xxoverflow_discard);
4165 estats->brb_truncate_discard =
4166 le32_to_cpu(tport->brb_truncate_discard);
4167 estats->mac_discard = le32_to_cpu(tport->mac_discard);
4170 fstats->host_func_stats_start = ++fstats->host_func_stats_end;
4172 bp->stats_pending = 0;
4177 static void bnx2x_net_stats_update(struct bnx2x *bp)
4179 struct bnx2x_eth_stats *estats = &bp->eth_stats;
4180 struct net_device_stats *nstats = &bp->dev->stats;
4183 nstats->rx_packets =
4184 bnx2x_hilo(&estats->total_unicast_packets_received_hi) +
4185 bnx2x_hilo(&estats->total_multicast_packets_received_hi) +
4186 bnx2x_hilo(&estats->total_broadcast_packets_received_hi);
4188 nstats->tx_packets =
4189 bnx2x_hilo(&estats->total_unicast_packets_transmitted_hi) +
4190 bnx2x_hilo(&estats->total_multicast_packets_transmitted_hi) +
4191 bnx2x_hilo(&estats->total_broadcast_packets_transmitted_hi);
4193 nstats->rx_bytes = bnx2x_hilo(&estats->total_bytes_received_hi);
4195 nstats->tx_bytes = bnx2x_hilo(&estats->total_bytes_transmitted_hi);
4197 nstats->rx_dropped = estats->mac_discard;
4198 for_each_rx_queue(bp, i)
4199 nstats->rx_dropped +=
4200 le32_to_cpu(bp->fp[i].old_tclient.checksum_discard);
4202 nstats->tx_dropped = 0;
4205 bnx2x_hilo(&estats->total_multicast_packets_received_hi);
4207 nstats->collisions =
4208 bnx2x_hilo(&estats->tx_stat_etherstatscollisions_hi);
4210 nstats->rx_length_errors =
4211 bnx2x_hilo(&estats->rx_stat_etherstatsundersizepkts_hi) +
4212 bnx2x_hilo(&estats->etherstatsoverrsizepkts_hi);
4213 nstats->rx_over_errors = bnx2x_hilo(&estats->brb_drop_hi) +
4214 bnx2x_hilo(&estats->brb_truncate_hi);
4215 nstats->rx_crc_errors =
4216 bnx2x_hilo(&estats->rx_stat_dot3statsfcserrors_hi);
4217 nstats->rx_frame_errors =
4218 bnx2x_hilo(&estats->rx_stat_dot3statsalignmenterrors_hi);
4219 nstats->rx_fifo_errors = bnx2x_hilo(&estats->no_buff_discard_hi);
4220 nstats->rx_missed_errors = estats->xxoverflow_discard;
4222 nstats->rx_errors = nstats->rx_length_errors +
4223 nstats->rx_over_errors +
4224 nstats->rx_crc_errors +
4225 nstats->rx_frame_errors +
4226 nstats->rx_fifo_errors +
4227 nstats->rx_missed_errors;
4229 nstats->tx_aborted_errors =
4230 bnx2x_hilo(&estats->tx_stat_dot3statslatecollisions_hi) +
4231 bnx2x_hilo(&estats->tx_stat_dot3statsexcessivecollisions_hi);
4232 nstats->tx_carrier_errors =
4233 bnx2x_hilo(&estats->rx_stat_dot3statscarriersenseerrors_hi);
4234 nstats->tx_fifo_errors = 0;
4235 nstats->tx_heartbeat_errors = 0;
4236 nstats->tx_window_errors = 0;
4238 nstats->tx_errors = nstats->tx_aborted_errors +
4239 nstats->tx_carrier_errors +
4240 bnx2x_hilo(&estats->tx_stat_dot3statsinternalmactransmiterrors_hi);
4243 static void bnx2x_drv_stats_update(struct bnx2x *bp)
4245 struct bnx2x_eth_stats *estats = &bp->eth_stats;
4246 int i;
4248 estats->driver_xoff = 0;
4249 estats->rx_err_discard_pkt = 0;
4250 estats->rx_skb_alloc_failed = 0;
4251 estats->hw_csum_err = 0;
4252 for_each_rx_queue(bp, i) {
4253 struct bnx2x_eth_q_stats *qstats = &bp->fp[i].eth_q_stats;
4255 estats->driver_xoff += qstats->driver_xoff;
4256 estats->rx_err_discard_pkt += qstats->rx_err_discard_pkt;
4257 estats->rx_skb_alloc_failed += qstats->rx_skb_alloc_failed;
4258 estats->hw_csum_err += qstats->hw_csum_err;
4262 static void bnx2x_stats_update(struct bnx2x *bp)
4264 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
4266 if (*stats_comp != DMAE_COMP_VAL)
4267 return;
4269 if (bp->port.pmf)
4270 bnx2x_hw_stats_update(bp);
4272 if (bnx2x_storm_stats_update(bp) && (bp->stats_pending++ == 3)) {
4273 BNX2X_ERR("storm stats were not updated for 3 times\n");
4274 bnx2x_panic();
4275 return;
4276 }
4278 bnx2x_net_stats_update(bp);
4279 bnx2x_drv_stats_update(bp);
4281 if (bp->msglevel & NETIF_MSG_TIMER) {
4282 struct bnx2x_fastpath *fp0_rx = bp->fp;
4283 struct bnx2x_fastpath *fp0_tx = &(bp->fp[bp->num_rx_queues]);
4284 struct tstorm_per_client_stats *old_tclient =
4285 &bp->fp->old_tclient;
4286 struct bnx2x_eth_q_stats *qstats = &bp->fp->eth_q_stats;
4287 struct bnx2x_eth_stats *estats = &bp->eth_stats;
4288 struct net_device_stats *nstats = &bp->dev->stats;
4289 int i;
4291 printk(KERN_DEBUG "%s:\n", bp->dev->name);
4292 printk(KERN_DEBUG " tx avail (%4x) tx hc idx (%x)"
4293 " tx pkt (%lx)\n",
4294 bnx2x_tx_avail(fp0_tx),
4295 le16_to_cpu(*fp0_tx->tx_cons_sb), nstats->tx_packets);
4296 printk(KERN_DEBUG " rx usage (%4x) rx hc idx (%x)"
4297 " rx pkt (%lx)\n",
4298 (u16)(le16_to_cpu(*fp0_rx->rx_cons_sb) -
4299 fp0_rx->rx_comp_cons),
4300 le16_to_cpu(*fp0_rx->rx_cons_sb), nstats->rx_packets);
4301 printk(KERN_DEBUG " %s (Xoff events %u) brb drops %u "
4302 "brb truncate %u\n",
4303 (netif_queue_stopped(bp->dev) ? "Xoff" : "Xon"),
4304 qstats->driver_xoff,
4305 estats->brb_drop_lo, estats->brb_truncate_lo);
4306 printk(KERN_DEBUG "tstats: checksum_discard %u "
4307 "packets_too_big_discard %lu no_buff_discard %lu "
4308 "mac_discard %u mac_filter_discard %u "
4309 "xxovrflow_discard %u brb_truncate_discard %u "
4310 "ttl0_discard %u\n",
4311 le32_to_cpu(old_tclient->checksum_discard),
4312 bnx2x_hilo(&qstats->etherstatsoverrsizepkts_hi),
4313 bnx2x_hilo(&qstats->no_buff_discard_hi),
4314 estats->mac_discard, estats->mac_filter_discard,
4315 estats->xxoverflow_discard, estats->brb_truncate_discard,
4316 le32_to_cpu(old_tclient->ttl0_discard));
4318 for_each_queue(bp, i) {
4319 printk(KERN_DEBUG "[%d]: %lu\t%lu\t%lu\n", i,
4320 bnx2x_fp(bp, i, tx_pkt),
4321 bnx2x_fp(bp, i, rx_pkt),
4322 bnx2x_fp(bp, i, rx_calls));
4326 bnx2x_hw_stats_post(bp);
4327 bnx2x_storm_stats_post(bp);
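/* re-arm collection right away: post the DMAE commands for the HW blocks
   and the statistics ramrod for the storms, so fresh counters are already
   in flight before the next timer tick */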
4330 static void bnx2x_port_stats_stop(struct bnx2x *bp)
4332 struct dmae_command *dmae;
4333 u32 opcode;
4334 int loader_idx = PMF_DMAE_C(bp);
4335 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
4337 bp->executer_idx = 0;
4339 opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
4340 DMAE_CMD_C_ENABLE |
4341 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
4342 #ifdef __BIG_ENDIAN
4343 DMAE_CMD_ENDIANITY_B_DW_SWAP |
4344 #else
4345 DMAE_CMD_ENDIANITY_DW_SWAP |
4346 #endif
4347 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
4348 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
4350 if (bp->port.port_stx) {
4352 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4353 if (bp->func_stx)
4354 dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
4355 else
4356 dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
4357 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
4358 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
4359 dmae->dst_addr_lo = bp->port.port_stx >> 2;
4360 dmae->dst_addr_hi = 0;
4361 dmae->len = sizeof(struct host_port_stats) >> 2;
4362 if (bp->func_stx) {
4363 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
4364 dmae->comp_addr_hi = 0;
4365 dmae->comp_val = 1;
4366 } else {
4367 dmae->comp_addr_lo =
4368 U64_LO(bnx2x_sp_mapping(bp, stats_comp));
4369 dmae->comp_addr_hi =
4370 U64_HI(bnx2x_sp_mapping(bp, stats_comp));
4371 dmae->comp_val = DMAE_COMP_VAL;
4373 *stats_comp = 0;
4374 }
4375 }
4377 if (bp->func_stx) {
4379 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4380 dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
4381 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
4382 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
4383 dmae->dst_addr_lo = bp->func_stx >> 2;
4384 dmae->dst_addr_hi = 0;
4385 dmae->len = sizeof(struct host_func_stats) >> 2;
4386 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
4387 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
4388 dmae->comp_val = DMAE_COMP_VAL;
4390 *stats_comp = 0;
4391 }
4392 }
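/* completion handshake used above: the driver zeroes stats_comp and the
   DMAE engine writes DMAE_COMP_VAL there when the transfer finishes;
   bnx2x_stats_comp() polls for that value */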
4394 static void bnx2x_stats_stop(struct bnx2x *bp)
4396 int update = 0;
4398 bnx2x_stats_comp(bp);
4400 if (bp->port.pmf)
4401 update = (bnx2x_hw_stats_update(bp) == 0);
4403 update |= (bnx2x_storm_stats_update(bp) == 0);
4405 if (update) {
4406 bnx2x_net_stats_update(bp);
4408 if (bp->port.pmf)
4409 bnx2x_port_stats_stop(bp);
4411 bnx2x_hw_stats_post(bp);
4412 bnx2x_stats_comp(bp);
4413 }
4414 }
4416 static void bnx2x_stats_do_nothing(struct bnx2x *bp)
4420 static const struct {
4421 void (*action)(struct bnx2x *bp);
4422 enum bnx2x_stats_state next_state;
4423 } bnx2x_stats_stm[STATS_STATE_MAX][STATS_EVENT_MAX] = {
4424 /* state	event	*/
4425 {
4426 /* DISABLED PMF */ {bnx2x_stats_pmf_update, STATS_STATE_DISABLED},
4427 /* LINK_UP */ {bnx2x_stats_start, STATS_STATE_ENABLED},
4428 /* UPDATE */ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED},
4429 /* STOP */ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED}
4430 },
4431 {
4432 /* ENABLED PMF */ {bnx2x_stats_pmf_start, STATS_STATE_ENABLED},
4433 /* LINK_UP */ {bnx2x_stats_restart, STATS_STATE_ENABLED},
4434 /* UPDATE */ {bnx2x_stats_update, STATS_STATE_ENABLED},
4435 /* STOP */ {bnx2x_stats_stop, STATS_STATE_DISABLED}
4436 }
4437 };
4439 static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event)
4441 enum bnx2x_stats_state state = bp->stats_state;
4443 bnx2x_stats_stm[state][event].action(bp);
4444 bp->stats_state = bnx2x_stats_stm[state][event].next_state;
4446 /* Make sure the state has been "changed" */
4447 smp_wmb();
4449 if ((event != STATS_EVENT_UPDATE) || (bp->msglevel & NETIF_MSG_TIMER))
4450 DP(BNX2X_MSG_STATS, "state %d -> event %d -> state %d\n",
4451 state, event, bp->stats_state);
4454 static void bnx2x_port_stats_base_init(struct bnx2x *bp)
4456 struct dmae_command *dmae;
4457 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
4460 if (!bp->port.pmf || !bp->port.port_stx) {
4461 BNX2X_ERR("BUG!\n");
4465 bp->executer_idx = 0;
4467 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4468 dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
4469 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
4470 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
4471 #ifdef __BIG_ENDIAN
4472 DMAE_CMD_ENDIANITY_B_DW_SWAP |
4473 #else
4474 DMAE_CMD_ENDIANITY_DW_SWAP |
4475 #endif
4476 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
4477 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
4478 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
4479 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
4480 dmae->dst_addr_lo = bp->port.port_stx >> 2;
4481 dmae->dst_addr_hi = 0;
4482 dmae->len = sizeof(struct host_port_stats) >> 2;
4483 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
4484 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
4485 dmae->comp_val = DMAE_COMP_VAL;
4487 *stats_comp = 0;
4488 bnx2x_hw_stats_post(bp);
4489 bnx2x_stats_comp(bp);
4492 static void bnx2x_func_stats_base_init(struct bnx2x *bp)
4494 int vn, vn_max = IS_E1HMF(bp) ? E1HVN_MAX : E1VN_MAX;
4495 int port = BP_PORT(bp);
4496 int func;
4497 u32 func_stx;
4499 /* sanity */
4500 if (!bp->port.pmf || !bp->func_stx) {
4501 BNX2X_ERR("BUG!\n");
4505 /* save our func_stx */
4506 func_stx = bp->func_stx;
4508 for (vn = VN_0; vn < vn_max; vn++) {
4509 int func = 2*vn + port;
4511 bp->func_stx = SHMEM_RD(bp, func_mb[func].fw_mb_param);
4512 bnx2x_func_stats_init(bp);
4513 bnx2x_hw_stats_post(bp);
4514 bnx2x_stats_comp(bp);
4517 /* restore our func_stx */
4518 bp->func_stx = func_stx;
4521 static void bnx2x_func_stats_base_update(struct bnx2x *bp)
4523 struct dmae_command *dmae = &bp->stats_dmae;
4524 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
4527 if (!bp->func_stx) {
4528 BNX2X_ERR("BUG!\n");
4532 bp->executer_idx = 0;
4533 memset(dmae, 0, sizeof(struct dmae_command));
4535 dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
4536 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
4537 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
4538 #ifdef __BIG_ENDIAN
4539 DMAE_CMD_ENDIANITY_B_DW_SWAP |
4540 #else
4541 DMAE_CMD_ENDIANITY_DW_SWAP |
4542 #endif
4543 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
4544 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
4545 dmae->src_addr_lo = bp->func_stx >> 2;
4546 dmae->src_addr_hi = 0;
4547 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats_base));
4548 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats_base));
4549 dmae->len = sizeof(struct host_func_stats) >> 2;
4550 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
4551 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
4552 dmae->comp_val = DMAE_COMP_VAL;
4554 *stats_comp = 0;
4555 bnx2x_hw_stats_post(bp);
4556 bnx2x_stats_comp(bp);
4559 static void bnx2x_stats_init(struct bnx2x *bp)
4561 int port = BP_PORT(bp);
4562 int func = BP_FUNC(bp);
4563 int i;
4565 bp->stats_pending = 0;
4566 bp->executer_idx = 0;
4567 bp->stats_counter = 0;
4569 /* port and func stats for management */
4570 if (!BP_NOMCP(bp)) {
4571 bp->port.port_stx = SHMEM_RD(bp, port_mb[port].port_stx);
4572 bp->func_stx = SHMEM_RD(bp, func_mb[func].fw_mb_param);
4573 } else {
4575 bp->port.port_stx = 0;
4576 bp->func_stx = 0;
4577 }
4578 DP(BNX2X_MSG_STATS, "port_stx 0x%x func_stx 0x%x\n",
4579 bp->port.port_stx, bp->func_stx);
4582 memset(&(bp->port.old_nig_stats), 0, sizeof(struct nig_stats));
4583 bp->port.old_nig_stats.brb_discard =
4584 REG_RD(bp, NIG_REG_STAT0_BRB_DISCARD + port*0x38);
4585 bp->port.old_nig_stats.brb_truncate =
4586 REG_RD(bp, NIG_REG_STAT0_BRB_TRUNCATE + port*0x38);
4587 REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT0 + port*0x50,
4588 &(bp->port.old_nig_stats.egress_mac_pkt0_lo), 2);
4589 REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT1 + port*0x50,
4590 &(bp->port.old_nig_stats.egress_mac_pkt1_lo), 2);
4592 /* function stats */
4593 for_each_queue(bp, i) {
4594 struct bnx2x_fastpath *fp = &bp->fp[i];
4596 memset(&fp->old_tclient, 0,
4597 sizeof(struct tstorm_per_client_stats));
4598 memset(&fp->old_uclient, 0,
4599 sizeof(struct ustorm_per_client_stats));
4600 memset(&fp->old_xclient, 0,
4601 sizeof(struct xstorm_per_client_stats));
4602 memset(&fp->eth_q_stats, 0, sizeof(struct bnx2x_eth_q_stats));
4605 memset(&bp->dev->stats, 0, sizeof(struct net_device_stats));
4606 memset(&bp->eth_stats, 0, sizeof(struct bnx2x_eth_stats));
4608 bp->stats_state = STATS_STATE_DISABLED;
4610 if (bp->port.pmf) {
4611 if (bp->port.port_stx)
4612 bnx2x_port_stats_base_init(bp);
4614 if (bp->func_stx)
4615 bnx2x_func_stats_base_init(bp);
4617 } else if (bp->func_stx)
4618 bnx2x_func_stats_base_update(bp);
4621 static void bnx2x_timer(unsigned long data)
4623 struct bnx2x *bp = (struct bnx2x *) data;
4625 if (!netif_running(bp->dev))
4626 return;
4628 if (atomic_read(&bp->intr_sem) != 0)
4629 goto timer_restart;
4631 if (poll) {
4632 struct bnx2x_fastpath *fp = &bp->fp[0];
4633 int rc;
4635 bnx2x_tx_int(fp);
4636 rc = bnx2x_rx_int(fp, 1000);
4637 }
4639 if (!BP_NOMCP(bp)) {
4640 int func = BP_FUNC(bp);
4641 u32 drv_pulse;
4642 u32 mcp_pulse;
4644 ++bp->fw_drv_pulse_wr_seq;
4645 bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK;
4646 /* TBD - add SYSTEM_TIME */
4647 drv_pulse = bp->fw_drv_pulse_wr_seq;
4648 SHMEM_WR(bp, func_mb[func].drv_pulse_mb, drv_pulse);
4650 mcp_pulse = (SHMEM_RD(bp, func_mb[func].mcp_pulse_mb) &
4651 MCP_PULSE_SEQ_MASK);
4652 /* The delta between driver pulse and mcp response
4653 * should be 1 (before mcp response) or 0 (after mcp response)
4654 */
4655 if ((drv_pulse != mcp_pulse) &&
4656 (drv_pulse != ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK))) {
4657 /* someone lost a heartbeat... */
4658 BNX2X_ERR("drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
4659 drv_pulse, mcp_pulse);
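/* e.g. drv_pulse 0x0010 is healthy against mcp_pulse 0x0010 (MCP already
   answered) or 0x000f (answer still pending); any other pair means a
   missed heartbeat */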
4663 if ((bp->state == BNX2X_STATE_OPEN) ||
4664 (bp->state == BNX2X_STATE_DISABLED))
4665 bnx2x_stats_handle(bp, STATS_EVENT_UPDATE);
4668 mod_timer(&bp->timer, jiffies + bp->current_interval);
4671 /* end of Statistics */
4676 * nic init service functions
4679 static void bnx2x_zero_sb(struct bnx2x *bp, int sb_id)
4681 int port = BP_PORT(bp);
4684 bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
4685 CSTORM_SB_HOST_STATUS_BLOCK_U_OFFSET(port, sb_id), 0,
4686 CSTORM_SB_STATUS_BLOCK_U_SIZE / 4);
4687 bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
4688 CSTORM_SB_HOST_STATUS_BLOCK_C_OFFSET(port, sb_id), 0,
4689 CSTORM_SB_STATUS_BLOCK_C_SIZE / 4);
4692 static void bnx2x_init_sb(struct bnx2x *bp, struct host_status_block *sb,
4693 dma_addr_t mapping, int sb_id)
4695 int port = BP_PORT(bp);
4696 int func = BP_FUNC(bp);
4701 section = ((u64)mapping) + offsetof(struct host_status_block,
4703 sb->u_status_block.status_block_id = sb_id;
4705 REG_WR(bp, BAR_CSTRORM_INTMEM +
4706 CSTORM_SB_HOST_SB_ADDR_U_OFFSET(port, sb_id), U64_LO(section));
4707 REG_WR(bp, BAR_CSTRORM_INTMEM +
4708 ((CSTORM_SB_HOST_SB_ADDR_U_OFFSET(port, sb_id)) + 4),
4710 REG_WR8(bp, BAR_CSTRORM_INTMEM + FP_USB_FUNC_OFF +
4711 CSTORM_SB_HOST_STATUS_BLOCK_U_OFFSET(port, sb_id), func);
4713 for (index = 0; index < HC_USTORM_SB_NUM_INDICES; index++)
4714 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4715 CSTORM_SB_HC_DISABLE_U_OFFSET(port, sb_id, index), 1);
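/* writing 1 to an HC_DISABLE word masks coalescing for that status block
   index; bnx2x_update_coalesce() later re-enables the Rx/Tx CQ indices
   once real timeout values are programmed */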
4718 section = ((u64)mapping) + offsetof(struct host_status_block,
4720 sb->c_status_block.status_block_id = sb_id;
4722 REG_WR(bp, BAR_CSTRORM_INTMEM +
4723 CSTORM_SB_HOST_SB_ADDR_C_OFFSET(port, sb_id), U64_LO(section));
4724 REG_WR(bp, BAR_CSTRORM_INTMEM +
4725 ((CSTORM_SB_HOST_SB_ADDR_C_OFFSET(port, sb_id)) + 4),
4727 REG_WR8(bp, BAR_CSTRORM_INTMEM + FP_CSB_FUNC_OFF +
4728 CSTORM_SB_HOST_STATUS_BLOCK_C_OFFSET(port, sb_id), func);
4730 for (index = 0; index < HC_CSTORM_SB_NUM_INDICES; index++)
4731 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4732 CSTORM_SB_HC_DISABLE_C_OFFSET(port, sb_id, index), 1);
4734 bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
4737 static void bnx2x_zero_def_sb(struct bnx2x *bp)
4739 int func = BP_FUNC(bp);
4741 bnx2x_init_fill(bp, TSEM_REG_FAST_MEMORY +
4742 TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4743 sizeof(struct tstorm_def_status_block)/4);
4744 bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
4745 CSTORM_DEF_SB_HOST_STATUS_BLOCK_U_OFFSET(func), 0,
4746 sizeof(struct cstorm_def_status_block_u)/4);
4747 bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
4748 CSTORM_DEF_SB_HOST_STATUS_BLOCK_C_OFFSET(func), 0,
4749 sizeof(struct cstorm_def_status_block_c)/4);
4750 bnx2x_init_fill(bp, XSEM_REG_FAST_MEMORY +
4751 XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4752 sizeof(struct xstorm_def_status_block)/4);
4755 static void bnx2x_init_def_sb(struct bnx2x *bp,
4756 struct host_def_status_block *def_sb,
4757 dma_addr_t mapping, int sb_id)
4759 int port = BP_PORT(bp);
4760 int func = BP_FUNC(bp);
4761 int index, val, reg_offset;
4765 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4766 atten_status_block);
4767 def_sb->atten_status_block.status_block_id = sb_id;
4771 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
4772 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
4774 for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
4775 bp->attn_group[index].sig[0] = REG_RD(bp,
4776 reg_offset + 0x10*index);
4777 bp->attn_group[index].sig[1] = REG_RD(bp,
4778 reg_offset + 0x4 + 0x10*index);
4779 bp->attn_group[index].sig[2] = REG_RD(bp,
4780 reg_offset + 0x8 + 0x10*index);
4781 bp->attn_group[index].sig[3] = REG_RD(bp,
4782 reg_offset + 0xc + 0x10*index);
4785 reg_offset = (port ? HC_REG_ATTN_MSG1_ADDR_L :
4786 HC_REG_ATTN_MSG0_ADDR_L);
4788 REG_WR(bp, reg_offset, U64_LO(section));
4789 REG_WR(bp, reg_offset + 4, U64_HI(section));
4791 reg_offset = (port ? HC_REG_ATTN_NUM_P1 : HC_REG_ATTN_NUM_P0);
4793 val = REG_RD(bp, reg_offset);
4795 REG_WR(bp, reg_offset, val);
4798 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4799 u_def_status_block);
4800 def_sb->u_def_status_block.status_block_id = sb_id;
4802 REG_WR(bp, BAR_CSTRORM_INTMEM +
4803 CSTORM_DEF_SB_HOST_SB_ADDR_U_OFFSET(func), U64_LO(section));
4804 REG_WR(bp, BAR_CSTRORM_INTMEM +
4805 ((CSTORM_DEF_SB_HOST_SB_ADDR_U_OFFSET(func)) + 4),
4807 REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_USB_FUNC_OFF +
4808 CSTORM_DEF_SB_HOST_STATUS_BLOCK_U_OFFSET(func), func);
4810 for (index = 0; index < HC_USTORM_DEF_SB_NUM_INDICES; index++)
4811 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4812 CSTORM_DEF_SB_HC_DISABLE_U_OFFSET(func, index), 1);
4815 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4816 c_def_status_block);
4817 def_sb->c_def_status_block.status_block_id = sb_id;
4819 REG_WR(bp, BAR_CSTRORM_INTMEM +
4820 CSTORM_DEF_SB_HOST_SB_ADDR_C_OFFSET(func), U64_LO(section));
4821 REG_WR(bp, BAR_CSTRORM_INTMEM +
4822 ((CSTORM_DEF_SB_HOST_SB_ADDR_C_OFFSET(func)) + 4),
4824 REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_CSB_FUNC_OFF +
4825 CSTORM_DEF_SB_HOST_STATUS_BLOCK_C_OFFSET(func), func);
4827 for (index = 0; index < HC_CSTORM_DEF_SB_NUM_INDICES; index++)
4828 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4829 CSTORM_DEF_SB_HC_DISABLE_C_OFFSET(func, index), 1);
4832 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4833 t_def_status_block);
4834 def_sb->t_def_status_block.status_block_id = sb_id;
4836 REG_WR(bp, BAR_TSTRORM_INTMEM +
4837 TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
4838 REG_WR(bp, BAR_TSTRORM_INTMEM +
4839 ((TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
4841 REG_WR8(bp, BAR_TSTRORM_INTMEM + DEF_TSB_FUNC_OFF +
4842 TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4844 for (index = 0; index < HC_TSTORM_DEF_SB_NUM_INDICES; index++)
4845 REG_WR16(bp, BAR_TSTRORM_INTMEM +
4846 TSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
4849 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4850 x_def_status_block);
4851 def_sb->x_def_status_block.status_block_id = sb_id;
4853 REG_WR(bp, BAR_XSTRORM_INTMEM +
4854 XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
4855 REG_WR(bp, BAR_XSTRORM_INTMEM +
4856 ((XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
4858 REG_WR8(bp, BAR_XSTRORM_INTMEM + DEF_XSB_FUNC_OFF +
4859 XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4861 for (index = 0; index < HC_XSTORM_DEF_SB_NUM_INDICES; index++)
4862 REG_WR16(bp, BAR_XSTRORM_INTMEM +
4863 XSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
4865 bp->stats_pending = 0;
4866 bp->set_mac_pending = 0;
4868 bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
4871 static void bnx2x_update_coalesce(struct bnx2x *bp)
4873 int port = BP_PORT(bp);
4876 for_each_queue(bp, i) {
4877 int sb_id = bp->fp[i].sb_id;
4879 /* HC_INDEX_U_ETH_RX_CQ_CONS */
4880 REG_WR8(bp, BAR_CSTRORM_INTMEM +
4881 CSTORM_SB_HC_TIMEOUT_U_OFFSET(port, sb_id,
4882 U_SB_ETH_RX_CQ_INDEX),
4883 bp->rx_ticks/12);
4884 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4885 CSTORM_SB_HC_DISABLE_U_OFFSET(port, sb_id,
4886 U_SB_ETH_RX_CQ_INDEX),
4887 (bp->rx_ticks/12) ? 0 : 1);
4889 /* HC_INDEX_C_ETH_TX_CQ_CONS */
4890 REG_WR8(bp, BAR_CSTRORM_INTMEM +
4891 CSTORM_SB_HC_TIMEOUT_C_OFFSET(port, sb_id,
4892 C_SB_ETH_TX_CQ_INDEX),
4893 bp->tx_ticks/12);
4894 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4895 CSTORM_SB_HC_DISABLE_C_OFFSET(port, sb_id,
4896 C_SB_ETH_TX_CQ_INDEX),
4897 (bp->tx_ticks/12) ? 0 : 1);
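/* rx_ticks/tx_ticks are in usec; the /12 converts them to the storm
   timeout units (apparently 12us granularity), and a zero timeout simply
   leaves coalescing disabled for that index */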
4901 static inline void bnx2x_free_tpa_pool(struct bnx2x *bp,
4902 struct bnx2x_fastpath *fp, int last)
4906 for (i = 0; i < last; i++) {
4907 struct sw_rx_bd *rx_buf = &(fp->tpa_pool[i]);
4908 struct sk_buff *skb = rx_buf->skb;
4910 if (skb == NULL) {
4911 DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i);
4912 continue;
4913 }
4915 if (fp->tpa_state[i] == BNX2X_TPA_START)
4916 pci_unmap_single(bp->pdev,
4917 pci_unmap_addr(rx_buf, mapping),
4918 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
4920 dev_kfree_skb(skb);
4921 rx_buf->skb = NULL;
4922 }
4925 static void bnx2x_init_rx_rings(struct bnx2x *bp)
4927 int func = BP_FUNC(bp);
4928 int max_agg_queues = CHIP_IS_E1(bp) ? ETH_MAX_AGGREGATION_QUEUES_E1 :
4929 ETH_MAX_AGGREGATION_QUEUES_E1H;
4930 u16 ring_prod, cqe_ring_prod;
4933 bp->rx_buf_size = bp->dev->mtu + ETH_OVREHEAD + BNX2X_RX_ALIGN;
4934 DP(NETIF_MSG_IFUP,
4935 "mtu %d rx_buf_size %d\n", bp->dev->mtu, bp->rx_buf_size);
4937 if (bp->flags & TPA_ENABLE_FLAG) {
4939 for_each_rx_queue(bp, j) {
4940 struct bnx2x_fastpath *fp = &bp->fp[j];
4942 for (i = 0; i < max_agg_queues; i++) {
4943 fp->tpa_pool[i].skb =
4944 netdev_alloc_skb(bp->dev, bp->rx_buf_size);
4945 if (!fp->tpa_pool[i].skb) {
4946 BNX2X_ERR("Failed to allocate TPA "
4947 "skb pool for queue[%d] - "
4948 "disabling TPA on this "
4950 bnx2x_free_tpa_pool(bp, fp, i);
4951 fp->disable_tpa = 1;
4954 pci_unmap_addr_set((struct sw_rx_bd *)
4955 &bp->fp->tpa_pool[i],
4957 fp->tpa_state[i] = BNX2X_TPA_STOP;
4962 for_each_rx_queue(bp, j) {
4963 struct bnx2x_fastpath *fp = &bp->fp[j];
4966 fp->rx_cons_sb = BNX2X_RX_SB_INDEX;
4967 fp->rx_bd_cons_sb = BNX2X_RX_SB_BD_INDEX;
4969 /* Mark queue as Rx */
4970 fp->is_rx_queue = 1;
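/* each ring is a chain of BCM_PAGE_SIZE pages whose final element(s) hold
   a pointer to the next page - hence the "* i - 2" / "* i - 1" index
   arithmetic in the loops below */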
4972 /* "next page" elements initialization */
4974 for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
4975 struct eth_rx_sge *sge;
4977 sge = &fp->rx_sge_ring[RX_SGE_CNT * i - 2];
4979 cpu_to_le32(U64_HI(fp->rx_sge_mapping +
4980 BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
4982 cpu_to_le32(U64_LO(fp->rx_sge_mapping +
4983 BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
4986 bnx2x_init_sge_ring_bit_mask(fp);
4989 for (i = 1; i <= NUM_RX_RINGS; i++) {
4990 struct eth_rx_bd *rx_bd;
4992 rx_bd = &fp->rx_desc_ring[RX_DESC_CNT * i - 2];
4994 cpu_to_le32(U64_HI(fp->rx_desc_mapping +
4995 BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
4997 cpu_to_le32(U64_LO(fp->rx_desc_mapping +
4998 BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
5002 for (i = 1; i <= NUM_RCQ_RINGS; i++) {
5003 struct eth_rx_cqe_next_page *nextpg;
5005 nextpg = (struct eth_rx_cqe_next_page *)
5006 &fp->rx_comp_ring[RCQ_DESC_CNT * i - 1];
5007 nextpg->addr_hi =
5008 cpu_to_le32(U64_HI(fp->rx_comp_mapping +
5009 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
5010 nextpg->addr_lo =
5011 cpu_to_le32(U64_LO(fp->rx_comp_mapping +
5012 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
5013 }
5015 /* Allocate SGEs and initialize the ring elements */
5016 for (i = 0, ring_prod = 0;
5017 i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {
5019 if (bnx2x_alloc_rx_sge(bp, fp, ring_prod) < 0) {
5020 BNX2X_ERR("was only able to allocate "
5022 BNX2X_ERR("disabling TPA for queue[%d]\n", j);
5023 /* Cleanup already allocated elements */
5024 bnx2x_free_rx_sge_range(bp, fp, ring_prod);
5025 bnx2x_free_tpa_pool(bp, fp, max_agg_queues);
5026 fp->disable_tpa = 1;
5027 ring_prod = 0;
5028 break;
5029 }
5030 ring_prod = NEXT_SGE_IDX(ring_prod);
5032 fp->rx_sge_prod = ring_prod;
5034 /* Allocate BDs and initialize BD ring */
5035 fp->rx_comp_cons = 0;
5036 cqe_ring_prod = ring_prod = 0;
5037 for (i = 0; i < bp->rx_ring_size; i++) {
5038 if (bnx2x_alloc_rx_skb(bp, fp, ring_prod) < 0) {
5039 BNX2X_ERR("was only able to allocate "
5040 "%d rx skbs on queue[%d]\n", i, j);
5041 fp->eth_q_stats.rx_skb_alloc_failed++;
5042 break;
5043 }
5044 ring_prod = NEXT_RX_IDX(ring_prod);
5045 cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
5046 WARN_ON(ring_prod <= i);
5049 fp->rx_bd_prod = ring_prod;
5050 /* must not have more available CQEs than BDs */
5051 fp->rx_comp_prod = min((u16)(NUM_RCQ_RINGS*RCQ_DESC_CNT),
5052 cqe_ring_prod);
5053 fp->rx_pkt = fp->rx_calls = 0;
5055 /* Warning!
5056 * this will generate an interrupt (to the TSTORM)
5057 * must only be done after chip is initialized
5058 */
5059 bnx2x_update_rx_prod(bp, fp, ring_prod, fp->rx_comp_prod,
5060 fp->rx_sge_prod);
5061 if (j != 0)
5062 continue;
5064 REG_WR(bp, BAR_USTRORM_INTMEM +
5065 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
5066 U64_LO(fp->rx_comp_mapping));
5067 REG_WR(bp, BAR_USTRORM_INTMEM +
5068 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
5069 U64_HI(fp->rx_comp_mapping));
5073 static void bnx2x_init_tx_ring(struct bnx2x *bp)
5077 for_each_tx_queue(bp, j) {
5078 struct bnx2x_fastpath *fp = &bp->fp[j];
5080 for (i = 1; i <= NUM_TX_RINGS; i++) {
5081 struct eth_tx_next_bd *tx_next_bd =
5082 &fp->tx_desc_ring[TX_DESC_CNT * i - 1].next_bd;
5084 tx_next_bd->addr_hi =
5085 cpu_to_le32(U64_HI(fp->tx_desc_mapping +
5086 BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
5087 tx_next_bd->addr_lo =
5088 cpu_to_le32(U64_LO(fp->tx_desc_mapping +
5089 BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
5092 fp->tx_db.data.header.header = DOORBELL_HDR_DB_TYPE;
5093 fp->tx_db.data.zero_fill1 = 0;
5094 fp->tx_db.data.prod = 0;
5096 fp->tx_pkt_prod = 0;
5097 fp->tx_pkt_cons = 0;
5098 fp->tx_bd_prod = 0;
5099 fp->tx_bd_cons = 0;
5100 fp->tx_cons_sb = BNX2X_TX_SB_INDEX;
5104 /* clean tx statistics */
5105 for_each_rx_queue(bp, i)
5106 bnx2x_fp(bp, i, tx_pkt) = 0;
5109 static void bnx2x_init_sp_ring(struct bnx2x *bp)
5111 int func = BP_FUNC(bp);
5113 spin_lock_init(&bp->spq_lock);
5115 bp->spq_left = MAX_SPQ_PENDING;
5116 bp->spq_prod_idx = 0;
5117 bp->dsb_sp_prod = BNX2X_SP_DSB_INDEX;
5118 bp->spq_prod_bd = bp->spq;
5119 bp->spq_last_bd = bp->spq_prod_bd + MAX_SP_DESC_CNT;
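/* the slowpath queue is a single ring of MAX_SP_DESC_CNT entries; its base
   address and producer index are mirrored into XSTORM below so the
   firmware knows where to fetch slowpath elements from */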
5121 REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func),
5122 U64_LO(bp->spq_mapping));
5123 REG_WR(bp,
5124 XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func) + 4,
5125 U64_HI(bp->spq_mapping));
5127 REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PROD_OFFSET(func),
5128 bp->spq_prod_idx);
5129 }
5131 static void bnx2x_init_context(struct bnx2x *bp)
5135 for_each_rx_queue(bp, i) {
5136 struct eth_context *context = bnx2x_sp(bp, context[i].eth);
5137 struct bnx2x_fastpath *fp = &bp->fp[i];
5138 u8 cl_id = fp->cl_id;
5140 context->ustorm_st_context.common.sb_index_numbers =
5141 BNX2X_RX_SB_INDEX_NUM;
5142 context->ustorm_st_context.common.clientId = cl_id;
5143 context->ustorm_st_context.common.status_block_id = fp->sb_id;
5144 context->ustorm_st_context.common.flags =
5145 (USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_MC_ALIGNMENT |
5146 USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_STATISTICS);
5147 context->ustorm_st_context.common.statistics_counter_id =
5148 cl_id;
5149 context->ustorm_st_context.common.mc_alignment_log_size =
5150 BNX2X_RX_ALIGN_SHIFT;
5151 context->ustorm_st_context.common.bd_buff_size =
5152 bp->rx_buf_size;
5153 context->ustorm_st_context.common.bd_page_base_hi =
5154 U64_HI(fp->rx_desc_mapping);
5155 context->ustorm_st_context.common.bd_page_base_lo =
5156 U64_LO(fp->rx_desc_mapping);
5157 if (!fp->disable_tpa) {
5158 context->ustorm_st_context.common.flags |=
5159 USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_TPA;
5160 context->ustorm_st_context.common.sge_buff_size =
5161 (u16)min((u32)SGE_PAGE_SIZE*PAGES_PER_SGE,
5162 (u32)0xffff);
5163 context->ustorm_st_context.common.sge_page_base_hi =
5164 U64_HI(fp->rx_sge_mapping);
5165 context->ustorm_st_context.common.sge_page_base_lo =
5166 U64_LO(fp->rx_sge_mapping);
5168 context->ustorm_st_context.common.max_sges_for_packet =
5169 SGE_PAGE_ALIGN(bp->dev->mtu) >> SGE_PAGE_SHIFT;
5170 context->ustorm_st_context.common.max_sges_for_packet =
5171 ((context->ustorm_st_context.common.
5172 max_sges_for_packet + PAGES_PER_SGE - 1) &
5173 (~(PAGES_PER_SGE - 1))) >> PAGES_PER_SGE_SHIFT;
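/* i.e. max_sges_for_packet = ceil(mtu_pages / PAGES_PER_SGE), computed
   with mask arithmetic on the SGE_PAGE-aligned MTU */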
5176 context->ustorm_ag_context.cdu_usage =
5177 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
5178 CDU_REGION_NUMBER_UCM_AG,
5179 ETH_CONNECTION_TYPE);
5181 context->xstorm_ag_context.cdu_reserved =
5182 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
5183 CDU_REGION_NUMBER_XCM_AG,
5184 ETH_CONNECTION_TYPE);
5187 for_each_tx_queue(bp, i) {
5188 struct bnx2x_fastpath *fp = &bp->fp[i];
5189 struct eth_context *context =
5190 bnx2x_sp(bp, context[i - bp->num_rx_queues].eth);
5192 context->cstorm_st_context.sb_index_number =
5193 C_SB_ETH_TX_CQ_INDEX;
5194 context->cstorm_st_context.status_block_id = fp->sb_id;
5196 context->xstorm_st_context.tx_bd_page_base_hi =
5197 U64_HI(fp->tx_desc_mapping);
5198 context->xstorm_st_context.tx_bd_page_base_lo =
5199 U64_LO(fp->tx_desc_mapping);
5200 context->xstorm_st_context.statistics_data = (fp->cl_id |
5201 XSTORM_ETH_ST_CONTEXT_STATISTICS_ENABLE);
5205 static void bnx2x_init_ind_table(struct bnx2x *bp)
5207 int func = BP_FUNC(bp);
5210 if (bp->multi_mode == ETH_RSS_MODE_DISABLED)
5211 return;
5213 DP(NETIF_MSG_IFUP,
5214 "Initializing indirection table multi_mode %d\n", bp->multi_mode);
5215 for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++)
5216 REG_WR8(bp, BAR_TSTRORM_INTMEM +
5217 TSTORM_INDIRECTION_TABLE_OFFSET(func) + i,
5218 bp->fp->cl_id + (i % bp->num_rx_queues));
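/* e.g. with 4 Rx queues the indirection entries spread RSS hash results
   round-robin over client ids cl_id+0 .. cl_id+3 */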
5221 static void bnx2x_set_client_config(struct bnx2x *bp)
5223 struct tstorm_eth_client_config tstorm_client = {0};
5224 int port = BP_PORT(bp);
5225 int i;
5227 tstorm_client.mtu = bp->dev->mtu;
5228 tstorm_client.config_flags =
5229 (TSTORM_ETH_CLIENT_CONFIG_STATSITICS_ENABLE |
5230 TSTORM_ETH_CLIENT_CONFIG_E1HOV_REM_ENABLE);
5232 if (bp->rx_mode && bp->vlgrp && (bp->flags & HW_VLAN_RX_FLAG)) {
5233 tstorm_client.config_flags |=
5234 TSTORM_ETH_CLIENT_CONFIG_VLAN_REM_ENABLE;
5235 DP(NETIF_MSG_IFUP, "vlan removal enabled\n");
5239 for_each_queue(bp, i) {
5240 tstorm_client.statistics_counter_id = bp->fp[i].cl_id;
5242 REG_WR(bp, BAR_TSTRORM_INTMEM +
5243 TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id),
5244 ((u32 *)&tstorm_client)[0]);
5245 REG_WR(bp, BAR_TSTRORM_INTMEM +
5246 TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id) + 4,
5247 ((u32 *)&tstorm_client)[1]);
5250 DP(BNX2X_MSG_OFF, "tstorm_client: 0x%08x 0x%08x\n",
5251 ((u32 *)&tstorm_client)[0], ((u32 *)&tstorm_client)[1]);
5254 static void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
5256 struct tstorm_eth_mac_filter_config tstorm_mac_filter = {0};
5257 int mode = bp->rx_mode;
5258 int mask = bp->rx_mode_cl_mask;
5259 int func = BP_FUNC(bp);
5260 int port = BP_PORT(bp);
5262 /* All but management unicast packets should pass to the host as well */
5263 u32 llh_mask =
5264 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_BRCST |
5265 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_MLCST |
5266 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_VLAN |
5267 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_NO_VLAN;
5269 DP(NETIF_MSG_IFUP, "rx mode %d mask 0x%x\n", mode, mask);
5271 switch (mode) {
5272 case BNX2X_RX_MODE_NONE: /* no Rx */
5273 tstorm_mac_filter.ucast_drop_all = mask;
5274 tstorm_mac_filter.mcast_drop_all = mask;
5275 tstorm_mac_filter.bcast_drop_all = mask;
5276 break;
5278 case BNX2X_RX_MODE_NORMAL:
5279 tstorm_mac_filter.bcast_accept_all = mask;
5280 break;
5282 case BNX2X_RX_MODE_ALLMULTI:
5283 tstorm_mac_filter.mcast_accept_all = mask;
5284 tstorm_mac_filter.bcast_accept_all = mask;
5285 break;
5287 case BNX2X_RX_MODE_PROMISC:
5288 tstorm_mac_filter.ucast_accept_all = mask;
5289 tstorm_mac_filter.mcast_accept_all = mask;
5290 tstorm_mac_filter.bcast_accept_all = mask;
5291 /* pass management unicast packets as well */
5292 llh_mask |= NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_UNCST;
5293 break;
5295 default:
5296 BNX2X_ERR("BAD rx mode (%d)\n", mode);
5297 break;
5298 }
5300 REG_WR(bp,
5301 (port ? NIG_REG_LLH1_BRB1_DRV_MASK : NIG_REG_LLH0_BRB1_DRV_MASK),
5302 llh_mask);
5304 for (i = 0; i < sizeof(struct tstorm_eth_mac_filter_config)/4; i++) {
5305 REG_WR(bp, BAR_TSTRORM_INTMEM +
5306 TSTORM_MAC_FILTER_CONFIG_OFFSET(func) + i * 4,
5307 ((u32 *)&tstorm_mac_filter)[i]);
5309 /* DP(NETIF_MSG_IFUP, "tstorm_mac_filter[%d]: 0x%08x\n", i,
5310 ((u32 *)&tstorm_mac_filter)[i]); */
5313 if (mode != BNX2X_RX_MODE_NONE)
5314 bnx2x_set_client_config(bp);
5317 static void bnx2x_init_internal_common(struct bnx2x *bp)
5321 /* Zero this manually as its initialization is
5322 currently missing in the initTool */
5323 for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++)
5324 REG_WR(bp, BAR_USTRORM_INTMEM +
5325 USTORM_AGG_DATA_OFFSET + i * 4, 0);
5328 static void bnx2x_init_internal_port(struct bnx2x *bp)
5330 int port = BP_PORT(bp);
5333 BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_U_OFFSET(port), BNX2X_BTR);
5335 BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_C_OFFSET(port), BNX2X_BTR);
5336 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
5337 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
5340 static void bnx2x_init_internal_func(struct bnx2x *bp)
5342 struct tstorm_eth_function_common_config tstorm_config = {0};
5343 struct stats_indication_flags stats_flags = {0};
5344 int port = BP_PORT(bp);
5345 int func = BP_FUNC(bp);
5346 int i, j;
5347 u32 offset;
5348 u16 max_agg_size;
5350 if (is_multi(bp)) {
5351 tstorm_config.config_flags = MULTI_FLAGS(bp);
5352 tstorm_config.rss_result_mask = MULTI_MASK;
5353 }
5355 /* Enable TPA if needed */
5356 if (bp->flags & TPA_ENABLE_FLAG)
5357 tstorm_config.config_flags |=
5358 TSTORM_ETH_FUNCTION_COMMON_CONFIG_ENABLE_TPA;
5360 if (IS_E1HMF(bp))
5361 tstorm_config.config_flags |=
5362 TSTORM_ETH_FUNCTION_COMMON_CONFIG_E1HOV_IN_CAM;
5364 tstorm_config.leading_client_id = BP_L_ID(bp);
5366 REG_WR(bp, BAR_TSTRORM_INTMEM +
5367 TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(func),
5368 (*(u32 *)&tstorm_config));
5370 bp->rx_mode = BNX2X_RX_MODE_NONE; /* no rx until link is up */
5371 bp->rx_mode_cl_mask = (1 << BP_L_ID(bp));
5372 bnx2x_set_storm_rx_mode(bp);
5374 for_each_queue(bp, i) {
5375 u8 cl_id = bp->fp[i].cl_id;
5377 /* reset xstorm per client statistics */
5378 offset = BAR_XSTRORM_INTMEM +
5379 XSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
5380 for (j = 0;
5381 j < sizeof(struct xstorm_per_client_stats) / 4; j++)
5382 REG_WR(bp, offset + j*4, 0);
5384 /* reset tstorm per client statistics */
5385 offset = BAR_TSTRORM_INTMEM +
5386 TSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
5387 for (j = 0;
5388 j < sizeof(struct tstorm_per_client_stats) / 4; j++)
5389 REG_WR(bp, offset + j*4, 0);
5391 /* reset ustorm per client statistics */
5392 offset = BAR_USTRORM_INTMEM +
5393 USTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
5394 for (j = 0;
5395 j < sizeof(struct ustorm_per_client_stats) / 4; j++)
5396 REG_WR(bp, offset + j*4, 0);
5399 /* Init statistics related context */
5400 stats_flags.collect_eth = 1;
5402 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func),
5403 ((u32 *)&stats_flags)[0]);
5404 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func) + 4,
5405 ((u32 *)&stats_flags)[1]);
5407 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func),
5408 ((u32 *)&stats_flags)[0]);
5409 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func) + 4,
5410 ((u32 *)&stats_flags)[1]);
5412 REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(func),
5413 ((u32 *)&stats_flags)[0]);
5414 REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(func) + 4,
5415 ((u32 *)&stats_flags)[1]);
5417 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func),
5418 ((u32 *)&stats_flags)[0]);
5419 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func) + 4,
5420 ((u32 *)&stats_flags)[1]);
5422 REG_WR(bp, BAR_XSTRORM_INTMEM +
5423 XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
5424 U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
5425 REG_WR(bp, BAR_XSTRORM_INTMEM +
5426 XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
5427 U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
5429 REG_WR(bp, BAR_TSTRORM_INTMEM +
5430 TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
5431 U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
5432 REG_WR(bp, BAR_TSTRORM_INTMEM +
5433 TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
5434 U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
5436 REG_WR(bp, BAR_USTRORM_INTMEM +
5437 USTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
5438 U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
5439 REG_WR(bp, BAR_USTRORM_INTMEM +
5440 USTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
5441 U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
5443 if (CHIP_IS_E1H(bp)) {
5444 REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNCTION_MODE_OFFSET,
5445 IS_E1HMF(bp));
5446 REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNCTION_MODE_OFFSET,
5447 IS_E1HMF(bp));
5448 REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNCTION_MODE_OFFSET,
5449 IS_E1HMF(bp));
5450 REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNCTION_MODE_OFFSET,
5451 IS_E1HMF(bp));
5453 REG_WR16(bp, BAR_XSTRORM_INTMEM + XSTORM_E1HOV_OFFSET(func),
5454 bp->e1hov);
5455 }
5457 /* Init CQ ring mapping and aggregation size, the FW limit is 8 frags */
5458 max_agg_size =
5459 min((u32)(min((u32)8, (u32)MAX_SKB_FRAGS) *
5460 SGE_PAGE_SIZE * PAGES_PER_SGE),
5461 (u32)0xffff);
5462 for_each_rx_queue(bp, i) {
5463 struct bnx2x_fastpath *fp = &bp->fp[i];
5465 REG_WR(bp, BAR_USTRORM_INTMEM +
5466 USTORM_CQE_PAGE_BASE_OFFSET(port, fp->cl_id),
5467 U64_LO(fp->rx_comp_mapping));
5468 REG_WR(bp, BAR_USTRORM_INTMEM +
5469 USTORM_CQE_PAGE_BASE_OFFSET(port, fp->cl_id) + 4,
5470 U64_HI(fp->rx_comp_mapping));
5473 REG_WR(bp, BAR_USTRORM_INTMEM +
5474 USTORM_CQE_PAGE_NEXT_OFFSET(port, fp->cl_id),
5475 U64_LO(fp->rx_comp_mapping + BCM_PAGE_SIZE));
5476 REG_WR(bp, BAR_USTRORM_INTMEM +
5477 USTORM_CQE_PAGE_NEXT_OFFSET(port, fp->cl_id) + 4,
5478 U64_HI(fp->rx_comp_mapping + BCM_PAGE_SIZE));
5480 REG_WR16(bp, BAR_USTRORM_INTMEM +
5481 USTORM_MAX_AGG_SIZE_OFFSET(port, fp->cl_id),
5482 max_agg_size);
5483 }
5485 /* dropless flow control */
5486 if (CHIP_IS_E1H(bp)) {
5487 struct ustorm_eth_rx_pause_data_e1h rx_pause = {0};
5489 rx_pause.bd_thr_low = 250;
5490 rx_pause.cqe_thr_low = 250;
5492 rx_pause.sge_thr_low = 0;
5493 rx_pause.bd_thr_high = 350;
5494 rx_pause.cqe_thr_high = 350;
5495 rx_pause.sge_thr_high = 0;
5497 for_each_rx_queue(bp, i) {
5498 struct bnx2x_fastpath *fp = &bp->fp[i];
5500 if (!fp->disable_tpa) {
5501 rx_pause.sge_thr_low = 150;
5502 rx_pause.sge_thr_high = 250;
5506 offset = BAR_USTRORM_INTMEM +
5507 USTORM_ETH_RING_PAUSE_DATA_OFFSET(port,
5508 fp->cl_id);
5509 for (j = 0;
5510 j < sizeof(struct ustorm_eth_rx_pause_data_e1h)/4;
5511 j++)
5512 REG_WR(bp, offset + j*4,
5513 ((u32 *)&rx_pause)[j]);
5517 memset(&(bp->cmng), 0, sizeof(struct cmng_struct_per_port));
5519 /* Init rate shaping and fairness contexts */
5520 if (IS_E1HMF(bp)) {
5521 int vn;
5523 /* During init there is no active link
5524 Until link is up, set link rate to 10Gbps */
5525 bp->link_vars.line_speed = SPEED_10000;
5526 bnx2x_init_port_minmax(bp);
5528 bnx2x_calc_vn_weight_sum(bp);
5530 for (vn = VN_0; vn < E1HVN_MAX; vn++)
5531 bnx2x_init_vn_minmax(bp, 2*vn + port);
5533 /* Enable rate shaping and fairness */
5534 bp->cmng.flags.cmng_enables =
5535 CMNG_FLAGS_PER_PORT_RATE_SHAPING_VN;
5536 if (bp->vn_weight_sum)
5537 bp->cmng.flags.cmng_enables |=
5538 CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
5539 else
5540 DP(NETIF_MSG_IFUP, "All MIN values are zeroes"
5541 " fairness will be disabled\n");
5542 } else {
5543 /* rate shaping and fairness are disabled */
5544 DP(NETIF_MSG_IFUP,
5545 "single function mode minmax will be disabled\n");
5546 }
5549 /* Store it to internal memory */
5550 if (bp->port.pmf)
5551 for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++)
5552 REG_WR(bp, BAR_XSTRORM_INTMEM +
5553 XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i * 4,
5554 ((u32 *)(&bp->cmng))[i]);
5557 static void bnx2x_init_internal(struct bnx2x *bp, u32 load_code)
5559 switch (load_code) {
5560 case FW_MSG_CODE_DRV_LOAD_COMMON:
5561 bnx2x_init_internal_common(bp);
5562 /* no break */
5564 case FW_MSG_CODE_DRV_LOAD_PORT:
5565 bnx2x_init_internal_port(bp);
5566 /* no break */
5568 case FW_MSG_CODE_DRV_LOAD_FUNCTION:
5569 bnx2x_init_internal_func(bp);
5570 break;
5572 default:
5573 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
5574 break;
5575 }
5578 static void bnx2x_nic_init(struct bnx2x *bp, u32 load_code)
5582 for_each_queue(bp, i) {
5583 struct bnx2x_fastpath *fp = &bp->fp[i];
5586 fp->state = BNX2X_FP_STATE_CLOSED;
5588 fp->cl_id = BP_L_ID(bp) + i;
5589 #ifdef BCM_CNIC
5590 fp->sb_id = fp->cl_id + 1;
5591 #else
5592 fp->sb_id = fp->cl_id;
5593 #endif
5594 /* Suitable Rx and Tx SBs are served by the same client */
5595 if (i >= bp->num_rx_queues)
5596 fp->cl_id -= bp->num_rx_queues;
5598 "queue[%d]: bnx2x_init_sb(%p,%p) cl_id %d sb %d\n",
5599 i, bp, fp->status_blk, fp->cl_id, fp->sb_id);
5600 bnx2x_init_sb(bp, fp->status_blk, fp->status_blk_mapping,
5601 fp->sb_id);
5602 bnx2x_update_fpsb_idx(fp);
5605 /* ensure status block indices were read */
5606 rmb();
5609 bnx2x_init_def_sb(bp, bp->def_status_blk, bp->def_status_blk_mapping,
5610 DEF_SB_ID);
5611 bnx2x_update_dsb_idx(bp);
5612 bnx2x_update_coalesce(bp);
5613 bnx2x_init_rx_rings(bp);
5614 bnx2x_init_tx_ring(bp);
5615 bnx2x_init_sp_ring(bp);
5616 bnx2x_init_context(bp);
5617 bnx2x_init_internal(bp, load_code);
5618 bnx2x_init_ind_table(bp);
5619 bnx2x_stats_init(bp);
5621 /* At this point, we are ready for interrupts */
5622 atomic_set(&bp->intr_sem, 0);
5624 /* flush all before enabling interrupts */
5625 mb();
5626 mmiowb();
5628 bnx2x_int_enable(bp);
5630 /* Check for SPIO5 */
5631 bnx2x_attn_int_deasserted0(bp,
5632 REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + BP_PORT(bp)*4) &
5633 AEU_INPUTS_ATTN_BITS_SPIO5);
5636 /* end of nic init */
5639 * gzip service functions
5642 static int bnx2x_gunzip_init(struct bnx2x *bp)
5644 bp->gunzip_buf = pci_alloc_consistent(bp->pdev, FW_BUF_SIZE,
5645 &bp->gunzip_mapping);
5646 if (bp->gunzip_buf == NULL)
5647 goto gunzip_nomem1;
5649 bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL);
5650 if (bp->strm == NULL)
5651 goto gunzip_nomem2;
5653 bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(),
5654 GFP_KERNEL);
5655 if (bp->strm->workspace == NULL)
5656 goto gunzip_nomem3;
5658 return 0;
5660 gunzip_nomem3:
5661 kfree(bp->strm);
5662 bp->strm = NULL;
5664 gunzip_nomem2:
5665 pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
5666 bp->gunzip_mapping);
5667 bp->gunzip_buf = NULL;
5669 gunzip_nomem1:
5670 printk(KERN_ERR PFX "%s: Cannot allocate firmware buffer for"
5671 " un-compression\n", bp->dev->name);
5672 return -ENOMEM;
5673 }
5675 static void bnx2x_gunzip_end(struct bnx2x *bp)
5677 kfree(bp->strm->workspace);
5678 kfree(bp->strm);
5679 bp->strm = NULL;
5682 if (bp->gunzip_buf) {
5683 pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
5684 bp->gunzip_mapping);
5685 bp->gunzip_buf = NULL;
5689 static int bnx2x_gunzip(struct bnx2x *bp, const u8 *zbuf, int len)
5691 int n, rc;
5693 /* check gzip header */
5694 if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED)) {
5695 BNX2X_ERR("Bad gzip header\n");
5696 return -EINVAL;
5697 }
5699 n = 10;
5701 #define FNAME 0x8
5703 if (zbuf[3] & FNAME)
5704 while ((zbuf[n++] != 0) && (n < len));
5706 bp->strm->next_in = (typeof(bp->strm->next_in))zbuf + n;
5707 bp->strm->avail_in = len - n;
5708 bp->strm->next_out = bp->gunzip_buf;
5709 bp->strm->avail_out = FW_BUF_SIZE;
5711 rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
5712 if (rc != Z_OK)
5713 return rc;
5715 rc = zlib_inflate(bp->strm, Z_FINISH);
5716 if ((rc != Z_OK) && (rc != Z_STREAM_END))
5717 printk(KERN_ERR PFX "%s: Firmware decompression error: %s\n",
5718 bp->dev->name, bp->strm->msg);
5720 bp->gunzip_outlen = (FW_BUF_SIZE - bp->strm->avail_out);
5721 if (bp->gunzip_outlen & 0x3)
5722 printk(KERN_ERR PFX "%s: Firmware decompression error:"
5723 " gunzip_outlen (%d) not aligned\n",
5724 bp->dev->name, bp->gunzip_outlen);
5725 bp->gunzip_outlen >>= 2;
5727 zlib_inflateEnd(bp->strm);
5729 if (rc == Z_STREAM_END)
5730 return 0;
5731 else
5732 return rc;
5733 }
5735 /* nic load/unload */
5738 * General service functions
5741 /* send a NIG loopback debug packet */
5742 static void bnx2x_lb_pckt(struct bnx2x *bp)
5744 u32 wb_write[3];
5746 /* Ethernet source and destination addresses */
5747 wb_write[0] = 0x55555555;
5748 wb_write[1] = 0x55555555;
5749 wb_write[2] = 0x20; /* SOP */
5750 REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
5752 /* NON-IP protocol */
5753 wb_write[0] = 0x09000000;
5754 wb_write[1] = 0x55555555;
5755 wb_write[2] = 0x10; /* EOP, eop_bvalid = 0 */
5756 REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
5759 /* some of the internal memories
5760 * are not directly readable from the driver
5761 * to test them we send debug packets
5763 static int bnx2x_int_mem_test(struct bnx2x *bp)
5765 int factor;
5766 int count, i;
5767 u32 val = 0;
5769 if (CHIP_REV_IS_FPGA(bp))
5770 factor = 120;
5771 else if (CHIP_REV_IS_EMUL(bp))
5772 factor = 200;
5773 else
5774 factor = 1;
5776 DP(NETIF_MSG_HW, "start part1\n");
5778 /* Disable inputs of parser neighbor blocks */
5779 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
5780 REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
5781 REG_WR(bp, CFC_REG_DEBUG0, 0x1);
5782 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
5784 /* Write 0 to parser credits for CFC search request */
5785 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
5787 /* send Ethernet packet */
5790 /* TODO: do we need to reset the NIG statistics here? */
5791 /* Wait until NIG register shows 1 packet of size 0x10 */
5792 count = 1000 * factor;
5793 while (count) {
5795 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5796 val = *bnx2x_sp(bp, wb_data[0]);
5797 if (val == 0x10)
5798 break;
5800 msleep(10);
5801 count--;
5802 }
5803 if (val != 0x10) {
5804 BNX2X_ERR("NIG timeout val = 0x%x\n", val);
5805 return -1;
5806 }
5808 /* Wait until PRS register shows 1 packet */
5809 count = 1000 * factor;
5810 while (count) {
5811 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5812 if (val == 1)
5813 break;
5815 msleep(10);
5816 count--;
5817 }
5818 if (val != 0x1) {
5819 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
5820 return -2;
5821 }
5823 /* Reset and init BRB, PRS */
5824 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
5826 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
5828 bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
5829 bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
5831 DP(NETIF_MSG_HW, "part2\n");
5833 /* Disable inputs of parser neighbor blocks */
5834 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
5835 REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
5836 REG_WR(bp, CFC_REG_DEBUG0, 0x1);
5837 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
5839 /* Write 0 to parser credits for CFC search request */
5840 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
5842 /* send 10 Ethernet packets */
5843 for (i = 0; i < 10; i++)
5846 /* Wait until NIG register shows 10 + 1
5847 packets of size 11*0x10 = 0xb0 */
5848 count = 1000 * factor;
5849 while (count) {
5851 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5852 val = *bnx2x_sp(bp, wb_data[0]);
5853 if (val == 0xb0)
5854 break;
5856 msleep(10);
5857 count--;
5858 }
5859 if (val != 0xb0) {
5860 BNX2X_ERR("NIG timeout val = 0x%x\n", val);
5861 return -3;
5862 }
5864 /* Wait until PRS register shows 2 packets */
5865 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5866 if (val != 2)
5867 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
5869 /* Write 1 to parser credits for CFC search request */
5870 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x1);
5872 /* Wait until PRS register shows 3 packets */
5873 msleep(10 * factor);
5874 /* Wait until NIG register shows 1 packet of size 0x10 */
5875 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5876 if (val != 3)
5877 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
5879 /* clear NIG EOP FIFO */
5880 for (i = 0; i < 11; i++)
5881 REG_RD(bp, NIG_REG_INGRESS_EOP_LB_FIFO);
5882 val = REG_RD(bp, NIG_REG_INGRESS_EOP_LB_EMPTY);
5883 if (val != 1) {
5884 BNX2X_ERR("clear of NIG failed\n");
5885 return -4;
5886 }
5888 /* Reset and init BRB, PRS, NIG */
5889 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
5891 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
5893 bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
5894 bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
5896 /* set NIC mode */
5897 REG_WR(bp, PRS_REG_NIC_MODE, 1);
5900 /* Enable inputs of parser neighbor blocks */
5901 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x7fffffff);
5902 REG_WR(bp, TCM_REG_PRS_IFEN, 0x1);
5903 REG_WR(bp, CFC_REG_DEBUG0, 0x0);
5904 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x1);
5906 DP(NETIF_MSG_HW, "done\n");
5911 static void enable_blocks_attention(struct bnx2x *bp)
5913 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
5914 REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0);
5915 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
5916 REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
5917 REG_WR(bp, QM_REG_QM_INT_MASK, 0);
5918 REG_WR(bp, TM_REG_TM_INT_MASK, 0);
5919 REG_WR(bp, XSDM_REG_XSDM_INT_MASK_0, 0);
5920 REG_WR(bp, XSDM_REG_XSDM_INT_MASK_1, 0);
5921 REG_WR(bp, XCM_REG_XCM_INT_MASK, 0);
5922 /* REG_WR(bp, XSEM_REG_XSEM_INT_MASK_0, 0); */
5923 /* REG_WR(bp, XSEM_REG_XSEM_INT_MASK_1, 0); */
5924 REG_WR(bp, USDM_REG_USDM_INT_MASK_0, 0);
5925 REG_WR(bp, USDM_REG_USDM_INT_MASK_1, 0);
5926 REG_WR(bp, UCM_REG_UCM_INT_MASK, 0);
5927 /* REG_WR(bp, USEM_REG_USEM_INT_MASK_0, 0); */
5928 /* REG_WR(bp, USEM_REG_USEM_INT_MASK_1, 0); */
5929 REG_WR(bp, GRCBASE_UPB + PB_REG_PB_INT_MASK, 0);
5930 REG_WR(bp, CSDM_REG_CSDM_INT_MASK_0, 0);
5931 REG_WR(bp, CSDM_REG_CSDM_INT_MASK_1, 0);
5932 REG_WR(bp, CCM_REG_CCM_INT_MASK, 0);
5933 /* REG_WR(bp, CSEM_REG_CSEM_INT_MASK_0, 0); */
5934 /* REG_WR(bp, CSEM_REG_CSEM_INT_MASK_1, 0); */
5935 if (CHIP_REV_IS_FPGA(bp))
5936 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x580000);
5938 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x480000);
5939 REG_WR(bp, TSDM_REG_TSDM_INT_MASK_0, 0);
5940 REG_WR(bp, TSDM_REG_TSDM_INT_MASK_1, 0);
5941 REG_WR(bp, TCM_REG_TCM_INT_MASK, 0);
5942 /* REG_WR(bp, TSEM_REG_TSEM_INT_MASK_0, 0); */
5943 /* REG_WR(bp, TSEM_REG_TSEM_INT_MASK_1, 0); */
5944 REG_WR(bp, CDU_REG_CDU_INT_MASK, 0);
5945 REG_WR(bp, DMAE_REG_DMAE_INT_MASK, 0);
5946 /* REG_WR(bp, MISC_REG_MISC_INT_MASK, 0); */
5947 REG_WR(bp, PBF_REG_PBF_INT_MASK, 0x18); /* bits 3,4 masked */
5951 static void bnx2x_reset_common(struct bnx2x *bp)
5954 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
5956 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, 0x1403);
5959 static void bnx2x_init_pxp(struct bnx2x *bp)
5962 int r_order, w_order;
5964 pci_read_config_word(bp->pdev,
5965 bp->pcie_cap + PCI_EXP_DEVCTL, &devctl);
5966 DP(NETIF_MSG_HW, "read 0x%x from devctl\n", devctl);
5967 w_order = ((devctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5);
5969 r_order = ((devctl & PCI_EXP_DEVCTL_READRQ) >> 12);
5971 DP(NETIF_MSG_HW, "force read order to %d\n", bp->mrrs);
5975 bnx2x_init_pxp_arb(bp, r_order, w_order);
5978 static void bnx2x_setup_fan_failure_detection(struct bnx2x *bp)
5980 int is_required = 0;
5981 int port;
5982 u32 val;
5984 val = SHMEM_RD(bp, dev_info.shared_hw_config.config2) &
5985 SHARED_HW_CFG_FAN_FAILURE_MASK;
5987 if (val == SHARED_HW_CFG_FAN_FAILURE_ENABLED)
5988 is_required = 1;
5990 /*
5991 * The fan failure mechanism is usually related to the PHY type since
5992 * the power consumption of the board is affected by the PHY. Currently,
5993 * fan is required for most designs with SFX7101, BCM8727 and BCM8481.
5994 */
5995 else if (val == SHARED_HW_CFG_FAN_FAILURE_PHY_TYPE)
5996 for (port = PORT_0; port < PORT_MAX; port++) {
5997 u32 phy_type =
5998 SHMEM_RD(bp, dev_info.port_hw_config[port].
5999 external_phy_config) &
6000 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
6001 is_required |=
6002 ((phy_type ==
6003 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101) ||
6004 (phy_type ==
6005 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727) ||
6006 (phy_type ==
6007 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481));
6008 }
6010 DP(NETIF_MSG_HW, "fan detection setting: %d\n", is_required);
6012 if (is_required == 0)
6013 return;
6015 /* Fan failure is indicated by SPIO 5 */
6016 bnx2x_set_spio(bp, MISC_REGISTERS_SPIO_5,
6017 MISC_REGISTERS_SPIO_INPUT_HI_Z);
6019 /* set to active low mode */
6020 val = REG_RD(bp, MISC_REG_SPIO_INT);
6021 val |= ((1 << MISC_REGISTERS_SPIO_5) <<
6022 MISC_REGISTERS_SPIO_INT_OLD_SET_POS);
6023 REG_WR(bp, MISC_REG_SPIO_INT, val);
6025 /* enable interrupt to signal the IGU */
6026 val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN);
6027 val |= (1 << MISC_REGISTERS_SPIO_5);
6028 REG_WR(bp, MISC_REG_SPIO_EVENT_EN, val);
6031 static int bnx2x_init_common(struct bnx2x *bp)
6038 DP(BNX2X_MSG_MCP, "starting common init func %d\n", BP_FUNC(bp));
6040 bnx2x_reset_common(bp);
6041 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0xffffffff);
6042 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, 0xfffc);
6044 bnx2x_init_block(bp, MISC_BLOCK, COMMON_STAGE);
6045 if (CHIP_IS_E1H(bp))
6046 REG_WR(bp, MISC_REG_E1HMF_MODE, IS_E1HMF(bp));
6048 REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x100);
6049 msleep(30);
6050 REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x0);
6052 bnx2x_init_block(bp, PXP_BLOCK, COMMON_STAGE);
6053 if (CHIP_IS_E1(bp)) {
6054 /* enable HW interrupt from PXP on USDM overflow
6055 bit 16 on INT_MASK_0 */
6056 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
6059 bnx2x_init_block(bp, PXP2_BLOCK, COMMON_STAGE);
6062 #ifdef __BIG_ENDIAN
6063 REG_WR(bp, PXP2_REG_RQ_QM_ENDIAN_M, 1);
6064 REG_WR(bp, PXP2_REG_RQ_TM_ENDIAN_M, 1);
6065 REG_WR(bp, PXP2_REG_RQ_SRC_ENDIAN_M, 1);
6066 REG_WR(bp, PXP2_REG_RQ_CDU_ENDIAN_M, 1);
6067 REG_WR(bp, PXP2_REG_RQ_DBG_ENDIAN_M, 1);
6068 /* make sure this value is 0 */
6069 REG_WR(bp, PXP2_REG_RQ_HC_ENDIAN_M, 0);
6071 /* REG_WR(bp, PXP2_REG_RD_PBF_SWAP_MODE, 1); */
6072 REG_WR(bp, PXP2_REG_RD_QM_SWAP_MODE, 1);
6073 REG_WR(bp, PXP2_REG_RD_TM_SWAP_MODE, 1);
6074 REG_WR(bp, PXP2_REG_RD_SRC_SWAP_MODE, 1);
6075 REG_WR(bp, PXP2_REG_RD_CDURD_SWAP_MODE, 1);
6076 #endif
6078 REG_WR(bp, PXP2_REG_RQ_CDU_P_SIZE, 2);
6080 REG_WR(bp, PXP2_REG_RQ_TM_P_SIZE, 5);
6081 REG_WR(bp, PXP2_REG_RQ_QM_P_SIZE, 5);
6082 REG_WR(bp, PXP2_REG_RQ_SRC_P_SIZE, 5);
6085 if (CHIP_REV_IS_FPGA(bp) && CHIP_IS_E1H(bp))
6086 REG_WR(bp, PXP2_REG_PGL_TAGS_LIMIT, 0x1);
6088 /* let the HW do its magic ... */
6089 msleep(100);
6090 /* finish PXP init */
6091 val = REG_RD(bp, PXP2_REG_RQ_CFG_DONE);
6092 if (val != 1) {
6093 BNX2X_ERR("PXP2 CFG failed\n");
6094 return -EBUSY;
6095 }
6096 val = REG_RD(bp, PXP2_REG_RD_INIT_DONE);
6097 if (val != 1) {
6098 BNX2X_ERR("PXP2 RD_INIT failed\n");
6099 return -EBUSY;
6100 }
6102 REG_WR(bp, PXP2_REG_RQ_DISABLE_INPUTS, 0);
6103 REG_WR(bp, PXP2_REG_RD_DISABLE_INPUTS, 0);
6105 bnx2x_init_block(bp, DMAE_BLOCK, COMMON_STAGE);
6107 /* clean the DMAE memory */
6108 bp->dmae_ready = 1;
6109 bnx2x_init_fill(bp, TSEM_REG_PRAM, 0, 8);
6111 bnx2x_init_block(bp, TCM_BLOCK, COMMON_STAGE);
6112 bnx2x_init_block(bp, UCM_BLOCK, COMMON_STAGE);
6113 bnx2x_init_block(bp, CCM_BLOCK, COMMON_STAGE);
6114 bnx2x_init_block(bp, XCM_BLOCK, COMMON_STAGE);
6116 bnx2x_read_dmae(bp, XSEM_REG_PASSIVE_BUFFER, 3);
6117 bnx2x_read_dmae(bp, CSEM_REG_PASSIVE_BUFFER, 3);
6118 bnx2x_read_dmae(bp, TSEM_REG_PASSIVE_BUFFER, 3);
6119 bnx2x_read_dmae(bp, USEM_REG_PASSIVE_BUFFER, 3);
6121 bnx2x_init_block(bp, QM_BLOCK, COMMON_STAGE);
6126 for (i = 0; i < 64; i++) {
6127 REG_WR(bp, QM_REG_BASEADDR + i*4, 1024 * 4 * (i%16));
6128 bnx2x_init_ind_wr(bp, QM_REG_PTRTBL + i*8, wb_write, 2);
6130 if (CHIP_IS_E1H(bp)) {
6131 REG_WR(bp, QM_REG_BASEADDR_EXT_A + i*4, 1024*4*(i%16));
6132 bnx2x_init_ind_wr(bp, QM_REG_PTRTBL_EXT_A + i*8,
6137 /* soft reset pulse */
6138 REG_WR(bp, QM_REG_SOFT_RESET, 1);
6139 REG_WR(bp, QM_REG_SOFT_RESET, 0);
6142 bnx2x_init_block(bp, TIMERS_BLOCK, COMMON_STAGE);
6145 bnx2x_init_block(bp, DQ_BLOCK, COMMON_STAGE);
6146 REG_WR(bp, DORQ_REG_DPM_CID_OFST, BCM_PAGE_SHIFT);
6147 if (!CHIP_REV_IS_SLOW(bp)) {
6148 /* enable hw interrupt from doorbell Q */
6149 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
6152 bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
6153 bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
6154 REG_WR(bp, PRS_REG_A_PRSU_20, 0xf);
6156 /* set NIC mode */
6157 REG_WR(bp, PRS_REG_NIC_MODE, 1);
6159 if (CHIP_IS_E1H(bp))
6160 REG_WR(bp, PRS_REG_E1HOV_MODE, IS_E1HMF(bp));
6162 bnx2x_init_block(bp, TSDM_BLOCK, COMMON_STAGE);
6163 bnx2x_init_block(bp, CSDM_BLOCK, COMMON_STAGE);
6164 bnx2x_init_block(bp, USDM_BLOCK, COMMON_STAGE);
6165 bnx2x_init_block(bp, XSDM_BLOCK, COMMON_STAGE);
6167 bnx2x_init_fill(bp, TSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
6168 bnx2x_init_fill(bp, USEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
6169 bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
6170 bnx2x_init_fill(bp, XSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
6172 bnx2x_init_block(bp, TSEM_BLOCK, COMMON_STAGE);
6173 bnx2x_init_block(bp, USEM_BLOCK, COMMON_STAGE);
6174 bnx2x_init_block(bp, CSEM_BLOCK, COMMON_STAGE);
6175 bnx2x_init_block(bp, XSEM_BLOCK, COMMON_STAGE);
6178 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
6180 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
6183 bnx2x_init_block(bp, UPB_BLOCK, COMMON_STAGE);
6184 bnx2x_init_block(bp, XPB_BLOCK, COMMON_STAGE);
6185 bnx2x_init_block(bp, PBF_BLOCK, COMMON_STAGE);
6187 REG_WR(bp, SRC_REG_SOFT_RST, 1);
6188 for (i = SRC_REG_KEYRSS0_0; i <= SRC_REG_KEYRSS1_9; i += 4) {
6189 REG_WR(bp, i, 0xc0cac01a);
6190 /* TODO: replace with something meaningful */
6192 bnx2x_init_block(bp, SRCH_BLOCK, COMMON_STAGE);
6193 #ifdef BCM_CNIC
6194 REG_WR(bp, SRC_REG_KEYSEARCH_0, 0x63285672);
6195 REG_WR(bp, SRC_REG_KEYSEARCH_1, 0x24b8f2cc);
6196 REG_WR(bp, SRC_REG_KEYSEARCH_2, 0x223aef9b);
6197 REG_WR(bp, SRC_REG_KEYSEARCH_3, 0x26001e3a);
6198 REG_WR(bp, SRC_REG_KEYSEARCH_4, 0x7ae91116);
6199 REG_WR(bp, SRC_REG_KEYSEARCH_5, 0x5ce5230b);
6200 REG_WR(bp, SRC_REG_KEYSEARCH_6, 0x298d8adf);
6201 REG_WR(bp, SRC_REG_KEYSEARCH_7, 0x6eb0ff09);
6202 REG_WR(bp, SRC_REG_KEYSEARCH_8, 0x1830f82f);
6203 REG_WR(bp, SRC_REG_KEYSEARCH_9, 0x01e46be7);
6204 #endif
6205 REG_WR(bp, SRC_REG_SOFT_RST, 0);
6207 if (sizeof(union cdu_context) != 1024)
6208 /* we currently assume that a context is 1024 bytes */
6209 printk(KERN_ALERT PFX "please adjust the size of"
6210 " cdu_context(%ld)\n", (long)sizeof(union cdu_context));
6212 bnx2x_init_block(bp, CDU_BLOCK, COMMON_STAGE);
6213 val = (4 << 24) + (0 << 12) + 1024;
6214 REG_WR(bp, CDU_REG_CDU_GLOBAL_PARAMS, val);
6216 bnx2x_init_block(bp, CFC_BLOCK, COMMON_STAGE);
6217 REG_WR(bp, CFC_REG_INIT_REG, 0x7FF);
6218 /* enable context validation interrupt from CFC */
6219 REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
6221 /* set the thresholds to prevent CFC/CDU race */
6222 REG_WR(bp, CFC_REG_DEBUG0, 0x20020000);
6224 bnx2x_init_block(bp, HC_BLOCK, COMMON_STAGE);
6225 bnx2x_init_block(bp, MISC_AEU_BLOCK, COMMON_STAGE);
6227 bnx2x_init_block(bp, PXPCS_BLOCK, COMMON_STAGE);
6228 /* Reset PCIE errors for debug */
6229 REG_WR(bp, 0x2814, 0xffffffff);
6230 REG_WR(bp, 0x3820, 0xffffffff);
6232 bnx2x_init_block(bp, EMAC0_BLOCK, COMMON_STAGE);
6233 bnx2x_init_block(bp, EMAC1_BLOCK, COMMON_STAGE);
6234 bnx2x_init_block(bp, DBU_BLOCK, COMMON_STAGE);
6235 bnx2x_init_block(bp, DBG_BLOCK, COMMON_STAGE);
6237 bnx2x_init_block(bp, NIG_BLOCK, COMMON_STAGE);
6238 if (CHIP_IS_E1H(bp)) {
6239 REG_WR(bp, NIG_REG_LLH_MF_MODE, IS_E1HMF(bp));
6240 REG_WR(bp, NIG_REG_LLH_E1HOV_MODE, IS_E1HMF(bp));
6243 if (CHIP_REV_IS_SLOW(bp))
6244 msleep(200);
6246 /* finish CFC init */
6247 val = reg_poll(bp, CFC_REG_LL_INIT_DONE, 1, 100, 10);
6248 if (val != 1) {
6249 BNX2X_ERR("CFC LL_INIT failed\n");
6250 return -EBUSY;
6251 }
6252 val = reg_poll(bp, CFC_REG_AC_INIT_DONE, 1, 100, 10);
6253 if (val != 1) {
6254 BNX2X_ERR("CFC AC_INIT failed\n");
6255 return -EBUSY;
6256 }
6257 val = reg_poll(bp, CFC_REG_CAM_INIT_DONE, 1, 100, 10);
6258 if (val != 1) {
6259 BNX2X_ERR("CFC CAM_INIT failed\n");
6260 return -EBUSY;
6261 }
6262 REG_WR(bp, CFC_REG_DEBUG0, 0);
6264 /* read NIG statistic
6265 to see if this is our first up since powerup */
6266 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
6267 val = *bnx2x_sp(bp, wb_data[0]);
6269 /* do internal memory self test */
6270 if ((CHIP_IS_E1(bp)) && (val == 0) && bnx2x_int_mem_test(bp)) {
6271 BNX2X_ERR("internal mem self test failed\n");
6272 return -EBUSY;
6273 }
6275 switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
6276 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
6277 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
6278 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
6279 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
6280 bp->port.need_hw_lock = 1;
6287 bnx2x_setup_fan_failure_detection(bp);
6289 /* clear PXP2 attentions */
6290 REG_RD(bp, PXP2_REG_PXP2_INT_STS_CLR_0);
6292 enable_blocks_attention(bp);
6294 if (!BP_NOMCP(bp)) {
6295 bnx2x_acquire_phy_lock(bp);
6296 bnx2x_common_init_phy(bp, bp->common.shmem_base);
6297 bnx2x_release_phy_lock(bp);
6299 BNX2X_ERR("Bootcode is missing - can not initialize link\n");
6304 static int bnx2x_init_port(struct bnx2x *bp)
6306 int port = BP_PORT(bp);
6307 int init_stage = port ? PORT1_STAGE : PORT0_STAGE;
6311 DP(BNX2X_MSG_MCP, "starting port init port %x\n", port);
6313 REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
6315 bnx2x_init_block(bp, PXP_BLOCK, init_stage);
6316 bnx2x_init_block(bp, PXP2_BLOCK, init_stage);
6318 bnx2x_init_block(bp, TCM_BLOCK, init_stage);
6319 bnx2x_init_block(bp, UCM_BLOCK, init_stage);
6320 bnx2x_init_block(bp, CCM_BLOCK, init_stage);
6321 bnx2x_init_block(bp, XCM_BLOCK, init_stage);
6324 REG_WR(bp, QM_REG_CONNNUM_0 + port*4, 1024/16 - 1);
6326 bnx2x_init_block(bp, TIMERS_BLOCK, init_stage);
6327 REG_WR(bp, TM_REG_LIN0_SCAN_TIME + port*4, 20);
6328 REG_WR(bp, TM_REG_LIN0_MAX_ACTIVE_CID + port*4, 31);
6330 bnx2x_init_block(bp, DQ_BLOCK, init_stage);
6332 bnx2x_init_block(bp, BRB1_BLOCK, init_stage);
6333 if (CHIP_REV_IS_SLOW(bp) && !CHIP_IS_E1H(bp)) {
6334 /* no pause for emulation and FPGA */
6339 low = ((bp->flags & ONE_PORT_FLAG) ? 160 : 246);
6340 else if (bp->dev->mtu > 4096) {
6341 if (bp->flags & ONE_PORT_FLAG)
6345 /* (24*1024 + val*4)/256 */
6346 low = 96 + (val/64) + ((val % 64) ? 1 : 0);
6349 low = ((bp->flags & ONE_PORT_FLAG) ? 80 : 160);
6350 high = low + 56; /* 14*1024/256 */
6352 REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_0 + port*4, low);
6353 REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_0 + port*4, high);
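/* Annotation (not driver code): the pause thresholds above are in
 * 256-byte BRB blocks, as the inline comments suggest: the large-MTU
 * branch computes low = 96 + ceil(val/64), i.e. (24*1024 + val*4)/256
 * blocks, and in the default branch high sits 14KB (56 blocks) above
 * low. */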
6356 bnx2x_init_block(bp, PRS_BLOCK, init_stage);
6358 bnx2x_init_block(bp, TSDM_BLOCK, init_stage);
6359 bnx2x_init_block(bp, CSDM_BLOCK, init_stage);
6360 bnx2x_init_block(bp, USDM_BLOCK, init_stage);
6361 bnx2x_init_block(bp, XSDM_BLOCK, init_stage);
6363 bnx2x_init_block(bp, TSEM_BLOCK, init_stage);
6364 bnx2x_init_block(bp, USEM_BLOCK, init_stage);
6365 bnx2x_init_block(bp, CSEM_BLOCK, init_stage);
6366 bnx2x_init_block(bp, XSEM_BLOCK, init_stage);
6368 bnx2x_init_block(bp, UPB_BLOCK, init_stage);
6369 bnx2x_init_block(bp, XPB_BLOCK, init_stage);
6371 bnx2x_init_block(bp, PBF_BLOCK, init_stage);
6373 /* configure PBF to work without PAUSE (MTU 9000) */
6374 REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 0);
6376 /* update threshold */
6377 REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, (9040/16));
6378 /* update init credit */
6379 REG_WR(bp, PBF_REG_P0_INIT_CRD + port*4, (9040/16) + 553 - 22);
6382 REG_WR(bp, PBF_REG_INIT_P0 + port*4, 1);
6384 REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0);
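/* Worked numbers (annotation, not driver code): with PAUSE disabled the
 * PBF gets an arbitration threshold of 9040/16 = 565 and an initial
 * credit of 565 + 553 - 22 = 1096, presumably in 16-byte units given the
 * /16 scaling - enough for one MTU-9000 frame plus overhead. The init
 * bit is then pulsed to latch the new credit. */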
6387 bnx2x_init_block(bp, SRCH_BLOCK, init_stage);
6389 bnx2x_init_block(bp, CDU_BLOCK, init_stage);
6390 bnx2x_init_block(bp, CFC_BLOCK, init_stage);
6392 if (CHIP_IS_E1(bp)) {
6393 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
6394 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
6396 bnx2x_init_block(bp, HC_BLOCK, init_stage);
6398 bnx2x_init_block(bp, MISC_AEU_BLOCK, init_stage);
6399 /* init aeu_mask_attn_func_0/1:
6400 * - SF mode: bits 3-7 are masked. only bits 0-2 are in use
6401 * - MF mode: bit 3 is masked. bits 0-2 are in use as in SF
6402 * bits 4-7 are used for "per vn group attention" */
6403 REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4,
6404 (IS_E1HMF(bp) ? 0xF7 : 0x7));
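/* Annotation (not driver code): the two masks follow directly from the
 * comment above - SF mode writes 0x07 (bits 0-2 enabled, 3-7 masked),
 * MF mode writes 0xF7 (only bit 3 masked; bits 4-7 serve as the per-vn
 * group attention bits). */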
6406 bnx2x_init_block(bp, PXPCS_BLOCK, init_stage);
6407 bnx2x_init_block(bp, EMAC0_BLOCK, init_stage);
6408 bnx2x_init_block(bp, EMAC1_BLOCK, init_stage);
6409 bnx2x_init_block(bp, DBU_BLOCK, init_stage);
6410 bnx2x_init_block(bp, DBG_BLOCK, init_stage);
6412 bnx2x_init_block(bp, NIG_BLOCK, init_stage);
6414 REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1);
6416 if (CHIP_IS_E1H(bp)) {
6417 /* 0x2 disable e1hov, 0x1 enable */
6418 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK_MF + port*4,
6419 (IS_E1HMF(bp) ? 0x1 : 0x2));
6422 REG_WR(bp, NIG_REG_LLFC_ENABLE_0 + port*4, 0);
6423 REG_WR(bp, NIG_REG_LLFC_OUT_EN_0 + port*4, 0);
6424 REG_WR(bp, NIG_REG_PAUSE_ENABLE_0 + port*4, 1);
6428 bnx2x_init_block(bp, MCP_BLOCK, init_stage);
6429 bnx2x_init_block(bp, DMAE_BLOCK, init_stage);
6431 switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
6432 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
6434 u32 swap_val, swap_override, aeu_gpio_mask, offset;
6436 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_3,
6437 MISC_REGISTERS_GPIO_INPUT_HI_Z, port);
6439 /* The GPIO should be swapped if the swap register is set and active */
6441 swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
6442 swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
6444 /* Select function upon port-swap configuration */
6446 offset = MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0;
6447 aeu_gpio_mask = (swap_val && swap_override) ?
6448 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1 :
6449 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0;
6451 offset = MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0;
6452 aeu_gpio_mask = (swap_val && swap_override) ?
6453 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 :
6454 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1;
6456 val = REG_RD(bp, offset);
6457 /* add GPIO3 to group */
6458 val |= aeu_gpio_mask;
6459 REG_WR(bp, offset, val);
6463 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
6464 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
6465 /* add SPIO 5 to group 0 */
6467 u32 reg_addr = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
6468 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
6469 val = REG_RD(bp, reg_addr);
6470 val |= AEU_INPUTS_ATTN_BITS_SPIO5;
6471 REG_WR(bp, reg_addr, val);
6479 bnx2x__link_reset(bp);
6484 #define ILT_PER_FUNC (768/2)
6485 #define FUNC_ILT_BASE(func) (func * ILT_PER_FUNC)
6486 /* the phys address is shifted right 12 bits and a 1=valid bit is
6487 added at bit 52 (the 53rd bit);
6488 then, since this is a wide register(TM),
6489 we split it into two 32-bit writes
6491 #define ONCHIP_ADDR1(x) ((u32)(((u64)x >> 12) & 0xFFFFFFFF))
6492 #define ONCHIP_ADDR2(x) ((u32)((1 << 20) | ((u64)x >> 44)))
6493 #define PXP_ONE_ILT(x) (((x) << 10) | x)
6494 #define PXP_ILT_RANGE(f, l) (((l) << 10) | f)
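/* Illustrative sketch (compiled out; the address is hypothetical): how
 * ONCHIP_ADDR1/ONCHIP_ADDR2 split a page-aligned DMA address into the
 * two 32-bit halves of the wide ILT register, with the valid bit landing
 * at bit 52 of the combined 64-bit value. */
#if 0
static void onchip_addr_example(void)
{
	u64 addr = 0x0000001234567000ULL;  /* hypothetical, page-aligned */
	u32 lo = ONCHIP_ADDR1(addr);       /* addr >> 12 = 0x01234567 */
	u32 hi = ONCHIP_ADDR2(addr);       /* (1 << 20) | (addr >> 44) = 0x00100000 */

	/* the two halves are then written as bnx2x_wb_wr(bp, reg, lo, hi) */
}
#endif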
6497 #define CNIC_ILT_LINES 127
6498 #define CNIC_CTX_PER_ILT 16
6500 #define CNIC_ILT_LINES 0
6503 static void bnx2x_ilt_wr(struct bnx2x *bp, u32 index, dma_addr_t addr)
6507 if (CHIP_IS_E1H(bp))
6508 reg = PXP2_REG_RQ_ONCHIP_AT_B0 + index*8;
6510 reg = PXP2_REG_RQ_ONCHIP_AT + index*8;
6512 bnx2x_wb_wr(bp, reg, ONCHIP_ADDR1(addr), ONCHIP_ADDR2(addr));
6515 static int bnx2x_init_func(struct bnx2x *bp)
6517 int port = BP_PORT(bp);
6518 int func = BP_FUNC(bp);
6522 DP(BNX2X_MSG_MCP, "starting func init func %x\n", func);
6524 /* set MSI reconfigure capability */
6525 addr = (port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0);
6526 val = REG_RD(bp, addr);
6527 val |= HC_CONFIG_0_REG_MSI_ATTN_EN_0;
6528 REG_WR(bp, addr, val);
6530 i = FUNC_ILT_BASE(func);
6532 bnx2x_ilt_wr(bp, i, bnx2x_sp_mapping(bp, context));
6533 if (CHIP_IS_E1H(bp)) {
6534 REG_WR(bp, PXP2_REG_RQ_CDU_FIRST_ILT, i);
6535 REG_WR(bp, PXP2_REG_RQ_CDU_LAST_ILT, i + CNIC_ILT_LINES);
6537 REG_WR(bp, PXP2_REG_PSWRQ_CDU0_L2P + func*4,
6538 PXP_ILT_RANGE(i, i + CNIC_ILT_LINES));
6541 i += 1 + CNIC_ILT_LINES;
6542 bnx2x_ilt_wr(bp, i, bp->timers_mapping);
6544 REG_WR(bp, PXP2_REG_PSWRQ_TM0_L2P + func*4, PXP_ONE_ILT(i));
6546 REG_WR(bp, PXP2_REG_RQ_TM_FIRST_ILT, i);
6547 REG_WR(bp, PXP2_REG_RQ_TM_LAST_ILT, i);
6551 bnx2x_ilt_wr(bp, i, bp->qm_mapping);
6553 REG_WR(bp, PXP2_REG_PSWRQ_QM0_L2P + func*4, PXP_ONE_ILT(i));
6555 REG_WR(bp, PXP2_REG_RQ_QM_FIRST_ILT, i);
6556 REG_WR(bp, PXP2_REG_RQ_QM_LAST_ILT, i);
6560 bnx2x_ilt_wr(bp, i, bp->t1_mapping);
6562 REG_WR(bp, PXP2_REG_PSWRQ_SRC0_L2P + func*4, PXP_ONE_ILT(i));
6564 REG_WR(bp, PXP2_REG_RQ_SRC_FIRST_ILT, i);
6565 REG_WR(bp, PXP2_REG_RQ_SRC_LAST_ILT, i);
6568 /* tell the searcher where the T2 table is */
6569 REG_WR(bp, SRC_REG_COUNTFREE0 + port*4, 16*1024/64);
6571 bnx2x_wb_wr(bp, SRC_REG_FIRSTFREE0 + port*16,
6572 U64_LO(bp->t2_mapping), U64_HI(bp->t2_mapping));
6574 bnx2x_wb_wr(bp, SRC_REG_LASTFREE0 + port*16,
6575 U64_LO((u64)bp->t2_mapping + 16*1024 - 64),
6576 U64_HI((u64)bp->t2_mapping + 16*1024 - 64));
6578 REG_WR(bp, SRC_REG_NUMBER_HASH_BITS0 + port*4, 10);
6581 if (CHIP_IS_E1H(bp)) {
6582 bnx2x_init_block(bp, MISC_BLOCK, FUNC0_STAGE + func);
6583 bnx2x_init_block(bp, TCM_BLOCK, FUNC0_STAGE + func);
6584 bnx2x_init_block(bp, UCM_BLOCK, FUNC0_STAGE + func);
6585 bnx2x_init_block(bp, CCM_BLOCK, FUNC0_STAGE + func);
6586 bnx2x_init_block(bp, XCM_BLOCK, FUNC0_STAGE + func);
6587 bnx2x_init_block(bp, TSEM_BLOCK, FUNC0_STAGE + func);
6588 bnx2x_init_block(bp, USEM_BLOCK, FUNC0_STAGE + func);
6589 bnx2x_init_block(bp, CSEM_BLOCK, FUNC0_STAGE + func);
6590 bnx2x_init_block(bp, XSEM_BLOCK, FUNC0_STAGE + func);
6592 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
6593 REG_WR(bp, NIG_REG_LLH0_FUNC_VLAN_ID + port*8, bp->e1hov);
6596 /* HC init per function */
6597 if (CHIP_IS_E1H(bp)) {
6598 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
6600 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
6601 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
6603 bnx2x_init_block(bp, HC_BLOCK, FUNC0_STAGE + func);
6605 /* Reset PCIE errors for debug */
6606 REG_WR(bp, 0x2114, 0xffffffff);
6607 REG_WR(bp, 0x2120, 0xffffffff);
6612 static int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
6616 DP(BNX2X_MSG_MCP, "function %d load_code %x\n",
6617 BP_FUNC(bp), load_code);
6620 mutex_init(&bp->dmae_mutex);
6621 rc = bnx2x_gunzip_init(bp);
6625 switch (load_code) {
6626 case FW_MSG_CODE_DRV_LOAD_COMMON:
6627 rc = bnx2x_init_common(bp);
6632 case FW_MSG_CODE_DRV_LOAD_PORT:
6634 rc = bnx2x_init_port(bp);
6639 case FW_MSG_CODE_DRV_LOAD_FUNCTION:
6641 rc = bnx2x_init_func(bp);
6647 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
6651 if (!BP_NOMCP(bp)) {
6652 int func = BP_FUNC(bp);
6654 bp->fw_drv_pulse_wr_seq =
6655 (SHMEM_RD(bp, func_mb[func].drv_pulse_mb) &
6656 DRV_PULSE_SEQ_MASK);
6657 DP(BNX2X_MSG_MCP, "drv_pulse 0x%x\n", bp->fw_drv_pulse_wr_seq);
6660 /* this needs to be done before gunzip end */
6661 bnx2x_zero_def_sb(bp);
6662 for_each_queue(bp, i)
6663 bnx2x_zero_sb(bp, BP_L_ID(bp) + i);
6665 bnx2x_zero_sb(bp, BP_L_ID(bp) + i);
6669 bnx2x_gunzip_end(bp);
6674 static void bnx2x_free_mem(struct bnx2x *bp)
6677 #define BNX2X_PCI_FREE(x, y, size) \
6680 pci_free_consistent(bp->pdev, size, x, y); \
6686 #define BNX2X_FREE(x) \
6698 for_each_queue(bp, i) {
6701 BNX2X_PCI_FREE(bnx2x_fp(bp, i, status_blk),
6702 bnx2x_fp(bp, i, status_blk_mapping),
6703 sizeof(struct host_status_block));
6706 for_each_rx_queue(bp, i) {
6708 /* fastpath rx rings: rx_buf rx_desc rx_comp */
6709 BNX2X_FREE(bnx2x_fp(bp, i, rx_buf_ring));
6710 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_desc_ring),
6711 bnx2x_fp(bp, i, rx_desc_mapping),
6712 sizeof(struct eth_rx_bd) * NUM_RX_BD);
6714 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_comp_ring),
6715 bnx2x_fp(bp, i, rx_comp_mapping),
6716 sizeof(struct eth_fast_path_rx_cqe) *
6720 BNX2X_FREE(bnx2x_fp(bp, i, rx_page_ring));
6721 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_sge_ring),
6722 bnx2x_fp(bp, i, rx_sge_mapping),
6723 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
6726 for_each_tx_queue(bp, i) {
6728 /* fastpath tx rings: tx_buf tx_desc */
6729 BNX2X_FREE(bnx2x_fp(bp, i, tx_buf_ring));
6730 BNX2X_PCI_FREE(bnx2x_fp(bp, i, tx_desc_ring),
6731 bnx2x_fp(bp, i, tx_desc_mapping),
6732 sizeof(union eth_tx_bd_types) * NUM_TX_BD);
6734 /* end of fastpath */
6736 BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping,
6737 sizeof(struct host_def_status_block));
6739 BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping,
6740 sizeof(struct bnx2x_slowpath));
6743 BNX2X_PCI_FREE(bp->t1, bp->t1_mapping, 64*1024);
6744 BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, 16*1024);
6745 BNX2X_PCI_FREE(bp->timers, bp->timers_mapping, 8*1024);
6746 BNX2X_PCI_FREE(bp->qm, bp->qm_mapping, 128*1024);
6747 BNX2X_PCI_FREE(bp->cnic_sb, bp->cnic_sb_mapping,
6748 sizeof(struct host_status_block));
6750 BNX2X_PCI_FREE(bp->spq, bp->spq_mapping, BCM_PAGE_SIZE);
6752 #undef BNX2X_PCI_FREE
6756 static int bnx2x_alloc_mem(struct bnx2x *bp)
6759 #define BNX2X_PCI_ALLOC(x, y, size) \
6761 x = pci_alloc_consistent(bp->pdev, size, y); \
6763 goto alloc_mem_err; \
6764 memset(x, 0, size); \
6767 #define BNX2X_ALLOC(x, size) \
6769 x = vmalloc(size); \
6771 goto alloc_mem_err; \
6772 memset(x, 0, size); \
6779 for_each_queue(bp, i) {
6780 bnx2x_fp(bp, i, bp) = bp;
6783 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, status_blk),
6784 &bnx2x_fp(bp, i, status_blk_mapping),
6785 sizeof(struct host_status_block));
6788 for_each_rx_queue(bp, i) {
6790 /* fastpath rx rings: rx_buf rx_desc rx_comp */
6791 BNX2X_ALLOC(bnx2x_fp(bp, i, rx_buf_ring),
6792 sizeof(struct sw_rx_bd) * NUM_RX_BD);
6793 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_desc_ring),
6794 &bnx2x_fp(bp, i, rx_desc_mapping),
6795 sizeof(struct eth_rx_bd) * NUM_RX_BD);
6797 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_comp_ring),
6798 &bnx2x_fp(bp, i, rx_comp_mapping),
6799 sizeof(struct eth_fast_path_rx_cqe) *
6803 BNX2X_ALLOC(bnx2x_fp(bp, i, rx_page_ring),
6804 sizeof(struct sw_rx_page) * NUM_RX_SGE);
6805 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_sge_ring),
6806 &bnx2x_fp(bp, i, rx_sge_mapping),
6807 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
6810 for_each_tx_queue(bp, i) {
6812 /* fastpath tx rings: tx_buf tx_desc */
6813 BNX2X_ALLOC(bnx2x_fp(bp, i, tx_buf_ring),
6814 sizeof(struct sw_tx_bd) * NUM_TX_BD);
6815 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, tx_desc_ring),
6816 &bnx2x_fp(bp, i, tx_desc_mapping),
6817 sizeof(union eth_tx_bd_types) * NUM_TX_BD);
6819 /* end of fastpath */
6821 BNX2X_PCI_ALLOC(bp->def_status_blk, &bp->def_status_blk_mapping,
6822 sizeof(struct host_def_status_block));
6824 BNX2X_PCI_ALLOC(bp->slowpath, &bp->slowpath_mapping,
6825 sizeof(struct bnx2x_slowpath));
6828 BNX2X_PCI_ALLOC(bp->t1, &bp->t1_mapping, 64*1024);
6830 /* allocate the searcher T2 table;
6831 we allocate 1/4 of the allocation count for T2
6832 (which is not entered into the ILT) */
6833 BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, 16*1024);
6835 /* Initialize T2 (for 1024 connections) */
6836 for (i = 0; i < 16*1024; i += 64)
6837 *(u64 *)((char *)bp->t2 + i + 56) = bp->t2_mapping + i + 64;
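/* Annotation (not driver code): the loop above chains the 16KB T2 table
 * into a free list of 256 entries of 64 bytes each - the u64 at offset
 * 56 of every entry holds the DMA address of the entry after it. The
 * first and last entries are what SRC_REG_FIRSTFREE0/SRC_REG_LASTFREE0
 * are pointed at in bnx2x_init_func(). */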
6839 /* Timer block array (8*MAX_CONN); physical, uncached; for now 1024 conns */
6840 BNX2X_PCI_ALLOC(bp->timers, &bp->timers_mapping, 8*1024);
6842 /* QM queues (128*MAX_CONN) */
6843 BNX2X_PCI_ALLOC(bp->qm, &bp->qm_mapping, 128*1024);
6845 BNX2X_PCI_ALLOC(bp->cnic_sb, &bp->cnic_sb_mapping,
6846 sizeof(struct host_status_block));
6849 /* Slow path ring */
6850 BNX2X_PCI_ALLOC(bp->spq, &bp->spq_mapping, BCM_PAGE_SIZE);
6858 #undef BNX2X_PCI_ALLOC
6862 static void bnx2x_free_tx_skbs(struct bnx2x *bp)
6866 for_each_tx_queue(bp, i) {
6867 struct bnx2x_fastpath *fp = &bp->fp[i];
6869 u16 bd_cons = fp->tx_bd_cons;
6870 u16 sw_prod = fp->tx_pkt_prod;
6871 u16 sw_cons = fp->tx_pkt_cons;
6873 while (sw_cons != sw_prod) {
6874 bd_cons = bnx2x_free_tx_pkt(bp, fp, TX_BD(sw_cons));
6880 static void bnx2x_free_rx_skbs(struct bnx2x *bp)
6884 for_each_rx_queue(bp, j) {
6885 struct bnx2x_fastpath *fp = &bp->fp[j];
6887 for (i = 0; i < NUM_RX_BD; i++) {
6888 struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
6889 struct sk_buff *skb = rx_buf->skb;
6894 pci_unmap_single(bp->pdev,
6895 pci_unmap_addr(rx_buf, mapping),
6896 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
6901 if (!fp->disable_tpa)
6902 bnx2x_free_tpa_pool(bp, fp, CHIP_IS_E1(bp) ?
6903 ETH_MAX_AGGREGATION_QUEUES_E1 :
6904 ETH_MAX_AGGREGATION_QUEUES_E1H);
6908 static void bnx2x_free_skbs(struct bnx2x *bp)
6910 bnx2x_free_tx_skbs(bp);
6911 bnx2x_free_rx_skbs(bp);
6914 static void bnx2x_free_msix_irqs(struct bnx2x *bp)
6918 free_irq(bp->msix_table[0].vector, bp->dev);
6919 DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
6920 bp->msix_table[0].vector);
6925 for_each_queue(bp, i) {
6926 DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq "
6927 "state %x\n", i, bp->msix_table[i + offset].vector,
6928 bnx2x_fp(bp, i, state));
6930 free_irq(bp->msix_table[i + offset].vector, &bp->fp[i]);
6934 static void bnx2x_free_irq(struct bnx2x *bp)
6936 if (bp->flags & USING_MSIX_FLAG) {
6937 bnx2x_free_msix_irqs(bp);
6938 pci_disable_msix(bp->pdev);
6939 bp->flags &= ~USING_MSIX_FLAG;
6941 } else if (bp->flags & USING_MSI_FLAG) {
6942 free_irq(bp->pdev->irq, bp->dev);
6943 pci_disable_msi(bp->pdev);
6944 bp->flags &= ~USING_MSI_FLAG;
6947 free_irq(bp->pdev->irq, bp->dev);
6950 static int bnx2x_enable_msix(struct bnx2x *bp)
6952 int i, rc, offset = 1;
6955 bp->msix_table[0].entry = igu_vec;
6956 DP(NETIF_MSG_IFUP, "msix_table[0].entry = %d (slowpath)\n", igu_vec);
6959 igu_vec = BP_L_ID(bp) + offset;
6960 bp->msix_table[1].entry = igu_vec;
6961 DP(NETIF_MSG_IFUP, "msix_table[1].entry = %d (CNIC)\n", igu_vec);
6964 for_each_queue(bp, i) {
6965 igu_vec = BP_L_ID(bp) + offset + i;
6966 bp->msix_table[i + offset].entry = igu_vec;
6967 DP(NETIF_MSG_IFUP, "msix_table[%d].entry = %d "
6968 "(fastpath #%u)\n", i + offset, igu_vec, i);
6971 rc = pci_enable_msix(bp->pdev, &bp->msix_table[0],
6972 BNX2X_NUM_QUEUES(bp) + offset);
6974 DP(NETIF_MSG_IFUP, "MSI-X is not attainable rc %d\n", rc);
6978 bp->flags |= USING_MSIX_FLAG;
6983 static int bnx2x_req_msix_irqs(struct bnx2x *bp)
6985 int i, rc, offset = 1;
6987 rc = request_irq(bp->msix_table[0].vector, bnx2x_msix_sp_int, 0,
6988 bp->dev->name, bp->dev);
6990 BNX2X_ERR("request sp irq failed\n");
6997 for_each_queue(bp, i) {
6998 struct bnx2x_fastpath *fp = &bp->fp[i];
7000 if (i < bp->num_rx_queues)
7001 sprintf(fp->name, "%s-rx-%d", bp->dev->name, i);
7003 sprintf(fp->name, "%s-tx-%d",
7004 bp->dev->name, i - bp->num_rx_queues);
7006 rc = request_irq(bp->msix_table[i + offset].vector,
7007 bnx2x_msix_fp_int, 0, fp->name, fp);
7009 BNX2X_ERR("request fp #%d irq failed rc %d\n", i, rc);
7010 bnx2x_free_msix_irqs(bp);
7014 fp->state = BNX2X_FP_STATE_IRQ;
7017 i = BNX2X_NUM_QUEUES(bp);
7018 printk(KERN_INFO PFX "%s: using MSI-X IRQs: sp %d fp[%d] %d"
7020 bp->dev->name, bp->msix_table[0].vector,
7021 0, bp->msix_table[offset].vector,
7022 i - 1, bp->msix_table[offset + i - 1].vector);
7027 static int bnx2x_enable_msi(struct bnx2x *bp)
7031 rc = pci_enable_msi(bp->pdev);
7033 DP(NETIF_MSG_IFUP, "MSI is not attainable\n");
7036 bp->flags |= USING_MSI_FLAG;
7041 static int bnx2x_req_irq(struct bnx2x *bp)
7043 unsigned long flags;
7046 if (bp->flags & USING_MSI_FLAG)
7049 flags = IRQF_SHARED;
7051 rc = request_irq(bp->pdev->irq, bnx2x_interrupt, flags,
7052 bp->dev->name, bp->dev);
7054 bnx2x_fp(bp, 0, state) = BNX2X_FP_STATE_IRQ;
7059 static void bnx2x_napi_enable(struct bnx2x *bp)
7063 for_each_rx_queue(bp, i)
7064 napi_enable(&bnx2x_fp(bp, i, napi));
7067 static void bnx2x_napi_disable(struct bnx2x *bp)
7071 for_each_rx_queue(bp, i)
7072 napi_disable(&bnx2x_fp(bp, i, napi));
7075 static void bnx2x_netif_start(struct bnx2x *bp)
7079 intr_sem = atomic_dec_and_test(&bp->intr_sem);
7080 smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */
7083 if (netif_running(bp->dev)) {
7084 bnx2x_napi_enable(bp);
7085 bnx2x_int_enable(bp);
7086 if (bp->state == BNX2X_STATE_OPEN)
7087 netif_tx_wake_all_queues(bp->dev);
7092 static void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
7094 bnx2x_int_disable_sync(bp, disable_hw);
7095 bnx2x_napi_disable(bp);
7096 netif_tx_disable(bp->dev);
7097 bp->dev->trans_start = jiffies; /* prevent tx timeout */
7101 * Init service functions
7105 * Sets a MAC in a CAM for a few L2 Clients for E1 chip
7107 * @param bp driver descriptor
7108 * @param set set or clear an entry (1 or 0)
7109 * @param mac pointer to a buffer containing a MAC
7110 * @param cl_bit_vec bit vector of clients to register a MAC for
7111 * @param cam_offset offset in a CAM to use
7112 * @param with_bcast set broadcast MAC as well
7114 static void bnx2x_set_mac_addr_e1_gen(struct bnx2x *bp, int set, u8 *mac,
7115 u32 cl_bit_vec, u8 cam_offset,
7118 struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
7119 int port = BP_PORT(bp);
7122 * unicasts 0-31:port0 32-63:port1
7123 * multicast 64-127:port0 128-191:port1
7125 config->hdr.length = 1 + (with_bcast ? 1 : 0);
7126 config->hdr.offset = cam_offset;
7127 config->hdr.client_id = 0xff;
7128 config->hdr.reserved1 = 0;
7131 config->config_table[0].cam_entry.msb_mac_addr =
7132 swab16(*(u16 *)&mac[0]);
7133 config->config_table[0].cam_entry.middle_mac_addr =
7134 swab16(*(u16 *)&mac[2]);
7135 config->config_table[0].cam_entry.lsb_mac_addr =
7136 swab16(*(u16 *)&mac[4]);
7137 config->config_table[0].cam_entry.flags = cpu_to_le16(port);
7139 config->config_table[0].target_table_entry.flags = 0;
7141 CAM_INVALIDATE(config->config_table[0]);
7142 config->config_table[0].target_table_entry.clients_bit_vector =
7143 cpu_to_le32(cl_bit_vec);
7144 config->config_table[0].target_table_entry.vlan_id = 0;
7146 DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x)\n",
7147 (set ? "setting" : "clearing"),
7148 config->config_table[0].cam_entry.msb_mac_addr,
7149 config->config_table[0].cam_entry.middle_mac_addr,
7150 config->config_table[0].cam_entry.lsb_mac_addr);
7154 config->config_table[1].cam_entry.msb_mac_addr =
7155 cpu_to_le16(0xffff);
7156 config->config_table[1].cam_entry.middle_mac_addr =
7157 cpu_to_le16(0xffff);
7158 config->config_table[1].cam_entry.lsb_mac_addr =
7159 cpu_to_le16(0xffff);
7160 config->config_table[1].cam_entry.flags = cpu_to_le16(port);
7162 config->config_table[1].target_table_entry.flags =
7163 TSTORM_CAM_TARGET_TABLE_ENTRY_BROADCAST;
7165 CAM_INVALIDATE(config->config_table[1]);
7166 config->config_table[1].target_table_entry.clients_bit_vector =
7167 cpu_to_le32(cl_bit_vec);
7168 config->config_table[1].target_table_entry.vlan_id = 0;
7171 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
7172 U64_HI(bnx2x_sp_mapping(bp, mac_config)),
7173 U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
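/* Worked example (annotation, not driver code): on a little-endian host
 * with a hypothetical MAC 00:11:22:33:44:55, *(u16 *)&mac[0] reads as
 * 0x1100, so swab16() yields msb_mac_addr = 0x0011; likewise middle =
 * 0x2233 and lsb = 0x4455 - exactly what the "%04x:%04x:%04x" debug
 * print above displays. */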
7177 * Sets a MAC in a CAM for a few L2 Clients for E1H chip
7179 * @param bp driver descriptor
7180 * @param set set or clear an entry (1 or 0)
7181 * @param mac pointer to a buffer containing a MAC
7182 * @param cl_bit_vec bit vector of clients to register a MAC for
7183 * @param cam_offset offset in a CAM to use
7185 static void bnx2x_set_mac_addr_e1h_gen(struct bnx2x *bp, int set, u8 *mac,
7186 u32 cl_bit_vec, u8 cam_offset)
7188 struct mac_configuration_cmd_e1h *config =
7189 (struct mac_configuration_cmd_e1h *)bnx2x_sp(bp, mac_config);
7191 config->hdr.length = 1;
7192 config->hdr.offset = cam_offset;
7193 config->hdr.client_id = 0xff;
7194 config->hdr.reserved1 = 0;
7197 config->config_table[0].msb_mac_addr =
7198 swab16(*(u16 *)&mac[0]);
7199 config->config_table[0].middle_mac_addr =
7200 swab16(*(u16 *)&mac[2]);
7201 config->config_table[0].lsb_mac_addr =
7202 swab16(*(u16 *)&mac[4]);
7203 config->config_table[0].clients_bit_vector =
7204 cpu_to_le32(cl_bit_vec);
7205 config->config_table[0].vlan_id = 0;
7206 config->config_table[0].e1hov_id = cpu_to_le16(bp->e1hov);
7208 config->config_table[0].flags = BP_PORT(bp);
7210 config->config_table[0].flags =
7211 MAC_CONFIGURATION_ENTRY_E1H_ACTION_TYPE;
7213 DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x) E1HOV %d CLID mask %d\n",
7214 (set ? "setting" : "clearing"),
7215 config->config_table[0].msb_mac_addr,
7216 config->config_table[0].middle_mac_addr,
7217 config->config_table[0].lsb_mac_addr, bp->e1hov, cl_bit_vec);
7219 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
7220 U64_HI(bnx2x_sp_mapping(bp, mac_config)),
7221 U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
7224 static int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
7225 int *state_p, int poll)
7227 /* can take a while if any port is running */
7230 DP(NETIF_MSG_IFUP, "%s for state to become %x on IDX [%d]\n",
7231 poll ? "polling" : "waiting", state, idx);
7236 bnx2x_rx_int(bp->fp, 10);
7237 /* if index is different from 0
7238 * the reply for some commands will
7239 * be on the non-default queue
7242 bnx2x_rx_int(&bp->fp[idx], 10);
7245 mb(); /* state is changed by bnx2x_sp_event() */
7246 if (*state_p == state) {
7247 #ifdef BNX2X_STOP_ON_ERROR
7248 DP(NETIF_MSG_IFUP, "exit (cnt %d)\n", 5000 - cnt);
7260 BNX2X_ERR("timeout %s for state %x on IDX [%d]\n",
7261 poll ? "polling" : "waiting", state, idx);
7262 #ifdef BNX2X_STOP_ON_ERROR
7269 static void bnx2x_set_eth_mac_addr_e1h(struct bnx2x *bp, int set)
7271 bp->set_mac_pending++;
7274 bnx2x_set_mac_addr_e1h_gen(bp, set, bp->dev->dev_addr,
7275 (1 << bp->fp->cl_id), BP_FUNC(bp));
7277 /* Wait for a completion */
7278 bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending, set ? 0 : 1);
7281 static void bnx2x_set_eth_mac_addr_e1(struct bnx2x *bp, int set)
7283 bp->set_mac_pending++;
7286 bnx2x_set_mac_addr_e1_gen(bp, set, bp->dev->dev_addr,
7287 (1 << bp->fp->cl_id), (BP_PORT(bp) ? 32 : 0),
7290 /* Wait for a completion */
7291 bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending, set ? 0 : 1);
7294 static int bnx2x_setup_leading(struct bnx2x *bp)
7298 /* reset IGU state */
7299 bnx2x_ack_sb(bp, bp->fp[0].sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
7302 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_SETUP, 0, 0, 0, 0);
7304 /* Wait for completion */
7305 rc = bnx2x_wait_ramrod(bp, BNX2X_STATE_OPEN, 0, &(bp->state), 0);
7310 static int bnx2x_setup_multi(struct bnx2x *bp, int index)
7312 struct bnx2x_fastpath *fp = &bp->fp[index];
7314 /* reset IGU state */
7315 bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
7318 fp->state = BNX2X_FP_STATE_OPENING;
7319 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CLIENT_SETUP, index, 0,
7322 /* Wait for completion */
7323 return bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_OPEN, index,
7327 static int bnx2x_poll(struct napi_struct *napi, int budget);
7329 static void bnx2x_set_int_mode_msix(struct bnx2x *bp, int *num_rx_queues_out,
7330 int *num_tx_queues_out)
7332 int _num_rx_queues = 0, _num_tx_queues = 0;
7334 switch (bp->multi_mode) {
7335 case ETH_RSS_MODE_DISABLED:
7340 case ETH_RSS_MODE_REGULAR:
7342 _num_rx_queues = min_t(u32, num_rx_queues,
7343 BNX2X_MAX_QUEUES(bp));
7345 _num_rx_queues = min_t(u32, num_online_cpus(),
7346 BNX2X_MAX_QUEUES(bp));
7349 _num_tx_queues = min_t(u32, num_tx_queues,
7350 BNX2X_MAX_QUEUES(bp));
7352 _num_tx_queues = min_t(u32, num_online_cpus(),
7353 BNX2X_MAX_QUEUES(bp));
7355 /* There must not be more Tx queues than Rx queues */
7356 if (_num_tx_queues > _num_rx_queues) {
7357 BNX2X_ERR("number of tx queues (%d) > "
7358 "number of rx queues (%d)"
7359 " defaulting to %d\n",
7360 _num_tx_queues, _num_rx_queues,
7362 _num_tx_queues = _num_rx_queues;
7373 *num_rx_queues_out = _num_rx_queues;
7374 *num_tx_queues_out = _num_tx_queues;
7377 static int bnx2x_set_int_mode(struct bnx2x *bp)
7384 bp->num_rx_queues = 1;
7385 bp->num_tx_queues = 1;
7386 DP(NETIF_MSG_IFUP, "set number of queues to 1\n");
7391 /* Set interrupt mode according to bp->multi_mode value */
7392 bnx2x_set_int_mode_msix(bp, &bp->num_rx_queues,
7393 &bp->num_tx_queues);
7395 DP(NETIF_MSG_IFUP, "set number of queues to: rx %d tx %d\n",
7396 bp->num_rx_queues, bp->num_tx_queues);
7398 /* if we can't use MSI-X we only need one fp,
7399 * so try to enable MSI-X with the requested number of fp's
7400 * and fall back to MSI or legacy INTx with one fp
7402 rc = bnx2x_enable_msix(bp);
7404 /* failed to enable MSI-X */
7406 BNX2X_ERR("Multi requested but failed to "
7407 "enable MSI-X (rx %d tx %d), "
7408 "set number of queues to 1\n",
7409 bp->num_rx_queues, bp->num_tx_queues);
7410 bp->num_rx_queues = 1;
7411 bp->num_tx_queues = 1;
7415 bp->dev->real_num_tx_queues = bp->num_tx_queues;
7420 /* must be called with rtnl_lock */
7421 static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
7426 #ifdef BNX2X_STOP_ON_ERROR
7427 if (unlikely(bp->panic))
7431 bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;
7433 rc = bnx2x_set_int_mode(bp);
7435 if (bnx2x_alloc_mem(bp))
7438 for_each_rx_queue(bp, i)
7439 bnx2x_fp(bp, i, disable_tpa) =
7440 ((bp->flags & TPA_ENABLE_FLAG) == 0);
7442 for_each_rx_queue(bp, i)
7443 netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
7446 bnx2x_napi_enable(bp);
7448 if (bp->flags & USING_MSIX_FLAG) {
7449 rc = bnx2x_req_msix_irqs(bp);
7451 pci_disable_msix(bp->pdev);
7455 /* Fall back to INTx if we failed to enable MSI-X due to lack of
7456 memory (in bnx2x_set_int_mode()) */
7457 if ((rc != -ENOMEM) && (int_mode != INT_MODE_INTx))
7458 bnx2x_enable_msi(bp);
7460 rc = bnx2x_req_irq(bp);
7462 BNX2X_ERR("IRQ request failed rc %d, aborting\n", rc);
7463 if (bp->flags & USING_MSI_FLAG)
7464 pci_disable_msi(bp->pdev);
7467 if (bp->flags & USING_MSI_FLAG) {
7468 bp->dev->irq = bp->pdev->irq;
7469 printk(KERN_INFO PFX "%s: using MSI IRQ %d\n",
7470 bp->dev->name, bp->pdev->irq);
7474 /* Send LOAD_REQUEST command to the MCP.
7475 The MCP returns the type of LOAD command:
7476 if this is the first port to be initialized,
7477 common blocks should be initialized, otherwise not
7479 if (!BP_NOMCP(bp)) {
7480 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ);
7482 BNX2X_ERR("MCP response failure, aborting\n");
7486 if (load_code == FW_MSG_CODE_DRV_LOAD_REFUSED) {
7487 rc = -EBUSY; /* other port in diagnostic mode */
7492 int port = BP_PORT(bp);
7494 DP(NETIF_MSG_IFUP, "NO MCP - load counts %d, %d, %d\n",
7495 load_count[0], load_count[1], load_count[2]);
7497 load_count[1 + port]++;
7498 DP(NETIF_MSG_IFUP, "NO MCP - new load counts %d, %d, %d\n",
7499 load_count[0], load_count[1], load_count[2]);
7500 if (load_count[0] == 1)
7501 load_code = FW_MSG_CODE_DRV_LOAD_COMMON;
7502 else if (load_count[1 + port] == 1)
7503 load_code = FW_MSG_CODE_DRV_LOAD_PORT;
7505 load_code = FW_MSG_CODE_DRV_LOAD_FUNCTION;
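/* Annotation (not driver code): without an MCP the driver derives the
 * load type from load_count[] itself - the first function up overall
 * (load_count[0] == 1) performs COMMON init, the first one on its port
 * (load_count[1 + port] == 1) performs PORT init, and any later function
 * does FUNCTION init - mirroring what the MCP would answer to LOAD_REQ. */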
7508 if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
7509 (load_code == FW_MSG_CODE_DRV_LOAD_PORT))
7513 DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
7516 rc = bnx2x_init_hw(bp, load_code);
7518 BNX2X_ERR("HW init failed, aborting\n");
7522 /* Setup NIC internals and enable interrupts */
7523 bnx2x_nic_init(bp, load_code);
7525 if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) &&
7526 (bp->common.shmem2_base))
7527 SHMEM2_WR(bp, dcc_support,
7528 (SHMEM_DCC_SUPPORT_DISABLE_ENABLE_PF_TLV |
7529 SHMEM_DCC_SUPPORT_BANDWIDTH_ALLOCATION_TLV));
7531 /* Send LOAD_DONE command to MCP */
7532 if (!BP_NOMCP(bp)) {
7533 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE);
7535 BNX2X_ERR("MCP response failure, aborting\n");
7541 bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;
7543 rc = bnx2x_setup_leading(bp);
7545 BNX2X_ERR("Setup leading failed!\n");
7546 #ifndef BNX2X_STOP_ON_ERROR
7554 if (CHIP_IS_E1H(bp))
7555 if (bp->mf_config & FUNC_MF_CFG_FUNC_DISABLED) {
7556 DP(NETIF_MSG_IFUP, "mf_cfg function disabled\n");
7557 bp->state = BNX2X_STATE_DISABLED;
7560 if (bp->state == BNX2X_STATE_OPEN) {
7562 /* Enable Timer scan */
7563 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + BP_PORT(bp)*4, 1);
7565 for_each_nondefault_queue(bp, i) {
7566 rc = bnx2x_setup_multi(bp, i);
7576 bnx2x_set_eth_mac_addr_e1(bp, 1);
7578 bnx2x_set_eth_mac_addr_e1h(bp, 1);
7582 bnx2x_initial_phy_init(bp, load_mode);
7584 /* Start fast path */
7585 switch (load_mode) {
7587 if (bp->state == BNX2X_STATE_OPEN) {
7588 /* Tx queues should only be re-enabled */
7589 netif_tx_wake_all_queues(bp->dev);
7591 /* Initialize the receive filter. */
7592 bnx2x_set_rx_mode(bp->dev);
7596 netif_tx_start_all_queues(bp->dev);
7597 if (bp->state != BNX2X_STATE_OPEN)
7598 netif_tx_disable(bp->dev);
7599 /* Initialize the receive filter. */
7600 bnx2x_set_rx_mode(bp->dev);
7604 /* Initialize the receive filter. */
7605 bnx2x_set_rx_mode(bp->dev);
7606 bp->state = BNX2X_STATE_DIAG;
7614 bnx2x__link_status_update(bp);
7616 /* start the timer */
7617 mod_timer(&bp->timer, jiffies + bp->current_interval);
7624 /* Disable Timer scan */
7625 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + BP_PORT(bp)*4, 0);
7628 bnx2x_int_disable_sync(bp, 1);
7629 if (!BP_NOMCP(bp)) {
7630 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP);
7631 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
7634 /* Free SKBs, SGEs, TPA pool and driver internals */
7635 bnx2x_free_skbs(bp);
7636 for_each_rx_queue(bp, i)
7637 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
7642 bnx2x_napi_disable(bp);
7643 for_each_rx_queue(bp, i)
7644 netif_napi_del(&bnx2x_fp(bp, i, napi));
7650 static int bnx2x_stop_multi(struct bnx2x *bp, int index)
7652 struct bnx2x_fastpath *fp = &bp->fp[index];
7655 /* halt the connection */
7656 fp->state = BNX2X_FP_STATE_HALTING;
7657 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, index, 0, fp->cl_id, 0);
7659 /* Wait for completion */
7660 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, index,
7662 if (rc) /* timeout */
7665 /* delete cfc entry */
7666 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CFC_DEL, index, 0, 0, 1);
7668 /* Wait for completion */
7669 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_CLOSED, index,
7674 static int bnx2x_stop_leading(struct bnx2x *bp)
7676 __le16 dsb_sp_prod_idx;
7677 /* if the other port is handling traffic,
7678 this can take a lot of time */
7684 /* Send HALT ramrod */
7685 bp->fp[0].state = BNX2X_FP_STATE_HALTING;
7686 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, 0, 0, bp->fp->cl_id, 0);
7688 /* Wait for completion */
7689 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, 0,
7690 &(bp->fp[0].state), 1);
7691 if (rc) /* timeout */
7694 dsb_sp_prod_idx = *bp->dsb_sp_prod;
7696 /* Send PORT_DELETE ramrod */
7697 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_DEL, 0, 0, 0, 1);
7699 /* Wait for the completion to arrive on the default status block;
7700 we are going to reset the chip anyway,
7701 so there is not much to do if this times out
7703 while (dsb_sp_prod_idx == *bp->dsb_sp_prod) {
7705 DP(NETIF_MSG_IFDOWN, "timeout waiting for port del "
7706 "dsb_sp_prod 0x%x != dsb_sp_prod_idx 0x%x\n",
7707 *bp->dsb_sp_prod, dsb_sp_prod_idx);
7708 #ifdef BNX2X_STOP_ON_ERROR
7716 rmb(); /* Refresh the dsb_sp_prod */
7718 bp->state = BNX2X_STATE_CLOSING_WAIT4_UNLOAD;
7719 bp->fp[0].state = BNX2X_FP_STATE_CLOSED;
7724 static void bnx2x_reset_func(struct bnx2x *bp)
7726 int port = BP_PORT(bp);
7727 int func = BP_FUNC(bp);
7731 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
7732 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
7735 /* Disable Timer scan */
7736 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 0);
7738 * Wait for at least 10ms and up to 2 seconds for the timers scan to complete
7741 for (i = 0; i < 200; i++) {
7743 if (!REG_RD(bp, TM_REG_LIN0_SCAN_ON + port*4))
7748 base = FUNC_ILT_BASE(func);
7749 for (i = base; i < base + ILT_PER_FUNC; i++)
7750 bnx2x_ilt_wr(bp, i, 0);
7753 static void bnx2x_reset_port(struct bnx2x *bp)
7755 int port = BP_PORT(bp);
7758 REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
7760 /* Do not rcv packets to BRB */
7761 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK + port*4, 0x0);
7762 /* Do not direct rcv packets that are not for MCP to the BRB */
7763 REG_WR(bp, (port ? NIG_REG_LLH1_BRB1_NOT_MCP :
7764 NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
7767 REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, 0);
7770 /* Check for BRB port occupancy */
7771 val = REG_RD(bp, BRB1_REG_PORT_NUM_OCC_BLOCKS_0 + port*4);
7773 DP(NETIF_MSG_IFDOWN,
7774 "BRB1 is not empty %d blocks are occupied\n", val);
7776 /* TODO: Close Doorbell port? */
7779 static void bnx2x_reset_chip(struct bnx2x *bp, u32 reset_code)
7781 DP(BNX2X_MSG_MCP, "function %d reset_code %x\n",
7782 BP_FUNC(bp), reset_code);
7784 switch (reset_code) {
7785 case FW_MSG_CODE_DRV_UNLOAD_COMMON:
7786 bnx2x_reset_port(bp);
7787 bnx2x_reset_func(bp);
7788 bnx2x_reset_common(bp);
7791 case FW_MSG_CODE_DRV_UNLOAD_PORT:
7792 bnx2x_reset_port(bp);
7793 bnx2x_reset_func(bp);
7796 case FW_MSG_CODE_DRV_UNLOAD_FUNCTION:
7797 bnx2x_reset_func(bp);
7801 BNX2X_ERR("Unknown reset_code (0x%x) from MCP\n", reset_code);
7806 /* must be called with rtnl_lock */
7807 static int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
7809 int port = BP_PORT(bp);
7813 bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
7815 /* Set "drop all" */
7816 bp->rx_mode = BNX2X_RX_MODE_NONE;
7817 bnx2x_set_storm_rx_mode(bp);
7819 /* Disable HW interrupts, NAPI and Tx */
7820 bnx2x_netif_stop(bp, 1);
7822 del_timer_sync(&bp->timer);
7823 SHMEM_WR(bp, func_mb[BP_FUNC(bp)].drv_pulse_mb,
7824 (DRV_PULSE_ALWAYS_ALIVE | bp->fw_drv_pulse_wr_seq));
7825 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
7830 /* Wait until tx fastpath tasks complete */
7831 for_each_tx_queue(bp, i) {
7832 struct bnx2x_fastpath *fp = &bp->fp[i];
7835 while (bnx2x_has_tx_work_unload(fp)) {
7839 BNX2X_ERR("timeout waiting for queue[%d]\n",
7841 #ifdef BNX2X_STOP_ON_ERROR
7852 /* Give HW time to discard old tx messages */
7855 if (CHIP_IS_E1(bp)) {
7856 struct mac_configuration_cmd *config =
7857 bnx2x_sp(bp, mcast_config);
7859 bnx2x_set_eth_mac_addr_e1(bp, 0);
7861 for (i = 0; i < config->hdr.length; i++)
7862 CAM_INVALIDATE(config->config_table[i]);
7864 config->hdr.length = i;
7865 if (CHIP_REV_IS_SLOW(bp))
7866 config->hdr.offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
7868 config->hdr.offset = BNX2X_MAX_MULTICAST*(1 + port);
7869 config->hdr.client_id = bp->fp->cl_id;
7870 config->hdr.reserved1 = 0;
7872 bp->set_mac_pending++;
7875 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
7876 U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
7877 U64_LO(bnx2x_sp_mapping(bp, mcast_config)), 0);
7880 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
7882 bnx2x_set_eth_mac_addr_e1h(bp, 0);
7884 for (i = 0; i < MC_HASH_SIZE; i++)
7885 REG_WR(bp, MC_HASH_OFFSET(bp, i), 0);
7887 REG_WR(bp, MISC_REG_E1HMF_MODE, 0);
7890 if (unload_mode == UNLOAD_NORMAL)
7891 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
7893 else if (bp->flags & NO_WOL_FLAG)
7894 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP;
7897 u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
7898 u8 *mac_addr = bp->dev->dev_addr;
7900 /* The MAC address is written to entries 1-4 to
7901 preserve entry 0 which is used by the PMF */
7902 u8 entry = (BP_E1HVN(bp) + 1)*8;
7904 val = (mac_addr[0] << 8) | mac_addr[1];
7905 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry, val);
7907 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
7908 (mac_addr[4] << 8) | mac_addr[5];
7909 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry + 4, val);
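/* Worked example (annotation, not driver code): for a hypothetical MAC
 * 00:11:22:33:44:55 the two writes above store
 *   MAC_MATCH + entry     = (0x00 << 8) | 0x11 = 0x00000011
 *   MAC_MATCH + entry + 4 = (0x22 << 24) | (0x33 << 16) | (0x44 << 8) |
 *                           0x55 = 0x22334455
 * with entry = (vn + 1)*8, so that entry 0, used by the PMF, stays
 * untouched. */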
7911 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN;
7914 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
7916 /* Close multi and leading connections;
7917 completions for ramrods are collected in a synchronous way */
7918 for_each_nondefault_queue(bp, i)
7919 if (bnx2x_stop_multi(bp, i))
7922 rc = bnx2x_stop_leading(bp);
7924 BNX2X_ERR("Stop leading failed!\n");
7925 #ifdef BNX2X_STOP_ON_ERROR
7934 reset_code = bnx2x_fw_command(bp, reset_code);
7936 DP(NETIF_MSG_IFDOWN, "NO MCP - load counts %d, %d, %d\n",
7937 load_count[0], load_count[1], load_count[2]);
7939 load_count[1 + port]--;
7940 DP(NETIF_MSG_IFDOWN, "NO MCP - new load counts %d, %d, %d\n",
7941 load_count[0], load_count[1], load_count[2]);
7942 if (load_count[0] == 0)
7943 reset_code = FW_MSG_CODE_DRV_UNLOAD_COMMON;
7944 else if (load_count[1 + port] == 0)
7945 reset_code = FW_MSG_CODE_DRV_UNLOAD_PORT;
7947 reset_code = FW_MSG_CODE_DRV_UNLOAD_FUNCTION;
7950 if ((reset_code == FW_MSG_CODE_DRV_UNLOAD_COMMON) ||
7951 (reset_code == FW_MSG_CODE_DRV_UNLOAD_PORT))
7952 bnx2x__link_reset(bp);
7954 /* Reset the chip */
7955 bnx2x_reset_chip(bp, reset_code);
7957 /* Report UNLOAD_DONE to MCP */
7959 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
7963 /* Free SKBs, SGEs, TPA pool and driver internals */
7964 bnx2x_free_skbs(bp);
7965 for_each_rx_queue(bp, i)
7966 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
7967 for_each_rx_queue(bp, i)
7968 netif_napi_del(&bnx2x_fp(bp, i, napi));
7971 bp->state = BNX2X_STATE_CLOSED;
7973 netif_carrier_off(bp->dev);
7978 static void bnx2x_reset_task(struct work_struct *work)
7980 struct bnx2x *bp = container_of(work, struct bnx2x, reset_task);
7982 #ifdef BNX2X_STOP_ON_ERROR
7983 BNX2X_ERR("reset task called but STOP_ON_ERROR defined"
7984 " so reset not done to allow debug dump,\n"
7985 " you will need to reboot when done\n");
7991 if (!netif_running(bp->dev))
7992 goto reset_task_exit;
7994 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
7995 bnx2x_nic_load(bp, LOAD_NORMAL);
8001 /* end of nic load/unload */
8006 * Init service functions
8009 static inline u32 bnx2x_get_pretend_reg(struct bnx2x *bp, int func)
8012 case 0: return PXP2_REG_PGL_PRETEND_FUNC_F0;
8013 case 1: return PXP2_REG_PGL_PRETEND_FUNC_F1;
8014 case 2: return PXP2_REG_PGL_PRETEND_FUNC_F2;
8015 case 3: return PXP2_REG_PGL_PRETEND_FUNC_F3;
8016 case 4: return PXP2_REG_PGL_PRETEND_FUNC_F4;
8017 case 5: return PXP2_REG_PGL_PRETEND_FUNC_F5;
8018 case 6: return PXP2_REG_PGL_PRETEND_FUNC_F6;
8019 case 7: return PXP2_REG_PGL_PRETEND_FUNC_F7;
8021 BNX2X_ERR("Unsupported function index: %d\n", func);
8026 static void bnx2x_undi_int_disable_e1h(struct bnx2x *bp, int orig_func)
8028 u32 reg = bnx2x_get_pretend_reg(bp, orig_func), new_val;
8030 /* Flush all outstanding writes */
8033 /* Pretend to be function 0 */
8035 /* Flush the GRC transaction (in the chip) */
8036 new_val = REG_RD(bp, reg);
8038 BNX2X_ERR("Hmmm... Pretend register wasn't updated: (0,%d)!\n",
8043 /* From now we are in the "like-E1" mode */
8044 bnx2x_int_disable(bp);
8046 /* Flush all outstanding writes */
8049 /* Restore the original function settings */
8050 REG_WR(bp, reg, orig_func);
8051 new_val = REG_RD(bp, reg);
8052 if (new_val != orig_func) {
8053 BNX2X_ERR("Hmmm... Pretend register wasn't updated: (%d,%d)!\n",
8054 orig_func, new_val);
8059 static inline void bnx2x_undi_int_disable(struct bnx2x *bp, int func)
8061 if (CHIP_IS_E1H(bp))
8062 bnx2x_undi_int_disable_e1h(bp, func);
8064 bnx2x_int_disable(bp);
8067 static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
8071 /* Check if there is any driver already loaded */
8072 val = REG_RD(bp, MISC_REG_UNPREPARED);
8074 /* Check if it is the UNDI driver
8075 * UNDI driver initializes CID offset for normal bell to 0x7
8077 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
8078 val = REG_RD(bp, DORQ_REG_NORM_CID_OFST);
8080 u32 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
8082 int func = BP_FUNC(bp);
8086 /* clear the UNDI indication */
8087 REG_WR(bp, DORQ_REG_NORM_CID_OFST, 0);
8089 BNX2X_DEV_INFO("UNDI is active! reset device\n");
8091 /* try unload UNDI on port 0 */
8094 (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
8095 DRV_MSG_SEQ_NUMBER_MASK);
8096 reset_code = bnx2x_fw_command(bp, reset_code);
8098 /* if UNDI is loaded on the other port */
8099 if (reset_code != FW_MSG_CODE_DRV_UNLOAD_COMMON) {
8101 /* send "DONE" for previous unload */
8102 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
8104 /* unload UNDI on port 1 */
8107 (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
8108 DRV_MSG_SEQ_NUMBER_MASK);
8109 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
8111 bnx2x_fw_command(bp, reset_code);
8114 /* now it's safe to release the lock */
8115 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
8117 bnx2x_undi_int_disable(bp, func);
8119 /* close input traffic and wait for it */
8120 /* Do not rcv packets to BRB */
8122 (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_DRV_MASK :
8123 NIG_REG_LLH0_BRB1_DRV_MASK), 0x0);
8124 /* Do not direct rcv packets that are not for MCP to the BRB */
8127 (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_NOT_MCP :
8128 NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
8131 (BP_PORT(bp) ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
8132 MISC_REG_AEU_MASK_ATTN_FUNC_0), 0);
8135 /* save NIG port swap info */
8136 swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
8137 swap_en = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
8140 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
8143 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
8145 /* take the NIG out of reset and restore swap values */
8147 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
8148 MISC_REGISTERS_RESET_REG_1_RST_NIG);
8149 REG_WR(bp, NIG_REG_PORT_SWAP, swap_val);
8150 REG_WR(bp, NIG_REG_STRAP_OVERRIDE, swap_en);
8152 /* send unload done to the MCP */
8153 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
8155 /* restore our func and fw_seq */
8158 (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
8159 DRV_MSG_SEQ_NUMBER_MASK);
8162 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
8166 static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
8168 u32 val, val2, val3, val4, id;
8171 /* Get the chip revision id and number. */
8172 /* chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3 */
8173 val = REG_RD(bp, MISC_REG_CHIP_NUM);
8174 id = ((val & 0xffff) << 16);
8175 val = REG_RD(bp, MISC_REG_CHIP_REV);
8176 id |= ((val & 0xf) << 12);
8177 val = REG_RD(bp, MISC_REG_CHIP_METAL);
8178 id |= ((val & 0xff) << 4);
8179 val = REG_RD(bp, MISC_REG_BOND_ID);
8181 bp->common.chip_id = id;
8182 bp->link_params.chip_id = bp->common.chip_id;
8183 BNX2X_DEV_INFO("chip ID is 0x%x\n", id);
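/* Worked example (annotation, not driver code): per the layout comment
 * above, a hypothetical part with chip num 0x164e, rev 0x0, metal 0x00
 * and bond_id 0x0 reports id = (0x164e << 16) | (0x0 << 12) |
 * (0x00 << 4) | 0x0 = 0x164e0000. */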
8185 val = (REG_RD(bp, 0x2874) & 0x55);
8186 if ((bp->common.chip_id & 0x1) ||
8187 (CHIP_IS_E1(bp) && val) || (CHIP_IS_E1H(bp) && (val == 0x55))) {
8188 bp->flags |= ONE_PORT_FLAG;
8189 BNX2X_DEV_INFO("single port device\n");
8192 val = REG_RD(bp, MCP_REG_MCPR_NVM_CFG4);
8193 bp->common.flash_size = (NVRAM_1MB_SIZE <<
8194 (val & MCPR_NVM_CFG4_FLASH_SIZE));
8195 BNX2X_DEV_INFO("flash_size 0x%x (%d)\n",
8196 bp->common.flash_size, bp->common.flash_size);
8198 bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
8199 bp->common.shmem2_base = REG_RD(bp, MISC_REG_GENERIC_CR_0);
8200 bp->link_params.shmem_base = bp->common.shmem_base;
8201 BNX2X_DEV_INFO("shmem offset 0x%x shmem2 offset 0x%x\n",
8202 bp->common.shmem_base, bp->common.shmem2_base);
8204 if (!bp->common.shmem_base ||
8205 (bp->common.shmem_base < 0xA0000) ||
8206 (bp->common.shmem_base >= 0xC0000)) {
8207 BNX2X_DEV_INFO("MCP not active\n");
8208 bp->flags |= NO_MCP_FLAG;
8212 val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
8213 if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
8214 != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
8215 BNX2X_ERR("BAD MCP validity signature\n");
8217 bp->common.hw_config = SHMEM_RD(bp, dev_info.shared_hw_config.config);
8218 BNX2X_DEV_INFO("hw_config 0x%08x\n", bp->common.hw_config);
8220 bp->link_params.hw_led_mode = ((bp->common.hw_config &
8221 SHARED_HW_CFG_LED_MODE_MASK) >>
8222 SHARED_HW_CFG_LED_MODE_SHIFT);
8224 bp->link_params.feature_config_flags = 0;
8225 val = SHMEM_RD(bp, dev_info.shared_feature_config.config);
8226 if (val & SHARED_FEAT_CFG_OVERRIDE_PREEMPHASIS_CFG_ENABLED)
8227 bp->link_params.feature_config_flags |=
8228 FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
8230 bp->link_params.feature_config_flags &=
8231 ~FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
8233 val = SHMEM_RD(bp, dev_info.bc_rev) >> 8;
8234 bp->common.bc_ver = val;
8235 BNX2X_DEV_INFO("bc_ver %X\n", val);
8236 if (val < BNX2X_BC_VER) {
8237 /* for now only warn;
8238 * later we might need to enforce this */
8239 BNX2X_ERR("This driver needs bc_ver %X but found %X,"
8240 " please upgrade BC\n", BNX2X_BC_VER, val);
8242 bp->link_params.feature_config_flags |=
8243 (val >= REQ_BC_VER_4_VRFY_OPT_MDL) ?
8244 FEATURE_CONFIG_BC_SUPPORTS_OPT_MDL_VRFY : 0;
8246 if (BP_E1HVN(bp) == 0) {
8247 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_PMC, &pmc);
8248 bp->flags |= (pmc & PCI_PM_CAP_PME_D3cold) ? 0 : NO_WOL_FLAG;
8250 /* no WOL capability for E1HVN != 0 */
8251 bp->flags |= NO_WOL_FLAG;
8253 BNX2X_DEV_INFO("%sWoL capable\n",
8254 (bp->flags & NO_WOL_FLAG) ? "not " : "");
8256 val = SHMEM_RD(bp, dev_info.shared_hw_config.part_num);
8257 val2 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[4]);
8258 val3 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[8]);
8259 val4 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[12]);
8261 printk(KERN_INFO PFX "part number %X-%X-%X-%X\n",
8262 val, val2, val3, val4);
8265 static void __devinit bnx2x_link_settings_supported(struct bnx2x *bp,
8268 int port = BP_PORT(bp);
8271 switch (switch_cfg) {
8273 BNX2X_DEV_INFO("switch_cfg 0x%x (1G)\n", switch_cfg);
8276 SERDES_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
8277 switch (ext_phy_type) {
8278 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT:
8279 BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
8282 bp->port.supported |= (SUPPORTED_10baseT_Half |
8283 SUPPORTED_10baseT_Full |
8284 SUPPORTED_100baseT_Half |
8285 SUPPORTED_100baseT_Full |
8286 SUPPORTED_1000baseT_Full |
8287 SUPPORTED_2500baseX_Full |
8292 SUPPORTED_Asym_Pause);
8295 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_BCM5482:
8296 BNX2X_DEV_INFO("ext_phy_type 0x%x (5482)\n",
8299 bp->port.supported |= (SUPPORTED_10baseT_Half |
8300 SUPPORTED_10baseT_Full |
8301 SUPPORTED_100baseT_Half |
8302 SUPPORTED_100baseT_Full |
8303 SUPPORTED_1000baseT_Full |
8308 SUPPORTED_Asym_Pause);
8312 BNX2X_ERR("NVRAM config error. "
8313 "BAD SerDes ext_phy_config 0x%x\n",
8314 bp->link_params.ext_phy_config);
8318 bp->port.phy_addr = REG_RD(bp, NIG_REG_SERDES0_CTRL_PHY_ADDR +
8320 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
8323 case SWITCH_CFG_10G:
8324 BNX2X_DEV_INFO("switch_cfg 0x%x (10G)\n", switch_cfg);
8327 XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
8328 switch (ext_phy_type) {
8329 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
8330 BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
8333 bp->port.supported |= (SUPPORTED_10baseT_Half |
8334 SUPPORTED_10baseT_Full |
8335 SUPPORTED_100baseT_Half |
8336 SUPPORTED_100baseT_Full |
8337 SUPPORTED_1000baseT_Full |
8338 SUPPORTED_2500baseX_Full |
8339 SUPPORTED_10000baseT_Full |
8344 SUPPORTED_Asym_Pause);
8347 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
8348 BNX2X_DEV_INFO("ext_phy_type 0x%x (8072)\n",
8351 bp->port.supported |= (SUPPORTED_10000baseT_Full |
8352 SUPPORTED_1000baseT_Full |
8356 SUPPORTED_Asym_Pause);
8359 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
8360 BNX2X_DEV_INFO("ext_phy_type 0x%x (8073)\n",
8363 bp->port.supported |= (SUPPORTED_10000baseT_Full |
8364 SUPPORTED_2500baseX_Full |
8365 SUPPORTED_1000baseT_Full |
8369 SUPPORTED_Asym_Pause);
8372 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
8373 BNX2X_DEV_INFO("ext_phy_type 0x%x (8705)\n",
8376 bp->port.supported |= (SUPPORTED_10000baseT_Full |
8379 SUPPORTED_Asym_Pause);
8382 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
8383 BNX2X_DEV_INFO("ext_phy_type 0x%x (8706)\n",
8386 bp->port.supported |= (SUPPORTED_10000baseT_Full |
8387 SUPPORTED_1000baseT_Full |
8390 SUPPORTED_Asym_Pause);
8393 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
8394 BNX2X_DEV_INFO("ext_phy_type 0x%x (8726)\n",
8397 bp->port.supported |= (SUPPORTED_10000baseT_Full |
8398 SUPPORTED_1000baseT_Full |
8402 SUPPORTED_Asym_Pause);
8405 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
8406 BNX2X_DEV_INFO("ext_phy_type 0x%x (8727)\n",
8409 bp->port.supported |= (SUPPORTED_10000baseT_Full |
8410 SUPPORTED_1000baseT_Full |
8414 SUPPORTED_Asym_Pause);
8417 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
8418 BNX2X_DEV_INFO("ext_phy_type 0x%x (SFX7101)\n",
8421 bp->port.supported |= (SUPPORTED_10000baseT_Full |
8425 SUPPORTED_Asym_Pause);
8428 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481:
8429 BNX2X_DEV_INFO("ext_phy_type 0x%x (BCM8481)\n",
8432 bp->port.supported |= (SUPPORTED_10baseT_Half |
8433 SUPPORTED_10baseT_Full |
8434 SUPPORTED_100baseT_Half |
8435 SUPPORTED_100baseT_Full |
8436 SUPPORTED_1000baseT_Full |
8437 SUPPORTED_10000baseT_Full |
8441 SUPPORTED_Asym_Pause);
8444 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
8445 BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
8446 bp->link_params.ext_phy_config);
8450 BNX2X_ERR("NVRAM config error. "
8451 "BAD XGXS ext_phy_config 0x%x\n",
8452 bp->link_params.ext_phy_config);
8456 bp->port.phy_addr = REG_RD(bp, NIG_REG_XGXS0_CTRL_PHY_ADDR +
8458 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
8463 BNX2X_ERR("BAD switch_cfg link_config 0x%x\n",
8464 bp->port.link_config);
8467 bp->link_params.phy_addr = bp->port.phy_addr;
8469 /* mask what we support according to speed_cap_mask */
8470 if (!(bp->link_params.speed_cap_mask &
8471 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF))
8472 bp->port.supported &= ~SUPPORTED_10baseT_Half;
8474 if (!(bp->link_params.speed_cap_mask &
8475 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL))
8476 bp->port.supported &= ~SUPPORTED_10baseT_Full;
8478 if (!(bp->link_params.speed_cap_mask &
8479 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF))
8480 bp->port.supported &= ~SUPPORTED_100baseT_Half;
8482 if (!(bp->link_params.speed_cap_mask &
8483 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL))
8484 bp->port.supported &= ~SUPPORTED_100baseT_Full;
8486 if (!(bp->link_params.speed_cap_mask &
8487 PORT_HW_CFG_SPEED_CAPABILITY_D0_1G))
8488 bp->port.supported &= ~(SUPPORTED_1000baseT_Half |
8489 SUPPORTED_1000baseT_Full);
8491 if (!(bp->link_params.speed_cap_mask &
8492 PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G))
8493 bp->port.supported &= ~SUPPORTED_2500baseX_Full;
8495 if (!(bp->link_params.speed_cap_mask &
8496 PORT_HW_CFG_SPEED_CAPABILITY_D0_10G))
8497 bp->port.supported &= ~SUPPORTED_10000baseT_Full;
8499 BNX2X_DEV_INFO("supported 0x%x\n", bp->port.supported);
8502 static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
8504 bp->link_params.req_duplex = DUPLEX_FULL;
8506 switch (bp->port.link_config & PORT_FEATURE_LINK_SPEED_MASK) {
8507 case PORT_FEATURE_LINK_SPEED_AUTO:
8508 if (bp->port.supported & SUPPORTED_Autoneg) {
8509 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
8510 bp->port.advertising = bp->port.supported;
8513 XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
8515 if ((ext_phy_type ==
8516 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705) ||
8518 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706)) {
8519 /* force 10G, no AN */
8520 bp->link_params.req_line_speed = SPEED_10000;
8521 bp->port.advertising =
8522 (ADVERTISED_10000baseT_Full |
8526 BNX2X_ERR("NVRAM config error. "
8527 "Invalid link_config 0x%x"
8528 " Autoneg not supported\n",
8529 bp->port.link_config);
8534 case PORT_FEATURE_LINK_SPEED_10M_FULL:
8535 if (bp->port.supported & SUPPORTED_10baseT_Full) {
8536 bp->link_params.req_line_speed = SPEED_10;
8537 bp->port.advertising = (ADVERTISED_10baseT_Full |
8540 BNX2X_ERR("NVRAM config error. "
8541 "Invalid link_config 0x%x"
8542 " speed_cap_mask 0x%x\n",
8543 bp->port.link_config,
8544 bp->link_params.speed_cap_mask);
8549 case PORT_FEATURE_LINK_SPEED_10M_HALF:
8550 if (bp->port.supported & SUPPORTED_10baseT_Half) {
8551 bp->link_params.req_line_speed = SPEED_10;
8552 bp->link_params.req_duplex = DUPLEX_HALF;
8553 bp->port.advertising = (ADVERTISED_10baseT_Half |
8556 BNX2X_ERR("NVRAM config error. "
8557 "Invalid link_config 0x%x"
8558 " speed_cap_mask 0x%x\n",
8559 bp->port.link_config,
8560 bp->link_params.speed_cap_mask);
8565 case PORT_FEATURE_LINK_SPEED_100M_FULL:
8566 if (bp->port.supported & SUPPORTED_100baseT_Full) {
8567 bp->link_params.req_line_speed = SPEED_100;
8568 bp->port.advertising = (ADVERTISED_100baseT_Full |
8571 BNX2X_ERR("NVRAM config error. "
8572 "Invalid link_config 0x%x"
8573 " speed_cap_mask 0x%x\n",
8574 bp->port.link_config,
8575 bp->link_params.speed_cap_mask);
8580 case PORT_FEATURE_LINK_SPEED_100M_HALF:
8581 if (bp->port.supported & SUPPORTED_100baseT_Half) {
8582 bp->link_params.req_line_speed = SPEED_100;
8583 bp->link_params.req_duplex = DUPLEX_HALF;
8584 bp->port.advertising = (ADVERTISED_100baseT_Half |
8587 BNX2X_ERR("NVRAM config error. "
8588 "Invalid link_config 0x%x"
8589 " speed_cap_mask 0x%x\n",
8590 bp->port.link_config,
8591 bp->link_params.speed_cap_mask);
8596 case PORT_FEATURE_LINK_SPEED_1G:
8597 if (bp->port.supported & SUPPORTED_1000baseT_Full) {
8598 bp->link_params.req_line_speed = SPEED_1000;
8599 bp->port.advertising = (ADVERTISED_1000baseT_Full |
8602 BNX2X_ERR("NVRAM config error. "
8603 "Invalid link_config 0x%x"
8604 " speed_cap_mask 0x%x\n",
8605 bp->port.link_config,
8606 bp->link_params.speed_cap_mask);
8611 case PORT_FEATURE_LINK_SPEED_2_5G:
8612 if (bp->port.supported & SUPPORTED_2500baseX_Full) {
8613 bp->link_params.req_line_speed = SPEED_2500;
8614 bp->port.advertising = (ADVERTISED_2500baseX_Full |
8617 BNX2X_ERR("NVRAM config error. "
8618 "Invalid link_config 0x%x"
8619 " speed_cap_mask 0x%x\n",
8620 bp->port.link_config,
8621 bp->link_params.speed_cap_mask);
8626 case PORT_FEATURE_LINK_SPEED_10G_CX4:
8627 case PORT_FEATURE_LINK_SPEED_10G_KX4:
8628 case PORT_FEATURE_LINK_SPEED_10G_KR:
8629 if (bp->port.supported & SUPPORTED_10000baseT_Full) {
8630 bp->link_params.req_line_speed = SPEED_10000;
8631 bp->port.advertising = (ADVERTISED_10000baseT_Full |
8634 BNX2X_ERR("NVRAM config error. "
8635 "Invalid link_config 0x%x"
8636 " speed_cap_mask 0x%x\n",
8637 bp->port.link_config,
8638 bp->link_params.speed_cap_mask);
8644 BNX2X_ERR("NVRAM config error. "
8645 "BAD link speed link_config 0x%x\n",
8646 bp->port.link_config);
8647 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
8648 bp->port.advertising = bp->port.supported;
8652 bp->link_params.req_flow_ctrl = (bp->port.link_config &
8653 PORT_FEATURE_FLOW_CONTROL_MASK);
8654 if ((bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO) &&
8655 !(bp->port.supported & SUPPORTED_Autoneg))
8656 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_NONE;
8658 BNX2X_DEV_INFO("req_line_speed %d req_duplex %d req_flow_ctrl 0x%x"
8659 " advertising 0x%x\n",
8660 bp->link_params.req_line_speed,
8661 bp->link_params.req_duplex,
8662 bp->link_params.req_flow_ctrl, bp->port.advertising);
8665 static void __devinit bnx2x_set_mac_buf(u8 *mac_buf, u32 mac_lo, u16 mac_hi)
8667 mac_hi = cpu_to_be16(mac_hi);
8668 mac_lo = cpu_to_be32(mac_lo);
8669 memcpy(mac_buf, &mac_hi, sizeof(mac_hi));
8670 memcpy(mac_buf + sizeof(mac_hi), &mac_lo, sizeof(mac_lo));
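/*
 * Note on the packing above: shmem keeps the station address as a 16-bit
 * "upper" word and a 32-bit "lower" word; converting both to big-endian
 * before the copies lays the six bytes out in transmission order.
 * Illustrative values only: mac_hi = 0x0010, mac_lo = 0x18123456 gives
 * the address 00:10:18:12:34:56.
 */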
8673 static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp)
8675 int port = BP_PORT(bp);
8681 bp->link_params.bp = bp;
8682 bp->link_params.port = port;
8684 bp->link_params.lane_config =
8685 SHMEM_RD(bp, dev_info.port_hw_config[port].lane_config);
8686 bp->link_params.ext_phy_config =
8688 dev_info.port_hw_config[port].external_phy_config);
8689 /* BCM8727_NOC => BCM8727, no over-current */
8690 if (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config) ==
8691 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727_NOC) {
8692 bp->link_params.ext_phy_config &=
8693 ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
8694 bp->link_params.ext_phy_config |=
8695 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727;
8696 bp->link_params.feature_config_flags |=
8697 FEATURE_CONFIG_BCM8727_NOC;
8700 bp->link_params.speed_cap_mask =
8702 dev_info.port_hw_config[port].speed_capability_mask);
8704 bp->port.link_config =
8705 SHMEM_RD(bp, dev_info.port_feature_config[port].link_config);
8707 /* Get the XGXS Rx and Tx config for all 4 lanes */
8708 for (i = 0; i < 2; i++) {
8710 dev_info.port_hw_config[port].xgxs_config_rx[i<<1]);
8711 bp->link_params.xgxs_config_rx[i << 1] = ((val>>16) & 0xffff);
8712 bp->link_params.xgxs_config_rx[(i << 1) + 1] = (val & 0xffff);
8715 dev_info.port_hw_config[port].xgxs_config_tx[i<<1]);
8716 bp->link_params.xgxs_config_tx[i << 1] = ((val>>16) & 0xffff);
8717 bp->link_params.xgxs_config_tx[(i << 1) + 1] = (val & 0xffff);
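/*
 * Each 32-bit word read above packs two per-lane 16-bit settings: the
 * high half goes to the even lane index (i << 1) and the low half to the
 * odd one, so two reads per direction cover all 4 lanes.
 */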
8720 /* If the device is capable of WoL, set the default state according
8721 * to the HW
8722 */
8723 config = SHMEM_RD(bp, dev_info.port_feature_config[port].config);
8724 bp->wol = (!(bp->flags & NO_WOL_FLAG) &&
8725 (config & PORT_FEATURE_WOL_ENABLED));
8727 BNX2X_DEV_INFO("lane_config 0x%08x ext_phy_config 0x%08x"
8728 " speed_cap_mask 0x%08x link_config 0x%08x\n",
8729 bp->link_params.lane_config,
8730 bp->link_params.ext_phy_config,
8731 bp->link_params.speed_cap_mask, bp->port.link_config);
8733 bp->link_params.switch_cfg |= (bp->port.link_config &
8734 PORT_FEATURE_CONNECTED_SWITCH_MASK);
8735 bnx2x_link_settings_supported(bp, bp->link_params.switch_cfg);
8737 bnx2x_link_settings_requested(bp);
8739 /*
8740 * If connected directly, work with the internal PHY; otherwise, work
8741 * with the external PHY
8742 */
8743 ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
8744 if (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT)
8745 bp->mdio.prtad = bp->link_params.phy_addr;
8747 else if ((ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE) &&
8748 (ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN))
8750 XGXS_EXT_PHY_ADDR(bp->link_params.ext_phy_config);
8752 val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper);
8753 val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower);
8754 bnx2x_set_mac_buf(bp->dev->dev_addr, val, val2);
8755 memcpy(bp->link_params.mac_addr, bp->dev->dev_addr, ETH_ALEN);
8756 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
8759 val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].iscsi_mac_upper);
8760 val = SHMEM_RD(bp, dev_info.port_hw_config[port].iscsi_mac_lower);
8761 bnx2x_set_mac_buf(bp->iscsi_mac, val, val2);
8765 static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
8767 int func = BP_FUNC(bp);
8771 bnx2x_get_common_hwinfo(bp);
8775 if (CHIP_IS_E1H(bp)) {
8777 SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
8779 val = (SHMEM_RD(bp, mf_cfg.func_mf_config[FUNC_0].e1hov_tag) &
8780 FUNC_MF_CFG_E1HOV_TAG_MASK);
8781 if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT)
8783 BNX2X_DEV_INFO("%s function mode\n",
8784 IS_E1HMF(bp) ? "multi" : "single");
8787 val = (SHMEM_RD(bp, mf_cfg.func_mf_config[func].
8789 FUNC_MF_CFG_E1HOV_TAG_MASK);
8790 if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
8792 BNX2X_DEV_INFO("E1HOV for func %d is %d "
8794 func, bp->e1hov, bp->e1hov);
8796 BNX2X_ERR("!!! No valid E1HOV for func %d,"
8797 " aborting\n", func);
8802 BNX2X_ERR("!!! VN %d in single function mode,"
8803 " aborting\n", BP_E1HVN(bp));
8809 if (!BP_NOMCP(bp)) {
8810 bnx2x_get_port_hwinfo(bp);
8812 bp->fw_seq = (SHMEM_RD(bp, func_mb[func].drv_mb_header) &
8813 DRV_MSG_SEQ_NUMBER_MASK);
8814 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
8818 val2 = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_upper);
8819 val = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_lower);
8820 if ((val2 != FUNC_MF_CFG_UPPERMAC_DEFAULT) &&
8821 (val != FUNC_MF_CFG_LOWERMAC_DEFAULT)) {
8822 bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
8823 bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
8824 bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
8825 bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
8826 bp->dev->dev_addr[4] = (u8)(val >> 8 & 0xff);
8827 bp->dev->dev_addr[5] = (u8)(val & 0xff);
8828 memcpy(bp->link_params.mac_addr, bp->dev->dev_addr,
8830 memcpy(bp->dev->perm_addr, bp->dev->dev_addr,
8838 /* only supposed to happen on emulation/FPGA */
8839 BNX2X_ERR("warning random MAC workaround active\n");
8840 random_ether_addr(bp->dev->dev_addr);
8841 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
8847 static int __devinit bnx2x_init_bp(struct bnx2x *bp)
8849 int func = BP_FUNC(bp);
8853 /* Disable interrupt handling until HW is initialized */
8854 atomic_set(&bp->intr_sem, 1);
8855 smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */
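/*
 * The barrier above is meant to pair with the interrupt handler's read
 * of intr_sem: an interrupt that fires this early sees intr_sem == 1 and
 * returns without touching the still-uninitialized HW state (this
 * pairing is inferred from the comment, not shown in this excerpt).
 */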
8857 mutex_init(&bp->port.phy_mutex);
8859 INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task);
8860 INIT_WORK(&bp->reset_task, bnx2x_reset_task);
8862 rc = bnx2x_get_hwinfo(bp);
8864 /* need to reset the chip if UNDI was active */
8866 bnx2x_undi_unload(bp);
8868 if (CHIP_REV_IS_FPGA(bp))
8869 printk(KERN_ERR PFX "FPGA detected\n");
8871 if (BP_NOMCP(bp) && (func == 0))
8873 "MCP disabled, must load devices in order!\n");
8875 /* Set multi queue mode */
8876 if ((multi_mode != ETH_RSS_MODE_DISABLED) &&
8877 ((int_mode == INT_MODE_INTx) || (int_mode == INT_MODE_MSI))) {
8879 "Multi disabled since int_mode requested is not MSI-X\n");
8880 multi_mode = ETH_RSS_MODE_DISABLED;
8882 bp->multi_mode = multi_mode;
8887 bp->flags &= ~TPA_ENABLE_FLAG;
8888 bp->dev->features &= ~NETIF_F_LRO;
8890 bp->flags |= TPA_ENABLE_FLAG;
8891 bp->dev->features |= NETIF_F_LRO;
8895 bp->dropless_fc = 0;
8897 bp->dropless_fc = dropless_fc;
8901 bp->tx_ring_size = MAX_TX_AVAIL;
8902 bp->rx_ring_size = MAX_RX_AVAIL;
8909 timer_interval = (CHIP_REV_IS_SLOW(bp) ? 5*HZ : HZ);
8910 bp->current_interval = (poll ? poll : timer_interval);
8912 init_timer(&bp->timer);
8913 bp->timer.expires = jiffies + bp->current_interval;
8914 bp->timer.data = (unsigned long) bp;
8915 bp->timer.function = bnx2x_timer;
8920 /*
8921 * ethtool service functions
8922 */
8924 /* All ethtool functions called with rtnl_lock */
8926 static int bnx2x_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
8928 struct bnx2x *bp = netdev_priv(dev);
8930 cmd->supported = bp->port.supported;
8931 cmd->advertising = bp->port.advertising;
8933 if (netif_carrier_ok(dev)) {
8934 cmd->speed = bp->link_vars.line_speed;
8935 cmd->duplex = bp->link_vars.duplex;
8937 cmd->speed = bp->link_params.req_line_speed;
8938 cmd->duplex = bp->link_params.req_duplex;
8943 vn_max_rate = ((bp->mf_config & FUNC_MF_CFG_MAX_BW_MASK) >>
8944 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
8945 if (vn_max_rate < cmd->speed)
8946 cmd->speed = vn_max_rate;
8949 if (bp->link_params.switch_cfg == SWITCH_CFG_10G) {
8951 XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
8953 switch (ext_phy_type) {
8954 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
8955 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
8956 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
8957 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
8958 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
8959 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
8960 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
8961 cmd->port = PORT_FIBRE;
8964 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
8965 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481:
8966 cmd->port = PORT_TP;
8969 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
8970 BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
8971 bp->link_params.ext_phy_config);
8975 DP(NETIF_MSG_LINK, "BAD XGXS ext_phy_config 0x%x\n",
8976 bp->link_params.ext_phy_config);
8980 cmd->port = PORT_TP;
8982 cmd->phy_address = bp->mdio.prtad;
8983 cmd->transceiver = XCVR_INTERNAL;
8985 if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
8986 cmd->autoneg = AUTONEG_ENABLE;
8988 cmd->autoneg = AUTONEG_DISABLE;
8993 DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
8994 DP_LEVEL " supported 0x%x advertising 0x%x speed %d\n"
8995 DP_LEVEL " duplex %d port %d phy_address %d transceiver %d\n"
8996 DP_LEVEL " autoneg %d maxtxpkt %d maxrxpkt %d\n",
8997 cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
8998 cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
8999 cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);
9004 static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
9006 struct bnx2x *bp = netdev_priv(dev);
9012 DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
9013 DP_LEVEL " supported 0x%x advertising 0x%x speed %d\n"
9014 DP_LEVEL " duplex %d port %d phy_address %d transceiver %d\n"
9015 DP_LEVEL " autoneg %d maxtxpkt %d maxrxpkt %d\n",
9016 cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
9017 cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
9018 cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);
9020 if (cmd->autoneg == AUTONEG_ENABLE) {
9021 if (!(bp->port.supported & SUPPORTED_Autoneg)) {
9022 DP(NETIF_MSG_LINK, "Autoneg not supported\n");
9026 /* advertise the requested speed and duplex if supported */
9027 cmd->advertising &= bp->port.supported;
9029 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
9030 bp->link_params.req_duplex = DUPLEX_FULL;
9031 bp->port.advertising |= (ADVERTISED_Autoneg |
9034 } else { /* forced speed */
9035 /* advertise the requested speed and duplex if supported */
9036 switch (cmd->speed) {
9038 if (cmd->duplex == DUPLEX_FULL) {
9039 if (!(bp->port.supported &
9040 SUPPORTED_10baseT_Full)) {
9042 "10M full not supported\n");
9046 advertising = (ADVERTISED_10baseT_Full |
9049 if (!(bp->port.supported &
9050 SUPPORTED_10baseT_Half)) {
9052 "10M half not supported\n");
9056 advertising = (ADVERTISED_10baseT_Half |
9062 if (cmd->duplex == DUPLEX_FULL) {
9063 if (!(bp->port.supported &
9064 SUPPORTED_100baseT_Full)) {
9066 "100M full not supported\n");
9070 advertising = (ADVERTISED_100baseT_Full |
9073 if (!(bp->port.supported &
9074 SUPPORTED_100baseT_Half)) {
9076 "100M half not supported\n");
9080 advertising = (ADVERTISED_100baseT_Half |
9086 if (cmd->duplex != DUPLEX_FULL) {
9087 DP(NETIF_MSG_LINK, "1G half not supported\n");
9091 if (!(bp->port.supported & SUPPORTED_1000baseT_Full)) {
9092 DP(NETIF_MSG_LINK, "1G full not supported\n");
9096 advertising = (ADVERTISED_1000baseT_Full |
9101 if (cmd->duplex != DUPLEX_FULL) {
9103 "2.5G half not supported\n");
9107 if (!(bp->port.supported & SUPPORTED_2500baseX_Full)) {
9109 "2.5G full not supported\n");
9113 advertising = (ADVERTISED_2500baseX_Full |
9118 if (cmd->duplex != DUPLEX_FULL) {
9119 DP(NETIF_MSG_LINK, "10G half not supported\n");
9123 if (!(bp->port.supported & SUPPORTED_10000baseT_Full)) {
9124 DP(NETIF_MSG_LINK, "10G full not supported\n");
9128 advertising = (ADVERTISED_10000baseT_Full |
9133 DP(NETIF_MSG_LINK, "Unsupported speed\n");
9137 bp->link_params.req_line_speed = cmd->speed;
9138 bp->link_params.req_duplex = cmd->duplex;
9139 bp->port.advertising = advertising;
9142 DP(NETIF_MSG_LINK, "req_line_speed %d\n"
9143 DP_LEVEL " req_duplex %d advertising 0x%x\n",
9144 bp->link_params.req_line_speed, bp->link_params.req_duplex,
9145 bp->port.advertising);
9147 if (netif_running(dev)) {
9148 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
9155 #define IS_E1_ONLINE(info) (((info) & RI_E1_ONLINE) == RI_E1_ONLINE)
9156 #define IS_E1H_ONLINE(info) (((info) & RI_E1H_ONLINE) == RI_E1H_ONLINE)
9158 static int bnx2x_get_regs_len(struct net_device *dev)
9160 struct bnx2x *bp = netdev_priv(dev);
9161 int regdump_len = 0;
9164 if (CHIP_IS_E1(bp)) {
9165 for (i = 0; i < REGS_COUNT; i++)
9166 if (IS_E1_ONLINE(reg_addrs[i].info))
9167 regdump_len += reg_addrs[i].size;
9169 for (i = 0; i < WREGS_COUNT_E1; i++)
9170 if (IS_E1_ONLINE(wreg_addrs_e1[i].info))
9171 regdump_len += wreg_addrs_e1[i].size *
9172 (1 + wreg_addrs_e1[i].read_regs_count);
9175 for (i = 0; i < REGS_COUNT; i++)
9176 if (IS_E1H_ONLINE(reg_addrs[i].info))
9177 regdump_len += reg_addrs[i].size;
9179 for (i = 0; i < WREGS_COUNT_E1H; i++)
9180 if (IS_E1H_ONLINE(wreg_addrs_e1h[i].info))
9181 regdump_len += wreg_addrs_e1h[i].size *
9182 (1 + wreg_addrs_e1h[i].read_regs_count);
9185 regdump_len += sizeof(struct dump_hdr);
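/*
 * Resulting dump layout (as accounted above): one struct dump_hdr, then
 * every register block marked online for the running chip rev; wide-read
 * blocks contribute size * (1 + read_regs_count) dwords, each entry
 * expanding into the register itself plus its associated reads (that
 * interpretation of read_regs_count is assumed from the arithmetic).
 */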
9190 static void bnx2x_get_regs(struct net_device *dev,
9191 struct ethtool_regs *regs, void *_p)
9194 struct bnx2x *bp = netdev_priv(dev);
9195 struct dump_hdr dump_hdr = {0};
9198 memset(p, 0, regs->len);
9200 if (!netif_running(bp->dev))
9203 dump_hdr.hdr_size = (sizeof(struct dump_hdr) / 4) - 1;
9204 dump_hdr.dump_sign = dump_sign_all;
9205 dump_hdr.xstorm_waitp = REG_RD(bp, XSTORM_WAITP_ADDR);
9206 dump_hdr.tstorm_waitp = REG_RD(bp, TSTORM_WAITP_ADDR);
9207 dump_hdr.ustorm_waitp = REG_RD(bp, USTORM_WAITP_ADDR);
9208 dump_hdr.cstorm_waitp = REG_RD(bp, CSTORM_WAITP_ADDR);
9209 dump_hdr.info = CHIP_IS_E1(bp) ? RI_E1_ONLINE : RI_E1H_ONLINE;
9211 memcpy(p, &dump_hdr, sizeof(struct dump_hdr));
9212 p += dump_hdr.hdr_size + 1;
9214 if (CHIP_IS_E1(bp)) {
9215 for (i = 0; i < REGS_COUNT; i++)
9216 if (IS_E1_ONLINE(reg_addrs[i].info))
9217 for (j = 0; j < reg_addrs[i].size; j++)
9219 reg_addrs[i].addr + j*4);
9222 for (i = 0; i < REGS_COUNT; i++)
9223 if (IS_E1H_ONLINE(reg_addrs[i].info))
9224 for (j = 0; j < reg_addrs[i].size; j++)
9226 reg_addrs[i].addr + j*4);
9230 #define PHY_FW_VER_LEN 10
9232 static void bnx2x_get_drvinfo(struct net_device *dev,
9233 struct ethtool_drvinfo *info)
9235 struct bnx2x *bp = netdev_priv(dev);
9236 u8 phy_fw_ver[PHY_FW_VER_LEN];
9238 strcpy(info->driver, DRV_MODULE_NAME);
9239 strcpy(info->version, DRV_MODULE_VERSION);
9241 phy_fw_ver[0] = '\0';
9243 bnx2x_acquire_phy_lock(bp);
9244 bnx2x_get_ext_phy_fw_version(&bp->link_params,
9245 (bp->state != BNX2X_STATE_CLOSED),
9246 phy_fw_ver, PHY_FW_VER_LEN);
9247 bnx2x_release_phy_lock(bp);
9250 snprintf(info->fw_version, 32, "BC:%d.%d.%d%s%s",
9251 (bp->common.bc_ver & 0xff0000) >> 16,
9252 (bp->common.bc_ver & 0xff00) >> 8,
9253 (bp->common.bc_ver & 0xff),
9254 ((phy_fw_ver[0] != '\0') ? " PHY:" : ""), phy_fw_ver);
9255 strcpy(info->bus_info, pci_name(bp->pdev));
9256 info->n_stats = BNX2X_NUM_STATS;
9257 info->testinfo_len = BNX2X_NUM_TESTS;
9258 info->eedump_len = bp->common.flash_size;
9259 info->regdump_len = bnx2x_get_regs_len(dev);
9262 static void bnx2x_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
9264 struct bnx2x *bp = netdev_priv(dev);
9266 if (bp->flags & NO_WOL_FLAG) {
9270 wol->supported = WAKE_MAGIC;
9272 wol->wolopts = WAKE_MAGIC;
9276 memset(&wol->sopass, 0, sizeof(wol->sopass));
9279 static int bnx2x_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
9281 struct bnx2x *bp = netdev_priv(dev);
9283 if (wol->wolopts & ~WAKE_MAGIC)
9286 if (wol->wolopts & WAKE_MAGIC) {
9287 if (bp->flags & NO_WOL_FLAG)
9297 static u32 bnx2x_get_msglevel(struct net_device *dev)
9299 struct bnx2x *bp = netdev_priv(dev);
9301 return bp->msglevel;
9304 static void bnx2x_set_msglevel(struct net_device *dev, u32 level)
9306 struct bnx2x *bp = netdev_priv(dev);
9308 if (capable(CAP_NET_ADMIN))
9309 bp->msglevel = level;
9312 static int bnx2x_nway_reset(struct net_device *dev)
9314 struct bnx2x *bp = netdev_priv(dev);
9319 if (netif_running(dev)) {
9320 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
9327 static u32 bnx2x_get_link(struct net_device *dev)
9329 struct bnx2x *bp = netdev_priv(dev);
9331 return bp->link_vars.link_up;
9334 static int bnx2x_get_eeprom_len(struct net_device *dev)
9336 struct bnx2x *bp = netdev_priv(dev);
9338 return bp->common.flash_size;
9341 static int bnx2x_acquire_nvram_lock(struct bnx2x *bp)
9343 int port = BP_PORT(bp);
9347 /* adjust timeout for emulation/FPGA */
9348 count = NVRAM_TIMEOUT_COUNT;
9349 if (CHIP_REV_IS_SLOW(bp))
9352 /* request access to nvram interface */
9353 REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
9354 (MCPR_NVM_SW_ARB_ARB_REQ_SET1 << port));
9356 for (i = 0; i < count*10; i++) {
9357 val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
9358 if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))
9364 if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))) {
9365 DP(BNX2X_MSG_NVM, "cannot get access to nvram interface\n");
9372 static int bnx2x_release_nvram_lock(struct bnx2x *bp)
9374 int port = BP_PORT(bp);
9378 /* adjust timeout for emulation/FPGA */
9379 count = NVRAM_TIMEOUT_COUNT;
9380 if (CHIP_REV_IS_SLOW(bp))
9383 /* relinquish nvram interface */
9384 REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
9385 (MCPR_NVM_SW_ARB_ARB_REQ_CLR1 << port));
9387 for (i = 0; i < count*10; i++) {
9388 val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
9389 if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)))
9395 if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)) {
9396 DP(BNX2X_MSG_NVM, "cannot free access to nvram interface\n");
9403 static void bnx2x_enable_nvram_access(struct bnx2x *bp)
9407 val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
9409 /* enable both bits, even on read */
9410 REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
9411 (val | MCPR_NVM_ACCESS_ENABLE_EN |
9412 MCPR_NVM_ACCESS_ENABLE_WR_EN));
9415 static void bnx2x_disable_nvram_access(struct bnx2x *bp)
9419 val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
9421 /* disable both bits, even after read */
9422 REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
9423 (val & ~(MCPR_NVM_ACCESS_ENABLE_EN |
9424 MCPR_NVM_ACCESS_ENABLE_WR_EN)));
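/*
 * NVRAM access discipline used by the helpers below, mirroring the code
 * above: take the per-port SW arbitration lock, set the access-enable
 * bits, issue dword commands bracketed by FIRST/LAST flags, then undo
 * both steps.  Minimal caller sketch:
 *
 *	if (bnx2x_acquire_nvram_lock(bp) == 0) {
 *		bnx2x_enable_nvram_access(bp);
 *		...dword reads/writes...
 *		bnx2x_disable_nvram_access(bp);
 *		bnx2x_release_nvram_lock(bp);
 *	}
 */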
9427 static int bnx2x_nvram_read_dword(struct bnx2x *bp, u32 offset, __be32 *ret_val,
9433 /* build the command word */
9434 cmd_flags |= MCPR_NVM_COMMAND_DOIT;
9436 /* need to clear DONE bit separately */
9437 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);
9439 /* address of the NVRAM to read from */
9440 REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
9441 (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));
9443 /* issue a read command */
9444 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);
9446 /* adjust timeout for emulation/FPGA */
9447 count = NVRAM_TIMEOUT_COUNT;
9448 if (CHIP_REV_IS_SLOW(bp))
9451 /* wait for completion */
9454 for (i = 0; i < count; i++) {
9456 val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
9458 if (val & MCPR_NVM_COMMAND_DONE) {
9459 val = REG_RD(bp, MCP_REG_MCPR_NVM_READ);
9460 /* we read NVRAM data in CPU order,
9461 * but ethtool expects an array of bytes;
9462 * converting to big-endian gives the correct byte order */
9463 *ret_val = cpu_to_be32(val);
9472 static int bnx2x_nvram_read(struct bnx2x *bp, u32 offset, u8 *ret_buf,
9479 if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
9481 "Invalid parameter: offset 0x%x buf_size 0x%x\n",
9486 if (offset + buf_size > bp->common.flash_size) {
9487 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
9488 " buf_size (0x%x) > flash_size (0x%x)\n",
9489 offset, buf_size, bp->common.flash_size);
9493 /* request access to nvram interface */
9494 rc = bnx2x_acquire_nvram_lock(bp);
9498 /* enable access to nvram interface */
9499 bnx2x_enable_nvram_access(bp);
9501 /* read the first word(s) */
9502 cmd_flags = MCPR_NVM_COMMAND_FIRST;
9503 while ((buf_size > sizeof(u32)) && (rc == 0)) {
9504 rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
9505 memcpy(ret_buf, &val, 4);
9507 /* advance to the next dword */
9508 offset += sizeof(u32);
9509 ret_buf += sizeof(u32);
9510 buf_size -= sizeof(u32);
9515 cmd_flags |= MCPR_NVM_COMMAND_LAST;
9516 rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
9517 memcpy(ret_buf, &val, 4);
9520 /* disable access to nvram interface */
9521 bnx2x_disable_nvram_access(bp);
9522 bnx2x_release_nvram_lock(bp);
9527 static int bnx2x_get_eeprom(struct net_device *dev,
9528 struct ethtool_eeprom *eeprom, u8 *eebuf)
9530 struct bnx2x *bp = netdev_priv(dev);
9533 if (!netif_running(dev))
9536 DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
9537 DP_LEVEL " magic 0x%x offset 0x%x (%d) len 0x%x (%d)\n",
9538 eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
9539 eeprom->len, eeprom->len);
9541 /* parameters already validated in ethtool_get_eeprom */
9543 rc = bnx2x_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
9548 static int bnx2x_nvram_write_dword(struct bnx2x *bp, u32 offset, u32 val,
9553 /* build the command word */
9554 cmd_flags |= MCPR_NVM_COMMAND_DOIT | MCPR_NVM_COMMAND_WR;
9556 /* need to clear DONE bit separately */
9557 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);
9559 /* write the data */
9560 REG_WR(bp, MCP_REG_MCPR_NVM_WRITE, val);
9562 /* address of the NVRAM to write to */
9563 REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
9564 (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));
9566 /* issue the write command */
9567 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);
9569 /* adjust timeout for emulation/FPGA */
9570 count = NVRAM_TIMEOUT_COUNT;
9571 if (CHIP_REV_IS_SLOW(bp))
9574 /* wait for completion */
9576 for (i = 0; i < count; i++) {
9578 val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
9579 if (val & MCPR_NVM_COMMAND_DONE) {
9588 #define BYTE_OFFSET(offset) (8 * (offset & 0x03))
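/*
 * BYTE_OFFSET() maps a byte address to its bit position inside the
 * containing dword.  Worked example: offset 0x7 -> (0x7 & 0x03) = 3 ->
 * shift of 24, so bnx2x_nvram_write1() below masks and replaces byte 3
 * of the aligned dword at 0x4.
 */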
9590 static int bnx2x_nvram_write1(struct bnx2x *bp, u32 offset, u8 *data_buf,
9598 if (offset + buf_size > bp->common.flash_size) {
9599 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
9600 " buf_size (0x%x) > flash_size (0x%x)\n",
9601 offset, buf_size, bp->common.flash_size);
9605 /* request access to nvram interface */
9606 rc = bnx2x_acquire_nvram_lock(bp);
9610 /* enable access to nvram interface */
9611 bnx2x_enable_nvram_access(bp);
9613 cmd_flags = (MCPR_NVM_COMMAND_FIRST | MCPR_NVM_COMMAND_LAST);
9614 align_offset = (offset & ~0x03);
9615 rc = bnx2x_nvram_read_dword(bp, align_offset, &val, cmd_flags);
9618 val &= ~(0xff << BYTE_OFFSET(offset));
9619 val |= (*data_buf << BYTE_OFFSET(offset));
9621 /* NVRAM data is returned as an array of bytes (big-endian);
9622 * convert it back to CPU order */
9623 val = be32_to_cpu(val);
9625 rc = bnx2x_nvram_write_dword(bp, align_offset, val,
9629 /* disable access to nvram interface */
9630 bnx2x_disable_nvram_access(bp);
9631 bnx2x_release_nvram_lock(bp);
9636 static int bnx2x_nvram_write(struct bnx2x *bp, u32 offset, u8 *data_buf,
9644 if (buf_size == 1) /* single-byte write, as issued by ethtool */
9645 return bnx2x_nvram_write1(bp, offset, data_buf, buf_size);
9647 if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
9649 "Invalid parameter: offset 0x%x buf_size 0x%x\n",
9654 if (offset + buf_size > bp->common.flash_size) {
9655 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
9656 " buf_size (0x%x) > flash_size (0x%x)\n",
9657 offset, buf_size, bp->common.flash_size);
9661 /* request access to nvram interface */
9662 rc = bnx2x_acquire_nvram_lock(bp);
9666 /* enable access to nvram interface */
9667 bnx2x_enable_nvram_access(bp);
9670 cmd_flags = MCPR_NVM_COMMAND_FIRST;
9671 while ((written_so_far < buf_size) && (rc == 0)) {
9672 if (written_so_far == (buf_size - sizeof(u32)))
9673 cmd_flags |= MCPR_NVM_COMMAND_LAST;
9674 else if (((offset + 4) % NVRAM_PAGE_SIZE) == 0)
9675 cmd_flags |= MCPR_NVM_COMMAND_LAST;
9676 else if ((offset % NVRAM_PAGE_SIZE) == 0)
9677 cmd_flags |= MCPR_NVM_COMMAND_FIRST;
9679 memcpy(&val, data_buf, 4);
9681 rc = bnx2x_nvram_write_dword(bp, offset, val, cmd_flags);
9683 /* advance to the next dword */
9684 offset += sizeof(u32);
9685 data_buf += sizeof(u32);
9686 written_so_far += sizeof(u32);
9690 /* disable access to nvram interface */
9691 bnx2x_disable_nvram_access(bp);
9692 bnx2x_release_nvram_lock(bp);
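/*
 * Page bracketing in the loop above: LAST is raised for the final dword
 * of the buffer and for any dword that ends an NVRAM page, and FIRST is
 * raised again when a new page begins.  E.g. (assuming a page size of
 * 256 bytes) a write spanning offset 0x100 is issued as two
 * FIRST/LAST-bracketed bursts.
 */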
9697 static int bnx2x_set_eeprom(struct net_device *dev,
9698 struct ethtool_eeprom *eeprom, u8 *eebuf)
9700 struct bnx2x *bp = netdev_priv(dev);
9701 int port = BP_PORT(bp);
9704 if (!netif_running(dev))
9707 DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
9708 DP_LEVEL " magic 0x%x offset 0x%x (%d) len 0x%x (%d)\n",
9709 eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
9710 eeprom->len, eeprom->len);
9712 /* parameters already validated in ethtool_set_eeprom */
9714 /* PHY eeprom can be accessed only by the PMF */
9715 if ((eeprom->magic >= 0x50485900) && (eeprom->magic <= 0x504859FF) &&
9719 if (eeprom->magic == 0x50485950) {
9720 /* 'PHYP' (0x50485950): prepare phy for FW upgrade */
9721 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
9723 bnx2x_acquire_phy_lock(bp);
9724 rc |= bnx2x_link_reset(&bp->link_params,
9726 if (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config) ==
9727 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101)
9728 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_0,
9729 MISC_REGISTERS_GPIO_HIGH, port);
9730 bnx2x_release_phy_lock(bp);
9731 bnx2x_link_report(bp);
9733 } else if (eeprom->magic == 0x50485952) {
9734 /* 'PHYR' (0x50485952): re-init link after FW upgrade */
9735 if ((bp->state == BNX2X_STATE_OPEN) ||
9736 (bp->state == BNX2X_STATE_DISABLED)) {
9737 bnx2x_acquire_phy_lock(bp);
9738 rc |= bnx2x_link_reset(&bp->link_params,
9741 rc |= bnx2x_phy_init(&bp->link_params,
9743 bnx2x_release_phy_lock(bp);
9744 bnx2x_calc_fc_adv(bp);
9746 } else if (eeprom->magic == 0x53985943) {
9747 /* 'PHYC' (0x53985943): PHY FW upgrade completed */
9748 if (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config) ==
9749 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101) {
9751 XGXS_EXT_PHY_ADDR(bp->link_params.ext_phy_config);
9753 /* DSP Remove Download Mode */
9754 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_0,
9755 MISC_REGISTERS_GPIO_LOW, port);
9757 bnx2x_acquire_phy_lock(bp);
9759 bnx2x_sfx7101_sp_sw_reset(bp, port, ext_phy_addr);
9761 /* wait 0.5 sec to allow it to run */
9763 bnx2x_ext_phy_hw_reset(bp, port);
9765 bnx2x_release_phy_lock(bp);
9768 rc = bnx2x_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
9773 static int bnx2x_get_coalesce(struct net_device *dev,
9774 struct ethtool_coalesce *coal)
9776 struct bnx2x *bp = netdev_priv(dev);
9778 memset(coal, 0, sizeof(struct ethtool_coalesce));
9780 coal->rx_coalesce_usecs = bp->rx_ticks;
9781 coal->tx_coalesce_usecs = bp->tx_ticks;
9786 #define BNX2X_MAX_COALES_TOUT (0xf0*12) /* Maximal coalescing timeout in us */
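/*
 * 0xf0 * 12 = 2880us.  The assumption behind this cap: the HC coalescing
 * timers tick in 12us units and 0xf0 is the largest tick count the
 * driver programs, so larger requests are silently clamped below.
 */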
9787 static int bnx2x_set_coalesce(struct net_device *dev,
9788 struct ethtool_coalesce *coal)
9790 struct bnx2x *bp = netdev_priv(dev);
9792 bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
9793 if (bp->rx_ticks > BNX2X_MAX_COALES_TOUT)
9794 bp->rx_ticks = BNX2X_MAX_COALES_TOUT;
9796 bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
9797 if (bp->tx_ticks > BNX2X_MAX_COALES_TOUT)
9798 bp->tx_ticks = BNX2X_MAX_COALES_TOUT;
9800 if (netif_running(dev))
9801 bnx2x_update_coalesce(bp);
9806 static void bnx2x_get_ringparam(struct net_device *dev,
9807 struct ethtool_ringparam *ering)
9809 struct bnx2x *bp = netdev_priv(dev);
9811 ering->rx_max_pending = MAX_RX_AVAIL;
9812 ering->rx_mini_max_pending = 0;
9813 ering->rx_jumbo_max_pending = 0;
9815 ering->rx_pending = bp->rx_ring_size;
9816 ering->rx_mini_pending = 0;
9817 ering->rx_jumbo_pending = 0;
9819 ering->tx_max_pending = MAX_TX_AVAIL;
9820 ering->tx_pending = bp->tx_ring_size;
9823 static int bnx2x_set_ringparam(struct net_device *dev,
9824 struct ethtool_ringparam *ering)
9826 struct bnx2x *bp = netdev_priv(dev);
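/* A floor of MAX_SKB_FRAGS + 4 is enforced in the check below so the Tx
 * ring can always hold one worst-case packet (per-frag BDs plus the
 * start/parse and split BDs) - this reading of the "+ 4" is an
 * assumption, not taken from the code.
 */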
9829 if ((ering->rx_pending > MAX_RX_AVAIL) ||
9830 (ering->tx_pending > MAX_TX_AVAIL) ||
9831 (ering->tx_pending <= MAX_SKB_FRAGS + 4))
9834 bp->rx_ring_size = ering->rx_pending;
9835 bp->tx_ring_size = ering->tx_pending;
9837 if (netif_running(dev)) {
9838 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
9839 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
9845 static void bnx2x_get_pauseparam(struct net_device *dev,
9846 struct ethtool_pauseparam *epause)
9848 struct bnx2x *bp = netdev_priv(dev);
9850 epause->autoneg = (bp->link_params.req_flow_ctrl ==
9851 BNX2X_FLOW_CTRL_AUTO) &&
9852 (bp->link_params.req_line_speed == SPEED_AUTO_NEG);
9854 epause->rx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) ==
9855 BNX2X_FLOW_CTRL_RX);
9856 epause->tx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX) ==
9857 BNX2X_FLOW_CTRL_TX);
9859 DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
9860 DP_LEVEL " autoneg %d rx_pause %d tx_pause %d\n",
9861 epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
9864 static int bnx2x_set_pauseparam(struct net_device *dev,
9865 struct ethtool_pauseparam *epause)
9867 struct bnx2x *bp = netdev_priv(dev);
9872 DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
9873 DP_LEVEL " autoneg %d rx_pause %d tx_pause %d\n",
9874 epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
9876 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO;
9878 if (epause->rx_pause)
9879 bp->link_params.req_flow_ctrl |= BNX2X_FLOW_CTRL_RX;
9881 if (epause->tx_pause)
9882 bp->link_params.req_flow_ctrl |= BNX2X_FLOW_CTRL_TX;
9884 if (bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO)
9885 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_NONE;
9887 if (epause->autoneg) {
9888 if (!(bp->port.supported & SUPPORTED_Autoneg)) {
9889 DP(NETIF_MSG_LINK, "autoneg not supported\n");
9893 if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
9894 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO;
9898 "req_flow_ctrl 0x%x\n", bp->link_params.req_flow_ctrl);
9900 if (netif_running(dev)) {
9901 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
9908 static int bnx2x_set_flags(struct net_device *dev, u32 data)
9910 struct bnx2x *bp = netdev_priv(dev);
9914 /* TPA requires Rx CSUM offloading */
9915 if ((data & ETH_FLAG_LRO) && bp->rx_csum) {
9916 if (!(dev->features & NETIF_F_LRO)) {
9917 dev->features |= NETIF_F_LRO;
9918 bp->flags |= TPA_ENABLE_FLAG;
9922 } else if (dev->features & NETIF_F_LRO) {
9923 dev->features &= ~NETIF_F_LRO;
9924 bp->flags &= ~TPA_ENABLE_FLAG;
9928 if (changed && netif_running(dev)) {
9929 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
9930 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
9936 static u32 bnx2x_get_rx_csum(struct net_device *dev)
9938 struct bnx2x *bp = netdev_priv(dev);
9943 static int bnx2x_set_rx_csum(struct net_device *dev, u32 data)
9945 struct bnx2x *bp = netdev_priv(dev);
9950 /* Disable TPA when Rx CSUM is disabled. Otherwise all
9951 TPA'ed packets will be discarded due to wrong TCP CSUM */
9953 u32 flags = ethtool_op_get_flags(dev);
9955 rc = bnx2x_set_flags(dev, (flags & ~ETH_FLAG_LRO));
9961 static int bnx2x_set_tso(struct net_device *dev, u32 data)
9964 dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
9965 dev->features |= NETIF_F_TSO6;
9967 dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO_ECN);
9968 dev->features &= ~NETIF_F_TSO6;
9974 static const struct {
9975 char string[ETH_GSTRING_LEN];
9976 } bnx2x_tests_str_arr[BNX2X_NUM_TESTS] = {
9977 { "register_test (offline)" },
9978 { "memory_test (offline)" },
9979 { "loopback_test (offline)" },
9980 { "nvram_test (online)" },
9981 { "interrupt_test (online)" },
9982 { "link_test (online)" },
9983 { "idle check (online)" }
9986 static int bnx2x_test_registers(struct bnx2x *bp)
9988 int idx, i, rc = -ENODEV;
9990 int port = BP_PORT(bp);
9991 static const struct {
9996 /* 0 */ { BRB1_REG_PAUSE_LOW_THRESHOLD_0, 4, 0x000003ff },
9997 { DORQ_REG_DB_ADDR0, 4, 0xffffffff },
9998 { HC_REG_AGG_INT_0, 4, 0x000003ff },
9999 { PBF_REG_MAC_IF0_ENABLE, 4, 0x00000001 },
10000 { PBF_REG_P0_INIT_CRD, 4, 0x000007ff },
10001 { PRS_REG_CID_PORT_0, 4, 0x00ffffff },
10002 { PXP2_REG_PSWRQ_CDU0_L2P, 4, 0x000fffff },
10003 { PXP2_REG_RQ_CDU0_EFIRST_MEM_ADDR, 8, 0x0003ffff },
10004 { PXP2_REG_PSWRQ_TM0_L2P, 4, 0x000fffff },
10005 { PXP2_REG_RQ_USDM0_EFIRST_MEM_ADDR, 8, 0x0003ffff },
10006 /* 10 */ { PXP2_REG_PSWRQ_TSDM0_L2P, 4, 0x000fffff },
10007 { QM_REG_CONNNUM_0, 4, 0x000fffff },
10008 { TM_REG_LIN0_MAX_ACTIVE_CID, 4, 0x0003ffff },
10009 { SRC_REG_KEYRSS0_0, 40, 0xffffffff },
10010 { SRC_REG_KEYRSS0_7, 40, 0xffffffff },
10011 { XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD00, 4, 0x00000001 },
10012 { XCM_REG_WU_DA_CNT_CMD00, 4, 0x00000003 },
10013 { XCM_REG_GLB_DEL_ACK_MAX_CNT_0, 4, 0x000000ff },
10014 { NIG_REG_LLH0_T_BIT, 4, 0x00000001 },
10015 { NIG_REG_EMAC0_IN_EN, 4, 0x00000001 },
10016 /* 20 */ { NIG_REG_BMAC0_IN_EN, 4, 0x00000001 },
10017 { NIG_REG_XCM0_OUT_EN, 4, 0x00000001 },
10018 { NIG_REG_BRB0_OUT_EN, 4, 0x00000001 },
10019 { NIG_REG_LLH0_XCM_MASK, 4, 0x00000007 },
10020 { NIG_REG_LLH0_ACPI_PAT_6_LEN, 68, 0x000000ff },
10021 { NIG_REG_LLH0_ACPI_PAT_0_CRC, 68, 0xffffffff },
10022 { NIG_REG_LLH0_DEST_MAC_0_0, 160, 0xffffffff },
10023 { NIG_REG_LLH0_DEST_IP_0_1, 160, 0xffffffff },
10024 { NIG_REG_LLH0_IPV4_IPV6_0, 160, 0x00000001 },
10025 { NIG_REG_LLH0_DEST_UDP_0, 160, 0x0000ffff },
10026 /* 30 */ { NIG_REG_LLH0_DEST_TCP_0, 160, 0x0000ffff },
10027 { NIG_REG_LLH0_VLAN_ID_0, 160, 0x00000fff },
10028 { NIG_REG_XGXS_SERDES0_MODE_SEL, 4, 0x00000001 },
10029 { NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0, 4, 0x00000001 },
10030 { NIG_REG_STATUS_INTERRUPT_PORT0, 4, 0x07ffffff },
10031 { NIG_REG_XGXS0_CTRL_EXTREMOTEMDIOST, 24, 0x00000001 },
10032 { NIG_REG_SERDES0_CTRL_PHY_ADDR, 16, 0x0000001f },
10034 { 0xffffffff, 0, 0x00000000 }
10037 if (!netif_running(bp->dev))
10040 /* Repeat the test twice:
10041 First by writing 0x00000000, second by writing 0xffffffff */
10042 for (idx = 0; idx < 2; idx++) {
10049 wr_val = 0xffffffff;
10053 for (i = 0; reg_tbl[i].offset0 != 0xffffffff; i++) {
10054 u32 offset, mask, save_val, val;
10056 offset = reg_tbl[i].offset0 + port*reg_tbl[i].offset1;
10057 mask = reg_tbl[i].mask;
10059 save_val = REG_RD(bp, offset);
10061 REG_WR(bp, offset, wr_val);
10062 val = REG_RD(bp, offset);
10064 /* Restore the original register's value */
10065 REG_WR(bp, offset, save_val);
10067 /* verify that the value read back is as expected */
10068 if ((val & mask) != (wr_val & mask))
10069 goto test_reg_exit;
10079 static int bnx2x_test_memory(struct bnx2x *bp)
10081 int i, j, rc = -ENODEV;
10083 static const struct {
10087 { CCM_REG_XX_DESCR_TABLE, CCM_REG_XX_DESCR_TABLE_SIZE },
10088 { CFC_REG_ACTIVITY_COUNTER, CFC_REG_ACTIVITY_COUNTER_SIZE },
10089 { CFC_REG_LINK_LIST, CFC_REG_LINK_LIST_SIZE },
10090 { DMAE_REG_CMD_MEM, DMAE_REG_CMD_MEM_SIZE },
10091 { TCM_REG_XX_DESCR_TABLE, TCM_REG_XX_DESCR_TABLE_SIZE },
10092 { UCM_REG_XX_DESCR_TABLE, UCM_REG_XX_DESCR_TABLE_SIZE },
10093 { XCM_REG_XX_DESCR_TABLE, XCM_REG_XX_DESCR_TABLE_SIZE },
10097 static const struct {
10103 { "CCM_PRTY_STS", CCM_REG_CCM_PRTY_STS, 0x3ffc0, 0 },
10104 { "CFC_PRTY_STS", CFC_REG_CFC_PRTY_STS, 0x2, 0x2 },
10105 { "DMAE_PRTY_STS", DMAE_REG_DMAE_PRTY_STS, 0, 0 },
10106 { "TCM_PRTY_STS", TCM_REG_TCM_PRTY_STS, 0x3ffc0, 0 },
10107 { "UCM_PRTY_STS", UCM_REG_UCM_PRTY_STS, 0x3ffc0, 0 },
10108 { "XCM_PRTY_STS", XCM_REG_XCM_PRTY_STS, 0x3ffc1, 0 },
10110 { NULL, 0xffffffff, 0, 0 }
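/*
 * The memory test below is read-only: sweeping every word of each memory
 * in mem_tbl exercises the HW parity logic, after which the parity
 * status registers must show nothing outside the per-chip masks of bits
 * that are expected to be set benignly.
 */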
10113 if (!netif_running(bp->dev))
10116 /* Go through all the memories */
10117 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++)
10118 for (j = 0; j < mem_tbl[i].size; j++)
10119 REG_RD(bp, mem_tbl[i].offset + j*4);
10121 /* Check the parity status */
10122 for (i = 0; prty_tbl[i].offset != 0xffffffff; i++) {
10123 val = REG_RD(bp, prty_tbl[i].offset);
10124 if ((CHIP_IS_E1(bp) && (val & ~(prty_tbl[i].e1_mask))) ||
10125 (CHIP_IS_E1H(bp) && (val & ~(prty_tbl[i].e1h_mask)))) {
10127 "%s is 0x%x\n", prty_tbl[i].name, val);
10128 goto test_mem_exit;
10138 static void bnx2x_wait_for_link(struct bnx2x *bp, u8 link_up)
10143 while (bnx2x_link_test(bp) && cnt--)
10147 static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode, u8 link_up)
10149 unsigned int pkt_size, num_pkts, i;
10150 struct sk_buff *skb;
10151 unsigned char *packet;
10152 struct bnx2x_fastpath *fp_rx = &bp->fp[0];
10153 struct bnx2x_fastpath *fp_tx = &bp->fp[bp->num_rx_queues];
10154 u16 tx_start_idx, tx_idx;
10155 u16 rx_start_idx, rx_idx;
10156 u16 pkt_prod, bd_prod;
10157 struct sw_tx_bd *tx_buf;
10158 struct eth_tx_start_bd *tx_start_bd;
10159 struct eth_tx_parse_bd *pbd = NULL;
10160 dma_addr_t mapping;
10161 union eth_rx_cqe *cqe;
10163 struct sw_rx_bd *rx_buf;
10167 /* check the loopback mode */
10168 switch (loopback_mode) {
10169 case BNX2X_PHY_LOOPBACK:
10170 if (bp->link_params.loopback_mode != LOOPBACK_XGXS_10)
10173 case BNX2X_MAC_LOOPBACK:
10174 bp->link_params.loopback_mode = LOOPBACK_BMAC;
10175 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
10181 /* prepare the loopback packet */
10182 pkt_size = (((bp->dev->mtu < ETH_MAX_PACKET_SIZE) ?
10183 bp->dev->mtu : ETH_MAX_PACKET_SIZE) + ETH_HLEN);
10184 skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
10187 goto test_loopback_exit;
10189 packet = skb_put(skb, pkt_size);
10190 memcpy(packet, bp->dev->dev_addr, ETH_ALEN);
10191 memset(packet + ETH_ALEN, 0, ETH_ALEN);
10192 memset(packet + 2*ETH_ALEN, 0x77, (ETH_HLEN - 2*ETH_ALEN));
10193 for (i = ETH_HLEN; i < pkt_size; i++)
10194 packet[i] = (unsigned char) (i & 0xff);
10196 /* send the loopback packet */
10198 tx_start_idx = le16_to_cpu(*fp_tx->tx_cons_sb);
10199 rx_start_idx = le16_to_cpu(*fp_rx->rx_cons_sb);
10201 pkt_prod = fp_tx->tx_pkt_prod++;
10202 tx_buf = &fp_tx->tx_buf_ring[TX_BD(pkt_prod)];
10203 tx_buf->first_bd = fp_tx->tx_bd_prod;
10207 bd_prod = TX_BD(fp_tx->tx_bd_prod);
10208 tx_start_bd = &fp_tx->tx_desc_ring[bd_prod].start_bd;
10209 mapping = pci_map_single(bp->pdev, skb->data,
10210 skb_headlen(skb), PCI_DMA_TODEVICE);
10211 tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
10212 tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
10213 tx_start_bd->nbd = cpu_to_le16(2); /* start + pbd */
10214 tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb));
10215 tx_start_bd->vlan = cpu_to_le16(pkt_prod);
10216 tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
10217 tx_start_bd->general_data = ((UNICAST_ADDRESS <<
10218 ETH_TX_START_BD_ETH_ADDR_TYPE_SHIFT) | 1);
10220 /* turn on parsing and get a BD */
10221 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
10222 pbd = &fp_tx->tx_desc_ring[bd_prod].parse_bd;
10224 memset(pbd, 0, sizeof(struct eth_tx_parse_bd));
10228 fp_tx->tx_db.data.prod += 2;
10230 DOORBELL(bp, fp_tx->index - bp->num_rx_queues, fp_tx->tx_db.raw);
10235 fp_tx->tx_bd_prod += 2; /* start + pbd */
10236 bp->dev->trans_start = jiffies;
10240 tx_idx = le16_to_cpu(*fp_tx->tx_cons_sb);
10241 if (tx_idx != tx_start_idx + num_pkts)
10242 goto test_loopback_exit;
10244 rx_idx = le16_to_cpu(*fp_rx->rx_cons_sb);
10245 if (rx_idx != rx_start_idx + num_pkts)
10246 goto test_loopback_exit;
10248 cqe = &fp_rx->rx_comp_ring[RCQ_BD(fp_rx->rx_comp_cons)];
10249 cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
10250 if (CQE_TYPE(cqe_fp_flags) || (cqe_fp_flags & ETH_RX_ERROR_FALGS))
10251 goto test_loopback_rx_exit;
10253 len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
10254 if (len != pkt_size)
10255 goto test_loopback_rx_exit;
10257 rx_buf = &fp_rx->rx_buf_ring[RX_BD(fp_rx->rx_bd_cons)];
10259 skb_reserve(skb, cqe->fast_path_cqe.placement_offset);
10260 for (i = ETH_HLEN; i < pkt_size; i++)
10261 if (*(skb->data + i) != (unsigned char) (i & 0xff))
10262 goto test_loopback_rx_exit;
10266 test_loopback_rx_exit:
10268 fp_rx->rx_bd_cons = NEXT_RX_IDX(fp_rx->rx_bd_cons);
10269 fp_rx->rx_bd_prod = NEXT_RX_IDX(fp_rx->rx_bd_prod);
10270 fp_rx->rx_comp_cons = NEXT_RCQ_IDX(fp_rx->rx_comp_cons);
10271 fp_rx->rx_comp_prod = NEXT_RCQ_IDX(fp_rx->rx_comp_prod);
10273 /* Update producers */
10274 bnx2x_update_rx_prod(bp, fp_rx, fp_rx->rx_bd_prod, fp_rx->rx_comp_prod,
10275 fp_rx->rx_sge_prod);
10277 test_loopback_exit:
10278 bp->link_params.loopback_mode = LOOPBACK_NONE;
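/*
 * Summary of the loopback exercise above: a frame addressed to our own
 * MAC (0x77 filler plus an incrementing byte pattern) is posted as a
 * start BD + parse BD pair on the first Tx queue, the doorbell is rung,
 * and the test then polls the status-block Tx/Rx consumer indices and
 * verifies the CQE flags, length, and payload byte-for-byte.
 */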
10283 static int bnx2x_test_loopback(struct bnx2x *bp, u8 link_up)
10287 if (!netif_running(bp->dev))
10288 return BNX2X_LOOPBACK_FAILED;
10290 bnx2x_netif_stop(bp, 1);
10291 bnx2x_acquire_phy_lock(bp);
10293 res = bnx2x_run_loopback(bp, BNX2X_PHY_LOOPBACK, link_up);
10295 DP(NETIF_MSG_PROBE, " PHY loopback failed (res %d)\n", res);
10296 rc |= BNX2X_PHY_LOOPBACK_FAILED;
10299 res = bnx2x_run_loopback(bp, BNX2X_MAC_LOOPBACK, link_up);
10301 DP(NETIF_MSG_PROBE, " MAC loopback failed (res %d)\n", res);
10302 rc |= BNX2X_MAC_LOOPBACK_FAILED;
10305 bnx2x_release_phy_lock(bp);
10306 bnx2x_netif_start(bp);
10311 #define CRC32_RESIDUAL 0xdebb20e3
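/*
 * Why a constant: each NVRAM region in the table below stores its own
 * CRC32 at the end, and running ether_crc_le() over data-plus-CRC of an
 * intact region always yields the same residual (0xdebb20e3 is the
 * complement of the standard CRC-32 residue 0x2144df1c), so any other
 * value flags corruption.
 */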
10313 static int bnx2x_test_nvram(struct bnx2x *bp)
10315 static const struct {
10319 { 0, 0x14 }, /* bootstrap */
10320 { 0x14, 0xec }, /* dir */
10321 { 0x100, 0x350 }, /* manuf_info */
10322 { 0x450, 0xf0 }, /* feature_info */
10323 { 0x640, 0x64 }, /* upgrade_key_info */
10325 { 0x708, 0x70 }, /* manuf_key_info */
10329 __be32 buf[0x350 / 4];
10330 u8 *data = (u8 *)buf;
10334 rc = bnx2x_nvram_read(bp, 0, data, 4);
10336 DP(NETIF_MSG_PROBE, "magic value read (rc %d)\n", rc);
10337 goto test_nvram_exit;
10340 magic = be32_to_cpu(buf[0]);
10341 if (magic != 0x669955aa) {
10342 DP(NETIF_MSG_PROBE, "magic value (0x%08x)\n", magic);
10344 goto test_nvram_exit;
10347 for (i = 0; nvram_tbl[i].size; i++) {
10349 rc = bnx2x_nvram_read(bp, nvram_tbl[i].offset, data,
10350 nvram_tbl[i].size);
10352 DP(NETIF_MSG_PROBE,
10353 "nvram_tbl[%d] read data (rc %d)\n", i, rc);
10354 goto test_nvram_exit;
10357 crc = ether_crc_le(nvram_tbl[i].size, data);
10358 if (crc != CRC32_RESIDUAL) {
10359 DP(NETIF_MSG_PROBE,
10360 "nvram_tbl[%d] crc value (0x%08x)\n", i, crc);
10362 goto test_nvram_exit;
10370 static int bnx2x_test_intr(struct bnx2x *bp)
10372 struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
10375 if (!netif_running(bp->dev))
10378 config->hdr.length = 0;
10379 if (CHIP_IS_E1(bp))
10380 config->hdr.offset = (BP_PORT(bp) ? 32 : 0);
10382 config->hdr.offset = BP_FUNC(bp);
10383 config->hdr.client_id = bp->fp->cl_id;
10384 config->hdr.reserved1 = 0;
10386 bp->set_mac_pending++;
10388 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
10389 U64_HI(bnx2x_sp_mapping(bp, mac_config)),
10390 U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
10392 for (i = 0; i < 10; i++) {
10393 if (!bp->set_mac_pending)
10396 msleep_interruptible(10);
10405 static void bnx2x_self_test(struct net_device *dev,
10406 struct ethtool_test *etest, u64 *buf)
10408 struct bnx2x *bp = netdev_priv(dev);
10410 memset(buf, 0, sizeof(u64) * BNX2X_NUM_TESTS);
10412 if (!netif_running(dev))
10415 /* offline tests are not supported in MF mode */
10417 etest->flags &= ~ETH_TEST_FL_OFFLINE;
10419 if (etest->flags & ETH_TEST_FL_OFFLINE) {
10420 int port = BP_PORT(bp);
10424 /* save current value of input enable for TX port IF */
10425 val = REG_RD(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4);
10426 /* disable input for TX port IF */
10427 REG_WR(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4, 0);
10429 link_up = bp->link_vars.link_up;
10430 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
10431 bnx2x_nic_load(bp, LOAD_DIAG);
10432 /* wait until link state is restored */
10433 bnx2x_wait_for_link(bp, link_up);
10435 if (bnx2x_test_registers(bp) != 0) {
10437 etest->flags |= ETH_TEST_FL_FAILED;
10439 if (bnx2x_test_memory(bp) != 0) {
10441 etest->flags |= ETH_TEST_FL_FAILED;
10443 buf[2] = bnx2x_test_loopback(bp, link_up);
10445 etest->flags |= ETH_TEST_FL_FAILED;
10447 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
10449 /* restore input for TX port IF */
10450 REG_WR(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4, val);
10452 bnx2x_nic_load(bp, LOAD_NORMAL);
10453 /* wait until link state is restored */
10454 bnx2x_wait_for_link(bp, link_up);
10456 if (bnx2x_test_nvram(bp) != 0) {
10458 etest->flags |= ETH_TEST_FL_FAILED;
10460 if (bnx2x_test_intr(bp) != 0) {
10462 etest->flags |= ETH_TEST_FL_FAILED;
10465 if (bnx2x_link_test(bp) != 0) {
10467 etest->flags |= ETH_TEST_FL_FAILED;
10470 #ifdef BNX2X_EXTRA_DEBUG
10471 bnx2x_panic_dump(bp);
10475 static const struct {
10478 u8 string[ETH_GSTRING_LEN];
10479 } bnx2x_q_stats_arr[BNX2X_NUM_Q_STATS] = {
10480 /* 1 */ { Q_STATS_OFFSET32(total_bytes_received_hi), 8, "[%d]: rx_bytes" },
10481 { Q_STATS_OFFSET32(error_bytes_received_hi),
10482 8, "[%d]: rx_error_bytes" },
10483 { Q_STATS_OFFSET32(total_unicast_packets_received_hi),
10484 8, "[%d]: rx_ucast_packets" },
10485 { Q_STATS_OFFSET32(total_multicast_packets_received_hi),
10486 8, "[%d]: rx_mcast_packets" },
10487 { Q_STATS_OFFSET32(total_broadcast_packets_received_hi),
10488 8, "[%d]: rx_bcast_packets" },
10489 { Q_STATS_OFFSET32(no_buff_discard_hi), 8, "[%d]: rx_discards" },
10490 { Q_STATS_OFFSET32(rx_err_discard_pkt),
10491 4, "[%d]: rx_phy_ip_err_discards"},
10492 { Q_STATS_OFFSET32(rx_skb_alloc_failed),
10493 4, "[%d]: rx_skb_alloc_discard" },
10494 { Q_STATS_OFFSET32(hw_csum_err), 4, "[%d]: rx_csum_offload_errors" },
10496 /* 10 */{ Q_STATS_OFFSET32(total_bytes_transmitted_hi), 8, "[%d]: tx_bytes" },
10497 { Q_STATS_OFFSET32(total_unicast_packets_transmitted_hi),
10498 8, "[%d]: tx_packets" }
10501 static const struct {
10505 #define STATS_FLAGS_PORT 1
10506 #define STATS_FLAGS_FUNC 2
10507 #define STATS_FLAGS_BOTH (STATS_FLAGS_FUNC | STATS_FLAGS_PORT)
10508 u8 string[ETH_GSTRING_LEN];
10509 } bnx2x_stats_arr[BNX2X_NUM_STATS] = {
10510 /* 1 */ { STATS_OFFSET32(total_bytes_received_hi),
10511 8, STATS_FLAGS_BOTH, "rx_bytes" },
10512 { STATS_OFFSET32(error_bytes_received_hi),
10513 8, STATS_FLAGS_BOTH, "rx_error_bytes" },
10514 { STATS_OFFSET32(total_unicast_packets_received_hi),
10515 8, STATS_FLAGS_BOTH, "rx_ucast_packets" },
10516 { STATS_OFFSET32(total_multicast_packets_received_hi),
10517 8, STATS_FLAGS_BOTH, "rx_mcast_packets" },
10518 { STATS_OFFSET32(total_broadcast_packets_received_hi),
10519 8, STATS_FLAGS_BOTH, "rx_bcast_packets" },
10520 { STATS_OFFSET32(rx_stat_dot3statsfcserrors_hi),
10521 8, STATS_FLAGS_PORT, "rx_crc_errors" },
10522 { STATS_OFFSET32(rx_stat_dot3statsalignmenterrors_hi),
10523 8, STATS_FLAGS_PORT, "rx_align_errors" },
10524 { STATS_OFFSET32(rx_stat_etherstatsundersizepkts_hi),
10525 8, STATS_FLAGS_PORT, "rx_undersize_packets" },
10526 { STATS_OFFSET32(etherstatsoverrsizepkts_hi),
10527 8, STATS_FLAGS_PORT, "rx_oversize_packets" },
10528 /* 10 */{ STATS_OFFSET32(rx_stat_etherstatsfragments_hi),
10529 8, STATS_FLAGS_PORT, "rx_fragments" },
10530 { STATS_OFFSET32(rx_stat_etherstatsjabbers_hi),
10531 8, STATS_FLAGS_PORT, "rx_jabbers" },
10532 { STATS_OFFSET32(no_buff_discard_hi),
10533 8, STATS_FLAGS_BOTH, "rx_discards" },
10534 { STATS_OFFSET32(mac_filter_discard),
10535 4, STATS_FLAGS_PORT, "rx_filtered_packets" },
10536 { STATS_OFFSET32(xxoverflow_discard),
10537 4, STATS_FLAGS_PORT, "rx_fw_discards" },
10538 { STATS_OFFSET32(brb_drop_hi),
10539 8, STATS_FLAGS_PORT, "rx_brb_discard" },
10540 { STATS_OFFSET32(brb_truncate_hi),
10541 8, STATS_FLAGS_PORT, "rx_brb_truncate" },
10542 { STATS_OFFSET32(pause_frames_received_hi),
10543 8, STATS_FLAGS_PORT, "rx_pause_frames" },
10544 { STATS_OFFSET32(rx_stat_maccontrolframesreceived_hi),
10545 8, STATS_FLAGS_PORT, "rx_mac_ctrl_frames" },
10546 { STATS_OFFSET32(nig_timer_max),
10547 4, STATS_FLAGS_PORT, "rx_constant_pause_events" },
10548 /* 20 */{ STATS_OFFSET32(rx_err_discard_pkt),
10549 4, STATS_FLAGS_BOTH, "rx_phy_ip_err_discards"},
10550 { STATS_OFFSET32(rx_skb_alloc_failed),
10551 4, STATS_FLAGS_BOTH, "rx_skb_alloc_discard" },
10552 { STATS_OFFSET32(hw_csum_err),
10553 4, STATS_FLAGS_BOTH, "rx_csum_offload_errors" },
10555 { STATS_OFFSET32(total_bytes_transmitted_hi),
10556 8, STATS_FLAGS_BOTH, "tx_bytes" },
10557 { STATS_OFFSET32(tx_stat_ifhcoutbadoctets_hi),
10558 8, STATS_FLAGS_PORT, "tx_error_bytes" },
10559 { STATS_OFFSET32(total_unicast_packets_transmitted_hi),
10560 8, STATS_FLAGS_BOTH, "tx_packets" },
10561 { STATS_OFFSET32(tx_stat_dot3statsinternalmactransmiterrors_hi),
10562 8, STATS_FLAGS_PORT, "tx_mac_errors" },
10563 { STATS_OFFSET32(rx_stat_dot3statscarriersenseerrors_hi),
10564 8, STATS_FLAGS_PORT, "tx_carrier_errors" },
10565 { STATS_OFFSET32(tx_stat_dot3statssinglecollisionframes_hi),
10566 8, STATS_FLAGS_PORT, "tx_single_collisions" },
10567 { STATS_OFFSET32(tx_stat_dot3statsmultiplecollisionframes_hi),
10568 8, STATS_FLAGS_PORT, "tx_multi_collisions" },
10569 /* 30 */{ STATS_OFFSET32(tx_stat_dot3statsdeferredtransmissions_hi),
10570 8, STATS_FLAGS_PORT, "tx_deferred" },
10571 { STATS_OFFSET32(tx_stat_dot3statsexcessivecollisions_hi),
10572 8, STATS_FLAGS_PORT, "tx_excess_collisions" },
10573 { STATS_OFFSET32(tx_stat_dot3statslatecollisions_hi),
10574 8, STATS_FLAGS_PORT, "tx_late_collisions" },
10575 { STATS_OFFSET32(tx_stat_etherstatscollisions_hi),
10576 8, STATS_FLAGS_PORT, "tx_total_collisions" },
10577 { STATS_OFFSET32(tx_stat_etherstatspkts64octets_hi),
10578 8, STATS_FLAGS_PORT, "tx_64_byte_packets" },
10579 { STATS_OFFSET32(tx_stat_etherstatspkts65octetsto127octets_hi),
10580 8, STATS_FLAGS_PORT, "tx_65_to_127_byte_packets" },
10581 { STATS_OFFSET32(tx_stat_etherstatspkts128octetsto255octets_hi),
10582 8, STATS_FLAGS_PORT, "tx_128_to_255_byte_packets" },
10583 { STATS_OFFSET32(tx_stat_etherstatspkts256octetsto511octets_hi),
10584 8, STATS_FLAGS_PORT, "tx_256_to_511_byte_packets" },
10585 { STATS_OFFSET32(tx_stat_etherstatspkts512octetsto1023octets_hi),
10586 8, STATS_FLAGS_PORT, "tx_512_to_1023_byte_packets" },
10587 { STATS_OFFSET32(etherstatspkts1024octetsto1522octets_hi),
10588 8, STATS_FLAGS_PORT, "tx_1024_to_1522_byte_packets" },
10589 /* 40 */{ STATS_OFFSET32(etherstatspktsover1522octets_hi),
10590 8, STATS_FLAGS_PORT, "tx_1523_to_9022_byte_packets" },
10591 { STATS_OFFSET32(pause_frames_sent_hi),
10592 8, STATS_FLAGS_PORT, "tx_pause_frames" }
10595 #define IS_PORT_STAT(i) \
10596 ((bnx2x_stats_arr[i].flags & STATS_FLAGS_BOTH) == STATS_FLAGS_PORT)
10597 #define IS_FUNC_STAT(i) (bnx2x_stats_arr[i].flags & STATS_FLAGS_FUNC)
10598 #define IS_E1HMF_MODE_STAT(bp) \
10599 (IS_E1HMF(bp) && !(bp->msglevel & BNX2X_MSG_STATS))
10601 static int bnx2x_get_sset_count(struct net_device *dev, int stringset)
10603 struct bnx2x *bp = netdev_priv(dev);
10606 switch (stringset) {
10608 if (is_multi(bp)) {
10609 num_stats = BNX2X_NUM_Q_STATS * bp->num_rx_queues;
10610 if (!IS_E1HMF_MODE_STAT(bp))
10611 num_stats += BNX2X_NUM_STATS;
10613 if (IS_E1HMF_MODE_STAT(bp)) {
10615 for (i = 0; i < BNX2X_NUM_STATS; i++)
10616 if (IS_FUNC_STAT(i))
10619 num_stats = BNX2X_NUM_STATS;
10624 return BNX2X_NUM_TESTS;
10631 static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
10633 struct bnx2x *bp = netdev_priv(dev);
10636 switch (stringset) {
10638 if (is_multi(bp)) {
10640 for_each_rx_queue(bp, i) {
10641 for (j = 0; j < BNX2X_NUM_Q_STATS; j++)
10642 sprintf(buf + (k + j)*ETH_GSTRING_LEN,
10643 bnx2x_q_stats_arr[j].string, i);
10644 k += BNX2X_NUM_Q_STATS;
10646 if (IS_E1HMF_MODE_STAT(bp))
10648 for (j = 0; j < BNX2X_NUM_STATS; j++)
10649 strcpy(buf + (k + j)*ETH_GSTRING_LEN,
10650 bnx2x_stats_arr[j].string);
10652 for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
10653 if (IS_E1HMF_MODE_STAT(bp) && IS_PORT_STAT(i))
10655 strcpy(buf + j*ETH_GSTRING_LEN,
10656 bnx2x_stats_arr[i].string);
10663 memcpy(buf, bnx2x_tests_str_arr, sizeof(bnx2x_tests_str_arr));
10668 static void bnx2x_get_ethtool_stats(struct net_device *dev,
10669 struct ethtool_stats *stats, u64 *buf)
10671 struct bnx2x *bp = netdev_priv(dev);
10672 u32 *hw_stats, *offset;
10675 if (is_multi(bp)) {
10677 for_each_rx_queue(bp, i) {
10678 hw_stats = (u32 *)&bp->fp[i].eth_q_stats;
10679 for (j = 0; j < BNX2X_NUM_Q_STATS; j++) {
10680 if (bnx2x_q_stats_arr[j].size == 0) {
10681 /* skip this counter */
10685 offset = (hw_stats +
10686 bnx2x_q_stats_arr[j].offset);
10687 if (bnx2x_q_stats_arr[j].size == 4) {
10688 /* 4-byte counter */
10689 buf[k + j] = (u64) *offset;
10692 /* 8-byte counter */
10693 buf[k + j] = HILO_U64(*offset, *(offset + 1));
10695 k += BNX2X_NUM_Q_STATS;
10697 if (IS_E1HMF_MODE_STAT(bp))
10699 hw_stats = (u32 *)&bp->eth_stats;
10700 for (j = 0; j < BNX2X_NUM_STATS; j++) {
10701 if (bnx2x_stats_arr[j].size == 0) {
10702 /* skip this counter */
10706 offset = (hw_stats + bnx2x_stats_arr[j].offset);
10707 if (bnx2x_stats_arr[j].size == 4) {
10708 /* 4-byte counter */
10709 buf[k + j] = (u64) *offset;
10712 /* 8-byte counter */
10713 buf[k + j] = HILO_U64(*offset, *(offset + 1));
10716 hw_stats = (u32 *)&bp->eth_stats;
10717 for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
10718 if (IS_E1HMF_MODE_STAT(bp) && IS_PORT_STAT(i))
10720 if (bnx2x_stats_arr[i].size == 0) {
10721 /* skip this counter */
10726 offset = (hw_stats + bnx2x_stats_arr[i].offset);
10727 if (bnx2x_stats_arr[i].size == 4) {
10728 /* 4-byte counter */
10729 buf[j] = (u64) *offset;
10733 /* 8-byte counter */
10734 buf[j] = HILO_U64(*offset, *(offset + 1));
10740 static int bnx2x_phys_id(struct net_device *dev, u32 data)
10742 struct bnx2x *bp = netdev_priv(dev);
10743 int port = BP_PORT(bp);
10746 if (!netif_running(dev))
10755 for (i = 0; i < (data * 2); i++) {
10757 bnx2x_set_led(bp, port, LED_MODE_OPER, SPEED_1000,
10758 bp->link_params.hw_led_mode,
10759 bp->link_params.chip_id);
10761 bnx2x_set_led(bp, port, LED_MODE_OFF, 0,
10762 bp->link_params.hw_led_mode,
10763 bp->link_params.chip_id);
10765 msleep_interruptible(500);
10766 if (signal_pending(current))
10770 if (bp->link_vars.link_up)
10771 bnx2x_set_led(bp, port, LED_MODE_OPER,
10772 bp->link_vars.line_speed,
10773 bp->link_params.hw_led_mode,
10774 bp->link_params.chip_id);
10779 static const struct ethtool_ops bnx2x_ethtool_ops = {
10780 .get_settings = bnx2x_get_settings,
10781 .set_settings = bnx2x_set_settings,
10782 .get_drvinfo = bnx2x_get_drvinfo,
10783 .get_regs_len = bnx2x_get_regs_len,
10784 .get_regs = bnx2x_get_regs,
10785 .get_wol = bnx2x_get_wol,
10786 .set_wol = bnx2x_set_wol,
10787 .get_msglevel = bnx2x_get_msglevel,
10788 .set_msglevel = bnx2x_set_msglevel,
10789 .nway_reset = bnx2x_nway_reset,
10790 .get_link = bnx2x_get_link,
10791 .get_eeprom_len = bnx2x_get_eeprom_len,
10792 .get_eeprom = bnx2x_get_eeprom,
10793 .set_eeprom = bnx2x_set_eeprom,
10794 .get_coalesce = bnx2x_get_coalesce,
10795 .set_coalesce = bnx2x_set_coalesce,
10796 .get_ringparam = bnx2x_get_ringparam,
10797 .set_ringparam = bnx2x_set_ringparam,
10798 .get_pauseparam = bnx2x_get_pauseparam,
10799 .set_pauseparam = bnx2x_set_pauseparam,
10800 .get_rx_csum = bnx2x_get_rx_csum,
10801 .set_rx_csum = bnx2x_set_rx_csum,
10802 .get_tx_csum = ethtool_op_get_tx_csum,
10803 .set_tx_csum = ethtool_op_set_tx_hw_csum,
10804 .set_flags = bnx2x_set_flags,
10805 .get_flags = ethtool_op_get_flags,
10806 .get_sg = ethtool_op_get_sg,
10807 .set_sg = ethtool_op_set_sg,
10808 .get_tso = ethtool_op_get_tso,
10809 .set_tso = bnx2x_set_tso,
10810 .self_test = bnx2x_self_test,
10811 .get_sset_count = bnx2x_get_sset_count,
10812 .get_strings = bnx2x_get_strings,
10813 .phys_id = bnx2x_phys_id,
10814 .get_ethtool_stats = bnx2x_get_ethtool_stats,
10817 /* end of ethtool_ops */
10819 /****************************************************************************
10820 * General service functions
10821 ****************************************************************************/
10823 static int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
10827 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);
10831 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
10832 ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
10833 PCI_PM_CTRL_PME_STATUS));
10835 if (pmcsr & PCI_PM_CTRL_STATE_MASK)
10836 /* delay required during transition out of D3hot */
10841 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
10845 pmcsr |= PCI_PM_CTRL_PME_ENABLE;
10847 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
10850 /* No more memory access after this point until
10851 * device is brought back to D0.
10852 */
10861 static inline int bnx2x_has_rx_work(struct bnx2x_fastpath *fp)
10865 /* Tell compiler that status block fields can change */
10867 rx_cons_sb = le16_to_cpu(*fp->rx_cons_sb);
10868 if ((rx_cons_sb & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
10870 return (fp->rx_comp_cons != rx_cons_sb);
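/*
 * About the MAX_RCQ_DESC_CNT case above: the last descriptor of every
 * RCQ page is (assumed here to be) a "next page" pointer rather than a
 * real completion, so a status-block index landing on it is bumped past
 * the page boundary before the comparison.
 */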
10873 /*
10874 * net_device service functions
10875 */
static int bnx2x_poll(struct napi_struct *napi, int budget)
{
	struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
						 napi);
	struct bnx2x *bp = fp->bp;
	int work_done = 0;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		goto poll_panic;
#endif

	prefetch(fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)].skb);
	prefetch((char *)(fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)].skb) + 256);

	bnx2x_update_fpsb_idx(fp);

	if (bnx2x_has_rx_work(fp)) {
		work_done = bnx2x_rx_int(fp, budget);

		/* must not complete if we consumed full budget */
		if (work_done >= budget)
			goto poll_again;
	}

	/* bnx2x_has_rx_work() reads the status block, thus we need to
	 * ensure that status block indices have been actually read
	 * (bnx2x_update_fpsb_idx) prior to this check (bnx2x_has_rx_work)
	 * so that we won't write the "newer" value of the status block to IGU
	 * (if there was a DMA right after bnx2x_has_rx_work and
	 * if there is no rmb, the memory reading (bnx2x_update_fpsb_idx)
	 * may be postponed to right before bnx2x_ack_sb). In this case
	 * there will never be another interrupt until there is another update
	 * of the status block, while there is still unhandled work.
	 */
	rmb();

	if (!bnx2x_has_rx_work(fp)) {
#ifdef BNX2X_STOP_ON_ERROR
poll_panic:
#endif
		napi_complete(napi);

		bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID,
			     le16_to_cpu(fp->fp_u_idx), IGU_INT_NOP, 1);
		bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID,
			     le16_to_cpu(fp->fp_c_idx), IGU_INT_ENABLE, 1);
	}

poll_again:
	return work_done;
}
/* we split the first BD into headers and data BDs
 * to ease the pain of our fellow microcode engineers
 * we use one mapping for both BDs
 * So far this has only been observed to happen
 * in Other Operating Systems(TM)
 */
static noinline u16 bnx2x_tx_split(struct bnx2x *bp,
				   struct bnx2x_fastpath *fp,
				   struct sw_tx_bd *tx_buf,
				   struct eth_tx_start_bd **tx_bd, u16 hlen,
				   u16 bd_prod, int nbd)
{
	struct eth_tx_start_bd *h_tx_bd = *tx_bd;
	struct eth_tx_bd *d_tx_bd;
	dma_addr_t mapping;
	int old_len = le16_to_cpu(h_tx_bd->nbytes);

	/* first fix first BD */
	h_tx_bd->nbd = cpu_to_le16(nbd);
	h_tx_bd->nbytes = cpu_to_le16(hlen);

	DP(NETIF_MSG_TX_QUEUED, "TSO split header size is %d "
	   "(%x:%x) nbd %d\n", h_tx_bd->nbytes, h_tx_bd->addr_hi,
	   h_tx_bd->addr_lo, h_tx_bd->nbd);

	/* now get a new data BD
	 * (after the pbd) and fill it */
	bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
	d_tx_bd = &fp->tx_desc_ring[bd_prod].reg_bd;

	mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi),
			   le32_to_cpu(h_tx_bd->addr_lo)) + hlen;

	d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
	d_tx_bd->nbytes = cpu_to_le16(old_len - hlen);

	/* this marks the BD as one that has no individual mapping */
	tx_buf->flags |= BNX2X_TSO_SPLIT_BD;

	DP(NETIF_MSG_TX_QUEUED,
	   "TSO split data size is %d (%x:%x)\n",
	   d_tx_bd->nbytes, d_tx_bd->addr_hi, d_tx_bd->addr_lo);

	/* update tx_bd */
	*tx_bd = (struct eth_tx_start_bd *)d_tx_bd;

	return bd_prod;
}
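/* Worked example (hypothetical sizes): for a TSO skb whose linear part is
 * 66 bytes of headers plus 134 bytes of payload (hlen == 66,
 * old_len == 200), the start BD above is trimmed to cover bytes [0, 66) of
 * the existing DMA mapping and the new data BD covers [66, 200) at
 * mapping + hlen.  Both BDs share one mapping, which is why the completion
 * path must honour BNX2X_TSO_SPLIT_BD and unmap only once.
 */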
static inline u16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
{
	if (fix > 0)
		csum = (u16) ~csum_fold(csum_sub(csum,
				csum_partial(t_header - fix, fix, 0)));

	else if (fix < 0)
		csum = (u16) ~csum_fold(csum_add(csum,
				csum_partial(t_header, -fix, 0)));

	return swab16(csum);
}
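/* Sketch of the fixup above (uses only the generic kernel csum helpers):
 * the stack's partial checksum starts at skb->csum_start, but the HW wants
 * it to start exactly at the transport header.  fix > 0 means csum_start
 * lies 'fix' bytes before the transport header, so those bytes were summed
 * in error and csum_sub() removes them; fix < 0 means the opposite and
 * csum_add() folds the missing bytes in.  The result is folded to 16 bits,
 * complemented and byte-swapped into the parsing-BD format.
 */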
static inline u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
{
	u32 rc;

	if (skb->ip_summed != CHECKSUM_PARTIAL)
		rc = XMIT_PLAIN;

	else {
		if (skb->protocol == htons(ETH_P_IPV6)) {
			rc = XMIT_CSUM_V6;
			if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
				rc |= XMIT_CSUM_TCP;

		} else {
			rc = XMIT_CSUM_V4;
			if (ip_hdr(skb)->protocol == IPPROTO_TCP)
				rc |= XMIT_CSUM_TCP;
		}
	}

	if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
		rc |= XMIT_GSO_V4;

	else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
		rc |= XMIT_GSO_V6;

	return rc;
}
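/* Typical flag compositions (illustrative only, not compiled): */
#if 0
	u32 t1 = XMIT_CSUM_V4 | XMIT_CSUM_TCP | XMIT_GSO_V4; /* TSO IPv4 TCP */
	u32 t2 = XMIT_CSUM_V6 | XMIT_CSUM_TCP;	/* csum-offloaded IPv6 TCP */
	u32 t3 = XMIT_PLAIN;	/* ip_summed != CHECKSUM_PARTIAL, no GSO */
#endif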
#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
/* check if packet requires linearization (packet is too fragmented)
   no need to check fragmentation if page size > 8K (there will be no
   violation to FW restrictions) */
static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
			     u32 xmit_type)
{
	int to_copy = 0;
	int hlen = 0;
	int first_bd_sz = 0;

	/* 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */
	if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - 3)) {

		if (xmit_type & XMIT_GSO) {
			unsigned short lso_mss = skb_shinfo(skb)->gso_size;
			/* Check if LSO packet needs to be copied:
			   3 = 1 (for headers BD) + 2 (for PBD and last BD) */
			int wnd_size = MAX_FETCH_BD - 3;
			/* Number of windows to check */
			int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size;
			int wnd_idx = 0;
			int frag_idx = 0;
			u32 wnd_sum = 0;

			/* Headers length */
			hlen = (int)(skb_transport_header(skb) - skb->data) +
				tcp_hdrlen(skb);

			/* Amount of data (w/o headers) on linear part of SKB*/
			first_bd_sz = skb_headlen(skb) - hlen;

			wnd_sum = first_bd_sz;

			/* Calculate the first sum - it's special */
			for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++)
				wnd_sum +=
					skb_shinfo(skb)->frags[frag_idx].size;

			/* If there was data on linear skb data - check it */
			if (first_bd_sz > 0) {
				if (unlikely(wnd_sum < lso_mss)) {
					to_copy = 1;
					goto exit_lbl;
				}

				wnd_sum -= first_bd_sz;
			}

			/* Others are easier: run through the frag list and
			   check all windows */
			for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) {
				wnd_sum +=
			  skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1].size;

				if (unlikely(wnd_sum < lso_mss)) {
					to_copy = 1;
					break;
				}
				wnd_sum -=
					skb_shinfo(skb)->frags[wnd_idx].size;
			}
		} else {
			/* in non-LSO too fragmented packet should always
			   be linearized */
			to_copy = 1;
		}
	}

exit_lbl:
	if (unlikely(to_copy))
		DP(NETIF_MSG_TX_QUEUED,
		   "Linearization IS REQUIRED for %s packet. "
		   "num_frags %d  hlen %d  first_bd_sz %d\n",
		   (xmit_type & XMIT_GSO) ? "LSO" : "non-LSO",
		   skb_shinfo(skb)->nr_frags, hlen, first_bd_sz);

	return to_copy;
}
#endif
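/* Worked example (assuming MAX_FETCH_BD == 13 as in the HSI headers, so
 * wnd_size == 10): an LSO skb with 12 frags slides a 10-BD window across
 * {linear part, frag0..frag11}; the packet is linearized only if some
 * window sums to less than one MSS, i.e. only if the FW could be asked to
 * fetch more than wnd_size BDs for a single segment.  Non-LSO packets with
 * too many frags are always copied.
 */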
/* called with netif_tx_lock
 * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
 * netif_wake_queue()
 */
static netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);
	struct bnx2x_fastpath *fp, *fp_stat;
	struct netdev_queue *txq;
	struct sw_tx_bd *tx_buf;
	struct eth_tx_start_bd *tx_start_bd;
	struct eth_tx_bd *tx_data_bd, *total_pkt_bd = NULL;
	struct eth_tx_parse_bd *pbd = NULL;
	u16 pkt_prod, bd_prod;
	int nbd, fp_index;
	dma_addr_t mapping;
	u32 xmit_type = bnx2x_xmit_type(bp, skb);
	int hlen = 0;
	int i;
	__le16 pkt_size = 0;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return NETDEV_TX_BUSY;
#endif

	fp_index = skb_get_queue_mapping(skb);
	txq = netdev_get_tx_queue(dev, fp_index);

	fp = &bp->fp[fp_index + bp->num_rx_queues];
	fp_stat = &bp->fp[fp_index];

	if (unlikely(bnx2x_tx_avail(fp) < (skb_shinfo(skb)->nr_frags + 3))) {
		fp_stat->eth_q_stats.driver_xoff++;
		netif_tx_stop_queue(txq);
		BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
		return NETDEV_TX_BUSY;
	}

	DP(NETIF_MSG_TX_QUEUED, "SKB: summed %x  protocol %x  protocol(%x,%x)"
	   "  gso type %x  xmit_type %x\n",
	   skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
	   ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type);

#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
	/* First, check if we need to linearize the skb (due to FW
	   restrictions). No need to check fragmentation if page size > 8K
	   (there will be no violation to FW restrictions) */
	if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) {
		/* Statistics of linearization */
		bp->lin_cnt++;
		if (skb_linearize(skb) != 0) {
			DP(NETIF_MSG_TX_QUEUED, "SKB linearization failed - "
			   "silently dropping this SKB\n");
			dev_kfree_skb_any(skb);
			return NETDEV_TX_OK;
		}
	}
#endif

	/*
	Please read carefully. First we use one BD which we mark as start,
	then we have a parsing info BD (used for TSO or xsum),
	and only then we have the rest of the TSO BDs.
	(don't forget to mark the last one as last,
	and to unmap only AFTER you write to the BD ...)
	And above all, all pdb sizes are in words - NOT DWORDS!
	*/
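	/* Illustrative BD chain for a two-frag TSO packet (layout only):
	 *
	 *   start BD  - headers, ETH_TX_BD_FLAGS_START_BD
	 *   parse BD  - lso_mss, pseudo csum, total_hlen (in 16-bit words!)
	 *   data BD   - linear payload, added by bnx2x_tx_split() when needed
	 *   frag BD 0
	 *   frag BD 1 - last BD of the packet
	 */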
	pkt_prod = fp->tx_pkt_prod++;
	bd_prod = TX_BD(fp->tx_bd_prod);

	/* get a tx_buf and first BD */
	tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)];
	tx_start_bd = &fp->tx_desc_ring[bd_prod].start_bd;

	tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
	tx_start_bd->general_data = (UNICAST_ADDRESS <<
				     ETH_TX_START_BD_ETH_ADDR_TYPE_SHIFT);
	/* header nbd */
	tx_start_bd->general_data |= (1 << ETH_TX_START_BD_HDR_NBDS_SHIFT);

	/* remember the first BD of the packet */
	tx_buf->first_bd = fp->tx_bd_prod;
	tx_buf->skb = skb;
	tx_buf->flags = 0;

	DP(NETIF_MSG_TX_QUEUED,
	   "sending pkt %u @%p  next_idx %u  bd %u @%p\n",
	   pkt_prod, tx_buf, fp->tx_pkt_prod, bd_prod, tx_start_bd);

#ifdef BCM_VLAN
	if ((bp->vlgrp != NULL) && vlan_tx_tag_present(skb) &&
	    (bp->flags & HW_VLAN_TX_FLAG)) {
		tx_start_bd->vlan = cpu_to_le16(vlan_tx_tag_get(skb));
		tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_VLAN_TAG;
	} else
#endif
		tx_start_bd->vlan = cpu_to_le16(pkt_prod);

	/* turn on parsing and get a BD */
	bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
	pbd = &fp->tx_desc_ring[bd_prod].parse_bd;

	memset(pbd, 0, sizeof(struct eth_tx_parse_bd));

	if (xmit_type & XMIT_CSUM) {
		hlen = (skb_network_header(skb) - skb->data) / 2;

		/* for now NS flag is not used in Linux */
		pbd->global_data =
			(hlen | ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
				 ETH_TX_PARSE_BD_LLC_SNAP_EN_SHIFT));

		pbd->ip_hlen = (skb_transport_header(skb) -
				skb_network_header(skb)) / 2;

		hlen += pbd->ip_hlen + tcp_hdrlen(skb) / 2;

		pbd->total_hlen = cpu_to_le16(hlen);
		hlen = hlen*2;

		tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_L4_CSUM;

		if (xmit_type & XMIT_CSUM_V4)
			tx_start_bd->bd_flags.as_bitfield |=
						ETH_TX_BD_FLAGS_IP_CSUM;
		else
			tx_start_bd->bd_flags.as_bitfield |=
						ETH_TX_BD_FLAGS_IPV6;

		if (xmit_type & XMIT_CSUM_TCP) {
			pbd->tcp_pseudo_csum = swab16(tcp_hdr(skb)->check);

		} else {
			s8 fix = SKB_CS_OFF(skb); /* signed! */

			pbd->global_data |= ETH_TX_PARSE_BD_UDP_CS_FLG;

			DP(NETIF_MSG_TX_QUEUED,
			   "hlen %d  fix %d  csum before fix %x\n",
			   le16_to_cpu(pbd->total_hlen), fix, SKB_CS(skb));

			/* HW bug: fixup the CSUM */
			pbd->tcp_pseudo_csum =
				bnx2x_csum_fix(skb_transport_header(skb),
					       SKB_CS(skb), fix);

			DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n",
			   pbd->tcp_pseudo_csum);
		}
	}

	mapping = pci_map_single(bp->pdev, skb->data,
				 skb_headlen(skb), PCI_DMA_TODEVICE);

	tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
	nbd = skb_shinfo(skb)->nr_frags + 2; /* start_bd + pbd + frags */
	tx_start_bd->nbd = cpu_to_le16(nbd);
	tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb));
	pkt_size = tx_start_bd->nbytes;

	DP(NETIF_MSG_TX_QUEUED, "first bd @%p  addr (%x:%x)  nbd %d"
	   "  nbytes %d  flags %x  vlan %x\n",
	   tx_start_bd, tx_start_bd->addr_hi, tx_start_bd->addr_lo,
	   le16_to_cpu(tx_start_bd->nbd), le16_to_cpu(tx_start_bd->nbytes),
	   tx_start_bd->bd_flags.as_bitfield, le16_to_cpu(tx_start_bd->vlan));

	if (xmit_type & XMIT_GSO) {

		DP(NETIF_MSG_TX_QUEUED,
		   "TSO packet len %d  hlen %d  total len %d  tso size %d\n",
		   skb->len, hlen, skb_headlen(skb),
		   skb_shinfo(skb)->gso_size);

		tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;

		if (unlikely(skb_headlen(skb) > hlen))
			bd_prod = bnx2x_tx_split(bp, fp, tx_buf, &tx_start_bd,
						 hlen, bd_prod, ++nbd);

		pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
		pbd->tcp_send_seq = swab32(tcp_hdr(skb)->seq);
		pbd->tcp_flags = pbd_tcp_flags(skb);

		if (xmit_type & XMIT_GSO_V4) {
			pbd->ip_id = swab16(ip_hdr(skb)->id);
			pbd->tcp_pseudo_csum =
				swab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
							  ip_hdr(skb)->daddr,
							  0, IPPROTO_TCP, 0));

		} else
			pbd->tcp_pseudo_csum =
				swab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
							&ipv6_hdr(skb)->daddr,
							0, IPPROTO_TCP, 0));

		pbd->global_data |= ETH_TX_PARSE_BD_PSEUDO_CS_WITHOUT_LEN;
	}
	tx_data_bd = (struct eth_tx_bd *)tx_start_bd;

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
		tx_data_bd = &fp->tx_desc_ring[bd_prod].reg_bd;
		if (total_pkt_bd == NULL)
			total_pkt_bd = &fp->tx_desc_ring[bd_prod].reg_bd;

		mapping = pci_map_page(bp->pdev, frag->page, frag->page_offset,
				       frag->size, PCI_DMA_TODEVICE);

		tx_data_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
		tx_data_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
		tx_data_bd->nbytes = cpu_to_le16(frag->size);
		le16_add_cpu(&pkt_size, frag->size);

		DP(NETIF_MSG_TX_QUEUED,
		   "frag %d  bd @%p  addr (%x:%x)  nbytes %d\n",
		   i, tx_data_bd, tx_data_bd->addr_hi, tx_data_bd->addr_lo,
		   le16_to_cpu(tx_data_bd->nbytes));
	}

	DP(NETIF_MSG_TX_QUEUED, "last bd @%p\n", tx_data_bd);

	bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));

	/* now send a tx doorbell, counting the next BD
	 * if the packet contains or ends with it
	 */
	if (TX_BD_POFF(bd_prod) < nbd)
		nbd++;

	if (total_pkt_bd != NULL)
		total_pkt_bd->total_pkt_bytes = pkt_size;

	if (pbd)
		DP(NETIF_MSG_TX_QUEUED,
		   "PBD @%p  ip_data %x  ip_hlen %u  ip_id %u  lso_mss %u"
		   "  tcp_flags %x  xsum %x  seq %u  hlen %u\n",
		   pbd, pbd->global_data, pbd->ip_hlen, pbd->ip_id,
		   pbd->lso_mss, pbd->tcp_flags, pbd->tcp_pseudo_csum,
		   pbd->tcp_send_seq, le16_to_cpu(pbd->total_hlen));

	DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d  bd %u\n", nbd, bd_prod);

	/*
	 * Make sure that the BD data is updated before updating the producer
	 * since FW might read the BD right after the producer is updated.
	 * This is only applicable for weak-ordered memory model archs such
	 * as IA-64. The following barrier is also mandatory since FW will
	 * assumes packets must have BDs.
	 */
	wmb();

	fp->tx_db.data.prod += nbd;
	barrier();
	DOORBELL(bp, fp->index - bp->num_rx_queues, fp->tx_db.raw);

	mmiowb();

	fp->tx_bd_prod += nbd;

	if (unlikely(bnx2x_tx_avail(fp) < MAX_SKB_FRAGS + 3)) {
		netif_tx_stop_queue(txq);
		/* We want bnx2x_tx_int to "see" the updated tx_bd_prod
		   if we put Tx into XOFF state. */
		smp_mb();
		fp_stat->eth_q_stats.driver_xoff++;
		if (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3)
			netif_tx_wake_queue(txq);
	}
	fp_stat->tx_pkt++;

	return NETDEV_TX_OK;
}
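/* Note on the XOFF path above (restating the existing logic): the queue is
 * stopped first and availability re-checked afterwards because a concurrent
 * bnx2x_tx_int() may free descriptors between the test and
 * netif_tx_stop_queue(); the smp_mb() pairs with the barrier in the
 * completion path so that at least one side observes the wakeup condition.
 */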
/* called with rtnl_lock */
static int bnx2x_open(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	netif_carrier_off(dev);

	bnx2x_set_power_state(bp, PCI_D0);

	return bnx2x_nic_load(bp, LOAD_OPEN);
}
/* called with rtnl_lock */
static int bnx2x_close(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	/* Unload the driver, release IRQs */
	bnx2x_nic_unload(bp, UNLOAD_CLOSE);
	if (atomic_read(&bp->pdev->enable_cnt) == 1)
		if (!CHIP_REV_IS_SLOW(bp))
			bnx2x_set_power_state(bp, PCI_D3hot);

	return 0;
}
/* called with netif_tx_lock from dev_mcast.c */
static void bnx2x_set_rx_mode(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);
	u32 rx_mode = BNX2X_RX_MODE_NORMAL;
	int port = BP_PORT(bp);

	if (bp->state != BNX2X_STATE_OPEN) {
		DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
		return;
	}

	DP(NETIF_MSG_IFUP, "dev->flags = %x\n", dev->flags);

	if (dev->flags & IFF_PROMISC)
		rx_mode = BNX2X_RX_MODE_PROMISC;

	else if ((dev->flags & IFF_ALLMULTI) ||
		 ((dev->mc_count > BNX2X_MAX_MULTICAST) && CHIP_IS_E1(bp)))
		rx_mode = BNX2X_RX_MODE_ALLMULTI;

	else { /* some multicasts */
		if (CHIP_IS_E1(bp)) {
			int i, old, offset;
			struct dev_mc_list *mclist;
			struct mac_configuration_cmd *config =
						bnx2x_sp(bp, mcast_config);

			for (i = 0, mclist = dev->mc_list;
			     mclist && (i < dev->mc_count);
			     i++, mclist = mclist->next) {

				config->config_table[i].
					cam_entry.msb_mac_addr =
					swab16(*(u16 *)&mclist->dmi_addr[0]);
				config->config_table[i].
					cam_entry.middle_mac_addr =
					swab16(*(u16 *)&mclist->dmi_addr[2]);
				config->config_table[i].
					cam_entry.lsb_mac_addr =
					swab16(*(u16 *)&mclist->dmi_addr[4]);
				config->config_table[i].cam_entry.flags =
							cpu_to_le16(port);
				config->config_table[i].
					target_table_entry.flags = 0;
				config->config_table[i].target_table_entry.
					clients_bit_vector =
						cpu_to_le32(1 << BP_L_ID(bp));
				config->config_table[i].
					target_table_entry.vlan_id = 0;

				DP(NETIF_MSG_IFUP,
				   "setting MCAST[%d] (%04x:%04x:%04x)\n", i,
				   config->config_table[i].
						cam_entry.msb_mac_addr,
				   config->config_table[i].
						cam_entry.middle_mac_addr,
				   config->config_table[i].
						cam_entry.lsb_mac_addr);
			}
			old = config->hdr.length;
			if (old > i) {
				for (; i < old; i++) {
					if (CAM_IS_INVALID(config->
							   config_table[i])) {
						/* already invalidated */
						break;
					}
					/* invalidate */
					CAM_INVALIDATE(config->
						       config_table[i]);
				}
			}

			if (CHIP_REV_IS_SLOW(bp))
				offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
			else
				offset = BNX2X_MAX_MULTICAST*(1 + port);

			config->hdr.length = i;
			config->hdr.offset = offset;
			config->hdr.client_id = bp->fp->cl_id;
			config->hdr.reserved1 = 0;

			bp->set_mac_pending++;
			smp_wmb();

			bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
				   U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
				   U64_LO(bnx2x_sp_mapping(bp, mcast_config)),
				      0);
		} else { /* E1H */
			/* Accept one or more multicasts */
			struct dev_mc_list *mclist;
			u32 mc_filter[MC_HASH_SIZE];
			u32 crc, bit, regidx;
			int i;

			memset(mc_filter, 0, 4 * MC_HASH_SIZE);

			for (i = 0, mclist = dev->mc_list;
			     mclist && (i < dev->mc_count);
			     i++, mclist = mclist->next) {

				DP(NETIF_MSG_IFUP, "Adding mcast MAC: %pM\n",
				   mclist->dmi_addr);

				crc = crc32c_le(0, mclist->dmi_addr, ETH_ALEN);
				bit = (crc >> 24) & 0xff;
				regidx = bit >> 5;
				bit &= 0x1f;
				mc_filter[regidx] |= (1 << bit);
			}

			for (i = 0; i < MC_HASH_SIZE; i++)
				REG_WR(bp, MC_HASH_OFFSET(bp, i),
				       mc_filter[i]);
		}
	}

	bp->rx_mode = rx_mode;
	bnx2x_set_storm_rx_mode(bp);
}
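/* How one E1H filter bit is chosen above (illustrative only, not compiled): */
#if 0
	u32 crc = crc32c_le(0, mac, ETH_ALEN);
	u32 bit = (crc >> 24) & 0xff;		/* 256 hash bins */
	mc_filter[bit >> 5] |= 1 << (bit & 0x1f);
#endif
/* The hash is imperfect: the chip may still deliver some unwanted
 * multicasts, which the stack then drops.
 */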
/* called with rtnl_lock */
static int bnx2x_change_mac_addr(struct net_device *dev, void *p)
{
	struct sockaddr *addr = p;
	struct bnx2x *bp = netdev_priv(dev);

	if (!is_valid_ether_addr((u8 *)(addr->sa_data)))
		return -EINVAL;

	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
	if (netif_running(dev)) {
		if (CHIP_IS_E1(bp))
			bnx2x_set_eth_mac_addr_e1(bp, 1);
		else
			bnx2x_set_eth_mac_addr_e1h(bp, 1);
	}

	return 0;
}
/* called with rtnl_lock */
static int bnx2x_mdio_read(struct net_device *netdev, int prtad,
			   int devad, u16 addr)
{
	struct bnx2x *bp = netdev_priv(netdev);
	u16 value;
	int rc;
	u32 phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);

	DP(NETIF_MSG_LINK, "mdio_read: prtad 0x%x, devad 0x%x, addr 0x%x\n",
	   prtad, devad, addr);

	if (prtad != bp->mdio.prtad) {
		DP(NETIF_MSG_LINK, "prtad mismatch (cmd:0x%x != bp:0x%x)\n",
		   prtad, bp->mdio.prtad);
		return -EINVAL;
	}

	/* The HW expects different devad if CL22 is used */
	devad = (devad == MDIO_DEVAD_NONE) ? DEFAULT_PHY_DEV_ADDR : devad;

	bnx2x_acquire_phy_lock(bp);
	rc = bnx2x_cl45_read(bp, BP_PORT(bp), phy_type, prtad,
			     devad, addr, &value);
	bnx2x_release_phy_lock(bp);
	DP(NETIF_MSG_LINK, "mdio_read_val 0x%x rc = 0x%x\n", value, rc);

	if (!rc)
		rc = value;
	return rc;
}
/* called with rtnl_lock */
static int bnx2x_mdio_write(struct net_device *netdev, int prtad, int devad,
			    u16 addr, u16 value)
{
	struct bnx2x *bp = netdev_priv(netdev);
	u32 ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
	int rc;

	DP(NETIF_MSG_LINK, "mdio_write: prtad 0x%x, devad 0x%x, addr 0x%x,"
	   " value 0x%x\n", prtad, devad, addr, value);

	if (prtad != bp->mdio.prtad) {
		DP(NETIF_MSG_LINK, "prtad mismatch (cmd:0x%x != bp:0x%x)\n",
		   prtad, bp->mdio.prtad);
		return -EINVAL;
	}

	/* The HW expects different devad if CL22 is used */
	devad = (devad == MDIO_DEVAD_NONE) ? DEFAULT_PHY_DEV_ADDR : devad;

	bnx2x_acquire_phy_lock(bp);
	rc = bnx2x_cl45_write(bp, BP_PORT(bp), ext_phy_type, prtad,
			      devad, addr, value);
	bnx2x_release_phy_lock(bp);

	return rc;
}
/* called with rtnl_lock */
static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct bnx2x *bp = netdev_priv(dev);
	struct mii_ioctl_data *mdio = if_mii(ifr);

	DP(NETIF_MSG_LINK, "ioctl: phy id 0x%x, reg 0x%x, val_in 0x%x\n",
	   mdio->phy_id, mdio->reg_num, mdio->val_in);

	if (!netif_running(dev))
		return -EAGAIN;

	return mdio_mii_ioctl(&bp->mdio, mdio, cmd);
}
/* called with rtnl_lock */
static int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
{
	struct bnx2x *bp = netdev_priv(dev);
	int rc = 0;

	if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
	    ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE))
		return -EINVAL;

	/* This does not race with packet allocation
	 * because the actual alloc size is
	 * only updated as part of load
	 */
	dev->mtu = new_mtu;

	if (netif_running(dev)) {
		bnx2x_nic_unload(bp, UNLOAD_NORMAL);
		rc = bnx2x_nic_load(bp, LOAD_NORMAL);
	}

	return rc;
}
static void bnx2x_tx_timeout(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

#ifdef BNX2X_STOP_ON_ERROR
	if (!bp->panic)
		bnx2x_panic();
#endif
	/* This allows the netif to be shutdown gracefully before resetting */
	schedule_work(&bp->reset_task);
}
#ifdef BCM_VLAN
/* called with rtnl_lock */
static void bnx2x_vlan_rx_register(struct net_device *dev,
				   struct vlan_group *vlgrp)
{
	struct bnx2x *bp = netdev_priv(dev);

	bp->vlgrp = vlgrp;

	/* Set flags according to the required capabilities */
	bp->flags &= ~(HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG);

	if (dev->features & NETIF_F_HW_VLAN_TX)
		bp->flags |= HW_VLAN_TX_FLAG;

	if (dev->features & NETIF_F_HW_VLAN_RX)
		bp->flags |= HW_VLAN_RX_FLAG;

	if (netif_running(dev))
		bnx2x_set_client_config(bp);
}

#endif
#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
static void poll_bnx2x(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	disable_irq(bp->pdev->irq);
	bnx2x_interrupt(bp->pdev->irq, dev);
	enable_irq(bp->pdev->irq);
}
#endif
static const struct net_device_ops bnx2x_netdev_ops = {
	.ndo_open		= bnx2x_open,
	.ndo_stop		= bnx2x_close,
	.ndo_start_xmit		= bnx2x_start_xmit,
	.ndo_set_multicast_list	= bnx2x_set_rx_mode,
	.ndo_set_mac_address	= bnx2x_change_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_do_ioctl		= bnx2x_ioctl,
	.ndo_change_mtu		= bnx2x_change_mtu,
	.ndo_tx_timeout		= bnx2x_tx_timeout,
#ifdef BCM_VLAN
	.ndo_vlan_rx_register	= bnx2x_vlan_rx_register,
#endif
#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
	.ndo_poll_controller	= poll_bnx2x,
#endif
};
static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
				    struct net_device *dev)
{
	struct bnx2x *bp;
	int rc;

	SET_NETDEV_DEV(dev, &pdev->dev);
	bp = netdev_priv(dev);

	bp->dev = dev;
	bp->pdev = pdev;
	bp->flags = 0;
	bp->func = PCI_FUNC(pdev->devfn);

	rc = pci_enable_device(pdev);
	if (rc) {
		printk(KERN_ERR PFX "Cannot enable PCI device, aborting\n");
		goto err_out;
	}

	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
		printk(KERN_ERR PFX "Cannot find PCI device base address,"
		       " aborting\n");
		rc = -ENODEV;
		goto err_out_disable;
	}

	if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
		printk(KERN_ERR PFX "Cannot find second PCI device"
		       " base address, aborting\n");
		rc = -ENODEV;
		goto err_out_disable;
	}

	if (atomic_read(&pdev->enable_cnt) == 1) {
		rc = pci_request_regions(pdev, DRV_MODULE_NAME);
		if (rc) {
			printk(KERN_ERR PFX "Cannot obtain PCI resources,"
			       " aborting\n");
			goto err_out_disable;
		}

		pci_set_master(pdev);
		pci_save_state(pdev);
	}

	bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
	if (bp->pm_cap == 0) {
		printk(KERN_ERR PFX "Cannot find power management"
		       " capability, aborting\n");
		rc = -EIO;
		goto err_out_release;
	}

	bp->pcie_cap = pci_find_capability(pdev, PCI_CAP_ID_EXP);
	if (bp->pcie_cap == 0) {
		printk(KERN_ERR PFX "Cannot find PCI Express capability,"
		       " aborting\n");
		rc = -EIO;
		goto err_out_release;
	}

	if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) == 0) {
		bp->flags |= USING_DAC_FLAG;
		if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)) != 0) {
			printk(KERN_ERR PFX "pci_set_consistent_dma_mask"
			       " failed, aborting\n");
			rc = -EIO;
			goto err_out_release;
		}

	} else if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) {
		printk(KERN_ERR PFX "System does not support DMA,"
		       " aborting\n");
		rc = -EIO;
		goto err_out_release;
	}

	dev->mem_start = pci_resource_start(pdev, 0);
	dev->base_addr = dev->mem_start;
	dev->mem_end = pci_resource_end(pdev, 0);

	dev->irq = pdev->irq;

	bp->regview = pci_ioremap_bar(pdev, 0);
	if (!bp->regview) {
		printk(KERN_ERR PFX "Cannot map register space, aborting\n");
		rc = -ENOMEM;
		goto err_out_release;
	}

	bp->doorbells = ioremap_nocache(pci_resource_start(pdev, 2),
					min_t(u64, BNX2X_DB_SIZE,
					      pci_resource_len(pdev, 2)));
	if (!bp->doorbells) {
		printk(KERN_ERR PFX "Cannot map doorbell space, aborting\n");
		rc = -ENOMEM;
		goto err_out_unmap;
	}

	bnx2x_set_power_state(bp, PCI_D0);

	/* clean indirect addresses */
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);
	REG_WR(bp, PXP2_REG_PGL_ADDR_88_F0 + BP_PORT(bp)*16, 0);
	REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F0 + BP_PORT(bp)*16, 0);
	REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0 + BP_PORT(bp)*16, 0);
	REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0 + BP_PORT(bp)*16, 0);

	dev->watchdog_timeo = TX_TIMEOUT;

	dev->netdev_ops = &bnx2x_netdev_ops;
	dev->ethtool_ops = &bnx2x_ethtool_ops;
	dev->features |= NETIF_F_SG;
	dev->features |= NETIF_F_HW_CSUM;
	if (bp->flags & USING_DAC_FLAG)
		dev->features |= NETIF_F_HIGHDMA;
	dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
	dev->features |= NETIF_F_TSO6;
#ifdef BCM_VLAN
	dev->features |= (NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX);
	bp->flags |= (HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG);

	dev->vlan_features |= NETIF_F_SG;
	dev->vlan_features |= NETIF_F_HW_CSUM;
	if (bp->flags & USING_DAC_FLAG)
		dev->vlan_features |= NETIF_F_HIGHDMA;
	dev->vlan_features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
	dev->vlan_features |= NETIF_F_TSO6;
#endif

	/* get_port_hwinfo() will set prtad and mmds properly */
	bp->mdio.prtad = MDIO_PRTAD_NONE;
	bp->mdio.mmds = 0;
	bp->mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22;
	bp->mdio.dev = dev;
	bp->mdio.mdio_read = bnx2x_mdio_read;
	bp->mdio.mdio_write = bnx2x_mdio_write;

	return 0;

err_out_unmap:
	if (bp->regview) {
		iounmap(bp->regview);
		bp->regview = NULL;
	}
	if (bp->doorbells) {
		iounmap(bp->doorbells);
		bp->doorbells = NULL;
	}

err_out_release:
	if (atomic_read(&pdev->enable_cnt) == 1)
		pci_release_regions(pdev);

err_out_disable:
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);

err_out:
	return rc;
}
static void __devinit bnx2x_get_pcie_width_speed(struct bnx2x *bp,
						 int *width, int *speed)
{
	u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);

	*width = (val & PCICFG_LINK_WIDTH) >> PCICFG_LINK_WIDTH_SHIFT;

	/* return value of 1=2.5GHz 2=5GHz */
	*speed = (val & PCICFG_LINK_SPEED) >> PCICFG_LINK_SPEED_SHIFT;
}
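/* Decode example (illustrative): a link-control value whose width field
 * decodes to 8 and whose speed field decodes to 1 is reported by
 * bnx2x_init_one() below as "PCI-E x8 2.5GHz"; speed == 2 prints as
 * "5GHz (Gen2)".
 */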
static int __devinit bnx2x_check_firmware(struct bnx2x *bp)
{
	const struct firmware *firmware = bp->firmware;
	struct bnx2x_fw_file_hdr *fw_hdr;
	struct bnx2x_fw_file_section *sections;
	u32 offset, len, num_ops;
	u16 *ops_offsets;
	int i;
	const u8 *fw_ver;

	if (firmware->size < sizeof(struct bnx2x_fw_file_hdr))
		return -EINVAL;

	fw_hdr = (struct bnx2x_fw_file_hdr *)firmware->data;
	sections = (struct bnx2x_fw_file_section *)fw_hdr;

	/* Make sure none of the offsets and sizes make us read beyond
	 * the end of the firmware data */
	for (i = 0; i < sizeof(*fw_hdr) / sizeof(*sections); i++) {
		offset = be32_to_cpu(sections[i].offset);
		len = be32_to_cpu(sections[i].len);
		if (offset + len > firmware->size) {
			printk(KERN_ERR PFX "Section %d length is out of "
					    "bounds\n", i);
			return -EINVAL;
		}
	}

	/* Likewise for the init_ops offsets */
	offset = be32_to_cpu(fw_hdr->init_ops_offsets.offset);
	ops_offsets = (u16 *)(firmware->data + offset);
	num_ops = be32_to_cpu(fw_hdr->init_ops.len) / sizeof(struct raw_op);

	for (i = 0; i < be32_to_cpu(fw_hdr->init_ops_offsets.len) / 2; i++) {
		if (be16_to_cpu(ops_offsets[i]) > num_ops) {
			printk(KERN_ERR PFX "Section offset %d is out of "
					    "bounds\n", i);
			return -EINVAL;
		}
	}

	/* Check FW version */
	offset = be32_to_cpu(fw_hdr->fw_version.offset);
	fw_ver = firmware->data + offset;
	if ((fw_ver[0] != BCM_5710_FW_MAJOR_VERSION) ||
	    (fw_ver[1] != BCM_5710_FW_MINOR_VERSION) ||
	    (fw_ver[2] != BCM_5710_FW_REVISION_VERSION) ||
	    (fw_ver[3] != BCM_5710_FW_ENGINEERING_VERSION)) {
		printk(KERN_ERR PFX "Bad FW version:%d.%d.%d.%d."
				    " Should be %d.%d.%d.%d\n",
		       fw_ver[0], fw_ver[1], fw_ver[2],
		       fw_ver[3], BCM_5710_FW_MAJOR_VERSION,
		       BCM_5710_FW_MINOR_VERSION,
		       BCM_5710_FW_REVISION_VERSION,
		       BCM_5710_FW_ENGINEERING_VERSION);
		return -EINVAL;
	}

	return 0;
}
static inline void be32_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
{
	const __be32 *source = (const __be32 *)_source;
	u32 *target = (u32 *)_target;
	u32 i;

	for (i = 0; i < n/4; i++)
		target[i] = be32_to_cpu(source[i]);
}
/*
   Ops array is stored in the following format:
   {op(8bit), offset(24bit, big endian), data(32bit, big endian)}
 */
static inline void bnx2x_prep_ops(const u8 *_source, u8 *_target, u32 n)
{
	const __be32 *source = (const __be32 *)_source;
	struct raw_op *target = (struct raw_op *)_target;
	u32 i, j, tmp;

	for (i = 0, j = 0; i < n/8; i++, j += 2) {
		tmp = be32_to_cpu(source[j]);
		target[i].op = (tmp >> 24) & 0xff;
		target[i].offset = tmp & 0xffffff;
		target[i].raw_data = be32_to_cpu(source[j+1]);
	}
}
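/* Unpacking example (values hypothetical): the big-endian pair
 *	0x02001234 0x0000beef
 * becomes op == 0x02, offset == 0x001234, raw_data == 0x0000beef - the
 * 8-bit opcode is peeled off the top of the first word and its low 24 bits
 * keep the register offset.
 */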
static inline void be16_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
{
	const __be16 *source = (const __be16 *)_source;
	u16 *target = (u16 *)_target;
	u32 i;

	for (i = 0; i < n/2; i++)
		target[i] = be16_to_cpu(source[i]);
}
#define BNX2X_ALLOC_AND_SET(arr, lbl, func) \
	do { \
		u32 len = be32_to_cpu(fw_hdr->arr.len); \
		bp->arr = kmalloc(len, GFP_KERNEL); \
		if (!bp->arr) { \
			printk(KERN_ERR PFX "Failed to allocate %d bytes " \
					    "for "#arr"\n", len); \
			goto lbl; \
		} \
		func(bp->firmware->data + be32_to_cpu(fw_hdr->arr.offset), \
		     (u8 *)bp->arr, len); \
	} while (0)
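/* Usage sketch (mirrors the calls in bnx2x_init_firmware() below):
 *	BNX2X_ALLOC_AND_SET(init_data, request_firmware_exit, be32_to_cpu_n);
 * allocates bp->init_data, converts the matching section of the firmware
 * blob into it with the given function, and jumps to the error label if
 * the allocation fails.
 */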
static int __devinit bnx2x_init_firmware(struct bnx2x *bp, struct device *dev)
{
	char fw_file_name[40] = {0};
	struct bnx2x_fw_file_hdr *fw_hdr;
	int rc, offset;

	/* Create a FW file name */
	if (CHIP_IS_E1(bp))
		offset = sprintf(fw_file_name, FW_FILE_PREFIX_E1);
	else
		offset = sprintf(fw_file_name, FW_FILE_PREFIX_E1H);

	sprintf(fw_file_name + offset, "%d.%d.%d.%d.fw",
		BCM_5710_FW_MAJOR_VERSION,
		BCM_5710_FW_MINOR_VERSION,
		BCM_5710_FW_REVISION_VERSION,
		BCM_5710_FW_ENGINEERING_VERSION);
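	/* The resulting name has the form
	 * "bnx2x-e1-<major>.<minor>.<revision>.<engineering>.fw"
	 * (or the "bnx2x-e1h-" prefix for E1H chips); the numbers come
	 * from the BCM_5710_FW_* constants above.
	 */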
	printk(KERN_INFO PFX "Loading %s\n", fw_file_name);

	rc = request_firmware(&bp->firmware, fw_file_name, dev);
	if (rc) {
		printk(KERN_ERR PFX "Can't load firmware file %s\n",
		       fw_file_name);
		goto request_firmware_exit;
	}

	rc = bnx2x_check_firmware(bp);
	if (rc) {
		printk(KERN_ERR PFX "Corrupt firmware file %s\n", fw_file_name);
		goto request_firmware_exit;
	}

	fw_hdr = (struct bnx2x_fw_file_hdr *)bp->firmware->data;

	/* Initialize the pointers to the init arrays */
	/* Blob */
	BNX2X_ALLOC_AND_SET(init_data, request_firmware_exit, be32_to_cpu_n);

	/* Opcodes */
	BNX2X_ALLOC_AND_SET(init_ops, init_ops_alloc_err, bnx2x_prep_ops);

	/* Offsets */
	BNX2X_ALLOC_AND_SET(init_ops_offsets, init_offsets_alloc_err,
			    be16_to_cpu_n);

	/* STORMs firmware */
	INIT_TSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
			be32_to_cpu(fw_hdr->tsem_int_table_data.offset);
	INIT_TSEM_PRAM_DATA(bp) = bp->firmware->data +
			be32_to_cpu(fw_hdr->tsem_pram_data.offset);
	INIT_USEM_INT_TABLE_DATA(bp) = bp->firmware->data +
			be32_to_cpu(fw_hdr->usem_int_table_data.offset);
	INIT_USEM_PRAM_DATA(bp) = bp->firmware->data +
			be32_to_cpu(fw_hdr->usem_pram_data.offset);
	INIT_XSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
			be32_to_cpu(fw_hdr->xsem_int_table_data.offset);
	INIT_XSEM_PRAM_DATA(bp) = bp->firmware->data +
			be32_to_cpu(fw_hdr->xsem_pram_data.offset);
	INIT_CSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
			be32_to_cpu(fw_hdr->csem_int_table_data.offset);
	INIT_CSEM_PRAM_DATA(bp) = bp->firmware->data +
			be32_to_cpu(fw_hdr->csem_pram_data.offset);

	return 0;

init_offsets_alloc_err:
	kfree(bp->init_ops);
init_ops_alloc_err:
	kfree(bp->init_data);
request_firmware_exit:
	release_firmware(bp->firmware);

	return rc;
}
static int __devinit bnx2x_init_one(struct pci_dev *pdev,
				    const struct pci_device_id *ent)
{
	struct net_device *dev = NULL;
	struct bnx2x *bp;
	int pcie_width, pcie_speed;
	int rc;

	/* dev zeroed in init_etherdev */
	dev = alloc_etherdev_mq(sizeof(*bp), MAX_CONTEXT);
	if (!dev) {
		printk(KERN_ERR PFX "Cannot allocate net device\n");
		return -ENOMEM;
	}

	bp = netdev_priv(dev);
	bp->msglevel = debug;

	pci_set_drvdata(pdev, dev);

	rc = bnx2x_init_dev(pdev, dev);
	if (rc < 0) {
		free_netdev(dev);
		return rc;
	}

	rc = bnx2x_init_bp(bp);
	if (rc)
		goto init_one_exit;

	/* Set init arrays */
	rc = bnx2x_init_firmware(bp, &pdev->dev);
	if (rc) {
		printk(KERN_ERR PFX "Error loading firmware\n");
		goto init_one_exit;
	}

	rc = register_netdev(dev);
	if (rc) {
		dev_err(&pdev->dev, "Cannot register net device\n");
		goto init_one_exit;
	}

	bnx2x_get_pcie_width_speed(bp, &pcie_width, &pcie_speed);
	printk(KERN_INFO "%s: %s (%c%d) PCI-E x%d %s found at mem %lx,"
	       " IRQ %d, ", dev->name, board_info[ent->driver_data].name,
	       (CHIP_REV(bp) >> 12) + 'A', (CHIP_METAL(bp) >> 4),
	       pcie_width, (pcie_speed == 2) ? "5GHz (Gen2)" : "2.5GHz",
	       dev->base_addr, bp->pdev->irq);
	printk(KERN_CONT "node addr %pM\n", dev->dev_addr);

	return 0;

init_one_exit:
	if (bp->regview)
		iounmap(bp->regview);

	if (bp->doorbells)
		iounmap(bp->doorbells);

	free_netdev(dev);

	if (atomic_read(&pdev->enable_cnt) == 1)
		pci_release_regions(pdev);

	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);

	return rc;
}
static void __devexit bnx2x_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp;

	if (!dev) {
		printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
		return;
	}
	bp = netdev_priv(dev);

	unregister_netdev(dev);

	kfree(bp->init_ops_offsets);
	kfree(bp->init_ops);
	kfree(bp->init_data);
	release_firmware(bp->firmware);

	if (bp->regview)
		iounmap(bp->regview);

	if (bp->doorbells)
		iounmap(bp->doorbells);

	free_netdev(dev);

	if (atomic_read(&pdev->enable_cnt) == 1)
		pci_release_regions(pdev);

	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
}
static int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp;

	if (!dev) {
		printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
		return -ENODEV;
	}
	bp = netdev_priv(dev);

	rtnl_lock();

	pci_save_state(pdev);

	if (!netif_running(dev)) {
		rtnl_unlock();
		return 0;
	}

	netif_device_detach(dev);

	bnx2x_nic_unload(bp, UNLOAD_CLOSE);

	bnx2x_set_power_state(bp, pci_choose_state(pdev, state));

	rtnl_unlock();

	return 0;
}
static int bnx2x_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp;
	int rc;

	if (!dev) {
		printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
		return -ENODEV;
	}
	bp = netdev_priv(dev);

	rtnl_lock();

	pci_restore_state(pdev);

	if (!netif_running(dev)) {
		rtnl_unlock();
		return 0;
	}

	bnx2x_set_power_state(bp, PCI_D0);
	netif_device_attach(dev);

	rc = bnx2x_nic_load(bp, LOAD_OPEN);

	rtnl_unlock();

	return rc;
}
static int bnx2x_eeh_nic_unload(struct bnx2x *bp)
{
	int i;

	bp->state = BNX2X_STATE_ERROR;

	bp->rx_mode = BNX2X_RX_MODE_NONE;

	bnx2x_netif_stop(bp, 0);

	del_timer_sync(&bp->timer);
	bp->stats_state = STATS_STATE_DISABLED;
	DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");

	/* Release IRQs */
	bnx2x_free_irq(bp);

	if (CHIP_IS_E1(bp)) {
		struct mac_configuration_cmd *config =
						bnx2x_sp(bp, mcast_config);

		for (i = 0; i < config->hdr.length; i++)
			CAM_INVALIDATE(config->config_table[i]);
	}

	/* Free SKBs, SGEs, TPA pool and driver internals */
	bnx2x_free_skbs(bp);
	for_each_rx_queue(bp, i)
		bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
	for_each_rx_queue(bp, i)
		netif_napi_del(&bnx2x_fp(bp, i, napi));
	bnx2x_free_mem(bp);

	bp->state = BNX2X_STATE_CLOSED;

	netif_carrier_off(bp->dev);

	return 0;
}
static void bnx2x_eeh_recover(struct bnx2x *bp)
{
	u32 val;

	mutex_init(&bp->port.phy_mutex);

	bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
	bp->link_params.shmem_base = bp->common.shmem_base;
	BNX2X_DEV_INFO("shmem offset is 0x%x\n", bp->common.shmem_base);

	if (!bp->common.shmem_base ||
	    (bp->common.shmem_base < 0xA0000) ||
	    (bp->common.shmem_base >= 0xC0000)) {
		BNX2X_DEV_INFO("MCP not active\n");
		bp->flags |= NO_MCP_FLAG;
		return;
	}

	val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
	if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
		!= (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
		BNX2X_ERR("BAD MCP validity signature\n");

	if (!BP_NOMCP(bp)) {
		bp->fw_seq = (SHMEM_RD(bp, func_mb[BP_FUNC(bp)].drv_mb_header)
			      & DRV_MSG_SEQ_NUMBER_MASK);
		BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
	}
}
/**
 * bnx2x_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t bnx2x_io_error_detected(struct pci_dev *pdev,
						pci_channel_state_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp = netdev_priv(dev);

	rtnl_lock();

	netif_device_detach(dev);

	if (state == pci_channel_io_perm_failure) {
		rtnl_unlock();
		return PCI_ERS_RESULT_DISCONNECT;
	}

	if (netif_running(dev))
		bnx2x_eeh_nic_unload(bp);

	pci_disable_device(pdev);

	rtnl_unlock();

	/* Request a slot reset */
	return PCI_ERS_RESULT_NEED_RESET;
}
/**
 * bnx2x_io_slot_reset - called after the PCI bus has been reset
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot.
 */
static pci_ers_result_t bnx2x_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp = netdev_priv(dev);

	rtnl_lock();

	if (pci_enable_device(pdev)) {
		dev_err(&pdev->dev,
			"Cannot re-enable PCI device after reset\n");
		rtnl_unlock();
		return PCI_ERS_RESULT_DISCONNECT;
	}

	pci_set_master(pdev);
	pci_restore_state(pdev);

	if (netif_running(dev))
		bnx2x_set_power_state(bp, PCI_D0);

	rtnl_unlock();

	return PCI_ERS_RESULT_RECOVERED;
}
/**
 * bnx2x_io_resume - called when traffic can start flowing again
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells us that
 * its OK to resume normal operation.
 */
static void bnx2x_io_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp = netdev_priv(dev);

	rtnl_lock();

	bnx2x_eeh_recover(bp);

	if (netif_running(dev))
		bnx2x_nic_load(bp, LOAD_NORMAL);

	netif_device_attach(dev);

	rtnl_unlock();
}
static struct pci_error_handlers bnx2x_err_handler = {
	.error_detected = bnx2x_io_error_detected,
	.slot_reset     = bnx2x_io_slot_reset,
	.resume         = bnx2x_io_resume,
};
static struct pci_driver bnx2x_pci_driver = {
	.name        = DRV_MODULE_NAME,
	.id_table    = bnx2x_pci_tbl,
	.probe       = bnx2x_init_one,
	.remove      = __devexit_p(bnx2x_remove_one),
	.suspend     = bnx2x_suspend,
	.resume      = bnx2x_resume,
	.err_handler = &bnx2x_err_handler,
};
static int __init bnx2x_init(void)
{
	int ret;

	printk(KERN_INFO "%s", version);

	bnx2x_wq = create_singlethread_workqueue("bnx2x");
	if (bnx2x_wq == NULL) {
		printk(KERN_ERR PFX "Cannot create workqueue\n");
		return -ENOMEM;
	}

	ret = pci_register_driver(&bnx2x_pci_driver);
	if (ret) {
		printk(KERN_ERR PFX "Cannot register driver\n");
		destroy_workqueue(bnx2x_wq);
	}
	return ret;
}
static void __exit bnx2x_cleanup(void)
{
	pci_unregister_driver(&bnx2x_pci_driver);

	destroy_workqueue(bnx2x_wq);
}
module_init(bnx2x_init);
module_exit(bnx2x_cleanup);