/* bnx2x_main.c: Broadcom Everest network driver.
 *
 * Copyright (c) 2007-2009 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
 * Written by: Eliezer Tamir
 * Based on code from Michael Chan's bnx2 driver
 * UDP CSUM errata workaround by Arik Gendelman
 * Slowpath and fastpath rework by Vladislav Zolotarov
 * Statistics and Link management by Yitchak Gertner
 */
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/device.h>  /* for dev_info() */
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/dma-mapping.h>
#include <linux/bitops.h>
#include <linux/irq.h>
#include <linux/delay.h>
#include <asm/byteorder.h>
#include <linux/time.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/if_vlan.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <linux/workqueue.h>
#include <linux/crc32.h>
#include <linux/crc32c.h>
#include <linux/prefetch.h>
#include <linux/zlib.h>

#include "bnx2x_init.h"
#include "bnx2x_init_ops.h"
#include "bnx2x_dump.h"
#define DRV_MODULE_VERSION	"1.52.1"
#define DRV_MODULE_RELDATE	"2009/08/12"
#define BNX2X_BC_VER		0x040200

#include <linux/firmware.h>
#include "bnx2x_fw_file_hdr.h"

#define FW_FILE_PREFIX_E1	"bnx2x-e1-"
#define FW_FILE_PREFIX_E1H	"bnx2x-e1h-"

/* Time in jiffies before concluding the transmitter is hung */
#define TX_TIMEOUT		(5*HZ)

static char version[] __devinitdata =
	"Broadcom NetXtreme II 5771x 10Gigabit Ethernet Driver "
	DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
MODULE_AUTHOR("Eliezer Tamir");
MODULE_DESCRIPTION("Broadcom NetXtreme II BCM57710/57711/57711E Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);

static int multi_mode = 1;
module_param(multi_mode, int, 0);
MODULE_PARM_DESC(multi_mode, " Multi queue mode "
			     "(0 Disable; 1 Enable (default))");

static int num_rx_queues;
module_param(num_rx_queues, int, 0);
MODULE_PARM_DESC(num_rx_queues, " Number of Rx queues for multi_mode=1"
				" (default is half number of CPUs)");

static int num_tx_queues;
module_param(num_tx_queues, int, 0);
MODULE_PARM_DESC(num_tx_queues, " Number of Tx queues for multi_mode=1"
				" (default is half number of CPUs)");

static int disable_tpa;
module_param(disable_tpa, int, 0);
MODULE_PARM_DESC(disable_tpa, " Disable the TPA (LRO) feature");

module_param(int_mode, int, 0);
MODULE_PARM_DESC(int_mode, " Force interrupt mode (1 INT#x; 2 MSI)");

static int dropless_fc;
module_param(dropless_fc, int, 0);
MODULE_PARM_DESC(dropless_fc, " Pause on exhausted host ring");

module_param(poll, int, 0);
MODULE_PARM_DESC(poll, " Use polling (for debug)");

static int mrrs = -1;
module_param(mrrs, int, 0);
MODULE_PARM_DESC(mrrs, " Force Max Read Req Size (0..3) (for debug)");

module_param(debug, int, 0);
MODULE_PARM_DESC(debug, " Default debug msglevel");

static int load_count[3]; /* 0-common, 1-port0, 2-port1 */

static struct workqueue_struct *bnx2x_wq;
enum bnx2x_board_type {

/* indexed by board_type, above */
} board_info[] __devinitdata = {
	{ "Broadcom NetXtreme II BCM57710 XGb" },
	{ "Broadcom NetXtreme II BCM57711 XGb" },
	{ "Broadcom NetXtreme II BCM57711E XGb" }

static const struct pci_device_id bnx2x_pci_tbl[] = {
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57710), BCM57710 },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711), BCM57711 },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711E), BCM57711E },

MODULE_DEVICE_TABLE(pci, bnx2x_pci_tbl);
/****************************************************************************
* General service functions
****************************************************************************/

/* locking is done by mcp */
void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val)
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_DATA, val);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);

static u32 bnx2x_reg_rd_ind(struct bnx2x *bp, u32 addr)
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
	pci_read_config_dword(bp->pdev, PCICFG_GRC_DATA, &val);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);
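/*
 * Note on the two helpers above: registers are reached indirectly through
 * the PCICFG "GRC window" -- the target GRC address is written to
 * PCICFG_GRC_ADDRESS, the data moves through PCICFG_GRC_DATA, and the
 * window is parked back on PCICFG_VENDOR_ID_OFFSET so a stray config
 * cycle cannot hit a live register. A hedged usage sketch (the offset is
 * illustrative, not taken from this file):
 *
 *	bnx2x_reg_wr_ind(bp, GRCBASE_MISC + 0x100, 0x1);
 *	val = bnx2x_reg_rd_ind(bp, GRCBASE_MISC + 0x100);
 */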
static const u32 dmae_reg_go_c[] = {
	DMAE_REG_GO_C0, DMAE_REG_GO_C1, DMAE_REG_GO_C2, DMAE_REG_GO_C3,
	DMAE_REG_GO_C4, DMAE_REG_GO_C5, DMAE_REG_GO_C6, DMAE_REG_GO_C7,
	DMAE_REG_GO_C8, DMAE_REG_GO_C9, DMAE_REG_GO_C10, DMAE_REG_GO_C11,
	DMAE_REG_GO_C12, DMAE_REG_GO_C13, DMAE_REG_GO_C14, DMAE_REG_GO_C15

/* copy command into DMAE command memory and set DMAE command go */
static void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae,
	cmd_offset = (DMAE_REG_CMD_MEM + sizeof(struct dmae_command) * idx);
	for (i = 0; i < (sizeof(struct dmae_command)/4); i++) {
		REG_WR(bp, cmd_offset + i*4, *(((u32 *)dmae) + i));

		DP(BNX2X_MSG_OFF, "DMAE cmd[%d].%d (0x%08x) : 0x%08x\n",
		   idx, i, cmd_offset + i*4, *(((u32 *)dmae) + i));

	REG_WR(bp, dmae_reg_go_c[idx], 1);
void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
	struct dmae_command dmae;
	u32 *wb_comp = bnx2x_sp(bp, wb_comp);

	if (!bp->dmae_ready) {
		u32 *data = bnx2x_sp(bp, wb_data[0]);

		DP(BNX2X_MSG_OFF, "DMAE is not ready (dst_addr %08x len32 %d)"
		   " using indirect\n", dst_addr, len32);
		bnx2x_init_ind_wr(bp, dst_addr, data, len32);

	memset(&dmae, 0, sizeof(struct dmae_command));

	dmae.opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
		       DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
		       DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
		       DMAE_CMD_ENDIANITY_B_DW_SWAP |
		       DMAE_CMD_ENDIANITY_DW_SWAP |
		       (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
		       (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
	dmae.src_addr_lo = U64_LO(dma_addr);
	dmae.src_addr_hi = U64_HI(dma_addr);
	dmae.dst_addr_lo = dst_addr >> 2;
	dmae.dst_addr_hi = 0;
	dmae.comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
	dmae.comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
	dmae.comp_val = DMAE_COMP_VAL;

	DP(BNX2X_MSG_OFF, "DMAE: opcode 0x%08x\n"
	   DP_LEVEL "src_addr [%x:%08x] len [%d *4] "
		    "dst_addr [%x:%08x (%08x)]\n"
	   DP_LEVEL "comp_addr [%x:%08x] comp_val 0x%08x\n",
	   dmae.opcode, dmae.src_addr_hi, dmae.src_addr_lo,
	   dmae.len, dmae.dst_addr_hi, dmae.dst_addr_lo, dst_addr,
	   dmae.comp_addr_hi, dmae.comp_addr_lo, dmae.comp_val);
	DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
	   bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
	   bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);

	mutex_lock(&bp->dmae_mutex);

	bnx2x_post_dmae(bp, &dmae, INIT_DMAE_C(bp));

	while (*wb_comp != DMAE_COMP_VAL) {
		DP(BNX2X_MSG_OFF, "wb_comp 0x%08x\n", *wb_comp);

			BNX2X_ERR("DMAE timeout!\n");

		/* adjust delay for emulation/FPGA */
		if (CHIP_REV_IS_SLOW(bp))

	mutex_unlock(&bp->dmae_mutex);
void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
	struct dmae_command dmae;
	u32 *wb_comp = bnx2x_sp(bp, wb_comp);

	if (!bp->dmae_ready) {
		u32 *data = bnx2x_sp(bp, wb_data[0]);

		DP(BNX2X_MSG_OFF, "DMAE is not ready (src_addr %08x len32 %d)"
		   " using indirect\n", src_addr, len32);
		for (i = 0; i < len32; i++)
			data[i] = bnx2x_reg_rd_ind(bp, src_addr + i*4);

	memset(&dmae, 0, sizeof(struct dmae_command));

	dmae.opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
		       DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
		       DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
		       DMAE_CMD_ENDIANITY_B_DW_SWAP |
		       DMAE_CMD_ENDIANITY_DW_SWAP |
		       (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
		       (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
	dmae.src_addr_lo = src_addr >> 2;
	dmae.src_addr_hi = 0;
	dmae.dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_data));
	dmae.dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_data));
	dmae.comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
	dmae.comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
	dmae.comp_val = DMAE_COMP_VAL;

	DP(BNX2X_MSG_OFF, "DMAE: opcode 0x%08x\n"
	   DP_LEVEL "src_addr [%x:%08x] len [%d *4] "
		    "dst_addr [%x:%08x (%08x)]\n"
	   DP_LEVEL "comp_addr [%x:%08x] comp_val 0x%08x\n",
	   dmae.opcode, dmae.src_addr_hi, dmae.src_addr_lo,
	   dmae.len, dmae.dst_addr_hi, dmae.dst_addr_lo, src_addr,
	   dmae.comp_addr_hi, dmae.comp_addr_lo, dmae.comp_val);

	mutex_lock(&bp->dmae_mutex);

	memset(bnx2x_sp(bp, wb_data[0]), 0, sizeof(u32) * 4);

	bnx2x_post_dmae(bp, &dmae, INIT_DMAE_C(bp));

	while (*wb_comp != DMAE_COMP_VAL) {

			BNX2X_ERR("DMAE timeout!\n");

		/* adjust delay for emulation/FPGA */
		if (CHIP_REV_IS_SLOW(bp))

	DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
	   bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
	   bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);

	mutex_unlock(&bp->dmae_mutex);
void bnx2x_write_dmae_phys_len(struct bnx2x *bp, dma_addr_t phys_addr,
	while (len > DMAE_LEN32_WR_MAX) {
		bnx2x_write_dmae(bp, phys_addr + offset,
				 addr + offset, DMAE_LEN32_WR_MAX);
		offset += DMAE_LEN32_WR_MAX * 4;
		len -= DMAE_LEN32_WR_MAX;

	bnx2x_write_dmae(bp, phys_addr + offset, addr + offset, len);
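/*
 * The loop above splits one large write into DMAE_LEN32_WR_MAX-dword
 * chunks; 'len' counts dwords while 'offset' advances in bytes, hence
 * the "* 4". A hedged example, assuming DMAE_LEN32_WR_MAX == 0x400:
 * a 0x500-dword transfer becomes
 *
 *	bnx2x_write_dmae(bp, phys_addr,          addr,          0x400);
 *	bnx2x_write_dmae(bp, phys_addr + 0x1000, addr + 0x1000, 0x100);
 */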
/* used only for slowpath so not inlined */
static void bnx2x_wb_wr(struct bnx2x *bp, int reg, u32 val_hi, u32 val_lo)
	wb_write[0] = val_hi;
	wb_write[1] = val_lo;
	REG_WR_DMAE(bp, reg, wb_write, 2);

static u64 bnx2x_wb_rd(struct bnx2x *bp, int reg)
	REG_RD_DMAE(bp, reg, wb_data, 2);

	return HILO_U64(wb_data[0], wb_data[1]);
static int bnx2x_mc_assert(struct bnx2x *bp)
	u32 row0, row1, row2, row3;

	last_idx = REG_RD8(bp, BAR_XSTRORM_INTMEM +
			   XSTORM_ASSERT_LIST_INDEX_OFFSET);
		BNX2X_ERR("XSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("XSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);

	last_idx = REG_RD8(bp, BAR_TSTRORM_INTMEM +
			   TSTORM_ASSERT_LIST_INDEX_OFFSET);
		BNX2X_ERR("TSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("TSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);

	last_idx = REG_RD8(bp, BAR_CSTRORM_INTMEM +
			   CSTORM_ASSERT_LIST_INDEX_OFFSET);
		BNX2X_ERR("CSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("CSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);

	last_idx = REG_RD8(bp, BAR_USTRORM_INTMEM +
			   USTORM_ASSERT_LIST_INDEX_OFFSET);
		BNX2X_ERR("USTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("USTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
static void bnx2x_fw_dump(struct bnx2x *bp)
	mark = REG_RD(bp, MCP_REG_MCPR_SCRATCH + 0xf104);
	mark = ((mark + 0x3) & ~0x3);
	printk(KERN_ERR PFX "begin fw dump (mark 0x%x)\n", mark);

	printk(KERN_ERR PFX);
	for (offset = mark - 0x08000000; offset <= 0xF900; offset += 0x8*4) {
		for (word = 0; word < 8; word++)
			data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
		printk(KERN_CONT "%s", (char *)data);
	for (offset = 0xF108; offset <= mark - 0x08000000; offset += 0x8*4) {
		for (word = 0; word < 8; word++)
			data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
		printk(KERN_CONT "%s", (char *)data);
	printk(KERN_ERR PFX "end of fw dump\n");
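/*
 * The two loops above walk the MCP scratchpad as a circular text buffer:
 * 'mark' (rounded up to a dword) points at the oldest byte, so the dump
 * prints from mark to the end of the buffer and then wraps from the
 * start (0xF108) back up to mark. The htonl() keeps the byte order of
 * the firmware's log strings intact on little-endian hosts.
 */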
static void bnx2x_panic_dump(struct bnx2x *bp)
	bp->stats_state = STATS_STATE_DISABLED;
	DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");

	BNX2X_ERR("begin crash dump -----------------\n");

	BNX2X_ERR("def_c_idx(%u) def_u_idx(%u) def_x_idx(%u)"
		  " def_t_idx(%u) def_att_idx(%u) attn_state(%u)"
		  " spq_prod_idx(%u)\n",
		  bp->def_c_idx, bp->def_u_idx, bp->def_x_idx, bp->def_t_idx,
		  bp->def_att_idx, bp->attn_state, bp->spq_prod_idx);

	for_each_rx_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		BNX2X_ERR("fp%d: rx_bd_prod(%x) rx_bd_cons(%x)"
			  " *rx_bd_cons_sb(%x) rx_comp_prod(%x)"
			  " rx_comp_cons(%x) *rx_cons_sb(%x)\n",
			  i, fp->rx_bd_prod, fp->rx_bd_cons,
			  le16_to_cpu(*fp->rx_bd_cons_sb), fp->rx_comp_prod,
			  fp->rx_comp_cons, le16_to_cpu(*fp->rx_cons_sb));
		BNX2X_ERR(" rx_sge_prod(%x) last_max_sge(%x)"
			  " fp_u_idx(%x) *sb_u_idx(%x)\n",
			  fp->rx_sge_prod, fp->last_max_sge,
			  le16_to_cpu(fp->fp_u_idx),
			  fp->status_blk->u_status_block.status_block_index);

	for_each_tx_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		BNX2X_ERR("fp%d: tx_pkt_prod(%x) tx_pkt_cons(%x)"
			  " tx_bd_prod(%x) tx_bd_cons(%x) *tx_cons_sb(%x)\n",
			  i, fp->tx_pkt_prod, fp->tx_pkt_cons, fp->tx_bd_prod,
			  fp->tx_bd_cons, le16_to_cpu(*fp->tx_cons_sb));
		BNX2X_ERR(" fp_c_idx(%x) *sb_c_idx(%x)"
			  " tx_db_prod(%x)\n", le16_to_cpu(fp->fp_c_idx),
			  fp->status_blk->c_status_block.status_block_index,
			  fp->tx_db.data.prod);

	for_each_rx_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10);
		end = RX_BD(le16_to_cpu(*fp->rx_cons_sb) + 503);
		for (j = start; j != end; j = RX_BD(j + 1)) {
			u32 *rx_bd = (u32 *)&fp->rx_desc_ring[j];
			struct sw_rx_bd *sw_bd = &fp->rx_buf_ring[j];

			BNX2X_ERR("fp%d: rx_bd[%x]=[%x:%x] sw_bd=[%p]\n",
				  i, j, rx_bd[1], rx_bd[0], sw_bd->skb);

		start = RX_SGE(fp->rx_sge_prod);
		end = RX_SGE(fp->last_max_sge);
		for (j = start; j != end; j = RX_SGE(j + 1)) {
			u32 *rx_sge = (u32 *)&fp->rx_sge_ring[j];
			struct sw_rx_page *sw_page = &fp->rx_page_ring[j];

			BNX2X_ERR("fp%d: rx_sge[%x]=[%x:%x] sw_page=[%p]\n",
				  i, j, rx_sge[1], rx_sge[0], sw_page->page);

		start = RCQ_BD(fp->rx_comp_cons - 10);
		end = RCQ_BD(fp->rx_comp_cons + 503);
		for (j = start; j != end; j = RCQ_BD(j + 1)) {
			u32 *cqe = (u32 *)&fp->rx_comp_ring[j];

			BNX2X_ERR("fp%d: cqe[%x]=[%x:%x:%x:%x]\n",
				  i, j, cqe[0], cqe[1], cqe[2], cqe[3]);

	for_each_tx_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		start = TX_BD(le16_to_cpu(*fp->tx_cons_sb) - 10);
		end = TX_BD(le16_to_cpu(*fp->tx_cons_sb) + 245);
		for (j = start; j != end; j = TX_BD(j + 1)) {
			struct sw_tx_bd *sw_bd = &fp->tx_buf_ring[j];

			BNX2X_ERR("fp%d: packet[%x]=[%p,%x]\n",
				  i, j, sw_bd->skb, sw_bd->first_bd);

		start = TX_BD(fp->tx_bd_cons - 10);
		end = TX_BD(fp->tx_bd_cons + 254);
		for (j = start; j != end; j = TX_BD(j + 1)) {
			u32 *tx_bd = (u32 *)&fp->tx_desc_ring[j];

			BNX2X_ERR("fp%d: tx_bd[%x]=[%x:%x:%x:%x]\n",
				  i, j, tx_bd[0], tx_bd[1], tx_bd[2], tx_bd[3]);

	BNX2X_ERR("end crash dump -----------------\n");
static void bnx2x_int_enable(struct bnx2x *bp)
	int port = BP_PORT(bp);
	u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
	u32 val = REG_RD(bp, addr);
	int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
	int msi = (bp->flags & USING_MSI_FLAG) ? 1 : 0;

		val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			 HC_CONFIG_0_REG_INT_LINE_EN_0);
		val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);
		val &= ~HC_CONFIG_0_REG_INT_LINE_EN_0;
		val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);
		val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_INT_LINE_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);

		DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",

		REG_WR(bp, addr, val);

		val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0;

	DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x) mode %s\n",
	   val, port, addr, (msix ? "MSI-X" : (msi ? "MSI" : "INTx")));

	REG_WR(bp, addr, val);
	 * Ensure that HC_CONFIG is written before leading/trailing edge config

	if (CHIP_IS_E1H(bp)) {
		/* init leading/trailing edge */
			val = (0xee0f | (1 << (BP_E1HVN(bp) + 4)));
			/* enable nig and gpio3 attention */

		REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
		REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);

	/* Make sure that interrupts are indeed enabled from here on */
static void bnx2x_int_disable(struct bnx2x *bp)
	int port = BP_PORT(bp);
	u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
	u32 val = REG_RD(bp, addr);

	val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
		 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
		 HC_CONFIG_0_REG_INT_LINE_EN_0 |
		 HC_CONFIG_0_REG_ATTN_BIT_EN_0);

	DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",

	/* flush all outstanding writes */

	REG_WR(bp, addr, val);
	if (REG_RD(bp, addr) != val)
		BNX2X_ERR("BUG! proper val not read from IGU!\n");
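	/*
	 * The read-back above is deliberate: PCI writes are posted, so
	 * reading HC_REG_CONFIG_* back both flushes the disable to the
	 * chip and verifies that the IGU actually latched it before the
	 * caller starts tearing the ISRs down.
	 */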
static void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw)
	int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;

	/* disable interrupt handling */
	atomic_inc(&bp->intr_sem);
	smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */

		/* prevent the HW from sending interrupts */
		bnx2x_int_disable(bp);

	/* make sure all ISRs are done */
		synchronize_irq(bp->msix_table[0].vector);

		for_each_queue(bp, i)
			synchronize_irq(bp->msix_table[i + offset].vector);
		synchronize_irq(bp->pdev->irq);

	/* make sure sp_task is not running */
	cancel_delayed_work(&bp->sp_task);
	flush_workqueue(bnx2x_wq);
/* General service functions */

static inline void bnx2x_ack_sb(struct bnx2x *bp, u8 sb_id,
				u8 storm, u16 index, u8 op, u8 update)
	u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
		       COMMAND_REG_INT_ACK);
	struct igu_ack_register igu_ack;

	igu_ack.status_block_index = index;
	igu_ack.sb_id_and_flags =
			((sb_id << IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT) |
			 (storm << IGU_ACK_REGISTER_STORM_ID_SHIFT) |
			 (update << IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT) |
			 (op << IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT));

	DP(BNX2X_MSG_OFF, "write 0x%08x to HC addr 0x%x\n",
	   (*(u32 *)&igu_ack), hc_addr);
	REG_WR(bp, hc_addr, (*(u32 *)&igu_ack));

	/* Make sure that ACK is written */
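	/*
	 * igu_ack above is a packed 32-bit command: the status-block id,
	 * storm id, the "update index" flag and the interrupt mode
	 * (IGU_INT_ENABLE / IGU_INT_DISABLE / IGU_INT_NOP) are OR-ed into
	 * sb_id_and_flags, and the whole register image is written to the
	 * HC command window as a single dword, so the ack and the index
	 * update reach the IGU atomically.
	 */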
static inline u16 bnx2x_update_fpsb_idx(struct bnx2x_fastpath *fp)
	struct host_status_block *fpsb = fp->status_blk;

	barrier(); /* status block is written to by the chip */
	if (fp->fp_c_idx != fpsb->c_status_block.status_block_index) {
		fp->fp_c_idx = fpsb->c_status_block.status_block_index;
	if (fp->fp_u_idx != fpsb->u_status_block.status_block_index) {
		fp->fp_u_idx = fpsb->u_status_block.status_block_index;

static u16 bnx2x_ack_int(struct bnx2x *bp)
	u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
		       COMMAND_REG_SIMD_MASK);
	u32 result = REG_RD(bp, hc_addr);

	DP(BNX2X_MSG_OFF, "read 0x%08x from HC addr 0x%x\n",
/* fast path service functions */

static inline int bnx2x_has_tx_work_unload(struct bnx2x_fastpath *fp)
	/* Tell compiler that consumer and producer can change */
	return (fp->tx_pkt_prod != fp->tx_pkt_cons);

/* free skb in the packet ring at pos idx
 * return idx of last bd freed
 */
static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fastpath *fp,
	struct sw_tx_bd *tx_buf = &fp->tx_buf_ring[idx];
	struct eth_tx_start_bd *tx_start_bd;
	struct eth_tx_bd *tx_data_bd;
	struct sk_buff *skb = tx_buf->skb;
	u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;

	DP(BNX2X_MSG_OFF, "pkt_idx %d buff @(%p)->skb %p\n",

	DP(BNX2X_MSG_OFF, "free bd_idx %d\n", bd_idx);
	tx_start_bd = &fp->tx_desc_ring[bd_idx].start_bd;
	pci_unmap_single(bp->pdev, BD_UNMAP_ADDR(tx_start_bd),
			 BD_UNMAP_LEN(tx_start_bd), PCI_DMA_TODEVICE);

	nbd = le16_to_cpu(tx_start_bd->nbd) - 1;
#ifdef BNX2X_STOP_ON_ERROR
	if ((nbd - 1) > (MAX_SKB_FRAGS + 2)) {
		BNX2X_ERR("BAD nbd!\n");
	new_cons = nbd + tx_buf->first_bd;

	/* Get the next bd */
	bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));

	/* Skip a parse bd... */
		bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));

	/* ...and the TSO split header bd since they have no mapping */
	if (tx_buf->flags & BNX2X_TSO_SPLIT_BD) {
		bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));

		DP(BNX2X_MSG_OFF, "free frag bd_idx %d\n", bd_idx);
		tx_data_bd = &fp->tx_desc_ring[bd_idx].reg_bd;
		pci_unmap_page(bp->pdev, BD_UNMAP_ADDR(tx_data_bd),
			       BD_UNMAP_LEN(tx_data_bd), PCI_DMA_TODEVICE);
		bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));

	dev_kfree_skb_any(skb);
	tx_buf->first_bd = 0;
static inline u16 bnx2x_tx_avail(struct bnx2x_fastpath *fp)
	barrier(); /* Tell compiler that prod and cons can change */
	prod = fp->tx_bd_prod;
	cons = fp->tx_bd_cons;

	/* NUM_TX_RINGS = number of "next-page" entries
	   It will be used as a threshold */
	used = SUB_S16(prod, cons) + (s16)NUM_TX_RINGS;

#ifdef BNX2X_STOP_ON_ERROR
	WARN_ON(used > fp->bp->tx_ring_size);
	WARN_ON((fp->bp->tx_ring_size - used) > MAX_TX_AVAIL);

	return (s16)(fp->bp->tx_ring_size) - used;
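/*
 * Worked example for the accounting above (hedged): the BD chain is
 * built of pages whose last descriptor links to the next page, so those
 * NUM_TX_RINGS "next-page" BDs can never carry data. Counting them as
 * permanently "used" means that with prod == cons (an empty ring) the
 * function reports tx_ring_size - NUM_TX_RINGS free data BDs rather
 * than overstating capacity.
 */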
static void bnx2x_tx_int(struct bnx2x_fastpath *fp)
	struct bnx2x *bp = fp->bp;
	struct netdev_queue *txq;
	u16 hw_cons, sw_cons, bd_cons = fp->tx_bd_cons;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))

	txq = netdev_get_tx_queue(bp->dev, fp->index - bp->num_rx_queues);
	hw_cons = le16_to_cpu(*fp->tx_cons_sb);
	sw_cons = fp->tx_pkt_cons;

	while (sw_cons != hw_cons) {

		pkt_cons = TX_BD(sw_cons);

		/* prefetch(bp->tx_buf_ring[pkt_cons].skb); */

		DP(NETIF_MSG_TX_DONE, "hw_cons %u sw_cons %u pkt_cons %u\n",
		   hw_cons, sw_cons, pkt_cons);

		/* if (NEXT_TX_IDX(sw_cons) != hw_cons) {
			prefetch(fp->tx_buf_ring[NEXT_TX_IDX(sw_cons)].skb);

		bd_cons = bnx2x_free_tx_pkt(bp, fp, pkt_cons);

	fp->tx_pkt_cons = sw_cons;
	fp->tx_bd_cons = bd_cons;

	/* TBD need a thresh? */
	if (unlikely(netif_tx_queue_stopped(txq))) {

		/* Need to make the tx_bd_cons update visible to start_xmit()
		 * before checking for netif_tx_queue_stopped(). Without the
		 * memory barrier, there is a small possibility that
		 * start_xmit() will miss it and cause the queue to be stopped

		if ((netif_tx_queue_stopped(txq)) &&
		    (bp->state == BNX2X_STATE_OPEN) &&
		    (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3))
			netif_tx_wake_queue(txq);
static void bnx2x_sp_event(struct bnx2x_fastpath *fp,
			   union eth_rx_cqe *rr_cqe)
	struct bnx2x *bp = fp->bp;
	int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data);
	int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data);

	   "fp %d cid %d got ramrod #%d state is %x type is %d\n",
	   fp->index, cid, command, bp->state,
	   rr_cqe->ramrod_cqe.ramrod_type);

		switch (command | fp->state) {
		case (RAMROD_CMD_ID_ETH_CLIENT_SETUP |
						BNX2X_FP_STATE_OPENING):
			DP(NETIF_MSG_IFUP, "got MULTI[%d] setup ramrod\n",
			fp->state = BNX2X_FP_STATE_OPEN;

		case (RAMROD_CMD_ID_ETH_HALT | BNX2X_FP_STATE_HALTING):
			DP(NETIF_MSG_IFDOWN, "got MULTI[%d] halt ramrod\n",
			fp->state = BNX2X_FP_STATE_HALTED;

			BNX2X_ERR("unexpected MC reply (%d) "
				  "fp->state is %x\n", command, fp->state);

		mb(); /* force bnx2x_wait_ramrod() to see the change */

	switch (command | bp->state) {
	case (RAMROD_CMD_ID_ETH_PORT_SETUP | BNX2X_STATE_OPENING_WAIT4_PORT):
		DP(NETIF_MSG_IFUP, "got setup ramrod\n");
		bp->state = BNX2X_STATE_OPEN;

	case (RAMROD_CMD_ID_ETH_HALT | BNX2X_STATE_CLOSING_WAIT4_HALT):
		DP(NETIF_MSG_IFDOWN, "got halt ramrod\n");
		bp->state = BNX2X_STATE_CLOSING_WAIT4_DELETE;
		fp->state = BNX2X_FP_STATE_HALTED;

	case (RAMROD_CMD_ID_ETH_CFC_DEL | BNX2X_STATE_CLOSING_WAIT4_HALT):
		DP(NETIF_MSG_IFDOWN, "got delete ramrod for MULTI[%d]\n", cid);
		bnx2x_fp(bp, cid, state) = BNX2X_FP_STATE_CLOSED;

	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_OPEN):
	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_DIAG):
		DP(NETIF_MSG_IFUP, "got set mac ramrod\n");
		bp->set_mac_pending = 0;

	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_CLOSING_WAIT4_HALT):
	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_DISABLED):
		DP(NETIF_MSG_IFDOWN, "got (un)set mac ramrod\n");

		BNX2X_ERR("unexpected MC reply (%d) bp->state is %x\n",
			  command, bp->state);

	mb(); /* force bnx2x_wait_ramrod() to see the change */
static inline void bnx2x_free_rx_sge(struct bnx2x *bp,
				     struct bnx2x_fastpath *fp, u16 index)
	struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
	struct page *page = sw_buf->page;
	struct eth_rx_sge *sge = &fp->rx_sge_ring[index];

	/* Skip "next page" elements */

	pci_unmap_page(bp->pdev, pci_unmap_addr(sw_buf, mapping),
		       SGE_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);
	__free_pages(page, PAGES_PER_SGE_SHIFT);

	sw_buf->page = NULL;

static inline void bnx2x_free_rx_sge_range(struct bnx2x *bp,
					   struct bnx2x_fastpath *fp, int last)
	for (i = 0; i < last; i++)
		bnx2x_free_rx_sge(bp, fp, i);

static inline int bnx2x_alloc_rx_sge(struct bnx2x *bp,
				     struct bnx2x_fastpath *fp, u16 index)
	struct page *page = alloc_pages(GFP_ATOMIC, PAGES_PER_SGE_SHIFT);
	struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
	struct eth_rx_sge *sge = &fp->rx_sge_ring[index];

	if (unlikely(page == NULL))

	mapping = pci_map_page(bp->pdev, page, 0, SGE_PAGE_SIZE*PAGES_PER_SGE,
			       PCI_DMA_FROMDEVICE);
	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
		__free_pages(page, PAGES_PER_SGE_SHIFT);

	sw_buf->page = page;
	pci_unmap_addr_set(sw_buf, mapping, mapping);

	sge->addr_hi = cpu_to_le32(U64_HI(mapping));
	sge->addr_lo = cpu_to_le32(U64_LO(mapping));
static inline int bnx2x_alloc_rx_skb(struct bnx2x *bp,
				     struct bnx2x_fastpath *fp, u16 index)
	struct sk_buff *skb;
	struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
	struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];

	skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
	if (unlikely(skb == NULL))

	mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_size,
				 PCI_DMA_FROMDEVICE);
	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {

	pci_unmap_addr_set(rx_buf, mapping, mapping);

	rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

/* note that we are not allocating a new skb,
 * we are just moving one from cons to prod;
 * we are not creating a new mapping,
 * so there is no need to check for dma_mapping_error().
 */
static void bnx2x_reuse_rx_skb(struct bnx2x_fastpath *fp,
			       struct sk_buff *skb, u16 cons, u16 prod)
	struct bnx2x *bp = fp->bp;
	struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
	struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
	struct eth_rx_bd *cons_bd = &fp->rx_desc_ring[cons];
	struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];

	pci_dma_sync_single_for_device(bp->pdev,
				       pci_unmap_addr(cons_rx_buf, mapping),
				       RX_COPY_THRESH, PCI_DMA_FROMDEVICE);

	prod_rx_buf->skb = cons_rx_buf->skb;
	pci_unmap_addr_set(prod_rx_buf, mapping,
			   pci_unmap_addr(cons_rx_buf, mapping));
	*prod_bd = *cons_bd;
static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
	u16 last_max = fp->last_max_sge;

	if (SUB_S16(idx, last_max) > 0)
		fp->last_max_sge = idx;

static void bnx2x_clear_sge_mask_next_elems(struct bnx2x_fastpath *fp)
	for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
		int idx = RX_SGE_CNT * i - 1;

		for (j = 0; j < 2; j++) {
			SGE_MASK_CLEAR_BIT(fp, idx);

static void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
				  struct eth_fast_path_rx_cqe *fp_cqe)
	struct bnx2x *bp = fp->bp;
	u16 sge_len = SGE_PAGE_ALIGN(le16_to_cpu(fp_cqe->pkt_len) -
				     le16_to_cpu(fp_cqe->len_on_bd)) >>
	u16 last_max, last_elem, first_elem;

	/* First mark all used pages */
	for (i = 0; i < sge_len; i++)
		SGE_MASK_CLEAR_BIT(fp, RX_SGE(le16_to_cpu(fp_cqe->sgl[i])));

	DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
	   sge_len - 1, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));

	/* Here we assume that the last SGE index is the biggest */
	prefetch((void *)(fp->sge_mask));
	bnx2x_update_last_max_sge(fp, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));

	last_max = RX_SGE(fp->last_max_sge);
	last_elem = last_max >> RX_SGE_MASK_ELEM_SHIFT;
	first_elem = RX_SGE(fp->rx_sge_prod) >> RX_SGE_MASK_ELEM_SHIFT;

	/* If ring is not full */
	if (last_elem + 1 != first_elem)

	/* Now update the prod */
	for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
		if (likely(fp->sge_mask[i]))

		fp->sge_mask[i] = RX_SGE_MASK_ELEM_ONE_MASK;
		delta += RX_SGE_MASK_ELEM_SZ;

		fp->rx_sge_prod += delta;
		/* clear page-end entries */
		bnx2x_clear_sge_mask_next_elems(fp);

	DP(NETIF_MSG_RX_STATUS,
	   "fp->last_max_sge = %d fp->rx_sge_prod = %d\n",
	   fp->last_max_sge, fp->rx_sge_prod);
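	/*
	 * Worked example for the bookkeeping above (hedged): fp->sge_mask
	 * is a bitmap with one bit per SGE, grouped into 64-bit elements.
	 * Completed SGEs get their bit cleared; the producer may only be
	 * advanced over whole elements that read zero, so a still-pending
	 * element stops the sweep, while each fully-consumed element adds
	 * RX_SGE_MASK_ELEM_SZ to 'delta' and is refilled to all ones for
	 * the next lap around the ring.
	 */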
static inline void bnx2x_init_sge_ring_bit_mask(struct bnx2x_fastpath *fp)
	/* Set the mask to all 1-s: it's faster to compare to 0 than to 0xf-s */
	memset(fp->sge_mask, 0xff,
	       (NUM_RX_SGE >> RX_SGE_MASK_ELEM_SHIFT)*sizeof(u64));

	/* Clear the two last indices in the page to 1:
	   these are the indices that correspond to the "next" element,
	   hence will never be indicated and should be removed from
	   the calculations. */
	bnx2x_clear_sge_mask_next_elems(fp);
static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
			    struct sk_buff *skb, u16 cons, u16 prod)
	struct bnx2x *bp = fp->bp;
	struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
	struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
	struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];

	/* move empty skb from pool to prod and map it */
	prod_rx_buf->skb = fp->tpa_pool[queue].skb;
	mapping = pci_map_single(bp->pdev, fp->tpa_pool[queue].skb->data,
				 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
	pci_unmap_addr_set(prod_rx_buf, mapping, mapping);

	/* move partial skb from cons to pool (don't unmap yet) */
	fp->tpa_pool[queue] = *cons_rx_buf;

	/* mark bin state as start - print error if current state != stop */
	if (fp->tpa_state[queue] != BNX2X_TPA_STOP)
		BNX2X_ERR("start of bin not in stop [%d]\n", queue);

	fp->tpa_state[queue] = BNX2X_TPA_START;

	/* point prod_bd to new skb */
	prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

#ifdef BNX2X_STOP_ON_ERROR
	fp->tpa_queue_used |= (1 << queue);
#ifdef __powerpc64__
	DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n",
	DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
	   fp->tpa_queue_used);
static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			       struct sk_buff *skb,
			       struct eth_fast_path_rx_cqe *fp_cqe,
	struct sw_rx_page *rx_pg, old_rx_pg;
	u16 len_on_bd = le16_to_cpu(fp_cqe->len_on_bd);
	u32 i, frag_len, frag_size, pages;

	frag_size = le16_to_cpu(fp_cqe->pkt_len) - len_on_bd;
	pages = SGE_PAGE_ALIGN(frag_size) >> SGE_PAGE_SHIFT;

	/* This is needed in order to enable forwarding support */
		skb_shinfo(skb)->gso_size = min((u32)SGE_PAGE_SIZE,
					   max(frag_size, (u32)len_on_bd));
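	/*
	 * Hedged note on the gso_size choice above: when an aggregated
	 * (LRO-style) packet is forwarded rather than consumed locally,
	 * the stack must be able to re-segment it. Clamping gso_size to
	 * one SGE page, but never below len_on_bd, gives GSO a sane
	 * segment size; e.g. with SGE_PAGE_SIZE == 4096 a 26000-byte
	 * aggregate would be re-split into roughly 4096-byte chunks on
	 * the way out (the numbers are illustrative).
	 */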
#ifdef BNX2X_STOP_ON_ERROR
	    min((u32)8, (u32)MAX_SKB_FRAGS) * SGE_PAGE_SIZE * PAGES_PER_SGE) {
		BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
		BNX2X_ERR("fp_cqe->pkt_len = %d fp_cqe->len_on_bd = %d\n",
			  fp_cqe->pkt_len, len_on_bd);

	/* Run through the SGL and compose the fragmented skb */
	for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
		u16 sge_idx = RX_SGE(le16_to_cpu(fp_cqe->sgl[j]));

		/* FW gives the indices of the SGE as if the ring is an array
		   (meaning that the "next" element will consume 2 indices) */
		frag_len = min(frag_size, (u32)(SGE_PAGE_SIZE*PAGES_PER_SGE));
		rx_pg = &fp->rx_page_ring[sge_idx];

		/* If we fail to allocate a substitute page, we simply stop
		   where we are and drop the whole packet */
		err = bnx2x_alloc_rx_sge(bp, fp, sge_idx);
		if (unlikely(err)) {
			fp->eth_q_stats.rx_skb_alloc_failed++;

		/* Unmap the page as we are going to pass it to the stack */
		pci_unmap_page(bp->pdev, pci_unmap_addr(&old_rx_pg, mapping),
			       SGE_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);

		/* Add one frag and update the appropriate fields in the skb */
		skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);

		skb->data_len += frag_len;
		skb->truesize += frag_len;
		skb->len += frag_len;

		frag_size -= frag_len;
static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			   u16 queue, int pad, int len, union eth_rx_cqe *cqe,
	struct sw_rx_bd *rx_buf = &fp->tpa_pool[queue];
	struct sk_buff *skb = rx_buf->skb;
	struct sk_buff *new_skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);

	/* Unmap skb in the pool anyway, as we are going to change
	   pool entry status to BNX2X_TPA_STOP even if new skb allocation
	   fails. */
	pci_unmap_single(bp->pdev, pci_unmap_addr(rx_buf, mapping),
			 bp->rx_buf_size, PCI_DMA_FROMDEVICE);

	if (likely(new_skb)) {
		/* fix ip xsum and give it to the stack */
		/* (no need to map the new skb) */
			(le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
			 PARSING_FLAGS_VLAN);
		int is_not_hwaccel_vlan_cqe =
			(is_vlan_cqe && (!(bp->flags & HW_VLAN_RX_FLAG)));

		prefetch(((char *)(skb)) + 128);

#ifdef BNX2X_STOP_ON_ERROR
		if (pad + len > bp->rx_buf_size) {
			BNX2X_ERR("skb_put is about to fail... "
				  "pad %d len %d rx_buf_size %d\n",
				  pad, len, bp->rx_buf_size);

		skb_reserve(skb, pad);

		skb->protocol = eth_type_trans(skb, bp->dev);
		skb->ip_summed = CHECKSUM_UNNECESSARY;

			iph = (struct iphdr *)skb->data;

			/* If there is no Rx VLAN offloading -
			   take VLAN tag into an account */
			if (unlikely(is_not_hwaccel_vlan_cqe))
				iph = (struct iphdr *)((u8 *)iph + VLAN_HLEN);

			iph->check = ip_fast_csum((u8 *)iph, iph->ihl);

		if (!bnx2x_fill_frag_skb(bp, fp, skb,
					 &cqe->fast_path_cqe, cqe_idx)) {
			if ((bp->vlgrp != NULL) && is_vlan_cqe &&
			    (!is_not_hwaccel_vlan_cqe))
				vlan_hwaccel_receive_skb(skb, bp->vlgrp,
					    le16_to_cpu(cqe->fast_path_cqe.
				netif_receive_skb(skb);
			DP(NETIF_MSG_RX_STATUS, "Failed to allocate new pages"
			   " - dropping packet!\n");

		/* put new skb in bin */
		fp->tpa_pool[queue].skb = new_skb;

		/* else drop the packet and keep the buffer in the bin */
		DP(NETIF_MSG_RX_STATUS,
		   "Failed to allocate new skb - dropping packet!\n");
		fp->eth_q_stats.rx_skb_alloc_failed++;

	fp->tpa_state[queue] = BNX2X_TPA_STOP;
static inline void bnx2x_update_rx_prod(struct bnx2x *bp,
					struct bnx2x_fastpath *fp,
					u16 bd_prod, u16 rx_comp_prod,
	struct ustorm_eth_rx_producers rx_prods = {0};

	/* Update producers */
	rx_prods.bd_prod = bd_prod;
	rx_prods.cqe_prod = rx_comp_prod;
	rx_prods.sge_prod = rx_sge_prod;

	/*
	 * Make sure that the BD and SGE data is updated before updating the
	 * producers since FW might read the BD/SGE right after the producer
	 * is updated.
	 * This is only applicable for weak-ordered memory model archs such
	 * as IA-64. The following barrier is also mandatory since FW
	 * assumes BDs must have buffers.
	 */

	for (i = 0; i < sizeof(struct ustorm_eth_rx_producers)/4; i++)
		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_RX_PRODS_OFFSET(BP_PORT(bp), fp->cl_id) + i*4,
		       ((u32 *)&rx_prods)[i]);

	mmiowb(); /* keep prod updates ordered */

	DP(NETIF_MSG_RX_STATUS,
	   "queue[%d]: wrote bd_prod %u cqe_prod %u sge_prod %u\n",
	   fp->index, bd_prod, rx_comp_prod, rx_sge_prod);
static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
	struct bnx2x *bp = fp->bp;
	u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
	u16 hw_comp_cons, sw_comp_cons, sw_comp_prod;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))

	/* CQ "next element" is of the size of the regular element,
	   that's why it's ok here */
	hw_comp_cons = le16_to_cpu(*fp->rx_cons_sb);
	if ((hw_comp_cons & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)

	bd_cons = fp->rx_bd_cons;
	bd_prod = fp->rx_bd_prod;
	bd_prod_fw = bd_prod;
	sw_comp_cons = fp->rx_comp_cons;
	sw_comp_prod = fp->rx_comp_prod;

	/* Memory barrier necessary as speculative reads of the rx
	 * buffer can be ahead of the index in the status block
	 */

	DP(NETIF_MSG_RX_STATUS,
	   "queue[%d]: hw_comp_cons %u sw_comp_cons %u\n",
	   fp->index, hw_comp_cons, sw_comp_cons);

	while (sw_comp_cons != hw_comp_cons) {
		struct sw_rx_bd *rx_buf = NULL;
		struct sk_buff *skb;
		union eth_rx_cqe *cqe;

		comp_ring_cons = RCQ_BD(sw_comp_cons);
		bd_prod = RX_BD(bd_prod);
		bd_cons = RX_BD(bd_cons);

		/* Prefetch the page containing the BD descriptor
		   at producer's index. It will be needed when new skb is
		   allocated */
		prefetch((void *)(PAGE_ALIGN((unsigned long)
					     (&fp->rx_desc_ring[bd_prod])) -

		cqe = &fp->rx_comp_ring[comp_ring_cons];
		cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;

		DP(NETIF_MSG_RX_STATUS, "CQE type %x err %x status %x"
		   " queue %x vlan %x len %u\n", CQE_TYPE(cqe_fp_flags),
		   cqe_fp_flags, cqe->fast_path_cqe.status_flags,
		   le32_to_cpu(cqe->fast_path_cqe.rss_hash_result),
		   le16_to_cpu(cqe->fast_path_cqe.vlan_tag),
		   le16_to_cpu(cqe->fast_path_cqe.pkt_len));

		/* is this a slowpath msg? */
		if (unlikely(CQE_TYPE(cqe_fp_flags))) {
			bnx2x_sp_event(fp, cqe);

		/* this is an rx packet */
			rx_buf = &fp->rx_buf_ring[bd_cons];

			len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
			pad = cqe->fast_path_cqe.placement_offset;

			/* If CQE is marked both TPA_START and TPA_END
			   it is a non-TPA CQE */
			if ((!fp->disable_tpa) &&
			    (TPA_TYPE(cqe_fp_flags) !=
					(TPA_TYPE_START | TPA_TYPE_END))) {
				u16 queue = cqe->fast_path_cqe.queue_index;

				if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_START) {
					DP(NETIF_MSG_RX_STATUS,
					   "calling tpa_start on queue %d\n",

					bnx2x_tpa_start(fp, queue, skb,

				if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_END) {
					DP(NETIF_MSG_RX_STATUS,
					   "calling tpa_stop on queue %d\n",

					if (!BNX2X_RX_SUM_FIX(cqe))
						BNX2X_ERR("STOP on non-TCP "

					/* This is the size of the linear data
					   on this skb */
					len = le16_to_cpu(cqe->fast_path_cqe.
					bnx2x_tpa_stop(bp, fp, queue, pad,
						       len, cqe, comp_ring_cons);
#ifdef BNX2X_STOP_ON_ERROR

					bnx2x_update_sge_prod(fp,
							&cqe->fast_path_cqe);

			pci_dma_sync_single_for_device(bp->pdev,
					pci_unmap_addr(rx_buf, mapping),
						       pad + RX_COPY_THRESH,
						       PCI_DMA_FROMDEVICE);
			prefetch(((char *)(skb)) + 128);

			/* is this an error packet? */
			if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
				DP(NETIF_MSG_RX_ERR,
				   "ERROR flags %x rx packet %u\n",
				   cqe_fp_flags, sw_comp_cons);
				fp->eth_q_stats.rx_err_discard_pkt++;

			/* Since we don't have a jumbo ring,
			 * copy small packets if mtu > 1500
			 */
			if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
			    (len <= RX_COPY_THRESH)) {
				struct sk_buff *new_skb;

				new_skb = netdev_alloc_skb(bp->dev,
				if (new_skb == NULL) {
					DP(NETIF_MSG_RX_ERR,
					   "ERROR packet dropped "
					   "because of alloc failure\n");
					fp->eth_q_stats.rx_skb_alloc_failed++;

				skb_copy_from_linear_data_offset(skb, pad,
						    new_skb->data + pad, len);
				skb_reserve(new_skb, pad);
				skb_put(new_skb, len);

				bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);

			if (likely(bnx2x_alloc_rx_skb(bp, fp, bd_prod) == 0)) {
				pci_unmap_single(bp->pdev,
					pci_unmap_addr(rx_buf, mapping),
						 PCI_DMA_FROMDEVICE);
				skb_reserve(skb, pad);

				DP(NETIF_MSG_RX_ERR,
				   "ERROR packet dropped because "
				   "of alloc failure\n");
				fp->eth_q_stats.rx_skb_alloc_failed++;

				bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);

			skb->protocol = eth_type_trans(skb, bp->dev);

			skb->ip_summed = CHECKSUM_NONE;

				if (likely(BNX2X_RX_CSUM_OK(cqe)))
					skb->ip_summed = CHECKSUM_UNNECESSARY;

					fp->eth_q_stats.hw_csum_err++;

		skb_record_rx_queue(skb, fp->index);

		if ((bp->vlgrp != NULL) && (bp->flags & HW_VLAN_RX_FLAG) &&
		    (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
		     PARSING_FLAGS_VLAN))
			vlan_hwaccel_receive_skb(skb, bp->vlgrp,
				le16_to_cpu(cqe->fast_path_cqe.vlan_tag));

			netif_receive_skb(skb);

		bd_cons = NEXT_RX_IDX(bd_cons);
		bd_prod = NEXT_RX_IDX(bd_prod);
		bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);

		sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
		sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);

		if (rx_pkt == budget)

	fp->rx_bd_cons = bd_cons;
	fp->rx_bd_prod = bd_prod_fw;
	fp->rx_comp_cons = sw_comp_cons;
	fp->rx_comp_prod = sw_comp_prod;

	/* Update producers */
	bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,

	fp->rx_pkt += rx_pkt;
static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
	struct bnx2x_fastpath *fp = fp_cookie;
	struct bnx2x *bp = fp->bp;

	/* Return here if interrupt is disabled */
	if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
		DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");

	DP(BNX2X_MSG_FP, "got an MSI-X interrupt on IDX:SB [%d:%d]\n",
	   fp->index, fp->sb_id);
	bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))

	/* Handle Rx or Tx according to MSI-X vector */
	if (fp->is_rx_queue) {
		prefetch(fp->rx_cons_sb);
		prefetch(&fp->status_blk->u_status_block.status_block_index);

		napi_schedule(&bnx2x_fp(bp, fp->index, napi));

		prefetch(fp->tx_cons_sb);
		prefetch(&fp->status_blk->c_status_block.status_block_index);

		bnx2x_update_fpsb_idx(fp);

		/* Re-enable interrupts */
		bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID,
			     le16_to_cpu(fp->fp_u_idx), IGU_INT_NOP, 1);
		bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID,
			     le16_to_cpu(fp->fp_c_idx), IGU_INT_ENABLE, 1);
static irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
	struct bnx2x *bp = netdev_priv(dev_instance);
	u16 status = bnx2x_ack_int(bp);

	/* Return here if interrupt is shared and it's not for us */
	if (unlikely(status == 0)) {
		DP(NETIF_MSG_INTR, "not our interrupt!\n");
	DP(NETIF_MSG_INTR, "got an interrupt status 0x%x\n", status);

	/* Return here if interrupt is disabled */
	if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
		DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))

	for (i = 0; i < BNX2X_NUM_QUEUES(bp); i++) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		mask = 0x2 << fp->sb_id;
		if (status & mask) {
			/* Handle Rx or Tx according to SB id */
			if (fp->is_rx_queue) {
				prefetch(fp->rx_cons_sb);
				prefetch(&fp->status_blk->u_status_block.
							status_block_index);

				napi_schedule(&bnx2x_fp(bp, fp->index, napi));

				prefetch(fp->tx_cons_sb);
				prefetch(&fp->status_blk->c_status_block.
							status_block_index);

				bnx2x_update_fpsb_idx(fp);

				/* Re-enable interrupts */
				bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID,
					     le16_to_cpu(fp->fp_u_idx),
				bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID,
					     le16_to_cpu(fp->fp_c_idx),

	if (unlikely(status & 0x1)) {
		queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);

	DP(NETIF_MSG_INTR, "got an unknown interrupt! (status %u)\n",

/* end of fast path */
static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event);

/* General service functions */
static int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource)
	u32 resource_bit = (1 << resource);
	int func = BP_FUNC(bp);
	u32 hw_lock_control_reg;

	/* Validating that the resource is within range */
	if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
		   "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
		   resource, HW_LOCK_MAX_RESOURCE_VALUE);

		hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
		hw_lock_control_reg =
				(MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);

	/* Validating that the resource is not already taken */
	lock_status = REG_RD(bp, hw_lock_control_reg);
	if (lock_status & resource_bit) {
		DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n",
		   lock_status, resource_bit);

	/* Try for 5 seconds every 5ms */
	for (cnt = 0; cnt < 1000; cnt++) {
		/* Try to acquire the lock */
		REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
		lock_status = REG_RD(bp, hw_lock_control_reg);
		if (lock_status & resource_bit)

	DP(NETIF_MSG_HW, "Timeout\n");
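	/*
	 * Hedged usage sketch for the HW lock pair (the resource id is
	 * one actually used later in this file): callers bracket access
	 * to a shared register block the same way the PHY helpers below
	 * do:
	 *
	 *	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
	 *	... touch the shared registers ...
	 *	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
	 *
	 * Writing the resource bit to hw_lock_control_reg + 4 attempts
	 * the take; reading the bit back confirms ownership.
	 */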
static int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource)
	u32 resource_bit = (1 << resource);
	int func = BP_FUNC(bp);
	u32 hw_lock_control_reg;

	/* Validating that the resource is within range */
	if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
		   "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
		   resource, HW_LOCK_MAX_RESOURCE_VALUE);

		hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
		hw_lock_control_reg =
				(MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);

	/* Validating that the resource is currently taken */
	lock_status = REG_RD(bp, hw_lock_control_reg);
	if (!(lock_status & resource_bit)) {
		DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n",
		   lock_status, resource_bit);

	REG_WR(bp, hw_lock_control_reg, resource_bit);
/* HW Lock for shared dual port PHYs */
static void bnx2x_acquire_phy_lock(struct bnx2x *bp)
	mutex_lock(&bp->port.phy_mutex);

	if (bp->port.need_hw_lock)
		bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);

static void bnx2x_release_phy_lock(struct bnx2x *bp)
	if (bp->port.need_hw_lock)
		bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);

	mutex_unlock(&bp->port.phy_mutex);
int bnx2x_get_gpio(struct bnx2x *bp, int gpio_num, u8 port)
	/* The GPIO should be swapped if swap register is set and active */
	int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
			 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
	int gpio_shift = gpio_num +
			(gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
	u32 gpio_mask = (1 << gpio_shift);

	if (gpio_num > MISC_REGISTERS_GPIO_3) {
		BNX2X_ERR("Invalid GPIO %d\n", gpio_num);

	/* read GPIO value */
	gpio_reg = REG_RD(bp, MISC_REG_GPIO);

	/* get the requested pin value */
	if ((gpio_reg & gpio_mask) == gpio_mask)

	DP(NETIF_MSG_LINK, "pin %d value 0x%x\n", gpio_num, value);
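	/*
	 * Note on the gpio_port computation above: on boards where the
	 * port-swap strap is set *and* the strap override is active,
	 * port 0's GPIOs live in port 1's register bank and vice versa,
	 * hence the XOR with 'port'. E.g. with both NIG_REG_PORT_SWAP and
	 * NIG_REG_STRAP_OVERRIDE reading non-zero, a call for port 0
	 * resolves to gpio_port 1 and the pin index is shifted by
	 * MISC_REGISTERS_GPIO_PORT_SHIFT.
	 */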
int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
	/* The GPIO should be swapped if swap register is set and active */
	int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
			 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
	int gpio_shift = gpio_num +
			(gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
	u32 gpio_mask = (1 << gpio_shift);

	if (gpio_num > MISC_REGISTERS_GPIO_3) {
		BNX2X_ERR("Invalid GPIO %d\n", gpio_num);

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
	/* read GPIO and mask except the float bits */
	gpio_reg = (REG_RD(bp, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT);

	case MISC_REGISTERS_GPIO_OUTPUT_LOW:
		DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output low\n",
		   gpio_num, gpio_shift);
		/* clear FLOAT and set CLR */
		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
		gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS);

	case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
		DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output high\n",
		   gpio_num, gpio_shift);
		/* clear FLOAT and set SET */
		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
		gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_SET_POS);

	case MISC_REGISTERS_GPIO_INPUT_HI_Z:
		DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> input\n",
		   gpio_num, gpio_shift);
		gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);

	REG_WR(bp, MISC_REG_GPIO, gpio_reg);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
int bnx2x_set_gpio_int(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
	/* The GPIO should be swapped if swap register is set and active */
	int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
			 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
	int gpio_shift = gpio_num +
			(gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
	u32 gpio_mask = (1 << gpio_shift);

	if (gpio_num > MISC_REGISTERS_GPIO_3) {
		BNX2X_ERR("Invalid GPIO %d\n", gpio_num);

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);

	gpio_reg = REG_RD(bp, MISC_REG_GPIO_INT);

	case MISC_REGISTERS_GPIO_INT_OUTPUT_CLR:
		DP(NETIF_MSG_LINK, "Clear GPIO INT %d (shift %d) -> "
		   "output low\n", gpio_num, gpio_shift);
		/* clear SET and set CLR */
		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
		gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);

	case MISC_REGISTERS_GPIO_INT_OUTPUT_SET:
		DP(NETIF_MSG_LINK, "Set GPIO INT %d (shift %d) -> "
		   "output high\n", gpio_num, gpio_shift);
		/* clear CLR and set SET */
		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
		gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);

	REG_WR(bp, MISC_REG_GPIO_INT, gpio_reg);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
static int bnx2x_set_spio(struct bnx2x *bp, int spio_num, u32 mode)
	u32 spio_mask = (1 << spio_num);

	if ((spio_num < MISC_REGISTERS_SPIO_4) ||
	    (spio_num > MISC_REGISTERS_SPIO_7)) {
		BNX2X_ERR("Invalid SPIO %d\n", spio_num);

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
	/* read SPIO and mask except the float bits */
	spio_reg = (REG_RD(bp, MISC_REG_SPIO) & MISC_REGISTERS_SPIO_FLOAT);

	case MISC_REGISTERS_SPIO_OUTPUT_LOW:
		DP(NETIF_MSG_LINK, "Set SPIO %d -> output low\n", spio_num);
		/* clear FLOAT and set CLR */
		spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
		spio_reg |=  (spio_mask << MISC_REGISTERS_SPIO_CLR_POS);

	case MISC_REGISTERS_SPIO_OUTPUT_HIGH:
		DP(NETIF_MSG_LINK, "Set SPIO %d -> output high\n", spio_num);
		/* clear FLOAT and set SET */
		spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
		spio_reg |=  (spio_mask << MISC_REGISTERS_SPIO_SET_POS);

	case MISC_REGISTERS_SPIO_INPUT_HI_Z:
		DP(NETIF_MSG_LINK, "Set SPIO %d -> input\n", spio_num);
		spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);

	REG_WR(bp, MISC_REG_SPIO, spio_reg);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
static void bnx2x_calc_fc_adv(struct bnx2x *bp)
	switch (bp->link_vars.ieee_fc &
		MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK) {
	case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE:
		bp->port.advertising &= ~(ADVERTISED_Asym_Pause |

	case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH:
		bp->port.advertising |= (ADVERTISED_Asym_Pause |

	case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC:
		bp->port.advertising |= ADVERTISED_Asym_Pause;

		bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
static void bnx2x_link_report(struct bnx2x *bp)
	if (bp->state == BNX2X_STATE_DISABLED) {
		netif_carrier_off(bp->dev);
		printk(KERN_ERR PFX "%s NIC Link is Down\n", bp->dev->name);

	if (bp->link_vars.link_up) {
		if (bp->state == BNX2X_STATE_OPEN)
			netif_carrier_on(bp->dev);
		printk(KERN_INFO PFX "%s NIC Link is Up, ", bp->dev->name);

		printk("%d Mbps ", bp->link_vars.line_speed);

		if (bp->link_vars.duplex == DUPLEX_FULL)
			printk("full duplex");
			printk("half duplex");

		if (bp->link_vars.flow_ctrl != BNX2X_FLOW_CTRL_NONE) {
			if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) {
				printk(", receive ");
				if (bp->link_vars.flow_ctrl &
					printk("& transmit ");
				printk(", transmit ");
			printk("flow control ON");

	} else { /* link_down */
		netif_carrier_off(bp->dev);
		printk(KERN_ERR PFX "%s NIC Link is Down\n", bp->dev->name);
static u8 bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode)
	if (!BP_NOMCP(bp)) {

		/* Initialize link parameters structure variables */
		/* It is recommended to turn off RX FC for jumbo frames
		   for better performance */
		if (bp->dev->mtu > 5000)
			bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_TX;
			bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;

		bnx2x_acquire_phy_lock(bp);

		if (load_mode == LOAD_DIAG)
			bp->link_params.loopback_mode = LOOPBACK_XGXS_10;

		rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars);

		bnx2x_release_phy_lock(bp);

		bnx2x_calc_fc_adv(bp);

		if (CHIP_REV_IS_SLOW(bp) && bp->link_vars.link_up) {
			bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
			bnx2x_link_report(bp);

	BNX2X_ERR("Bootcode is missing - cannot initialize link\n");

static void bnx2x_link_set(struct bnx2x *bp)
	if (!BP_NOMCP(bp)) {
		bnx2x_acquire_phy_lock(bp);
		bnx2x_phy_init(&bp->link_params, &bp->link_vars);
		bnx2x_release_phy_lock(bp);

		bnx2x_calc_fc_adv(bp);
		BNX2X_ERR("Bootcode is missing - cannot set link\n");

static void bnx2x__link_reset(struct bnx2x *bp)
	if (!BP_NOMCP(bp)) {
		bnx2x_acquire_phy_lock(bp);
		bnx2x_link_reset(&bp->link_params, &bp->link_vars, 1);
		bnx2x_release_phy_lock(bp);
		BNX2X_ERR("Bootcode is missing - cannot reset link\n");

static u8 bnx2x_link_test(struct bnx2x *bp)
	bnx2x_acquire_phy_lock(bp);
	rc = bnx2x_test_link(&bp->link_params, &bp->link_vars);
	bnx2x_release_phy_lock(bp);
2236 static void bnx2x_init_port_minmax(struct bnx2x *bp)
2238 u32 r_param = bp->link_vars.line_speed / 8;
2239 u32 fair_periodic_timeout_usec;
2242 memset(&(bp->cmng.rs_vars), 0,
2243 sizeof(struct rate_shaping_vars_per_port));
2244 memset(&(bp->cmng.fair_vars), 0, sizeof(struct fairness_vars_per_port));
2246 /* 100 usec in SDM ticks = 25 since each tick is 4 usec */
2247 bp->cmng.rs_vars.rs_periodic_timeout = RS_PERIODIC_TIMEOUT_USEC / 4;
2249 /* this is the threshold below which no timer arming will occur
2250 1.25 coefficient is for the threshold to be a little bigger
2251 than the real time, to compensate for timer in-accuracy */
2252 bp->cmng.rs_vars.rs_threshold =
2253 (RS_PERIODIC_TIMEOUT_USEC * r_param * 5) / 4;
2255 /* resolution of fairness timer */
2256 fair_periodic_timeout_usec = QM_ARB_BYTES / r_param;
2257 /* for 10G it is 1000 usec, for 1G it is 10000 usec */
2258 t_fair = T_FAIR_COEF / bp->link_vars.line_speed;
2260 /* this is the threshold below which we won't arm the timer anymore */
2261 bp->cmng.fair_vars.fair_threshold = QM_ARB_BYTES;
2263 /* we multiply by 1e3/8 to get bytes/msec.
2264 We don't want the credits to exceed the equivalent of
2265 t_fair*FAIR_MEM (the algorithm resolution) */
2266 bp->cmng.fair_vars.upper_bound = r_param * t_fair * FAIR_MEM;
2267 /* since each tick is 4 usec */
2268 bp->cmng.fair_vars.fairness_timeout = fair_periodic_timeout_usec / 4;
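/* Worked example (illustrative, assuming a 10G link and
   RS_PERIODIC_TIMEOUT_USEC == 100 as the comment above implies):
   r_param = 10000/8 = 1250, rs_periodic_timeout = 100/4 = 25 SDM ticks,
   rs_threshold = (100 * 1250 * 5)/4 = 156250 and
   t_fair = T_FAIR_COEF/10000 = 1000 usec. */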
2271 /* Calculates the sum of vn_min_rates.
2272 It's needed for further normalizing of the min_rates.
2273 Returns:
2274 sum of vn_min_rates.
2275 or
2276 0 - if all the min_rates are 0.
2277 In the latter case the fairness algorithm should be deactivated.
2278 If not all min_rates are zero then those that are zeroes will be set to 1.
2279 */
2280 static void bnx2x_calc_vn_weight_sum(struct bnx2x *bp)
2283 int port = BP_PORT(bp);
2286 bp->vn_weight_sum = 0;
2287 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
2288 int func = 2*vn + port;
2289 u32 vn_cfg = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
2290 u32 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
2291 FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
2293 /* Skip hidden vns */
2294 if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE)
2297 /* If min rate is zero - set it to 1 */
2299 vn_min_rate = DEF_MIN_RATE;
2303 bp->vn_weight_sum += vn_min_rate;
2306 /* ... only if all min rates are zeros - disable fairness */
2308 bp->vn_weight_sum = 0;
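/* Example (illustrative, assuming DEF_MIN_RATE == 100): MIN_BW config
   fields of 0, 25, 25 and 50 for the four vns yield min rates of
   100, 2500, 2500 and 5000, so vn_weight_sum = 10100. Only when every
   non-hidden vn reports 0 does vn_weight_sum stay 0 and fairness
   get disabled. */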
2311 static void bnx2x_init_vn_minmax(struct bnx2x *bp, int func)
2313 struct rate_shaping_vars_per_vn m_rs_vn;
2314 struct fairness_vars_per_vn m_fair_vn;
2315 u32 vn_cfg = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
2316 u16 vn_min_rate, vn_max_rate;
2319 /* If function is hidden - set min and max to zeroes */
2320 if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) {
2325 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
2326 FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
2327 /* If fairness is enabled (not all min rates are zeroes) and
2328 if current min rate is zero - set it to 1.
2329 This is a requirement of the algorithm. */
2330 if (bp->vn_weight_sum && (vn_min_rate == 0))
2331 vn_min_rate = DEF_MIN_RATE;
2332 vn_max_rate = ((vn_cfg & FUNC_MF_CFG_MAX_BW_MASK) >>
2333 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
2337 "func %d: vn_min_rate=%d vn_max_rate=%d vn_weight_sum=%d\n",
2338 func, vn_min_rate, vn_max_rate, bp->vn_weight_sum);
2340 memset(&m_rs_vn, 0, sizeof(struct rate_shaping_vars_per_vn));
2341 memset(&m_fair_vn, 0, sizeof(struct fairness_vars_per_vn));
2343 /* global vn counter - maximal Mbps for this vn */
2344 m_rs_vn.vn_counter.rate = vn_max_rate;
2346 /* quota - number of bytes transmitted in this period */
2347 m_rs_vn.vn_counter.quota =
2348 (vn_max_rate * RS_PERIODIC_TIMEOUT_USEC) / 8;
2350 if (bp->vn_weight_sum) {
2351 /* credit for each period of the fairness algorithm:
2352 number of bytes in T_FAIR (the vn share the port rate).
2353 vn_weight_sum should not be larger than 10000, thus
2354 T_FAIR_COEF / (8 * vn_weight_sum) will always be greater
2355 than zero */
2356 m_fair_vn.vn_credit_delta =
2357 max((u32)(vn_min_rate * (T_FAIR_COEF /
2358 (8 * bp->vn_weight_sum))),
2359 (u32)(bp->cmng.fair_vars.fair_threshold * 2));
2360 DP(NETIF_MSG_IFUP, "m_fair_vn.vn_credit_delta=%d\n",
2361 m_fair_vn.vn_credit_delta);
2364 /* Store it to internal memory */
2365 for (i = 0; i < sizeof(struct rate_shaping_vars_per_vn)/4; i++)
2366 REG_WR(bp, BAR_XSTRORM_INTMEM +
2367 XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func) + i * 4,
2368 ((u32 *)(&m_rs_vn))[i]);
2370 for (i = 0; i < sizeof(struct fairness_vars_per_vn)/4; i++)
2371 REG_WR(bp, BAR_XSTRORM_INTMEM +
2372 XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func) + i * 4,
2373 ((u32 *)(&m_fair_vn))[i]);
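/* Quota sanity check (illustrative): with vn_max_rate = 10000 Mbps and
   RS_PERIODIC_TIMEOUT_USEC = 100, quota = 10000 * 100 / 8 = 125000 bytes,
   exactly one period's worth of line-rate traffic (Mbps * usec = bits,
   and dividing by 8 converts to bytes). */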
2377 /* This function is called upon link interrupt */
2378 static void bnx2x_link_attn(struct bnx2x *bp)
2380 /* Make sure that we are synced with the current statistics */
2381 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2383 bnx2x_link_update(&bp->link_params, &bp->link_vars);
2385 if (bp->link_vars.link_up) {
2387 /* dropless flow control */
2388 if (CHIP_IS_E1H(bp) && bp->dropless_fc) {
2389 int port = BP_PORT(bp);
2390 u32 pause_enabled = 0;
2392 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
2395 REG_WR(bp, BAR_USTRORM_INTMEM +
2396 USTORM_ETH_PAUSE_ENABLED_OFFSET(port),
2400 if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
2401 struct host_port_stats *pstats;
2403 pstats = bnx2x_sp(bp, port_stats);
2404 /* reset old bmac stats */
2405 memset(&(pstats->mac_stx[0]), 0,
2406 sizeof(struct mac_stx));
2408 if ((bp->state == BNX2X_STATE_OPEN) ||
2409 (bp->state == BNX2X_STATE_DISABLED))
2410 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2413 /* indicate link status */
2414 bnx2x_link_report(bp);
2417 int port = BP_PORT(bp);
2421 /* Set the attention towards other drivers on the same port */
2422 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
2423 if (vn == BP_E1HVN(bp))
2426 func = ((vn << 1) | port);
2427 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
2428 (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
2431 if (bp->link_vars.link_up) {
2434 /* Init rate shaping and fairness contexts */
2435 bnx2x_init_port_minmax(bp);
2437 for (vn = VN_0; vn < E1HVN_MAX; vn++)
2438 bnx2x_init_vn_minmax(bp, 2*vn + port);
2440 /* Store it to internal memory */
2442 i < sizeof(struct cmng_struct_per_port) / 4; i++)
2443 REG_WR(bp, BAR_XSTRORM_INTMEM +
2444 XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i*4,
2445 ((u32 *)(&bp->cmng))[i]);
2450 static void bnx2x__link_status_update(struct bnx2x *bp)
2452 int func = BP_FUNC(bp);
2454 if (bp->state != BNX2X_STATE_OPEN)
2457 bnx2x_link_status_update(&bp->link_params, &bp->link_vars);
2459 if (bp->link_vars.link_up)
2460 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2462 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2464 bp->mf_config = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
2465 bnx2x_calc_vn_weight_sum(bp);
2467 /* indicate link status */
2468 bnx2x_link_report(bp);
2471 static void bnx2x_pmf_update(struct bnx2x *bp)
2473 int port = BP_PORT(bp);
2477 DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
2479 /* enable nig attention */
2480 val = (0xff0f | (1 << (BP_E1HVN(bp) + 4)));
2481 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
2482 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
2484 bnx2x_stats_handle(bp, STATS_EVENT_PMF);
2492 * General service functions
2495 /* send the MCP a request, block until there is a reply */
2496 u32 bnx2x_fw_command(struct bnx2x *bp, u32 command)
2498 int func = BP_FUNC(bp);
2499 u32 seq = ++bp->fw_seq;
2502 u8 delay = CHIP_REV_IS_SLOW(bp) ? 100 : 10;
2504 SHMEM_WR(bp, func_mb[func].drv_mb_header, (command | seq));
2505 DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB\n", (command | seq));
2508 /* let the FW do its magic ... */
2511 rc = SHMEM_RD(bp, func_mb[func].fw_mb_header);
2513 /* Give the FW up to 2 seconds (200*10ms) */
2514 } while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 200));
2516 DP(BNX2X_MSG_MCP, "[after %d ms] read (%x) seq is (%x) from FW MB\n",
2517 cnt*delay, rc, seq);
2519 /* is this a reply to our command? */
2520 if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK))
2521 rc &= FW_MSG_CODE_MASK;
2524 BNX2X_ERR("FW failed to respond!\n");
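/* Usage sketch (hedged; the DRV_MSG_CODE_* values come from the shared
   HSI headers). Callers fire a mailbox command and test the masked reply:

	if (bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_OK) == 0)
		BNX2X_ERR("MCP did not respond\n");

   A return of 0 after the 2 second poll above means the MCP never
   echoed our sequence number back. */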
2532 static void bnx2x_set_storm_rx_mode(struct bnx2x *bp);
2533 static void bnx2x_set_mac_addr_e1h(struct bnx2x *bp, int set);
2534 static void bnx2x_set_rx_mode(struct net_device *dev);
2536 static void bnx2x_e1h_disable(struct bnx2x *bp)
2538 int port = BP_PORT(bp);
2541 bp->rx_mode = BNX2X_RX_MODE_NONE;
2542 bnx2x_set_storm_rx_mode(bp);
2544 netif_tx_disable(bp->dev);
2545 bp->dev->trans_start = jiffies; /* prevent tx timeout */
2547 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
2549 bnx2x_set_mac_addr_e1h(bp, 0);
2551 for (i = 0; i < MC_HASH_SIZE; i++)
2552 REG_WR(bp, MC_HASH_OFFSET(bp, i), 0);
2554 netif_carrier_off(bp->dev);
2557 static void bnx2x_e1h_enable(struct bnx2x *bp)
2559 int port = BP_PORT(bp);
2561 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
2563 bnx2x_set_mac_addr_e1h(bp, 1);
2565 /* Tx queues should only be re-enabled */
2566 netif_tx_wake_all_queues(bp->dev);
2568 /* Initialize the receive filter. */
2569 bnx2x_set_rx_mode(bp->dev);
2572 static void bnx2x_update_min_max(struct bnx2x *bp)
2574 int port = BP_PORT(bp);
2577 /* Init rate shaping and fairness contexts */
2578 bnx2x_init_port_minmax(bp);
2580 bnx2x_calc_vn_weight_sum(bp);
2582 for (vn = VN_0; vn < E1HVN_MAX; vn++)
2583 bnx2x_init_vn_minmax(bp, 2*vn + port);
2588 /* Set the attention towards other drivers on the same port */
2589 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
2590 if (vn == BP_E1HVN(bp))
2593 func = ((vn << 1) | port);
2594 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
2595 (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
2598 /* Store it to internal memory */
2599 for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++)
2600 REG_WR(bp, BAR_XSTRORM_INTMEM +
2601 XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i*4,
2602 ((u32 *)(&bp->cmng))[i]);
2606 static void bnx2x_dcc_event(struct bnx2x *bp, u32 dcc_event)
2608 int func = BP_FUNC(bp);
2610 DP(BNX2X_MSG_MCP, "dcc_event 0x%x\n", dcc_event);
2611 bp->mf_config = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
2613 if (dcc_event & DRV_STATUS_DCC_DISABLE_ENABLE_PF) {
2615 if (bp->mf_config & FUNC_MF_CFG_FUNC_DISABLED) {
2616 DP(NETIF_MSG_IFDOWN, "mf_cfg function disabled\n");
2617 bp->state = BNX2X_STATE_DISABLED;
2619 bnx2x_e1h_disable(bp);
2621 DP(NETIF_MSG_IFUP, "mf_cfg function enabled\n");
2622 bp->state = BNX2X_STATE_OPEN;
2624 bnx2x_e1h_enable(bp);
2626 dcc_event &= ~DRV_STATUS_DCC_DISABLE_ENABLE_PF;
2628 if (dcc_event & DRV_STATUS_DCC_BANDWIDTH_ALLOCATION) {
2630 bnx2x_update_min_max(bp);
2631 dcc_event &= ~DRV_STATUS_DCC_BANDWIDTH_ALLOCATION;
2634 /* Report results to MCP */
2636 bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_FAILURE);
2638 bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_OK);
2641 /* must be called under the spq lock */
2642 static inline struct eth_spe *bnx2x_sp_get_next(struct bnx2x *bp)
2644 struct eth_spe *next_spe = bp->spq_prod_bd;
2646 if (bp->spq_prod_bd == bp->spq_last_bd) {
2647 bp->spq_prod_bd = bp->spq;
2648 bp->spq_prod_idx = 0;
2649 DP(NETIF_MSG_TIMER, "end of spq\n");
2657 /* must be called under the spq lock */
2658 static inline void bnx2x_sp_prod_update(struct bnx2x *bp)
2660 int func = BP_FUNC(bp);
2662 /* Make sure that BD data is updated before writing the producer */
2665 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func),
2670 /* the slow path queue is odd since completions arrive on the fastpath ring */
2671 static int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
2672 u32 data_hi, u32 data_lo, int common)
2674 struct eth_spe *spe;
2676 DP(BNX2X_MSG_SP/*NETIF_MSG_TIMER*/,
2677 "SPQE (%x:%x) command %d hw_cid %x data (%x:%x) left %x\n",
2678 (u32)U64_HI(bp->spq_mapping), (u32)(U64_LO(bp->spq_mapping) +
2679 (void *)bp->spq_prod_bd - (void *)bp->spq), command,
2680 HW_CID(bp, cid), data_hi, data_lo, bp->spq_left);
2682 #ifdef BNX2X_STOP_ON_ERROR
2683 if (unlikely(bp->panic))
2687 spin_lock_bh(&bp->spq_lock);
2689 if (!bp->spq_left) {
2690 BNX2X_ERR("BUG! SPQ ring full!\n");
2691 spin_unlock_bh(&bp->spq_lock);
2696 spe = bnx2x_sp_get_next(bp);
2698 /* CID needs the port number to be encoded in it */
2699 spe->hdr.conn_and_cmd_data =
2700 cpu_to_le32(((command << SPE_HDR_CMD_ID_SHIFT) |
2702 spe->hdr.type = cpu_to_le16(ETH_CONNECTION_TYPE);
2705 cpu_to_le16((1 << SPE_HDR_COMMON_RAMROD_SHIFT));
2707 spe->data.mac_config_addr.hi = cpu_to_le32(data_hi);
2708 spe->data.mac_config_addr.lo = cpu_to_le32(data_lo);
2712 bnx2x_sp_prod_update(bp);
2713 spin_unlock_bh(&bp->spq_lock);
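/* Usage sketch: the statistics code below posts its query ramrod this
   way (see bnx2x_storm_stats_post()):

	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_STAT_QUERY, 0,
		      ((u32 *)&ramrod_data)[1], ((u32 *)&ramrod_data)[0], 0);

   data_hi/data_lo carry the 64-bit payload and 'common' selects the
   SPE_HDR_COMMON_RAMROD bit in the header. */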
2717 /* acquire split MCP access lock register */
2718 static int bnx2x_acquire_alr(struct bnx2x *bp)
2725 for (j = 0; j < i*10; j++) {
2727 REG_WR(bp, GRCBASE_MCP + 0x9c, val);
2728 val = REG_RD(bp, GRCBASE_MCP + 0x9c);
2729 if (val & (1L << 31))
2734 if (!(val & (1L << 31))) {
2735 BNX2X_ERR("Cannot acquire MCP access lock register\n");
2742 /* release split MCP access lock register */
2743 static void bnx2x_release_alr(struct bnx2x *bp)
2747 REG_WR(bp, GRCBASE_MCP + 0x9c, val);
2750 static inline u16 bnx2x_update_dsb_idx(struct bnx2x *bp)
2752 struct host_def_status_block *def_sb = bp->def_status_blk;
2755 barrier(); /* status block is written to by the chip */
2756 if (bp->def_att_idx != def_sb->atten_status_block.attn_bits_index) {
2757 bp->def_att_idx = def_sb->atten_status_block.attn_bits_index;
2760 if (bp->def_c_idx != def_sb->c_def_status_block.status_block_index) {
2761 bp->def_c_idx = def_sb->c_def_status_block.status_block_index;
2764 if (bp->def_u_idx != def_sb->u_def_status_block.status_block_index) {
2765 bp->def_u_idx = def_sb->u_def_status_block.status_block_index;
2768 if (bp->def_x_idx != def_sb->x_def_status_block.status_block_index) {
2769 bp->def_x_idx = def_sb->x_def_status_block.status_block_index;
2772 if (bp->def_t_idx != def_sb->t_def_status_block.status_block_index) {
2773 bp->def_t_idx = def_sb->t_def_status_block.status_block_index;
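/* The return value is a bitmask with one bit per default status block
   index that moved (attention plus the c/u/x/t-storm indices);
   bnx2x_sp_task() below uses it to tell whether anything changed before
   re-reading and acknowledging the indices. */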
2780 * slow path service functions
2783 static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
2785 int port = BP_PORT(bp);
2786 u32 hc_addr = (HC_REG_COMMAND_REG + port*32 +
2787 COMMAND_REG_ATTN_BITS_SET);
2788 u32 aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
2789 MISC_REG_AEU_MASK_ATTN_FUNC_0;
2790 u32 nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 :
2791 NIG_REG_MASK_INTERRUPT_PORT0;
2795 if (bp->attn_state & asserted)
2796 BNX2X_ERR("IGU ERROR\n");
2798 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2799 aeu_mask = REG_RD(bp, aeu_addr);
2801 DP(NETIF_MSG_HW, "aeu_mask %x newly asserted %x\n",
2802 aeu_mask, asserted);
2803 aeu_mask &= ~(asserted & 0xff);
2804 DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
2806 REG_WR(bp, aeu_addr, aeu_mask);
2807 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2809 DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
2810 bp->attn_state |= asserted;
2811 DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
2813 if (asserted & ATTN_HARD_WIRED_MASK) {
2814 if (asserted & ATTN_NIG_FOR_FUNC) {
2816 bnx2x_acquire_phy_lock(bp);
2818 /* save nig interrupt mask */
2819 nig_mask = REG_RD(bp, nig_int_mask_addr);
2820 REG_WR(bp, nig_int_mask_addr, 0);
2822 bnx2x_link_attn(bp);
2824 /* handle unicore attn? */
2826 if (asserted & ATTN_SW_TIMER_4_FUNC)
2827 DP(NETIF_MSG_HW, "ATTN_SW_TIMER_4_FUNC!\n");
2829 if (asserted & GPIO_2_FUNC)
2830 DP(NETIF_MSG_HW, "GPIO_2_FUNC!\n");
2832 if (asserted & GPIO_3_FUNC)
2833 DP(NETIF_MSG_HW, "GPIO_3_FUNC!\n");
2835 if (asserted & GPIO_4_FUNC)
2836 DP(NETIF_MSG_HW, "GPIO_4_FUNC!\n");
2839 if (asserted & ATTN_GENERAL_ATTN_1) {
2840 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_1!\n");
2841 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_1, 0x0);
2843 if (asserted & ATTN_GENERAL_ATTN_2) {
2844 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_2!\n");
2845 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_2, 0x0);
2847 if (asserted & ATTN_GENERAL_ATTN_3) {
2848 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_3!\n");
2849 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_3, 0x0);
2852 if (asserted & ATTN_GENERAL_ATTN_4) {
2853 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_4!\n");
2854 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_4, 0x0);
2856 if (asserted & ATTN_GENERAL_ATTN_5) {
2857 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_5!\n");
2858 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_5, 0x0);
2860 if (asserted & ATTN_GENERAL_ATTN_6) {
2861 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_6!\n");
2862 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_6, 0x0);
2866 } /* if hardwired */
2868 DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
2870 REG_WR(bp, hc_addr, asserted);
2872 /* now set back the mask */
2873 if (asserted & ATTN_NIG_FOR_FUNC) {
2874 REG_WR(bp, nig_int_mask_addr, nig_mask);
2875 bnx2x_release_phy_lock(bp);
2879 static inline void bnx2x_fan_failure(struct bnx2x *bp)
2881 int port = BP_PORT(bp);
2883 /* mark the failure */
2884 bp->link_params.ext_phy_config &= ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
2885 bp->link_params.ext_phy_config |= PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE;
2886 SHMEM_WR(bp, dev_info.port_hw_config[port].external_phy_config,
2887 bp->link_params.ext_phy_config);
2889 /* log the failure */
2890 printk(KERN_ERR PFX "Fan Failure on Network Controller %s has caused"
2891 " the driver to shut down the card to prevent permanent"
2892 " damage. Please contact Dell Support for assistance\n",
2896 static inline void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
2898 int port = BP_PORT(bp);
2900 u32 val, swap_val, swap_override;
2902 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
2903 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
2905 if (attn & AEU_INPUTS_ATTN_BITS_SPIO5) {
2907 val = REG_RD(bp, reg_offset);
2908 val &= ~AEU_INPUTS_ATTN_BITS_SPIO5;
2909 REG_WR(bp, reg_offset, val);
2911 BNX2X_ERR("SPIO5 hw attention\n");
2913 /* Fan failure attention */
2914 switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
2915 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
2916 /* Low power mode is controlled by GPIO 2 */
2917 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
2918 MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
2919 /* The PHY reset is controlled by GPIO 1 */
2920 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
2921 MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
2924 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
2925 /* The PHY reset is controlled by GPIO 1 */
2926 /* fake the port number to cancel the swap done in
2927 set_gpio() */
2928 swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
2929 swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
2930 port = (swap_val && swap_override) ^ 1;
2931 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
2932 MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
2938 bnx2x_fan_failure(bp);
2941 if (attn & (AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 |
2942 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1)) {
2943 bnx2x_acquire_phy_lock(bp);
2944 bnx2x_handle_module_detect_int(&bp->link_params);
2945 bnx2x_release_phy_lock(bp);
2948 if (attn & HW_INTERRUT_ASSERT_SET_0) {
2950 val = REG_RD(bp, reg_offset);
2951 val &= ~(attn & HW_INTERRUT_ASSERT_SET_0);
2952 REG_WR(bp, reg_offset, val);
2954 BNX2X_ERR("FATAL HW block attention set0 0x%x\n",
2955 (u32)(attn & HW_INTERRUT_ASSERT_SET_0));
2960 static inline void bnx2x_attn_int_deasserted1(struct bnx2x *bp, u32 attn)
2964 if (attn & AEU_INPUTS_ATTN_BITS_DOORBELLQ_HW_INTERRUPT) {
2966 val = REG_RD(bp, DORQ_REG_DORQ_INT_STS_CLR);
2967 BNX2X_ERR("DB hw attention 0x%x\n", val);
2968 /* DORQ discard attention */
2970 BNX2X_ERR("FATAL error from DORQ\n");
2973 if (attn & HW_INTERRUT_ASSERT_SET_1) {
2975 int port = BP_PORT(bp);
2978 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 :
2979 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1);
2981 val = REG_RD(bp, reg_offset);
2982 val &= ~(attn & HW_INTERRUT_ASSERT_SET_1);
2983 REG_WR(bp, reg_offset, val);
2985 BNX2X_ERR("FATAL HW block attention set1 0x%x\n",
2986 (u32)(attn & HW_INTERRUT_ASSERT_SET_1));
2991 static inline void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn)
2995 if (attn & AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) {
2997 val = REG_RD(bp, CFC_REG_CFC_INT_STS_CLR);
2998 BNX2X_ERR("CFC hw attention 0x%x\n", val);
2999 /* CFC error attention */
3001 BNX2X_ERR("FATAL error from CFC\n");
3004 if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) {
3006 val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_0);
3007 BNX2X_ERR("PXP hw attention 0x%x\n", val);
3008 /* RQ_USDMDP_FIFO_OVERFLOW */
3010 BNX2X_ERR("FATAL error from PXP\n");
3013 if (attn & HW_INTERRUT_ASSERT_SET_2) {
3015 int port = BP_PORT(bp);
3018 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 :
3019 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2);
3021 val = REG_RD(bp, reg_offset);
3022 val &= ~(attn & HW_INTERRUT_ASSERT_SET_2);
3023 REG_WR(bp, reg_offset, val);
3025 BNX2X_ERR("FATAL HW block attention set2 0x%x\n",
3026 (u32)(attn & HW_INTERRUT_ASSERT_SET_2));
3031 static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
3035 if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) {
3037 if (attn & BNX2X_PMF_LINK_ASSERT) {
3038 int func = BP_FUNC(bp);
3040 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
3041 val = SHMEM_RD(bp, func_mb[func].drv_status);
3042 if (val & DRV_STATUS_DCC_EVENT_MASK)
3044 (val & DRV_STATUS_DCC_EVENT_MASK));
3045 bnx2x__link_status_update(bp);
3046 if ((bp->port.pmf == 0) && (val & DRV_STATUS_PMF))
3047 bnx2x_pmf_update(bp);
3049 } else if (attn & BNX2X_MC_ASSERT_BITS) {
3051 BNX2X_ERR("MC assert!\n");
3052 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_10, 0);
3053 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_9, 0);
3054 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_8, 0);
3055 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_7, 0);
3058 } else if (attn & BNX2X_MCP_ASSERT) {
3060 BNX2X_ERR("MCP assert!\n");
3061 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_11, 0);
3065 BNX2X_ERR("Unknown HW assert! (attn 0x%x)\n", attn);
3068 if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) {
3069 BNX2X_ERR("LATCHED attention 0x%08x (masked)\n", attn);
3070 if (attn & BNX2X_GRC_TIMEOUT) {
3071 val = CHIP_IS_E1H(bp) ?
3072 REG_RD(bp, MISC_REG_GRC_TIMEOUT_ATTN) : 0;
3073 BNX2X_ERR("GRC time-out 0x%08x\n", val);
3075 if (attn & BNX2X_GRC_RSV) {
3076 val = CHIP_IS_E1H(bp) ?
3077 REG_RD(bp, MISC_REG_GRC_RSV_ATTN) : 0;
3078 BNX2X_ERR("GRC reserved 0x%08x\n", val);
3080 REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff);
3084 static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
3086 struct attn_route attn;
3087 struct attn_route group_mask;
3088 int port = BP_PORT(bp);
3094 /* need to take HW lock because MCP or other port might also
3095 try to handle this event */
3096 bnx2x_acquire_alr(bp);
3098 attn.sig[0] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
3099 attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
3100 attn.sig[2] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
3101 attn.sig[3] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
3102 DP(NETIF_MSG_HW, "attn: %08x %08x %08x %08x\n",
3103 attn.sig[0], attn.sig[1], attn.sig[2], attn.sig[3]);
3105 for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
3106 if (deasserted & (1 << index)) {
3107 group_mask = bp->attn_group[index];
3109 DP(NETIF_MSG_HW, "group[%d]: %08x %08x %08x %08x\n",
3110 index, group_mask.sig[0], group_mask.sig[1],
3111 group_mask.sig[2], group_mask.sig[3]);
3113 bnx2x_attn_int_deasserted3(bp,
3114 attn.sig[3] & group_mask.sig[3]);
3115 bnx2x_attn_int_deasserted1(bp,
3116 attn.sig[1] & group_mask.sig[1]);
3117 bnx2x_attn_int_deasserted2(bp,
3118 attn.sig[2] & group_mask.sig[2]);
3119 bnx2x_attn_int_deasserted0(bp,
3120 attn.sig[0] & group_mask.sig[0]);
3122 if ((attn.sig[0] & group_mask.sig[0] &
3123 HW_PRTY_ASSERT_SET_0) ||
3124 (attn.sig[1] & group_mask.sig[1] &
3125 HW_PRTY_ASSERT_SET_1) ||
3126 (attn.sig[2] & group_mask.sig[2] &
3127 HW_PRTY_ASSERT_SET_2))
3128 BNX2X_ERR("FATAL HW block parity attention\n");
3132 bnx2x_release_alr(bp);
3134 reg_addr = (HC_REG_COMMAND_REG + port*32 + COMMAND_REG_ATTN_BITS_CLR);
3137 DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
3139 REG_WR(bp, reg_addr, val);
3141 if (~bp->attn_state & deasserted)
3142 BNX2X_ERR("IGU ERROR\n");
3144 reg_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
3145 MISC_REG_AEU_MASK_ATTN_FUNC_0;
3147 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
3148 aeu_mask = REG_RD(bp, reg_addr);
3150 DP(NETIF_MSG_HW, "aeu_mask %x newly deasserted %x\n",
3151 aeu_mask, deasserted);
3152 aeu_mask |= (deasserted & 0xff);
3153 DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
3155 REG_WR(bp, reg_addr, aeu_mask);
3156 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
3158 DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
3159 bp->attn_state &= ~deasserted;
3160 DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
3163 static void bnx2x_attn_int(struct bnx2x *bp)
3165 /* read local copy of bits */
3166 u32 attn_bits = le32_to_cpu(bp->def_status_blk->atten_status_block.
3168 u32 attn_ack = le32_to_cpu(bp->def_status_blk->atten_status_block.
3170 u32 attn_state = bp->attn_state;
3172 /* look for changed bits */
3173 u32 asserted = attn_bits & ~attn_ack & ~attn_state;
3174 u32 deasserted = ~attn_bits & attn_ack & attn_state;
3177 "attn_bits %x attn_ack %x asserted %x deasserted %x\n",
3178 attn_bits, attn_ack, asserted, deasserted);
3180 if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state))
3181 BNX2X_ERR("BAD attention state\n");
3183 /* handle bits that were raised */
3185 bnx2x_attn_int_asserted(bp, asserted);
3188 bnx2x_attn_int_deasserted(bp, deasserted);
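/* Bit-math example (illustrative): attn_bits = 0x9, attn_ack = 0x1 and
   attn_state = 0x1 give asserted = 0x9 & ~0x1 & ~0x1 = 0x8 (a newly
   raised attention) and deasserted = ~0x9 & 0x1 & 0x1 = 0x0 (nothing
   was cleared). */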
3191 static void bnx2x_sp_task(struct work_struct *work)
3193 struct bnx2x *bp = container_of(work, struct bnx2x, sp_task.work);
3197 /* Return here if interrupt is disabled */
3198 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
3199 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
3203 status = bnx2x_update_dsb_idx(bp);
3204 /* if (status == 0) */
3205 /* BNX2X_ERR("spurious slowpath interrupt!\n"); */
3207 DP(NETIF_MSG_INTR, "got a slowpath interrupt (updated %x)\n", status);
3213 bnx2x_ack_sb(bp, DEF_SB_ID, ATTENTION_ID, le16_to_cpu(bp->def_att_idx),
3215 bnx2x_ack_sb(bp, DEF_SB_ID, USTORM_ID, le16_to_cpu(bp->def_u_idx),
3217 bnx2x_ack_sb(bp, DEF_SB_ID, CSTORM_ID, le16_to_cpu(bp->def_c_idx),
3219 bnx2x_ack_sb(bp, DEF_SB_ID, XSTORM_ID, le16_to_cpu(bp->def_x_idx),
3221 bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, le16_to_cpu(bp->def_t_idx),
3226 static irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
3228 struct net_device *dev = dev_instance;
3229 struct bnx2x *bp = netdev_priv(dev);
3231 /* Return here if interrupt is disabled */
3232 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
3233 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
3237 bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, 0, IGU_INT_DISABLE, 0);
3239 #ifdef BNX2X_STOP_ON_ERROR
3240 if (unlikely(bp->panic))
3244 queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
3249 /* end of slow path */
3253 /****************************************************************************
3254 * Macros
3255 ****************************************************************************/
3257 /* sum[hi:lo] += add[hi:lo] */
3258 #define ADD_64(s_hi, a_hi, s_lo, a_lo) \
3259 do { \
3260 s_lo += a_lo; \
3261 s_hi += a_hi + ((s_lo < a_lo) ? 1 : 0); \
3262 } while (0)
3264 /* difference = minuend - subtrahend */
3265 #define DIFF_64(d_hi, m_hi, s_hi, d_lo, m_lo, s_lo) \
3266 do { \
3267 if (m_lo < s_lo) { \
3268 /* underflow */ \
3269 d_hi = m_hi - s_hi; \
3270 if (d_hi > 0) { \
3271 /* we can 'loan' 1 */ \
3272 d_hi--; \
3273 d_lo = m_lo + (UINT_MAX - s_lo) + 1; \
3274 } else { \
3275 /* m_hi <= s_hi */ \
3276 d_hi = 0; \
3277 d_lo = 0; \
3278 } \
3279 } else { \
3280 /* m_lo >= s_lo */ \
3281 if (m_hi < s_hi) { \
3282 d_hi = 0; \
3283 d_lo = 0; \
3284 } else { \
3285 /* m_hi >= s_hi */ \
3286 d_hi = m_hi - s_hi; \
3287 d_lo = m_lo - s_lo; \
3288 } \
3289 } \
3290 } while (0)
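/* Borrow example (illustrative): minuend 0x1:0x00000000 minus subtrahend
   0x0:0x00000001 takes the m_lo < s_lo path, 'loans' 1 from the high
   dword (d_hi = 1 - 0 - 1 = 0) and yields
   d_lo = 0 + (UINT_MAX - 1) + 1 = 0xffffffff. */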
3292 #define UPDATE_STAT64(s, t) \
3293 do { \
3294 DIFF_64(diff.hi, new->s##_hi, pstats->mac_stx[0].t##_hi, \
3295 diff.lo, new->s##_lo, pstats->mac_stx[0].t##_lo); \
3296 pstats->mac_stx[0].t##_hi = new->s##_hi; \
3297 pstats->mac_stx[0].t##_lo = new->s##_lo; \
3298 ADD_64(pstats->mac_stx[1].t##_hi, diff.hi, \
3299 pstats->mac_stx[1].t##_lo, diff.lo); \
3300 } while (0)
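/* mac_stx[0] is the most recent raw MAC snapshot and mac_stx[1] the
   accumulated total: UPDATE_STAT64 diffs the fresh hardware counter
   against the snapshot, saves the new snapshot and folds the delta into
   the running 64-bit total. */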
3302 #define UPDATE_STAT64_NIG(s, t) \
3303 do { \
3304 DIFF_64(diff.hi, new->s##_hi, old->s##_hi, \
3305 diff.lo, new->s##_lo, old->s##_lo); \
3306 ADD_64(estats->t##_hi, diff.hi, \
3307 estats->t##_lo, diff.lo); \
3308 } while (0)
3310 /* sum[hi:lo] += add */
3311 #define ADD_EXTEND_64(s_hi, s_lo, a) \
3312 do { \
3313 s_lo += a; \
3314 s_hi += (s_lo < a) ? 1 : 0; \
3315 } while (0)
3317 #define UPDATE_EXTEND_STAT(s) \
3318 do { \
3319 ADD_EXTEND_64(pstats->mac_stx[1].s##_hi, \
3320 pstats->mac_stx[1].s##_lo, \
3321 new->s); \
3322 } while (0)
3324 #define UPDATE_EXTEND_TSTAT(s, t) \
3325 do { \
3326 diff = le32_to_cpu(tclient->s) - le32_to_cpu(old_tclient->s); \
3327 old_tclient->s = tclient->s; \
3328 ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3329 } while (0)
3331 #define UPDATE_EXTEND_USTAT(s, t) \
3332 do { \
3333 diff = le32_to_cpu(uclient->s) - le32_to_cpu(old_uclient->s); \
3334 old_uclient->s = uclient->s; \
3335 ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3336 } while (0)
3338 #define UPDATE_EXTEND_XSTAT(s, t) \
3339 do { \
3340 diff = le32_to_cpu(xclient->s) - le32_to_cpu(old_xclient->s); \
3341 old_xclient->s = xclient->s; \
3342 ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3343 } while (0)
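/* The TSTAT/USTAT/XSTAT variants share one pattern: take the 32-bit
   delta of a per-client storm counter against its old_* snapshot, then
   extend it into the matching 64-bit qstats field, e.g.
   UPDATE_EXTEND_TSTAT(rcv_unicast_pkts, total_unicast_packets_received)
   as used in bnx2x_storm_stats_update() below. */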
3345 /* minuend -= subtrahend */
3346 #define SUB_64(m_hi, s_hi, m_lo, s_lo) \
3347 do { \
3348 DIFF_64(m_hi, m_hi, s_hi, m_lo, m_lo, s_lo); \
3349 } while (0)
3351 /* minuend[hi:lo] -= subtrahend */
3352 #define SUB_EXTEND_64(m_hi, m_lo, s) \
3353 do { \
3354 SUB_64(m_hi, 0, m_lo, s); \
3355 } while (0)
3357 #define SUB_EXTEND_USTAT(s, t) \
3358 do { \
3359 diff = le32_to_cpu(uclient->s) - le32_to_cpu(old_uclient->s); \
3360 SUB_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3361 } while (0)
3364 * General service functions
3367 static inline long bnx2x_hilo(u32 *hiref)
3369 u32 lo = *(hiref + 1);
3370 #if (BITS_PER_LONG == 64)
3373 return HILO_U64(hi, lo);
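/* e.g. bnx2x_hilo(&estats->total_bytes_received_hi) combines the _hi
   dword with the adjacent _lo dword (the stats structs lay them out back
   to back) into one value; on 32-bit kernels, where long cannot hold
   both dwords, in effect only the low dword survives. */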
3380 * Init service functions
3383 static void bnx2x_storm_stats_post(struct bnx2x *bp)
3385 if (!bp->stats_pending) {
3386 struct eth_query_ramrod_data ramrod_data = {0};
3389 ramrod_data.drv_counter = bp->stats_counter++;
3390 ramrod_data.collect_port = bp->port.pmf ? 1 : 0;
3391 for_each_queue(bp, i)
3392 ramrod_data.ctr_id_vector |= (1 << bp->fp[i].cl_id);
3394 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_STAT_QUERY, 0,
3395 ((u32 *)&ramrod_data)[1],
3396 ((u32 *)&ramrod_data)[0], 0);
3398 /* stats ramrod has its own slot on the spq */
3400 bp->stats_pending = 1;
3405 static void bnx2x_hw_stats_post(struct bnx2x *bp)
3407 struct dmae_command *dmae = &bp->stats_dmae;
3408 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3410 *stats_comp = DMAE_COMP_VAL;
3411 if (CHIP_REV_IS_SLOW(bp))
3415 if (bp->executer_idx) {
3416 int loader_idx = PMF_DMAE_C(bp);
3418 memset(dmae, 0, sizeof(struct dmae_command));
3420 dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3421 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3422 DMAE_CMD_DST_RESET |
3424 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3426 DMAE_CMD_ENDIANITY_DW_SWAP |
3428 (BP_PORT(bp) ? DMAE_CMD_PORT_1 :
3430 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3431 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, dmae[0]));
3432 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, dmae[0]));
3433 dmae->dst_addr_lo = (DMAE_REG_CMD_MEM +
3434 sizeof(struct dmae_command) *
3435 (loader_idx + 1)) >> 2;
3436 dmae->dst_addr_hi = 0;
3437 dmae->len = sizeof(struct dmae_command) >> 2;
3440 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx + 1] >> 2;
3441 dmae->comp_addr_hi = 0;
3445 bnx2x_post_dmae(bp, dmae, loader_idx);
3447 } else if (bp->func_stx) {
3449 bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
3453 static int bnx2x_stats_comp(struct bnx2x *bp)
3455 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3459 while (*stats_comp != DMAE_COMP_VAL) {
3461 BNX2X_ERR("timeout waiting for stats finished\n");
3471 * Statistics service functions
3474 static void bnx2x_stats_pmf_update(struct bnx2x *bp)
3476 struct dmae_command *dmae;
3478 int loader_idx = PMF_DMAE_C(bp);
3479 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3482 if (!IS_E1HMF(bp) || !bp->port.pmf || !bp->port.port_stx) {
3483 BNX2X_ERR("BUG!\n");
3487 bp->executer_idx = 0;
3489 opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3491 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3493 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3495 DMAE_CMD_ENDIANITY_DW_SWAP |
3497 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3498 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3500 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3501 dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
3502 dmae->src_addr_lo = bp->port.port_stx >> 2;
3503 dmae->src_addr_hi = 0;
3504 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3505 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3506 dmae->len = DMAE_LEN32_RD_MAX;
3507 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3508 dmae->comp_addr_hi = 0;
3511 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3512 dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
3513 dmae->src_addr_lo = (bp->port.port_stx >> 2) + DMAE_LEN32_RD_MAX;
3514 dmae->src_addr_hi = 0;
3515 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats) +
3516 DMAE_LEN32_RD_MAX * 4);
3517 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats) +
3518 DMAE_LEN32_RD_MAX * 4);
3519 dmae->len = (sizeof(struct host_port_stats) >> 2) - DMAE_LEN32_RD_MAX;
3520 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3521 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3522 dmae->comp_val = DMAE_COMP_VAL;
3525 bnx2x_hw_stats_post(bp);
3526 bnx2x_stats_comp(bp);
3529 static void bnx2x_port_stats_init(struct bnx2x *bp)
3531 struct dmae_command *dmae;
3532 int port = BP_PORT(bp);
3533 int vn = BP_E1HVN(bp);
3535 int loader_idx = PMF_DMAE_C(bp);
3537 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3540 if (!bp->link_vars.link_up || !bp->port.pmf) {
3541 BNX2X_ERR("BUG!\n");
3545 bp->executer_idx = 0;
3548 opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3549 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3550 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3552 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3554 DMAE_CMD_ENDIANITY_DW_SWAP |
3556 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3557 (vn << DMAE_CMD_E1HVN_SHIFT));
3559 if (bp->port.port_stx) {
3561 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3562 dmae->opcode = opcode;
3563 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3564 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3565 dmae->dst_addr_lo = bp->port.port_stx >> 2;
3566 dmae->dst_addr_hi = 0;
3567 dmae->len = sizeof(struct host_port_stats) >> 2;
3568 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3569 dmae->comp_addr_hi = 0;
3575 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3576 dmae->opcode = opcode;
3577 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
3578 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
3579 dmae->dst_addr_lo = bp->func_stx >> 2;
3580 dmae->dst_addr_hi = 0;
3581 dmae->len = sizeof(struct host_func_stats) >> 2;
3582 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3583 dmae->comp_addr_hi = 0;
3588 opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3589 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3590 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3592 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3594 DMAE_CMD_ENDIANITY_DW_SWAP |
3596 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3597 (vn << DMAE_CMD_E1HVN_SHIFT));
3599 if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
3601 mac_addr = (port ? NIG_REG_INGRESS_BMAC1_MEM :
3602 NIG_REG_INGRESS_BMAC0_MEM);
3604 /* BIGMAC_REGISTER_TX_STAT_GTPKT ..
3605 BIGMAC_REGISTER_TX_STAT_GTBYT */
3606 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3607 dmae->opcode = opcode;
3608 dmae->src_addr_lo = (mac_addr +
3609 BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
3610 dmae->src_addr_hi = 0;
3611 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
3612 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
3613 dmae->len = (8 + BIGMAC_REGISTER_TX_STAT_GTBYT -
3614 BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
3615 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3616 dmae->comp_addr_hi = 0;
3619 /* BIGMAC_REGISTER_RX_STAT_GR64 ..
3620 BIGMAC_REGISTER_RX_STAT_GRIPJ */
3621 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3622 dmae->opcode = opcode;
3623 dmae->src_addr_lo = (mac_addr +
3624 BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
3625 dmae->src_addr_hi = 0;
3626 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
3627 offsetof(struct bmac_stats, rx_stat_gr64_lo));
3628 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
3629 offsetof(struct bmac_stats, rx_stat_gr64_lo));
3630 dmae->len = (8 + BIGMAC_REGISTER_RX_STAT_GRIPJ -
3631 BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
3632 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3633 dmae->comp_addr_hi = 0;
3636 } else if (bp->link_vars.mac_type == MAC_TYPE_EMAC) {
3638 mac_addr = (port ? GRCBASE_EMAC1 : GRCBASE_EMAC0);
3640 /* EMAC_REG_EMAC_RX_STAT_AC (EMAC_REG_EMAC_RX_STAT_AC_COUNT)*/
3641 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3642 dmae->opcode = opcode;
3643 dmae->src_addr_lo = (mac_addr +
3644 EMAC_REG_EMAC_RX_STAT_AC) >> 2;
3645 dmae->src_addr_hi = 0;
3646 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
3647 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
3648 dmae->len = EMAC_REG_EMAC_RX_STAT_AC_COUNT;
3649 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3650 dmae->comp_addr_hi = 0;
3653 /* EMAC_REG_EMAC_RX_STAT_AC_28 */
3654 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3655 dmae->opcode = opcode;
3656 dmae->src_addr_lo = (mac_addr +
3657 EMAC_REG_EMAC_RX_STAT_AC_28) >> 2;
3658 dmae->src_addr_hi = 0;
3659 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
3660 offsetof(struct emac_stats, rx_stat_falsecarriererrors));
3661 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
3662 offsetof(struct emac_stats, rx_stat_falsecarriererrors));
3664 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3665 dmae->comp_addr_hi = 0;
3668 /* EMAC_REG_EMAC_TX_STAT_AC (EMAC_REG_EMAC_TX_STAT_AC_COUNT)*/
3669 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3670 dmae->opcode = opcode;
3671 dmae->src_addr_lo = (mac_addr +
3672 EMAC_REG_EMAC_TX_STAT_AC) >> 2;
3673 dmae->src_addr_hi = 0;
3674 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
3675 offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
3676 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
3677 offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
3678 dmae->len = EMAC_REG_EMAC_TX_STAT_AC_COUNT;
3679 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3680 dmae->comp_addr_hi = 0;
3685 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3686 dmae->opcode = opcode;
3687 dmae->src_addr_lo = (port ? NIG_REG_STAT1_BRB_DISCARD :
3688 NIG_REG_STAT0_BRB_DISCARD) >> 2;
3689 dmae->src_addr_hi = 0;
3690 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats));
3691 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats));
3692 dmae->len = (sizeof(struct nig_stats) - 4*sizeof(u32)) >> 2;
3693 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3694 dmae->comp_addr_hi = 0;
3697 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3698 dmae->opcode = opcode;
3699 dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT0 :
3700 NIG_REG_STAT0_EGRESS_MAC_PKT0) >> 2;
3701 dmae->src_addr_hi = 0;
3702 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
3703 offsetof(struct nig_stats, egress_mac_pkt0_lo));
3704 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
3705 offsetof(struct nig_stats, egress_mac_pkt0_lo));
3706 dmae->len = (2*sizeof(u32)) >> 2;
3707 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3708 dmae->comp_addr_hi = 0;
3711 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3712 dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3713 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
3714 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3716 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3718 DMAE_CMD_ENDIANITY_DW_SWAP |
3720 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3721 (vn << DMAE_CMD_E1HVN_SHIFT));
3722 dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT1 :
3723 NIG_REG_STAT0_EGRESS_MAC_PKT1) >> 2;
3724 dmae->src_addr_hi = 0;
3725 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
3726 offsetof(struct nig_stats, egress_mac_pkt1_lo));
3727 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
3728 offsetof(struct nig_stats, egress_mac_pkt1_lo));
3729 dmae->len = (2*sizeof(u32)) >> 2;
3730 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3731 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3732 dmae->comp_val = DMAE_COMP_VAL;
3737 static void bnx2x_func_stats_init(struct bnx2x *bp)
3739 struct dmae_command *dmae = &bp->stats_dmae;
3740 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3743 if (!bp->func_stx) {
3744 BNX2X_ERR("BUG!\n");
3748 bp->executer_idx = 0;
3749 memset(dmae, 0, sizeof(struct dmae_command));
3751 dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3752 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
3753 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3755 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3757 DMAE_CMD_ENDIANITY_DW_SWAP |
3759 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3760 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3761 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
3762 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
3763 dmae->dst_addr_lo = bp->func_stx >> 2;
3764 dmae->dst_addr_hi = 0;
3765 dmae->len = sizeof(struct host_func_stats) >> 2;
3766 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3767 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3768 dmae->comp_val = DMAE_COMP_VAL;
3773 static void bnx2x_stats_start(struct bnx2x *bp)
3776 bnx2x_port_stats_init(bp);
3778 else if (bp->func_stx)
3779 bnx2x_func_stats_init(bp);
3781 bnx2x_hw_stats_post(bp);
3782 bnx2x_storm_stats_post(bp);
3785 static void bnx2x_stats_pmf_start(struct bnx2x *bp)
3787 bnx2x_stats_comp(bp);
3788 bnx2x_stats_pmf_update(bp);
3789 bnx2x_stats_start(bp);
3792 static void bnx2x_stats_restart(struct bnx2x *bp)
3794 bnx2x_stats_comp(bp);
3795 bnx2x_stats_start(bp);
3798 static void bnx2x_bmac_stats_update(struct bnx2x *bp)
3800 struct bmac_stats *new = bnx2x_sp(bp, mac_stats.bmac_stats);
3801 struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3802 struct bnx2x_eth_stats *estats = &bp->eth_stats;
3808 UPDATE_STAT64(rx_stat_grerb, rx_stat_ifhcinbadoctets);
3809 UPDATE_STAT64(rx_stat_grfcs, rx_stat_dot3statsfcserrors);
3810 UPDATE_STAT64(rx_stat_grund, rx_stat_etherstatsundersizepkts);
3811 UPDATE_STAT64(rx_stat_grovr, rx_stat_dot3statsframestoolong);
3812 UPDATE_STAT64(rx_stat_grfrg, rx_stat_etherstatsfragments);
3813 UPDATE_STAT64(rx_stat_grjbr, rx_stat_etherstatsjabbers);
3814 UPDATE_STAT64(rx_stat_grxcf, rx_stat_maccontrolframesreceived);
3815 UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffstateentered);
3816 UPDATE_STAT64(rx_stat_grxpf, rx_stat_bmac_xpf);
3817 UPDATE_STAT64(tx_stat_gtxpf, tx_stat_outxoffsent);
3818 UPDATE_STAT64(tx_stat_gtxpf, tx_stat_flowcontroldone);
3819 UPDATE_STAT64(tx_stat_gt64, tx_stat_etherstatspkts64octets);
3820 UPDATE_STAT64(tx_stat_gt127,
3821 tx_stat_etherstatspkts65octetsto127octets);
3822 UPDATE_STAT64(tx_stat_gt255,
3823 tx_stat_etherstatspkts128octetsto255octets);
3824 UPDATE_STAT64(tx_stat_gt511,
3825 tx_stat_etherstatspkts256octetsto511octets);
3826 UPDATE_STAT64(tx_stat_gt1023,
3827 tx_stat_etherstatspkts512octetsto1023octets);
3828 UPDATE_STAT64(tx_stat_gt1518,
3829 tx_stat_etherstatspkts1024octetsto1522octets);
3830 UPDATE_STAT64(tx_stat_gt2047, tx_stat_bmac_2047);
3831 UPDATE_STAT64(tx_stat_gt4095, tx_stat_bmac_4095);
3832 UPDATE_STAT64(tx_stat_gt9216, tx_stat_bmac_9216);
3833 UPDATE_STAT64(tx_stat_gt16383, tx_stat_bmac_16383);
3834 UPDATE_STAT64(tx_stat_gterr,
3835 tx_stat_dot3statsinternalmactransmiterrors);
3836 UPDATE_STAT64(tx_stat_gtufl, tx_stat_bmac_ufl);
3838 estats->pause_frames_received_hi =
3839 pstats->mac_stx[1].rx_stat_bmac_xpf_hi;
3840 estats->pause_frames_received_lo =
3841 pstats->mac_stx[1].rx_stat_bmac_xpf_lo;
3843 estats->pause_frames_sent_hi =
3844 pstats->mac_stx[1].tx_stat_outxoffsent_hi;
3845 estats->pause_frames_sent_lo =
3846 pstats->mac_stx[1].tx_stat_outxoffsent_lo;
3849 static void bnx2x_emac_stats_update(struct bnx2x *bp)
3851 struct emac_stats *new = bnx2x_sp(bp, mac_stats.emac_stats);
3852 struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3853 struct bnx2x_eth_stats *estats = &bp->eth_stats;
3855 UPDATE_EXTEND_STAT(rx_stat_ifhcinbadoctets);
3856 UPDATE_EXTEND_STAT(tx_stat_ifhcoutbadoctets);
3857 UPDATE_EXTEND_STAT(rx_stat_dot3statsfcserrors);
3858 UPDATE_EXTEND_STAT(rx_stat_dot3statsalignmenterrors);
3859 UPDATE_EXTEND_STAT(rx_stat_dot3statscarriersenseerrors);
3860 UPDATE_EXTEND_STAT(rx_stat_falsecarriererrors);
3861 UPDATE_EXTEND_STAT(rx_stat_etherstatsundersizepkts);
3862 UPDATE_EXTEND_STAT(rx_stat_dot3statsframestoolong);
3863 UPDATE_EXTEND_STAT(rx_stat_etherstatsfragments);
3864 UPDATE_EXTEND_STAT(rx_stat_etherstatsjabbers);
3865 UPDATE_EXTEND_STAT(rx_stat_maccontrolframesreceived);
3866 UPDATE_EXTEND_STAT(rx_stat_xoffstateentered);
3867 UPDATE_EXTEND_STAT(rx_stat_xonpauseframesreceived);
3868 UPDATE_EXTEND_STAT(rx_stat_xoffpauseframesreceived);
3869 UPDATE_EXTEND_STAT(tx_stat_outxonsent);
3870 UPDATE_EXTEND_STAT(tx_stat_outxoffsent);
3871 UPDATE_EXTEND_STAT(tx_stat_flowcontroldone);
3872 UPDATE_EXTEND_STAT(tx_stat_etherstatscollisions);
3873 UPDATE_EXTEND_STAT(tx_stat_dot3statssinglecollisionframes);
3874 UPDATE_EXTEND_STAT(tx_stat_dot3statsmultiplecollisionframes);
3875 UPDATE_EXTEND_STAT(tx_stat_dot3statsdeferredtransmissions);
3876 UPDATE_EXTEND_STAT(tx_stat_dot3statsexcessivecollisions);
3877 UPDATE_EXTEND_STAT(tx_stat_dot3statslatecollisions);
3878 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts64octets);
3879 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts65octetsto127octets);
3880 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts128octetsto255octets);
3881 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts256octetsto511octets);
3882 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts512octetsto1023octets);
3883 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts1024octetsto1522octets);
3884 UPDATE_EXTEND_STAT(tx_stat_etherstatspktsover1522octets);
3885 UPDATE_EXTEND_STAT(tx_stat_dot3statsinternalmactransmiterrors);
3887 estats->pause_frames_received_hi =
3888 pstats->mac_stx[1].rx_stat_xonpauseframesreceived_hi;
3889 estats->pause_frames_received_lo =
3890 pstats->mac_stx[1].rx_stat_xonpauseframesreceived_lo;
3891 ADD_64(estats->pause_frames_received_hi,
3892 pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_hi,
3893 estats->pause_frames_received_lo,
3894 pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_lo);
3896 estats->pause_frames_sent_hi =
3897 pstats->mac_stx[1].tx_stat_outxonsent_hi;
3898 estats->pause_frames_sent_lo =
3899 pstats->mac_stx[1].tx_stat_outxonsent_lo;
3900 ADD_64(estats->pause_frames_sent_hi,
3901 pstats->mac_stx[1].tx_stat_outxoffsent_hi,
3902 estats->pause_frames_sent_lo,
3903 pstats->mac_stx[1].tx_stat_outxoffsent_lo);
3906 static int bnx2x_hw_stats_update(struct bnx2x *bp)
3908 struct nig_stats *new = bnx2x_sp(bp, nig_stats);
3909 struct nig_stats *old = &(bp->port.old_nig_stats);
3910 struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3911 struct bnx2x_eth_stats *estats = &bp->eth_stats;
3918 if (bp->link_vars.mac_type == MAC_TYPE_BMAC)
3919 bnx2x_bmac_stats_update(bp);
3921 else if (bp->link_vars.mac_type == MAC_TYPE_EMAC)
3922 bnx2x_emac_stats_update(bp);
3924 else { /* unreached */
3925 BNX2X_ERR("stats updated by DMAE but no MAC active\n");
3929 ADD_EXTEND_64(pstats->brb_drop_hi, pstats->brb_drop_lo,
3930 new->brb_discard - old->brb_discard);
3931 ADD_EXTEND_64(estats->brb_truncate_hi, estats->brb_truncate_lo,
3932 new->brb_truncate - old->brb_truncate);
3934 UPDATE_STAT64_NIG(egress_mac_pkt0,
3935 etherstatspkts1024octetsto1522octets);
3936 UPDATE_STAT64_NIG(egress_mac_pkt1, etherstatspktsover1522octets);
3938 memcpy(old, new, sizeof(struct nig_stats));
3940 memcpy(&(estats->rx_stat_ifhcinbadoctets_hi), &(pstats->mac_stx[1]),
3941 sizeof(struct mac_stx));
3942 estats->brb_drop_hi = pstats->brb_drop_hi;
3943 estats->brb_drop_lo = pstats->brb_drop_lo;
3945 pstats->host_port_stats_start = ++pstats->host_port_stats_end;
3947 nig_timer_max = SHMEM_RD(bp, port_mb[BP_PORT(bp)].stat_nig_timer);
3948 if (nig_timer_max != estats->nig_timer_max) {
3949 estats->nig_timer_max = nig_timer_max;
3950 BNX2X_ERR("NIG timer max (%u)\n", estats->nig_timer_max);
3956 static int bnx2x_storm_stats_update(struct bnx2x *bp)
3958 struct eth_stats_query *stats = bnx2x_sp(bp, fw_stats);
3959 struct tstorm_per_port_stats *tport =
3960 &stats->tstorm_common.port_statistics;
3961 struct host_func_stats *fstats = bnx2x_sp(bp, func_stats);
3962 struct bnx2x_eth_stats *estats = &bp->eth_stats;
3965 memcpy(&(fstats->total_bytes_received_hi),
3966 &(bnx2x_sp(bp, func_stats_base)->total_bytes_received_hi),
3967 sizeof(struct host_func_stats) - 2*sizeof(u32));
3968 estats->error_bytes_received_hi = 0;
3969 estats->error_bytes_received_lo = 0;
3970 estats->etherstatsoverrsizepkts_hi = 0;
3971 estats->etherstatsoverrsizepkts_lo = 0;
3972 estats->no_buff_discard_hi = 0;
3973 estats->no_buff_discard_lo = 0;
3975 for_each_rx_queue(bp, i) {
3976 struct bnx2x_fastpath *fp = &bp->fp[i];
3977 int cl_id = fp->cl_id;
3978 struct tstorm_per_client_stats *tclient =
3979 &stats->tstorm_common.client_statistics[cl_id];
3980 struct tstorm_per_client_stats *old_tclient = &fp->old_tclient;
3981 struct ustorm_per_client_stats *uclient =
3982 &stats->ustorm_common.client_statistics[cl_id];
3983 struct ustorm_per_client_stats *old_uclient = &fp->old_uclient;
3984 struct xstorm_per_client_stats *xclient =
3985 &stats->xstorm_common.client_statistics[cl_id];
3986 struct xstorm_per_client_stats *old_xclient = &fp->old_xclient;
3987 struct bnx2x_eth_q_stats *qstats = &fp->eth_q_stats;
3990 /* are storm stats valid? */
3991 if ((u16)(le16_to_cpu(xclient->stats_counter) + 1) !=
3992 bp->stats_counter) {
3993 DP(BNX2X_MSG_STATS, "[%d] stats not updated by xstorm"
3994 " xstorm counter (%d) != stats_counter (%d)\n",
3995 i, xclient->stats_counter, bp->stats_counter);
3998 if ((u16)(le16_to_cpu(tclient->stats_counter) + 1) !=
3999 bp->stats_counter) {
4000 DP(BNX2X_MSG_STATS, "[%d] stats not updated by tstorm"
4001 " tstorm counter (%d) != stats_counter (%d)\n",
4002 i, tclient->stats_counter, bp->stats_counter);
4005 if ((u16)(le16_to_cpu(uclient->stats_counter) + 1) !=
4006 bp->stats_counter) {
4007 DP(BNX2X_MSG_STATS, "[%d] stats not updated by ustorm"
4008 " ustorm counter (%d) != stats_counter (%d)\n",
4009 i, uclient->stats_counter, bp->stats_counter);
4013 qstats->total_bytes_received_hi =
4014 le32_to_cpu(tclient->rcv_broadcast_bytes.hi);
4015 qstats->total_bytes_received_lo =
4016 le32_to_cpu(tclient->rcv_broadcast_bytes.lo);
4018 ADD_64(qstats->total_bytes_received_hi,
4019 le32_to_cpu(tclient->rcv_multicast_bytes.hi),
4020 qstats->total_bytes_received_lo,
4021 le32_to_cpu(tclient->rcv_multicast_bytes.lo));
4023 ADD_64(qstats->total_bytes_received_hi,
4024 le32_to_cpu(tclient->rcv_unicast_bytes.hi),
4025 qstats->total_bytes_received_lo,
4026 le32_to_cpu(tclient->rcv_unicast_bytes.lo));
4028 qstats->valid_bytes_received_hi =
4029 qstats->total_bytes_received_hi;
4030 qstats->valid_bytes_received_lo =
4031 qstats->total_bytes_received_lo;
4033 qstats->error_bytes_received_hi =
4034 le32_to_cpu(tclient->rcv_error_bytes.hi);
4035 qstats->error_bytes_received_lo =
4036 le32_to_cpu(tclient->rcv_error_bytes.lo);
4038 ADD_64(qstats->total_bytes_received_hi,
4039 qstats->error_bytes_received_hi,
4040 qstats->total_bytes_received_lo,
4041 qstats->error_bytes_received_lo);
4043 UPDATE_EXTEND_TSTAT(rcv_unicast_pkts,
4044 total_unicast_packets_received);
4045 UPDATE_EXTEND_TSTAT(rcv_multicast_pkts,
4046 total_multicast_packets_received);
4047 UPDATE_EXTEND_TSTAT(rcv_broadcast_pkts,
4048 total_broadcast_packets_received);
4049 UPDATE_EXTEND_TSTAT(packets_too_big_discard,
4050 etherstatsoverrsizepkts);
4051 UPDATE_EXTEND_TSTAT(no_buff_discard, no_buff_discard);
4053 SUB_EXTEND_USTAT(ucast_no_buff_pkts,
4054 total_unicast_packets_received);
4055 SUB_EXTEND_USTAT(mcast_no_buff_pkts,
4056 total_multicast_packets_received);
4057 SUB_EXTEND_USTAT(bcast_no_buff_pkts,
4058 total_broadcast_packets_received);
4059 UPDATE_EXTEND_USTAT(ucast_no_buff_pkts, no_buff_discard);
4060 UPDATE_EXTEND_USTAT(mcast_no_buff_pkts, no_buff_discard);
4061 UPDATE_EXTEND_USTAT(bcast_no_buff_pkts, no_buff_discard);
4063 qstats->total_bytes_transmitted_hi =
4064 le32_to_cpu(xclient->unicast_bytes_sent.hi);
4065 qstats->total_bytes_transmitted_lo =
4066 le32_to_cpu(xclient->unicast_bytes_sent.lo);
4068 ADD_64(qstats->total_bytes_transmitted_hi,
4069 le32_to_cpu(xclient->multicast_bytes_sent.hi),
4070 qstats->total_bytes_transmitted_lo,
4071 le32_to_cpu(xclient->multicast_bytes_sent.lo));
4073 ADD_64(qstats->total_bytes_transmitted_hi,
4074 le32_to_cpu(xclient->broadcast_bytes_sent.hi),
4075 qstats->total_bytes_transmitted_lo,
4076 le32_to_cpu(xclient->broadcast_bytes_sent.lo));
4078 UPDATE_EXTEND_XSTAT(unicast_pkts_sent,
4079 total_unicast_packets_transmitted);
4080 UPDATE_EXTEND_XSTAT(multicast_pkts_sent,
4081 total_multicast_packets_transmitted);
4082 UPDATE_EXTEND_XSTAT(broadcast_pkts_sent,
4083 total_broadcast_packets_transmitted);
4085 old_tclient->checksum_discard = tclient->checksum_discard;
4086 old_tclient->ttl0_discard = tclient->ttl0_discard;
4088 ADD_64(fstats->total_bytes_received_hi,
4089 qstats->total_bytes_received_hi,
4090 fstats->total_bytes_received_lo,
4091 qstats->total_bytes_received_lo);
4092 ADD_64(fstats->total_bytes_transmitted_hi,
4093 qstats->total_bytes_transmitted_hi,
4094 fstats->total_bytes_transmitted_lo,
4095 qstats->total_bytes_transmitted_lo);
4096 ADD_64(fstats->total_unicast_packets_received_hi,
4097 qstats->total_unicast_packets_received_hi,
4098 fstats->total_unicast_packets_received_lo,
4099 qstats->total_unicast_packets_received_lo);
4100 ADD_64(fstats->total_multicast_packets_received_hi,
4101 qstats->total_multicast_packets_received_hi,
4102 fstats->total_multicast_packets_received_lo,
4103 qstats->total_multicast_packets_received_lo);
4104 ADD_64(fstats->total_broadcast_packets_received_hi,
4105 qstats->total_broadcast_packets_received_hi,
4106 fstats->total_broadcast_packets_received_lo,
4107 qstats->total_broadcast_packets_received_lo);
4108 ADD_64(fstats->total_unicast_packets_transmitted_hi,
4109 qstats->total_unicast_packets_transmitted_hi,
4110 fstats->total_unicast_packets_transmitted_lo,
4111 qstats->total_unicast_packets_transmitted_lo);
4112 ADD_64(fstats->total_multicast_packets_transmitted_hi,
4113 qstats->total_multicast_packets_transmitted_hi,
4114 fstats->total_multicast_packets_transmitted_lo,
4115 qstats->total_multicast_packets_transmitted_lo);
4116 ADD_64(fstats->total_broadcast_packets_transmitted_hi,
4117 qstats->total_broadcast_packets_transmitted_hi,
4118 fstats->total_broadcast_packets_transmitted_lo,
4119 qstats->total_broadcast_packets_transmitted_lo);
4120 ADD_64(fstats->valid_bytes_received_hi,
4121 qstats->valid_bytes_received_hi,
4122 fstats->valid_bytes_received_lo,
4123 qstats->valid_bytes_received_lo);
4125 ADD_64(estats->error_bytes_received_hi,
4126 qstats->error_bytes_received_hi,
4127 estats->error_bytes_received_lo,
4128 qstats->error_bytes_received_lo);
4129 ADD_64(estats->etherstatsoverrsizepkts_hi,
4130 qstats->etherstatsoverrsizepkts_hi,
4131 estats->etherstatsoverrsizepkts_lo,
4132 qstats->etherstatsoverrsizepkts_lo);
4133 ADD_64(estats->no_buff_discard_hi, qstats->no_buff_discard_hi,
4134 estats->no_buff_discard_lo, qstats->no_buff_discard_lo);
4137 ADD_64(fstats->total_bytes_received_hi,
4138 estats->rx_stat_ifhcinbadoctets_hi,
4139 fstats->total_bytes_received_lo,
4140 estats->rx_stat_ifhcinbadoctets_lo);
4142 memcpy(estats, &(fstats->total_bytes_received_hi),
4143 sizeof(struct host_func_stats) - 2*sizeof(u32));
4145 ADD_64(estats->etherstatsoverrsizepkts_hi,
4146 estats->rx_stat_dot3statsframestoolong_hi,
4147 estats->etherstatsoverrsizepkts_lo,
4148 estats->rx_stat_dot3statsframestoolong_lo);
4149 ADD_64(estats->error_bytes_received_hi,
4150 estats->rx_stat_ifhcinbadoctets_hi,
4151 estats->error_bytes_received_lo,
4152 estats->rx_stat_ifhcinbadoctets_lo);
4155 estats->mac_filter_discard =
4156 le32_to_cpu(tport->mac_filter_discard);
4157 estats->xxoverflow_discard =
4158 le32_to_cpu(tport->xxoverflow_discard);
4159 estats->brb_truncate_discard =
4160 le32_to_cpu(tport->brb_truncate_discard);
4161 estats->mac_discard = le32_to_cpu(tport->mac_discard);
4164 fstats->host_func_stats_start = ++fstats->host_func_stats_end;
4166 bp->stats_pending = 0;
4171 static void bnx2x_net_stats_update(struct bnx2x *bp)
4173 struct bnx2x_eth_stats *estats = &bp->eth_stats;
4174 struct net_device_stats *nstats = &bp->dev->stats;
4177 nstats->rx_packets =
4178 bnx2x_hilo(&estats->total_unicast_packets_received_hi) +
4179 bnx2x_hilo(&estats->total_multicast_packets_received_hi) +
4180 bnx2x_hilo(&estats->total_broadcast_packets_received_hi);
4182 nstats->tx_packets =
4183 bnx2x_hilo(&estats->total_unicast_packets_transmitted_hi) +
4184 bnx2x_hilo(&estats->total_multicast_packets_transmitted_hi) +
4185 bnx2x_hilo(&estats->total_broadcast_packets_transmitted_hi);
4187 nstats->rx_bytes = bnx2x_hilo(&estats->total_bytes_received_hi);
4189 nstats->tx_bytes = bnx2x_hilo(&estats->total_bytes_transmitted_hi);
4191 nstats->rx_dropped = estats->mac_discard;
4192 for_each_rx_queue(bp, i)
4193 nstats->rx_dropped +=
4194 le32_to_cpu(bp->fp[i].old_tclient.checksum_discard);
4196 nstats->tx_dropped = 0;
4199 bnx2x_hilo(&estats->total_multicast_packets_received_hi);
4201 nstats->collisions =
4202 bnx2x_hilo(&estats->tx_stat_etherstatscollisions_hi);
4204 nstats->rx_length_errors =
4205 bnx2x_hilo(&estats->rx_stat_etherstatsundersizepkts_hi) +
4206 bnx2x_hilo(&estats->etherstatsoverrsizepkts_hi);
4207 nstats->rx_over_errors = bnx2x_hilo(&estats->brb_drop_hi) +
4208 bnx2x_hilo(&estats->brb_truncate_hi);
4209 nstats->rx_crc_errors =
4210 bnx2x_hilo(&estats->rx_stat_dot3statsfcserrors_hi);
4211 nstats->rx_frame_errors =
4212 bnx2x_hilo(&estats->rx_stat_dot3statsalignmenterrors_hi);
4213 nstats->rx_fifo_errors = bnx2x_hilo(&estats->no_buff_discard_hi);
4214 nstats->rx_missed_errors = estats->xxoverflow_discard;
4216 nstats->rx_errors = nstats->rx_length_errors +
4217 nstats->rx_over_errors +
4218 nstats->rx_crc_errors +
4219 nstats->rx_frame_errors +
4220 nstats->rx_fifo_errors +
4221 nstats->rx_missed_errors;
4223 nstats->tx_aborted_errors =
4224 bnx2x_hilo(&estats->tx_stat_dot3statslatecollisions_hi) +
4225 bnx2x_hilo(&estats->tx_stat_dot3statsexcessivecollisions_hi);
4226 nstats->tx_carrier_errors =
4227 bnx2x_hilo(&estats->rx_stat_dot3statscarriersenseerrors_hi);
4228 nstats->tx_fifo_errors = 0;
4229 nstats->tx_heartbeat_errors = 0;
4230 nstats->tx_window_errors = 0;
4232 nstats->tx_errors = nstats->tx_aborted_errors +
4233 nstats->tx_carrier_errors +
4234 bnx2x_hilo(&estats->tx_stat_dot3statsinternalmactransmiterrors_hi);
4235 }
4237 static void bnx2x_drv_stats_update(struct bnx2x *bp)
4238 {
4239 struct bnx2x_eth_stats *estats = &bp->eth_stats;
4240 int i;
4242 estats->driver_xoff = 0;
4243 estats->rx_err_discard_pkt = 0;
4244 estats->rx_skb_alloc_failed = 0;
4245 estats->hw_csum_err = 0;
4246 for_each_rx_queue(bp, i) {
4247 struct bnx2x_eth_q_stats *qstats = &bp->fp[i].eth_q_stats;
4249 estats->driver_xoff += qstats->driver_xoff;
4250 estats->rx_err_discard_pkt += qstats->rx_err_discard_pkt;
4251 estats->rx_skb_alloc_failed += qstats->rx_skb_alloc_failed;
4252 estats->hw_csum_err += qstats->hw_csum_err;
4253 }
4254 }
4256 static void bnx2x_stats_update(struct bnx2x *bp)
4257 {
4258 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
4260 if (*stats_comp != DMAE_COMP_VAL)
4261 return;
4263 if (bp->port.pmf)
4264 bnx2x_hw_stats_update(bp);
4266 if (bnx2x_storm_stats_update(bp) && (bp->stats_pending++ == 3)) {
4267 BNX2X_ERR("storm stats were not updated for 3 times\n");
4268 bnx2x_panic();
4269 return;
4270 }
4272 bnx2x_net_stats_update(bp);
4273 bnx2x_drv_stats_update(bp);
4275 if (bp->msglevel & NETIF_MSG_TIMER) {
4276 struct bnx2x_fastpath *fp0_rx = bp->fp;
4277 struct bnx2x_fastpath *fp0_tx = &(bp->fp[bp->num_rx_queues]);
4278 struct tstorm_per_client_stats *old_tclient =
4279 &bp->fp->old_tclient;
4280 struct bnx2x_eth_q_stats *qstats = &bp->fp->eth_q_stats;
4281 struct bnx2x_eth_stats *estats = &bp->eth_stats;
4282 struct net_device_stats *nstats = &bp->dev->stats;
4283 int i;
4285 printk(KERN_DEBUG "%s:\n", bp->dev->name);
4286 printk(KERN_DEBUG " tx avail (%4x) tx hc idx (%x)"
4287 " tx pkt (%lx)\n",
4288 bnx2x_tx_avail(fp0_tx),
4289 le16_to_cpu(*fp0_tx->tx_cons_sb), nstats->tx_packets);
4290 printk(KERN_DEBUG " rx usage (%4x) rx hc idx (%x)"
4291 " rx pkt (%lx)\n",
4292 (u16)(le16_to_cpu(*fp0_rx->rx_cons_sb) -
4293 fp0_rx->rx_comp_cons),
4294 le16_to_cpu(*fp0_rx->rx_cons_sb), nstats->rx_packets);
4295 printk(KERN_DEBUG " %s (Xoff events %u) brb drops %u "
4296 "brb truncate %u\n",
4297 (netif_queue_stopped(bp->dev) ? "Xoff" : "Xon"),
4298 qstats->driver_xoff,
4299 estats->brb_drop_lo, estats->brb_truncate_lo);
4300 printk(KERN_DEBUG "tstats: checksum_discard %u "
4301 "packets_too_big_discard %lu no_buff_discard %lu "
4302 "mac_discard %u mac_filter_discard %u "
4303 "xxovrflow_discard %u brb_truncate_discard %u "
4304 "ttl0_discard %u\n",
4305 le32_to_cpu(old_tclient->checksum_discard),
4306 bnx2x_hilo(&qstats->etherstatsoverrsizepkts_hi),
4307 bnx2x_hilo(&qstats->no_buff_discard_hi),
4308 estats->mac_discard, estats->mac_filter_discard,
4309 estats->xxoverflow_discard, estats->brb_truncate_discard,
4310 le32_to_cpu(old_tclient->ttl0_discard));
4312 for_each_queue(bp, i) {
4313 printk(KERN_DEBUG "[%d]: %lu\t%lu\t%lu\n", i,
4314 bnx2x_fp(bp, i, tx_pkt),
4315 bnx2x_fp(bp, i, rx_pkt),
4316 bnx2x_fp(bp, i, rx_calls));
4317 }
4318 }
4320 bnx2x_hw_stats_post(bp);
4321 bnx2x_storm_stats_post(bp);
4322 }
4324 static void bnx2x_port_stats_stop(struct bnx2x *bp)
4325 {
4326 struct dmae_command *dmae;
4327 u32 opcode;
4328 int loader_idx = PMF_DMAE_C(bp);
4329 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
4331 bp->executer_idx = 0;
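/*
 * Build the common DMAE command opcode once: source in host PCI memory,
 * destination in chip GRC space, with the endianness swap mode picked
 * at compile time for the host CPU, and the port/VN fields encoding
 * which function issues the copy.
 */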
4333 opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
4334 DMAE_CMD_C_ENABLE |
4335 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
4336 #ifdef __BIG_ENDIAN
4337 DMAE_CMD_ENDIANITY_B_DW_SWAP |
4338 #else
4339 DMAE_CMD_ENDIANITY_DW_SWAP |
4340 #endif
4341 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
4342 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
4344 if (bp->port.port_stx) {
4346 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4347 if (bp->func_stx)
4348 dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
4349 else
4350 dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
4351 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
4352 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
4353 dmae->dst_addr_lo = bp->port.port_stx >> 2;
4354 dmae->dst_addr_hi = 0;
4355 dmae->len = sizeof(struct host_port_stats) >> 2;
4356 if (bp->func_stx) {
4357 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
4358 dmae->comp_addr_hi = 0;
4359 dmae->comp_val = 1;
4360 } else {
4361 dmae->comp_addr_lo =
4362 U64_LO(bnx2x_sp_mapping(bp, stats_comp));
4363 dmae->comp_addr_hi =
4364 U64_HI(bnx2x_sp_mapping(bp, stats_comp));
4365 dmae->comp_val = DMAE_COMP_VAL;
4367 *stats_comp = 0;
4368 }
4369 }
4371 if (bp->func_stx) {
4373 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4374 dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
4375 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
4376 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
4377 dmae->dst_addr_lo = bp->func_stx >> 2;
4378 dmae->dst_addr_hi = 0;
4379 dmae->len = sizeof(struct host_func_stats) >> 2;
4380 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
4381 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
4382 dmae->comp_val = DMAE_COMP_VAL;
4384 *stats_comp = 0;
4385 }
4386 }
4388 static void bnx2x_stats_stop(struct bnx2x *bp)
4389 {
4390 int update = 0;
4392 bnx2x_stats_comp(bp);
4394 if (bp->port.pmf)
4395 update = (bnx2x_hw_stats_update(bp) == 0);
4397 update |= (bnx2x_storm_stats_update(bp) == 0);
4399 if (update) {
4400 bnx2x_net_stats_update(bp);
4402 if (bp->port.pmf)
4403 bnx2x_port_stats_stop(bp);
4405 bnx2x_hw_stats_post(bp);
4406 bnx2x_stats_comp(bp);
4407 }
4408 }
4410 static void bnx2x_stats_do_nothing(struct bnx2x *bp)
4411 {
4412 }
4414 static const struct {
4415 void (*action)(struct bnx2x *bp);
4416 enum bnx2x_stats_state next_state;
4417 } bnx2x_stats_stm[STATS_STATE_MAX][STATS_EVENT_MAX] = {
4418 /* state event */
4419 {
4420 /* DISABLED PMF */ {bnx2x_stats_pmf_update, STATS_STATE_DISABLED},
4421 /* LINK_UP */ {bnx2x_stats_start, STATS_STATE_ENABLED},
4422 /* UPDATE */ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED},
4423 /* STOP */ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED}
4424 },
4425 {
4426 /* ENABLED PMF */ {bnx2x_stats_pmf_start, STATS_STATE_ENABLED},
4427 /* LINK_UP */ {bnx2x_stats_restart, STATS_STATE_ENABLED},
4428 /* UPDATE */ {bnx2x_stats_update, STATS_STATE_ENABLED},
4429 /* STOP */ {bnx2x_stats_stop, STATS_STATE_DISABLED}
4430 }
4431 };
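/*
 * Statistics state machine: indexed by [current state][event], each
 * entry names the action to run and the state to move to. For example,
 * a STOP event while ENABLED runs bnx2x_stats_stop() and lands in
 * STATS_STATE_DISABLED; UPDATE events in the DISABLED state are
 * ignored.
 */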
4433 static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event)
4434 {
4435 enum bnx2x_stats_state state = bp->stats_state;
4437 bnx2x_stats_stm[state][event].action(bp);
4438 bp->stats_state = bnx2x_stats_stm[state][event].next_state;
4440 /* Make sure the state has been "changed" */
4441 smp_wmb();
4443 if ((event != STATS_EVENT_UPDATE) || (bp->msglevel & NETIF_MSG_TIMER))
4444 DP(BNX2X_MSG_STATS, "state %d -> event %d -> state %d\n",
4445 state, event, bp->stats_state);
4446 }
4448 static void bnx2x_port_stats_base_init(struct bnx2x *bp)
4449 {
4450 struct dmae_command *dmae;
4451 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
4453 /* sanity */
4454 if (!bp->port.pmf || !bp->port.port_stx) {
4455 BNX2X_ERR("BUG!\n");
4456 return;
4457 }
4459 bp->executer_idx = 0;
4461 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4462 dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
4463 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
4464 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
4465 #ifdef __BIG_ENDIAN
4466 DMAE_CMD_ENDIANITY_B_DW_SWAP |
4467 #else
4468 DMAE_CMD_ENDIANITY_DW_SWAP |
4469 #endif
4470 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
4471 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
4472 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
4473 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
4474 dmae->dst_addr_lo = bp->port.port_stx >> 2;
4475 dmae->dst_addr_hi = 0;
4476 dmae->len = sizeof(struct host_port_stats) >> 2;
4477 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
4478 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
4479 dmae->comp_val = DMAE_COMP_VAL;
4481 *stats_comp = 0;
4482 bnx2x_hw_stats_post(bp);
4483 bnx2x_stats_comp(bp);
4484 }
4486 static void bnx2x_func_stats_base_init(struct bnx2x *bp)
4487 {
4488 int vn, vn_max = IS_E1HMF(bp) ? E1HVN_MAX : E1VN_MAX;
4489 int port = BP_PORT(bp);
4490 int func;
4491 u32 func_stx;
4493 /* sanity */
4494 if (!bp->port.pmf || !bp->func_stx) {
4495 BNX2X_ERR("BUG!\n");
4496 return;
4497 }
4499 /* save our func_stx */
4500 func_stx = bp->func_stx;
4502 for (vn = VN_0; vn < vn_max; vn++) {
4503 func = 2*vn + port;
4505 bp->func_stx = SHMEM_RD(bp, func_mb[func].fw_mb_param);
4506 bnx2x_func_stats_init(bp);
4507 bnx2x_hw_stats_post(bp);
4508 bnx2x_stats_comp(bp);
4509 }
4511 /* restore our func_stx */
4512 bp->func_stx = func_stx;
4513 }
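/*
 * bnx2x_func_stats_base_update() below refreshes the base function
 * statistics by DMAE-reading the snapshot at bp->func_stx (the address
 * the MCP published in the function mailbox) back into host memory,
 * using the single pre-allocated bp->stats_dmae command.
 */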
4515 static void bnx2x_func_stats_base_update(struct bnx2x *bp)
4516 {
4517 struct dmae_command *dmae = &bp->stats_dmae;
4518 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
4520 /* sanity */
4521 if (!bp->func_stx) {
4522 BNX2X_ERR("BUG!\n");
4523 return;
4524 }
4526 bp->executer_idx = 0;
4527 memset(dmae, 0, sizeof(struct dmae_command));
4529 dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
4530 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
4531 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
4532 #ifdef __BIG_ENDIAN
4533 DMAE_CMD_ENDIANITY_B_DW_SWAP |
4534 #else
4535 DMAE_CMD_ENDIANITY_DW_SWAP |
4536 #endif
4537 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
4538 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
4539 dmae->src_addr_lo = bp->func_stx >> 2;
4540 dmae->src_addr_hi = 0;
4541 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats_base));
4542 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats_base));
4543 dmae->len = sizeof(struct host_func_stats) >> 2;
4544 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
4545 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
4546 dmae->comp_val = DMAE_COMP_VAL;
4548 *stats_comp = 0;
4549 bnx2x_hw_stats_post(bp);
4550 bnx2x_stats_comp(bp);
4551 }
4553 static void bnx2x_stats_init(struct bnx2x *bp)
4554 {
4555 int port = BP_PORT(bp);
4556 int func = BP_FUNC(bp);
4557 int i;
4559 bp->stats_pending = 0;
4560 bp->executer_idx = 0;
4561 bp->stats_counter = 0;
4563 /* port and func stats for management */
4564 if (!BP_NOMCP(bp)) {
4565 bp->port.port_stx = SHMEM_RD(bp, port_mb[port].port_stx);
4566 bp->func_stx = SHMEM_RD(bp, func_mb[func].fw_mb_param);
4568 } else {
4569 bp->port.port_stx = 0;
4570 bp->func_stx = 0;
4571 }
4572 DP(BNX2X_MSG_STATS, "port_stx 0x%x func_stx 0x%x\n",
4573 bp->port.port_stx, bp->func_stx);
4576 memset(&(bp->port.old_nig_stats), 0, sizeof(struct nig_stats));
4577 bp->port.old_nig_stats.brb_discard =
4578 REG_RD(bp, NIG_REG_STAT0_BRB_DISCARD + port*0x38);
4579 bp->port.old_nig_stats.brb_truncate =
4580 REG_RD(bp, NIG_REG_STAT0_BRB_TRUNCATE + port*0x38);
4581 REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT0 + port*0x50,
4582 &(bp->port.old_nig_stats.egress_mac_pkt0_lo), 2);
4583 REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT1 + port*0x50,
4584 &(bp->port.old_nig_stats.egress_mac_pkt1_lo), 2);
4586 /* function stats */
4587 for_each_queue(bp, i) {
4588 struct bnx2x_fastpath *fp = &bp->fp[i];
4590 memset(&fp->old_tclient, 0,
4591 sizeof(struct tstorm_per_client_stats));
4592 memset(&fp->old_uclient, 0,
4593 sizeof(struct ustorm_per_client_stats));
4594 memset(&fp->old_xclient, 0,
4595 sizeof(struct xstorm_per_client_stats));
4596 memset(&fp->eth_q_stats, 0, sizeof(struct bnx2x_eth_q_stats));
4597 }
4599 memset(&bp->dev->stats, 0, sizeof(struct net_device_stats));
4600 memset(&bp->eth_stats, 0, sizeof(struct bnx2x_eth_stats));
4602 bp->stats_state = STATS_STATE_DISABLED;
4604 if (bp->port.pmf) {
4605 if (bp->port.port_stx)
4606 bnx2x_port_stats_base_init(bp);
4608 if (bp->func_stx)
4609 bnx2x_func_stats_base_init(bp);
4611 } else if (bp->func_stx)
4612 bnx2x_func_stats_base_update(bp);
4613 }
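/*
 * The periodic timer below drives three things: optional polling of
 * queue 0 when the "poll" module parameter is set, the driver/MCP
 * heartbeat (drv_pulse vs. mcp_pulse, which must match or differ by 1
 * modulo the sequence mask), and a STATS_EVENT_UPDATE kick while the
 * device is in the OPEN or DISABLED state.
 */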
4615 static void bnx2x_timer(unsigned long data)
4616 {
4617 struct bnx2x *bp = (struct bnx2x *) data;
4619 if (!netif_running(bp->dev))
4620 return;
4622 if (atomic_read(&bp->intr_sem) != 0)
4623 goto timer_restart;
4625 if (poll) {
4626 struct bnx2x_fastpath *fp = &bp->fp[0];
4627 int rc;
4629 bnx2x_tx_int(fp);
4630 rc = bnx2x_rx_int(fp, 1000);
4631 }
4633 if (!BP_NOMCP(bp)) {
4634 int func = BP_FUNC(bp);
4635 u32 drv_pulse;
4636 u32 mcp_pulse;
4638 ++bp->fw_drv_pulse_wr_seq;
4639 bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK;
4640 /* TBD - add SYSTEM_TIME */
4641 drv_pulse = bp->fw_drv_pulse_wr_seq;
4642 SHMEM_WR(bp, func_mb[func].drv_pulse_mb, drv_pulse);
4644 mcp_pulse = (SHMEM_RD(bp, func_mb[func].mcp_pulse_mb) &
4645 MCP_PULSE_SEQ_MASK);
4646 /* The delta between driver pulse and mcp response
4647 * should be 1 (before mcp response) or 0 (after mcp response)
4648 */
4649 if ((drv_pulse != mcp_pulse) &&
4650 (drv_pulse != ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK))) {
4651 /* someone lost a heartbeat... */
4652 BNX2X_ERR("drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
4653 drv_pulse, mcp_pulse);
4654 }
4655 }
4657 if ((bp->state == BNX2X_STATE_OPEN) ||
4658 (bp->state == BNX2X_STATE_DISABLED))
4659 bnx2x_stats_handle(bp, STATS_EVENT_UPDATE);
4661 timer_restart:
4662 mod_timer(&bp->timer, jiffies + bp->current_interval);
4663 }
4665 /* end of Statistics */
4669 /*
4670 * nic init service functions
4671 */
4673 static void bnx2x_zero_sb(struct bnx2x *bp, int sb_id)
4674 {
4675 int port = BP_PORT(bp);
4678 bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
4679 CSTORM_SB_HOST_STATUS_BLOCK_U_OFFSET(port, sb_id), 0,
4680 CSTORM_SB_STATUS_BLOCK_U_SIZE / 4);
4681 bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
4682 CSTORM_SB_HOST_STATUS_BLOCK_C_OFFSET(port, sb_id), 0,
4683 CSTORM_SB_STATUS_BLOCK_C_SIZE / 4);
4684 }
4686 static void bnx2x_init_sb(struct bnx2x *bp, struct host_status_block *sb,
4687 dma_addr_t mapping, int sb_id)
4688 {
4689 int port = BP_PORT(bp);
4690 int func = BP_FUNC(bp);
4691 int index;
4692 u64 section;
4694 /* USTORM */
4695 section = ((u64)mapping) + offsetof(struct host_status_block,
4696 u_status_block);
4697 sb->u_status_block.status_block_id = sb_id;
4699 REG_WR(bp, BAR_CSTRORM_INTMEM +
4700 CSTORM_SB_HOST_SB_ADDR_U_OFFSET(port, sb_id), U64_LO(section));
4701 REG_WR(bp, BAR_CSTRORM_INTMEM +
4702 ((CSTORM_SB_HOST_SB_ADDR_U_OFFSET(port, sb_id)) + 4),
4703 U64_HI(section));
4704 REG_WR8(bp, BAR_CSTRORM_INTMEM + FP_USB_FUNC_OFF +
4705 CSTORM_SB_HOST_STATUS_BLOCK_U_OFFSET(port, sb_id), func);
4707 for (index = 0; index < HC_USTORM_SB_NUM_INDICES; index++)
4708 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4709 CSTORM_SB_HC_DISABLE_U_OFFSET(port, sb_id, index), 1);
4711 /* CSTORM */
4712 section = ((u64)mapping) + offsetof(struct host_status_block,
4713 c_status_block);
4714 sb->c_status_block.status_block_id = sb_id;
4716 REG_WR(bp, BAR_CSTRORM_INTMEM +
4717 CSTORM_SB_HOST_SB_ADDR_C_OFFSET(port, sb_id), U64_LO(section));
4718 REG_WR(bp, BAR_CSTRORM_INTMEM +
4719 ((CSTORM_SB_HOST_SB_ADDR_C_OFFSET(port, sb_id)) + 4),
4720 U64_HI(section));
4721 REG_WR8(bp, BAR_CSTRORM_INTMEM + FP_CSB_FUNC_OFF +
4722 CSTORM_SB_HOST_STATUS_BLOCK_C_OFFSET(port, sb_id), func);
4724 for (index = 0; index < HC_CSTORM_SB_NUM_INDICES; index++)
4725 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4726 CSTORM_SB_HC_DISABLE_C_OFFSET(port, sb_id, index), 1);
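/*
 * Each index of the new status block starts with host coalescing
 * disabled (the REG_WR16(..., 1) writes above); bnx2x_update_coalesce()
 * later re-enables the Rx/Tx completion indices that have a non-zero
 * coalescing timeout configured.
 */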
4728 bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
4729 }
4731 static void bnx2x_zero_def_sb(struct bnx2x *bp)
4732 {
4733 int func = BP_FUNC(bp);
4735 bnx2x_init_fill(bp, TSEM_REG_FAST_MEMORY +
4736 TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4737 sizeof(struct tstorm_def_status_block)/4);
4738 bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
4739 CSTORM_DEF_SB_HOST_STATUS_BLOCK_U_OFFSET(func), 0,
4740 sizeof(struct cstorm_def_status_block_u)/4);
4741 bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
4742 CSTORM_DEF_SB_HOST_STATUS_BLOCK_C_OFFSET(func), 0,
4743 sizeof(struct cstorm_def_status_block_c)/4);
4744 bnx2x_init_fill(bp, XSEM_REG_FAST_MEMORY +
4745 XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4746 sizeof(struct xstorm_def_status_block)/4);
4747 }
4749 static void bnx2x_init_def_sb(struct bnx2x *bp,
4750 struct host_def_status_block *def_sb,
4751 dma_addr_t mapping, int sb_id)
4752 {
4753 int port = BP_PORT(bp);
4754 int func = BP_FUNC(bp);
4755 int index, val, reg_offset;
4756 u64 section;
4758 /* ATTN */
4759 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4760 atten_status_block);
4761 def_sb->atten_status_block.status_block_id = sb_id;
4763 bp->attn_state = 0;
4765 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
4766 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
4768 for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
4769 bp->attn_group[index].sig[0] = REG_RD(bp,
4770 reg_offset + 0x10*index);
4771 bp->attn_group[index].sig[1] = REG_RD(bp,
4772 reg_offset + 0x4 + 0x10*index);
4773 bp->attn_group[index].sig[2] = REG_RD(bp,
4774 reg_offset + 0x8 + 0x10*index);
4775 bp->attn_group[index].sig[3] = REG_RD(bp,
4776 reg_offset + 0xc + 0x10*index);
4777 }
4779 reg_offset = (port ? HC_REG_ATTN_MSG1_ADDR_L :
4780 HC_REG_ATTN_MSG0_ADDR_L);
4782 REG_WR(bp, reg_offset, U64_LO(section));
4783 REG_WR(bp, reg_offset + 4, U64_HI(section));
4785 reg_offset = (port ? HC_REG_ATTN_NUM_P1 : HC_REG_ATTN_NUM_P0);
4787 val = REG_RD(bp, reg_offset);
4788 val |= sb_id;
4789 REG_WR(bp, reg_offset, val);
4791 /* USTORM */
4792 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4793 u_def_status_block);
4794 def_sb->u_def_status_block.status_block_id = sb_id;
4796 REG_WR(bp, BAR_CSTRORM_INTMEM +
4797 CSTORM_DEF_SB_HOST_SB_ADDR_U_OFFSET(func), U64_LO(section));
4798 REG_WR(bp, BAR_CSTRORM_INTMEM +
4799 ((CSTORM_DEF_SB_HOST_SB_ADDR_U_OFFSET(func)) + 4),
4800 U64_HI(section));
4801 REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_USB_FUNC_OFF +
4802 CSTORM_DEF_SB_HOST_STATUS_BLOCK_U_OFFSET(func), func);
4804 for (index = 0; index < HC_USTORM_DEF_SB_NUM_INDICES; index++)
4805 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4806 CSTORM_DEF_SB_HC_DISABLE_U_OFFSET(func, index), 1);
4808 /* CSTORM */
4809 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4810 c_def_status_block);
4811 def_sb->c_def_status_block.status_block_id = sb_id;
4813 REG_WR(bp, BAR_CSTRORM_INTMEM +
4814 CSTORM_DEF_SB_HOST_SB_ADDR_C_OFFSET(func), U64_LO(section));
4815 REG_WR(bp, BAR_CSTRORM_INTMEM +
4816 ((CSTORM_DEF_SB_HOST_SB_ADDR_C_OFFSET(func)) + 4),
4817 U64_HI(section));
4818 REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_CSB_FUNC_OFF +
4819 CSTORM_DEF_SB_HOST_STATUS_BLOCK_C_OFFSET(func), func);
4821 for (index = 0; index < HC_CSTORM_DEF_SB_NUM_INDICES; index++)
4822 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4823 CSTORM_DEF_SB_HC_DISABLE_C_OFFSET(func, index), 1);
4825 /* TSTORM */
4826 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4827 t_def_status_block);
4828 def_sb->t_def_status_block.status_block_id = sb_id;
4830 REG_WR(bp, BAR_TSTRORM_INTMEM +
4831 TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
4832 REG_WR(bp, BAR_TSTRORM_INTMEM +
4833 ((TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
4834 U64_HI(section));
4835 REG_WR8(bp, BAR_TSTRORM_INTMEM + DEF_TSB_FUNC_OFF +
4836 TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4838 for (index = 0; index < HC_TSTORM_DEF_SB_NUM_INDICES; index++)
4839 REG_WR16(bp, BAR_TSTRORM_INTMEM +
4840 TSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
4842 /* XSTORM */
4843 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4844 x_def_status_block);
4845 def_sb->x_def_status_block.status_block_id = sb_id;
4847 REG_WR(bp, BAR_XSTRORM_INTMEM +
4848 XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
4849 REG_WR(bp, BAR_XSTRORM_INTMEM +
4850 ((XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
4851 U64_HI(section));
4852 REG_WR8(bp, BAR_XSTRORM_INTMEM + DEF_XSB_FUNC_OFF +
4853 XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4855 for (index = 0; index < HC_XSTORM_DEF_SB_NUM_INDICES; index++)
4856 REG_WR16(bp, BAR_XSTRORM_INTMEM +
4857 XSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
4859 bp->stats_pending = 0;
4860 bp->set_mac_pending = 0;
4862 bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
4863 }
4865 static void bnx2x_update_coalesce(struct bnx2x *bp)
4866 {
4867 int port = BP_PORT(bp);
4868 int i;
4870 for_each_queue(bp, i) {
4871 int sb_id = bp->fp[i].sb_id;
4873 /* HC_INDEX_U_ETH_RX_CQ_CONS */
4874 REG_WR8(bp, BAR_CSTRORM_INTMEM +
4875 CSTORM_SB_HC_TIMEOUT_U_OFFSET(port, sb_id,
4876 U_SB_ETH_RX_CQ_INDEX),
4877 bp->rx_ticks/12);
4878 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4879 CSTORM_SB_HC_DISABLE_U_OFFSET(port, sb_id,
4880 U_SB_ETH_RX_CQ_INDEX),
4881 (bp->rx_ticks/12) ? 0 : 1);
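/*
 * bp->rx_ticks/bp->tx_ticks are in microseconds while the timeout
 * registers appear to count in 12us hardware ticks, hence the /12;
 * if the value rounds down to zero the index is left disabled via the
 * corresponding CSTORM_SB_HC_DISABLE write.
 */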
4883 /* HC_INDEX_C_ETH_TX_CQ_CONS */
4884 REG_WR8(bp, BAR_CSTRORM_INTMEM +
4885 CSTORM_SB_HC_TIMEOUT_C_OFFSET(port, sb_id,
4886 C_SB_ETH_TX_CQ_INDEX),
4887 bp->tx_ticks/12);
4888 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4889 CSTORM_SB_HC_DISABLE_C_OFFSET(port, sb_id,
4890 C_SB_ETH_TX_CQ_INDEX),
4891 (bp->tx_ticks/12) ? 0 : 1);
4892 }
4893 }
4895 static inline void bnx2x_free_tpa_pool(struct bnx2x *bp,
4896 struct bnx2x_fastpath *fp, int last)
4897 {
4898 int i;
4900 for (i = 0; i < last; i++) {
4901 struct sw_rx_bd *rx_buf = &(fp->tpa_pool[i]);
4902 struct sk_buff *skb = rx_buf->skb;
4904 if (skb == NULL) {
4905 DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i);
4906 continue;
4907 }
4909 if (fp->tpa_state[i] == BNX2X_TPA_START)
4910 pci_unmap_single(bp->pdev,
4911 pci_unmap_addr(rx_buf, mapping),
4912 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
4914 dev_kfree_skb(skb);
4915 rx_buf->skb = NULL;
4916 }
4917 }
4919 static void bnx2x_init_rx_rings(struct bnx2x *bp)
4920 {
4921 int func = BP_FUNC(bp);
4922 int max_agg_queues = CHIP_IS_E1(bp) ? ETH_MAX_AGGREGATION_QUEUES_E1 :
4923 ETH_MAX_AGGREGATION_QUEUES_E1H;
4924 u16 ring_prod, cqe_ring_prod;
4925 int i, j;
4927 bp->rx_buf_size = bp->dev->mtu + ETH_OVREHEAD + BNX2X_RX_ALIGN;
4928 DP(NETIF_MSG_IFUP,
4929 "mtu %d rx_buf_size %d\n", bp->dev->mtu, bp->rx_buf_size);
4931 if (bp->flags & TPA_ENABLE_FLAG) {
4933 for_each_rx_queue(bp, j) {
4934 struct bnx2x_fastpath *fp = &bp->fp[j];
4936 for (i = 0; i < max_agg_queues; i++) {
4937 fp->tpa_pool[i].skb =
4938 netdev_alloc_skb(bp->dev, bp->rx_buf_size);
4939 if (!fp->tpa_pool[i].skb) {
4940 BNX2X_ERR("Failed to allocate TPA "
4941 "skb pool for queue[%d] - "
4942 "disabling TPA on this "
4943 "queue!\n", j);
4944 bnx2x_free_tpa_pool(bp, fp, i);
4945 fp->disable_tpa = 1;
4946 break;
4947 }
4948 pci_unmap_addr_set((struct sw_rx_bd *)
4949 &bp->fp->tpa_pool[i],
4950 mapping, 0);
4951 fp->tpa_state[i] = BNX2X_TPA_STOP;
4952 }
4953 }
4954 }
4956 for_each_rx_queue(bp, j) {
4957 struct bnx2x_fastpath *fp = &bp->fp[j];
4960 fp->rx_cons_sb = BNX2X_RX_SB_INDEX;
4961 fp->rx_bd_cons_sb = BNX2X_RX_SB_BD_INDEX;
4963 /* Mark queue as Rx */
4964 fp->is_rx_queue = 1;
4966 /* "next page" elements initialization */
4968 for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
4969 struct eth_rx_sge *sge;
4971 sge = &fp->rx_sge_ring[RX_SGE_CNT * i - 2];
4972 sge->addr_hi =
4973 cpu_to_le32(U64_HI(fp->rx_sge_mapping +
4974 BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
4975 sge->addr_lo =
4976 cpu_to_le32(U64_LO(fp->rx_sge_mapping +
4977 BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
4978 }
4980 bnx2x_init_sge_ring_bit_mask(fp);
4983 for (i = 1; i <= NUM_RX_RINGS; i++) {
4984 struct eth_rx_bd *rx_bd;
4986 rx_bd = &fp->rx_desc_ring[RX_DESC_CNT * i - 2];
4987 rx_bd->addr_hi =
4988 cpu_to_le32(U64_HI(fp->rx_desc_mapping +
4989 BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
4990 rx_bd->addr_lo =
4991 cpu_to_le32(U64_LO(fp->rx_desc_mapping +
4992 BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
4993 }
4996 for (i = 1; i <= NUM_RCQ_RINGS; i++) {
4997 struct eth_rx_cqe_next_page *nextpg;
4999 nextpg = (struct eth_rx_cqe_next_page *)
5000 &fp->rx_comp_ring[RCQ_DESC_CNT * i - 1];
5001 nextpg->addr_hi =
5002 cpu_to_le32(U64_HI(fp->rx_comp_mapping +
5003 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
5004 nextpg->addr_lo =
5005 cpu_to_le32(U64_LO(fp->rx_comp_mapping +
5006 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
5007 }
5009 /* Allocate SGEs and initialize the ring elements */
5010 for (i = 0, ring_prod = 0;
5011 i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {
5013 if (bnx2x_alloc_rx_sge(bp, fp, ring_prod) < 0) {
5014 BNX2X_ERR("was only able to allocate "
5015 "%d rx sges\n", i);
5016 BNX2X_ERR("disabling TPA for queue[%d]\n", j);
5017 /* Cleanup already allocated elements */
5018 bnx2x_free_rx_sge_range(bp, fp, ring_prod);
5019 bnx2x_free_tpa_pool(bp, fp, max_agg_queues);
5020 fp->disable_tpa = 1;
5021 ring_prod = 0;
5022 break;
5023 }
5024 ring_prod = NEXT_SGE_IDX(ring_prod);
5025 }
5026 fp->rx_sge_prod = ring_prod;
5028 /* Allocate BDs and initialize BD ring */
5029 fp->rx_comp_cons = 0;
5030 cqe_ring_prod = ring_prod = 0;
5031 for (i = 0; i < bp->rx_ring_size; i++) {
5032 if (bnx2x_alloc_rx_skb(bp, fp, ring_prod) < 0) {
5033 BNX2X_ERR("was only able to allocate "
5034 "%d rx skbs on queue[%d]\n", i, j);
5035 fp->eth_q_stats.rx_skb_alloc_failed++;
5036 break;
5037 }
5038 ring_prod = NEXT_RX_IDX(ring_prod);
5039 cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
5040 WARN_ON(ring_prod <= i);
5041 }
5043 fp->rx_bd_prod = ring_prod;
5044 /* must not have more available CQEs than BDs */
5045 fp->rx_comp_prod = min((u16)(NUM_RCQ_RINGS*RCQ_DESC_CNT),
5046 cqe_ring_prod);
5047 fp->rx_pkt = fp->rx_calls = 0;
5049 /* Warning!
5050 * this will generate an interrupt (to the TSTORM)
5051 * must only be done after chip is initialized
5052 */
5053 bnx2x_update_rx_prod(bp, fp, ring_prod, fp->rx_comp_prod,
5054 fp->rx_sge_prod);
5055 if (j != 0)
5056 continue;
5058 REG_WR(bp, BAR_USTRORM_INTMEM +
5059 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
5060 U64_LO(fp->rx_comp_mapping));
5061 REG_WR(bp, BAR_USTRORM_INTMEM +
5062 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
5063 U64_HI(fp->rx_comp_mapping));
5064 }
5065 }
5067 static void bnx2x_init_tx_ring(struct bnx2x *bp)
5068 {
5069 int i, j;
5071 for_each_tx_queue(bp, j) {
5072 struct bnx2x_fastpath *fp = &bp->fp[j];
5074 for (i = 1; i <= NUM_TX_RINGS; i++) {
5075 struct eth_tx_next_bd *tx_next_bd =
5076 &fp->tx_desc_ring[TX_DESC_CNT * i - 1].next_bd;
5078 tx_next_bd->addr_hi =
5079 cpu_to_le32(U64_HI(fp->tx_desc_mapping +
5080 BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
5081 tx_next_bd->addr_lo =
5082 cpu_to_le32(U64_LO(fp->tx_desc_mapping +
5083 BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
5084 }
5086 fp->tx_db.data.header.header = DOORBELL_HDR_DB_TYPE;
5087 fp->tx_db.data.zero_fill1 = 0;
5088 fp->tx_db.data.prod = 0;
5090 fp->tx_pkt_prod = 0;
5091 fp->tx_pkt_cons = 0;
5092 fp->tx_bd_prod = 0;
5093 fp->tx_bd_cons = 0;
5094 fp->tx_cons_sb = BNX2X_TX_SB_INDEX;
5095 fp->tx_pkt = 0;
5096 }
5098 /* clean tx statistics */
5099 for_each_rx_queue(bp, i)
5100 bnx2x_fp(bp, i, tx_pkt) = 0;
5101 }
5103 static void bnx2x_init_sp_ring(struct bnx2x *bp)
5104 {
5105 int func = BP_FUNC(bp);
5107 spin_lock_init(&bp->spq_lock);
5109 bp->spq_left = MAX_SPQ_PENDING;
5110 bp->spq_prod_idx = 0;
5111 bp->dsb_sp_prod = BNX2X_SP_DSB_INDEX;
5112 bp->spq_prod_bd = bp->spq;
5113 bp->spq_last_bd = bp->spq_prod_bd + MAX_SP_DESC_CNT;
5115 REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func),
5116 U64_LO(bp->spq_mapping));
5117 REG_WR(bp,
5118 XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func) + 4,
5119 U64_HI(bp->spq_mapping));
5121 REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PROD_OFFSET(func),
5122 bp->spq_prod_idx);
5123 }
5125 static void bnx2x_init_context(struct bnx2x *bp)
5126 {
5127 int i;
5129 for_each_rx_queue(bp, i) {
5130 struct eth_context *context = bnx2x_sp(bp, context[i].eth);
5131 struct bnx2x_fastpath *fp = &bp->fp[i];
5132 u8 cl_id = fp->cl_id;
5134 context->ustorm_st_context.common.sb_index_numbers =
5135 BNX2X_RX_SB_INDEX_NUM;
5136 context->ustorm_st_context.common.clientId = cl_id;
5137 context->ustorm_st_context.common.status_block_id = fp->sb_id;
5138 context->ustorm_st_context.common.flags =
5139 (USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_MC_ALIGNMENT |
5140 USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_STATISTICS);
5141 context->ustorm_st_context.common.statistics_counter_id =
5142 cl_id;
5143 context->ustorm_st_context.common.mc_alignment_log_size =
5144 BNX2X_RX_ALIGN_SHIFT;
5145 context->ustorm_st_context.common.bd_buff_size =
5146 bp->rx_buf_size;
5147 context->ustorm_st_context.common.bd_page_base_hi =
5148 U64_HI(fp->rx_desc_mapping);
5149 context->ustorm_st_context.common.bd_page_base_lo =
5150 U64_LO(fp->rx_desc_mapping);
5151 if (!fp->disable_tpa) {
5152 context->ustorm_st_context.common.flags |=
5153 USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_TPA;
5154 context->ustorm_st_context.common.sge_buff_size =
5155 (u16)min((u32)SGE_PAGE_SIZE*PAGES_PER_SGE,
5156 (u32)0xffff);
5157 context->ustorm_st_context.common.sge_page_base_hi =
5158 U64_HI(fp->rx_sge_mapping);
5159 context->ustorm_st_context.common.sge_page_base_lo =
5160 U64_LO(fp->rx_sge_mapping);
5162 context->ustorm_st_context.common.max_sges_for_packet =
5163 SGE_PAGE_ALIGN(bp->dev->mtu) >> SGE_PAGE_SHIFT;
5164 context->ustorm_st_context.common.max_sges_for_packet =
5165 ((context->ustorm_st_context.common.
5166 max_sges_for_packet + PAGES_PER_SGE - 1) &
5167 (~(PAGES_PER_SGE - 1))) >> PAGES_PER_SGE_SHIFT;
5168 }
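/*
 * max_sges_for_packet: an MTU-sized frame needs SGE_PAGE_ALIGN(mtu) >>
 * SGE_PAGE_SHIFT SGE pages; the second assignment then rounds that up
 * to whole groups of PAGES_PER_SGE, apparently because the FW hands
 * out SGEs in groups of that size when TPA aggregates a packet.
 */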
5170 context->ustorm_ag_context.cdu_usage =
5171 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
5172 CDU_REGION_NUMBER_UCM_AG,
5173 ETH_CONNECTION_TYPE);
5175 context->xstorm_ag_context.cdu_reserved =
5176 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
5177 CDU_REGION_NUMBER_XCM_AG,
5178 ETH_CONNECTION_TYPE);
5179 }
5181 for_each_tx_queue(bp, i) {
5182 struct bnx2x_fastpath *fp = &bp->fp[i];
5183 struct eth_context *context =
5184 bnx2x_sp(bp, context[i - bp->num_rx_queues].eth);
5186 context->cstorm_st_context.sb_index_number =
5187 C_SB_ETH_TX_CQ_INDEX;
5188 context->cstorm_st_context.status_block_id = fp->sb_id;
5190 context->xstorm_st_context.tx_bd_page_base_hi =
5191 U64_HI(fp->tx_desc_mapping);
5192 context->xstorm_st_context.tx_bd_page_base_lo =
5193 U64_LO(fp->tx_desc_mapping);
5194 context->xstorm_st_context.statistics_data = (fp->cl_id |
5195 XSTORM_ETH_ST_CONTEXT_STATISTICS_ENABLE);
5196 }
5197 }
5199 static void bnx2x_init_ind_table(struct bnx2x *bp)
5200 {
5201 int func = BP_FUNC(bp);
5202 int i;
5204 if (bp->multi_mode == ETH_RSS_MODE_DISABLED)
5205 return;
5207 DP(NETIF_MSG_IFUP,
5208 "Initializing indirection table multi_mode %d\n", bp->multi_mode);
5209 for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++)
5210 REG_WR8(bp, BAR_TSTRORM_INTMEM +
5211 TSTORM_INDIRECTION_TABLE_OFFSET(func) + i,
5212 bp->fp->cl_id + (i % bp->num_rx_queues));
5213 }
5215 static void bnx2x_set_client_config(struct bnx2x *bp)
5216 {
5217 struct tstorm_eth_client_config tstorm_client = {0};
5218 int port = BP_PORT(bp);
5219 int i;
5221 tstorm_client.mtu = bp->dev->mtu;
5222 tstorm_client.config_flags =
5223 (TSTORM_ETH_CLIENT_CONFIG_STATSITICS_ENABLE |
5224 TSTORM_ETH_CLIENT_CONFIG_E1HOV_REM_ENABLE);
5225 #ifdef BCM_VLAN
5226 if (bp->rx_mode && bp->vlgrp && (bp->flags & HW_VLAN_RX_FLAG)) {
5227 tstorm_client.config_flags |=
5228 TSTORM_ETH_CLIENT_CONFIG_VLAN_REM_ENABLE;
5229 DP(NETIF_MSG_IFUP, "vlan removal enabled\n");
5230 }
5231 #endif
5233 for_each_queue(bp, i) {
5234 tstorm_client.statistics_counter_id = bp->fp[i].cl_id;
5236 REG_WR(bp, BAR_TSTRORM_INTMEM +
5237 TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id),
5238 ((u32 *)&tstorm_client)[0]);
5239 REG_WR(bp, BAR_TSTRORM_INTMEM +
5240 TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id) + 4,
5241 ((u32 *)&tstorm_client)[1]);
5242 }
5244 DP(BNX2X_MSG_OFF, "tstorm_client: 0x%08x 0x%08x\n",
5245 ((u32 *)&tstorm_client)[0], ((u32 *)&tstorm_client)[1]);
5246 }
5248 static void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
5249 {
5250 struct tstorm_eth_mac_filter_config tstorm_mac_filter = {0};
5251 int mode = bp->rx_mode;
5252 int mask = (1 << BP_L_ID(bp));
5253 int func = BP_FUNC(bp);
5254 int port = BP_PORT(bp);
5255 int i;
5256 /* All but management unicast packets should pass to the host as well */
5257 u32 llh_mask =
5258 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_BRCST |
5259 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_MLCST |
5260 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_VLAN |
5261 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_NO_VLAN;
5263 DP(NETIF_MSG_IFUP, "rx mode %d mask 0x%x\n", mode, mask);
5265 switch (mode) {
5266 case BNX2X_RX_MODE_NONE: /* no Rx */
5267 tstorm_mac_filter.ucast_drop_all = mask;
5268 tstorm_mac_filter.mcast_drop_all = mask;
5269 tstorm_mac_filter.bcast_drop_all = mask;
5270 break;
5272 case BNX2X_RX_MODE_NORMAL:
5273 tstorm_mac_filter.bcast_accept_all = mask;
5274 break;
5276 case BNX2X_RX_MODE_ALLMULTI:
5277 tstorm_mac_filter.mcast_accept_all = mask;
5278 tstorm_mac_filter.bcast_accept_all = mask;
5279 break;
5281 case BNX2X_RX_MODE_PROMISC:
5282 tstorm_mac_filter.ucast_accept_all = mask;
5283 tstorm_mac_filter.mcast_accept_all = mask;
5284 tstorm_mac_filter.bcast_accept_all = mask;
5285 /* pass management unicast packets as well */
5286 llh_mask |= NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_UNCST;
5287 break;
5289 default:
5290 BNX2X_ERR("BAD rx mode (%d)\n", mode);
5291 break;
5292 }
5294 REG_WR(bp,
5295 (port ? NIG_REG_LLH1_BRB1_DRV_MASK : NIG_REG_LLH0_BRB1_DRV_MASK),
5296 llh_mask);
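/*
 * The llh_mask written above controls which traffic classes the NIG
 * LLH passes through to the BRB for this port: broadcast, multicast,
 * and VLAN/no-VLAN frames always, and unicast as well only in
 * promiscuous mode (management unicast is otherwise kept for the MCP).
 */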
5298 for (i = 0; i < sizeof(struct tstorm_eth_mac_filter_config)/4; i++) {
5299 REG_WR(bp, BAR_TSTRORM_INTMEM +
5300 TSTORM_MAC_FILTER_CONFIG_OFFSET(func) + i * 4,
5301 ((u32 *)&tstorm_mac_filter)[i]);
5303 /* DP(NETIF_MSG_IFUP, "tstorm_mac_filter[%d]: 0x%08x\n", i,
5304 ((u32 *)&tstorm_mac_filter)[i]); */
5305 }
5307 if (mode != BNX2X_RX_MODE_NONE)
5308 bnx2x_set_client_config(bp);
5309 }
5311 static void bnx2x_init_internal_common(struct bnx2x *bp)
5312 {
5313 int i;
5315 /* Zero this manually as its initialization is
5316 currently missing in the initTool */
5317 for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++)
5318 REG_WR(bp, BAR_USTRORM_INTMEM +
5319 USTORM_AGG_DATA_OFFSET + i * 4, 0);
5320 }
5322 static void bnx2x_init_internal_port(struct bnx2x *bp)
5323 {
5324 int port = BP_PORT(bp);
5326 REG_WR(bp,
5327 BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_U_OFFSET(port), BNX2X_BTR);
5328 REG_WR(bp,
5329 BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_C_OFFSET(port), BNX2X_BTR);
5330 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
5331 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
5332 }
5334 static void bnx2x_init_internal_func(struct bnx2x *bp)
5335 {
5336 struct tstorm_eth_function_common_config tstorm_config = {0};
5337 struct stats_indication_flags stats_flags = {0};
5338 int port = BP_PORT(bp);
5339 int func = BP_FUNC(bp);
5340 int i, j;
5341 u32 offset;
5342 u16 max_agg_size;
5344 if (is_multi(bp)) {
5345 tstorm_config.config_flags = MULTI_FLAGS(bp);
5346 tstorm_config.rss_result_mask = MULTI_MASK;
5347 }
5349 /* Enable TPA if needed */
5350 if (bp->flags & TPA_ENABLE_FLAG)
5351 tstorm_config.config_flags |=
5352 TSTORM_ETH_FUNCTION_COMMON_CONFIG_ENABLE_TPA;
5354 if (IS_E1HMF(bp))
5355 tstorm_config.config_flags |=
5356 TSTORM_ETH_FUNCTION_COMMON_CONFIG_E1HOV_IN_CAM;
5358 tstorm_config.leading_client_id = BP_L_ID(bp);
5360 REG_WR(bp, BAR_TSTRORM_INTMEM +
5361 TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(func),
5362 (*(u32 *)&tstorm_config));
5364 bp->rx_mode = BNX2X_RX_MODE_NONE; /* no rx until link is up */
5365 bnx2x_set_storm_rx_mode(bp);
5367 for_each_queue(bp, i) {
5368 u8 cl_id = bp->fp[i].cl_id;
5370 /* reset xstorm per client statistics */
5371 offset = BAR_XSTRORM_INTMEM +
5372 XSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
5373 for (j = 0;
5374 j < sizeof(struct xstorm_per_client_stats) / 4; j++)
5375 REG_WR(bp, offset + j*4, 0);
5377 /* reset tstorm per client statistics */
5378 offset = BAR_TSTRORM_INTMEM +
5379 TSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
5380 for (j = 0;
5381 j < sizeof(struct tstorm_per_client_stats) / 4; j++)
5382 REG_WR(bp, offset + j*4, 0);
5384 /* reset ustorm per client statistics */
5385 offset = BAR_USTRORM_INTMEM +
5386 USTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
5387 for (j = 0;
5388 j < sizeof(struct ustorm_per_client_stats) / 4; j++)
5389 REG_WR(bp, offset + j*4, 0);
5390 }
5392 /* Init statistics related context */
5393 stats_flags.collect_eth = 1;
5395 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func),
5396 ((u32 *)&stats_flags)[0]);
5397 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func) + 4,
5398 ((u32 *)&stats_flags)[1]);
5400 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func),
5401 ((u32 *)&stats_flags)[0]);
5402 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func) + 4,
5403 ((u32 *)&stats_flags)[1]);
5405 REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(func),
5406 ((u32 *)&stats_flags)[0]);
5407 REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(func) + 4,
5408 ((u32 *)&stats_flags)[1]);
5410 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func),
5411 ((u32 *)&stats_flags)[0]);
5412 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func) + 4,
5413 ((u32 *)&stats_flags)[1]);
5415 REG_WR(bp, BAR_XSTRORM_INTMEM +
5416 XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
5417 U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
5418 REG_WR(bp, BAR_XSTRORM_INTMEM +
5419 XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
5420 U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
5422 REG_WR(bp, BAR_TSTRORM_INTMEM +
5423 TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
5424 U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
5425 REG_WR(bp, BAR_TSTRORM_INTMEM +
5426 TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
5427 U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
5429 REG_WR(bp, BAR_USTRORM_INTMEM +
5430 USTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
5431 U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
5432 REG_WR(bp, BAR_USTRORM_INTMEM +
5433 USTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
5434 U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
5436 if (CHIP_IS_E1H(bp)) {
5437 REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNCTION_MODE_OFFSET,
5438 IS_E1HMF(bp));
5439 REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNCTION_MODE_OFFSET,
5440 IS_E1HMF(bp));
5441 REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNCTION_MODE_OFFSET,
5442 IS_E1HMF(bp));
5443 REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNCTION_MODE_OFFSET,
5444 IS_E1HMF(bp));
5446 REG_WR16(bp, BAR_XSTRORM_INTMEM + XSTORM_E1HOV_OFFSET(func),
5447 bp->e1hov);
5448 }
5450 /* Init CQ ring mapping and aggregation size, the FW limit is 8 frags */
5451 max_agg_size =
5452 min((u32)(min((u32)8, (u32)MAX_SKB_FRAGS) *
5453 SGE_PAGE_SIZE * PAGES_PER_SGE),
5454 (u32)0xffff);
5455 for_each_rx_queue(bp, i) {
5456 struct bnx2x_fastpath *fp = &bp->fp[i];
5458 REG_WR(bp, BAR_USTRORM_INTMEM +
5459 USTORM_CQE_PAGE_BASE_OFFSET(port, fp->cl_id),
5460 U64_LO(fp->rx_comp_mapping));
5461 REG_WR(bp, BAR_USTRORM_INTMEM +
5462 USTORM_CQE_PAGE_BASE_OFFSET(port, fp->cl_id) + 4,
5463 U64_HI(fp->rx_comp_mapping));
5465 /* Next page */
5466 REG_WR(bp, BAR_USTRORM_INTMEM +
5467 USTORM_CQE_PAGE_NEXT_OFFSET(port, fp->cl_id),
5468 U64_LO(fp->rx_comp_mapping + BCM_PAGE_SIZE));
5469 REG_WR(bp, BAR_USTRORM_INTMEM +
5470 USTORM_CQE_PAGE_NEXT_OFFSET(port, fp->cl_id) + 4,
5471 U64_HI(fp->rx_comp_mapping + BCM_PAGE_SIZE));
5473 REG_WR16(bp, BAR_USTRORM_INTMEM +
5474 USTORM_MAX_AGG_SIZE_OFFSET(port, fp->cl_id),
5475 max_agg_size);
5476 }
5478 /* dropless flow control */
5479 if (CHIP_IS_E1H(bp)) {
5480 struct ustorm_eth_rx_pause_data_e1h rx_pause = {0};
5482 rx_pause.bd_thr_low = 250;
5483 rx_pause.cqe_thr_low = 250;
5485 rx_pause.sge_thr_low = 0;
5486 rx_pause.bd_thr_high = 350;
5487 rx_pause.cqe_thr_high = 350;
5488 rx_pause.sge_thr_high = 0;
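/*
 * Dropless flow control thresholds: when the free BDs or CQEs on a
 * host ring fall below the *_thr_low watermarks the FW generates
 * pause, and releases it once the ring refills past *_thr_high. SGE
 * thresholds stay zero unless the queue actually has TPA enabled
 * (set just below).
 */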
5490 for_each_rx_queue(bp, i) {
5491 struct bnx2x_fastpath *fp = &bp->fp[i];
5493 if (!fp->disable_tpa) {
5494 rx_pause.sge_thr_low = 150;
5495 rx_pause.sge_thr_high = 250;
5496 }
5499 offset = BAR_USTRORM_INTMEM +
5500 USTORM_ETH_RING_PAUSE_DATA_OFFSET(port,
5501 fp->cl_id);
5502 for (j = 0;
5503 j < sizeof(struct ustorm_eth_rx_pause_data_e1h)/4;
5504 j++)
5505 REG_WR(bp, offset + j*4,
5506 ((u32 *)&rx_pause)[j]);
5507 }
5508 }
5510 memset(&(bp->cmng), 0, sizeof(struct cmng_struct_per_port));
5512 /* Init rate shaping and fairness contexts */
5513 if (IS_E1HMF(bp)) {
5514 int vn;
5516 /* During init there is no active link
5517 Until link is up, set link rate to 10Gbps */
5518 bp->link_vars.line_speed = SPEED_10000;
5519 bnx2x_init_port_minmax(bp);
5521 bnx2x_calc_vn_weight_sum(bp);
5523 for (vn = VN_0; vn < E1HVN_MAX; vn++)
5524 bnx2x_init_vn_minmax(bp, 2*vn + port);
5526 /* Enable rate shaping and fairness */
5527 bp->cmng.flags.cmng_enables =
5528 CMNG_FLAGS_PER_PORT_RATE_SHAPING_VN;
5529 if (bp->vn_weight_sum)
5530 bp->cmng.flags.cmng_enables |=
5531 CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
5532 else
5533 DP(NETIF_MSG_IFUP, "All MIN values are zeroes"
5534 " fairness will be disabled\n");
5535 } else {
5536 /* rate shaping and fairness are disabled */
5537 DP(NETIF_MSG_IFUP,
5538 "single function mode minmax will be disabled\n");
5539 }
5542 /* Store it to internal memory */
5543 if (bp->port.pmf)
5544 for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++)
5545 REG_WR(bp, BAR_XSTRORM_INTMEM +
5546 XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i * 4,
5547 ((u32 *)(&bp->cmng))[i]);
5548 }
5550 static void bnx2x_init_internal(struct bnx2x *bp, u32 load_code)
5551 {
5552 switch (load_code) {
5553 case FW_MSG_CODE_DRV_LOAD_COMMON:
5554 bnx2x_init_internal_common(bp);
5555 /* no break */
5557 case FW_MSG_CODE_DRV_LOAD_PORT:
5558 bnx2x_init_internal_port(bp);
5559 /* no break */
5561 case FW_MSG_CODE_DRV_LOAD_FUNCTION:
5562 bnx2x_init_internal_func(bp);
5563 break;
5565 default:
5566 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
5567 break;
5568 }
5569 }
5571 static void bnx2x_nic_init(struct bnx2x *bp, u32 load_code)
5572 {
5573 int i;
5575 for_each_queue(bp, i) {
5576 struct bnx2x_fastpath *fp = &bp->fp[i];
5578 fp->bp = bp;
5579 fp->state = BNX2X_FP_STATE_CLOSED;
5580 fp->index = i;
5581 fp->cl_id = BP_L_ID(bp) + i;
5582 fp->sb_id = fp->cl_id;
5583 /* Suitable Rx and Tx SBs are served by the same client */
5584 if (i >= bp->num_rx_queues)
5585 fp->cl_id -= bp->num_rx_queues;
5586 DP(NETIF_MSG_IFUP,
5587 "queue[%d]: bnx2x_init_sb(%p,%p) cl_id %d sb %d\n",
5588 i, bp, fp->status_blk, fp->cl_id, fp->sb_id);
5589 bnx2x_init_sb(bp, fp->status_blk, fp->status_blk_mapping,
5590 fp->sb_id);
5591 bnx2x_update_fpsb_idx(fp);
5592 }
5594 /* ensure status block indices were read */
5595 rmb();
5598 bnx2x_init_def_sb(bp, bp->def_status_blk, bp->def_status_blk_mapping,
5599 DEF_SB_ID);
5600 bnx2x_update_dsb_idx(bp);
5601 bnx2x_update_coalesce(bp);
5602 bnx2x_init_rx_rings(bp);
5603 bnx2x_init_tx_ring(bp);
5604 bnx2x_init_sp_ring(bp);
5605 bnx2x_init_context(bp);
5606 bnx2x_init_internal(bp, load_code);
5607 bnx2x_init_ind_table(bp);
5608 bnx2x_stats_init(bp);
5610 /* At this point, we are ready for interrupts */
5611 atomic_set(&bp->intr_sem, 0);
5613 /* flush all before enabling interrupts */
5614 mb();
5615 mmiowb();
5617 bnx2x_int_enable(bp);
5619 /* Check for SPIO5 */
5620 bnx2x_attn_int_deasserted0(bp,
5621 REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + BP_PORT(bp)*4) &
5622 AEU_INPUTS_ATTN_BITS_SPIO5);
5623 }
5625 /* end of nic init */
5627 /*
5628 * gzip service functions
5629 */
5631 static int bnx2x_gunzip_init(struct bnx2x *bp)
5632 {
5633 bp->gunzip_buf = pci_alloc_consistent(bp->pdev, FW_BUF_SIZE,
5634 &bp->gunzip_mapping);
5635 if (bp->gunzip_buf == NULL)
5636 goto gunzip_nomem1;
5638 bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL);
5639 if (bp->strm == NULL)
5640 goto gunzip_nomem2;
5642 bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(),
5643 GFP_KERNEL);
5644 if (bp->strm->workspace == NULL)
5645 goto gunzip_nomem3;
5647 return 0;
5649 gunzip_nomem3:
5650 kfree(bp->strm);
5651 bp->strm = NULL;
5653 gunzip_nomem2:
5654 pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
5655 bp->gunzip_mapping);
5656 bp->gunzip_buf = NULL;
5658 gunzip_nomem1:
5659 printk(KERN_ERR PFX "%s: Cannot allocate firmware buffer for"
5660 " un-compression\n", bp->dev->name);
5661 return -ENOMEM;
5662 }
5664 static void bnx2x_gunzip_end(struct bnx2x *bp)
5665 {
5666 kfree(bp->strm->workspace);
5668 kfree(bp->strm);
5669 bp->strm = NULL;
5671 if (bp->gunzip_buf) {
5672 pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
5673 bp->gunzip_mapping);
5674 bp->gunzip_buf = NULL;
5675 }
5676 }
5678 static int bnx2x_gunzip(struct bnx2x *bp, const u8 *zbuf, int len)
5679 {
5680 int n, rc;
5682 /* check gzip header */
5683 if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED)) {
5684 BNX2X_ERR("Bad gzip header\n");
5685 return -EINVAL;
5686 }
5688 n = 10;
5690 #define FNAME 0x8
5692 if (zbuf[3] & FNAME)
5693 while ((zbuf[n++] != 0) && (n < len));
5695 bp->strm->next_in = (typeof(bp->strm->next_in))zbuf + n;
5696 bp->strm->avail_in = len - n;
5697 bp->strm->next_out = bp->gunzip_buf;
5698 bp->strm->avail_out = FW_BUF_SIZE;
5700 rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
5701 if (rc != Z_OK)
5702 return rc;
5704 rc = zlib_inflate(bp->strm, Z_FINISH);
5705 if ((rc != Z_OK) && (rc != Z_STREAM_END))
5706 printk(KERN_ERR PFX "%s: Firmware decompression error: %s\n",
5707 bp->dev->name, bp->strm->msg);
5709 bp->gunzip_outlen = (FW_BUF_SIZE - bp->strm->avail_out);
5710 if (bp->gunzip_outlen & 0x3)
5711 printk(KERN_ERR PFX "%s: Firmware decompression error:"
5712 " gunzip_outlen (%d) not aligned\n",
5713 bp->dev->name, bp->gunzip_outlen);
5714 bp->gunzip_outlen >>= 2;
5716 zlib_inflateEnd(bp->strm);
5718 if (rc == Z_STREAM_END)
5719 return 0;
5720 else
5721 return rc;
5722 }
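/*
 * bnx2x_gunzip() leaves the inflated image in bp->gunzip_buf and its
 * length, in 32-bit words, in bp->gunzip_outlen. The -MAX_WBITS
 * argument to zlib_inflateInit2() selects a raw deflate stream, which
 * is why the gzip header (and optional FNAME field) is skipped by
 * hand before inflating.
 */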
5724 /* nic load/unload */
5726 /*
5727 * General service functions
5728 */
5730 /* send a NIG loopback debug packet */
5731 static void bnx2x_lb_pckt(struct bnx2x *bp)
5732 {
5733 u32 wb_write[3];
5735 /* Ethernet source and destination addresses */
5736 wb_write[0] = 0x55555555;
5737 wb_write[1] = 0x55555555;
5738 wb_write[2] = 0x20; /* SOP */
5739 REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
5741 /* NON-IP protocol */
5742 wb_write[0] = 0x09000000;
5743 wb_write[1] = 0x55555555;
5744 wb_write[2] = 0x10; /* EOP, eop_bvalid = 0 */
5745 REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
5746 }
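/*
 * Each call above emits one hand-crafted 16-byte debug frame into the
 * NIG loopback interface: the first DMAE write carries the Ethernet
 * addresses with the SOP flag (0x20), the second a non-IP payload word
 * with the EOP flag (0x10), so the packet's progress can be observed
 * in the BRB/PRS counters polled by bnx2x_int_mem_test().
 */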
5748 /* some of the internal memories
5749 * are not directly readable from the driver
5750 * to test them we send debug packets
5751 */
5752 static int bnx2x_int_mem_test(struct bnx2x *bp)
5753 {
5754 int factor;
5755 int count, i;
5756 u32 val = 0;
5758 if (CHIP_REV_IS_FPGA(bp))
5759 factor = 120;
5760 else if (CHIP_REV_IS_EMUL(bp))
5761 factor = 200;
5762 else
5763 factor = 1;
5765 DP(NETIF_MSG_HW, "start part1\n");
5767 /* Disable inputs of parser neighbor blocks */
5768 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
5769 REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
5770 REG_WR(bp, CFC_REG_DEBUG0, 0x1);
5771 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
5773 /* Write 0 to parser credits for CFC search request */
5774 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
5776 /* send Ethernet packet */
5777 bnx2x_lb_pckt(bp);
5779 /* TODO do i reset NIG statistic? */
5780 /* Wait until NIG register shows 1 packet of size 0x10 */
5781 count = 1000 * factor;
5782 while (count) {
5784 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5785 val = *bnx2x_sp(bp, wb_data[0]);
5786 if (val == 0x10)
5787 break;
5789 msleep(10);
5790 count--;
5791 }
5792 if (val != 0x10) {
5793 BNX2X_ERR("NIG timeout val = 0x%x\n", val);
5794 return -1;
5795 }
5797 /* Wait until PRS register shows 1 packet */
5798 count = 1000 * factor;
5799 while (count) {
5800 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5801 if (val == 1)
5802 break;
5804 msleep(10);
5805 count--;
5806 }
5807 if (val != 0x1) {
5808 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
5809 return -2;
5810 }
5812 /* Reset and init BRB, PRS */
5813 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
5814 msleep(50);
5815 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
5816 msleep(50);
5817 bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
5818 bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
5820 DP(NETIF_MSG_HW, "part2\n");
5822 /* Disable inputs of parser neighbor blocks */
5823 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
5824 REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
5825 REG_WR(bp, CFC_REG_DEBUG0, 0x1);
5826 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
5828 /* Write 0 to parser credits for CFC search request */
5829 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
5831 /* send 10 Ethernet packets */
5832 for (i = 0; i < 10; i++)
5833 bnx2x_lb_pckt(bp);
5835 /* Wait until NIG register shows 10 + 1
5836 packets of size 11*0x10 = 0xb0 */
5837 count = 1000 * factor;
5838 while (count) {
5840 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5841 val = *bnx2x_sp(bp, wb_data[0]);
5842 if (val == 0xb0)
5843 break;
5845 msleep(10);
5846 count--;
5847 }
5848 if (val != 0xb0) {
5849 BNX2X_ERR("NIG timeout val = 0x%x\n", val);
5850 return -3;
5851 }
5853 /* Wait until PRS register shows 2 packets */
5854 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5855 if (val != 2)
5856 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
5858 /* Write 1 to parser credits for CFC search request */
5859 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x1);
5861 /* Wait until PRS register shows 3 packets */
5862 msleep(10 * factor);
5863 /* Wait until NIG register shows 1 packet of size 0x10 */
5864 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5865 if (val != 3)
5866 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
5868 /* clear NIG EOP FIFO */
5869 for (i = 0; i < 11; i++)
5870 REG_RD(bp, NIG_REG_INGRESS_EOP_LB_FIFO);
5871 val = REG_RD(bp, NIG_REG_INGRESS_EOP_LB_EMPTY);
5872 if (val != 1) {
5873 BNX2X_ERR("clear of NIG failed\n");
5874 return -4;
5875 }
5877 /* Reset and init BRB, PRS, NIG */
5878 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
5879 msleep(50);
5880 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
5881 msleep(50);
5882 bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
5883 bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
5885 /* set NIC mode */
5886 REG_WR(bp, PRS_REG_NIC_MODE, 1);
5889 /* Enable inputs of parser neighbor blocks */
5890 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x7fffffff);
5891 REG_WR(bp, TCM_REG_PRS_IFEN, 0x1);
5892 REG_WR(bp, CFC_REG_DEBUG0, 0x0);
5893 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x1);
5895 DP(NETIF_MSG_HW, "done\n");
5897 return 0;
5898 }
5900 static void enable_blocks_attention(struct bnx2x *bp)
5901 {
5902 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
5903 REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0);
5904 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
5905 REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
5906 REG_WR(bp, QM_REG_QM_INT_MASK, 0);
5907 REG_WR(bp, TM_REG_TM_INT_MASK, 0);
5908 REG_WR(bp, XSDM_REG_XSDM_INT_MASK_0, 0);
5909 REG_WR(bp, XSDM_REG_XSDM_INT_MASK_1, 0);
5910 REG_WR(bp, XCM_REG_XCM_INT_MASK, 0);
5911 /* REG_WR(bp, XSEM_REG_XSEM_INT_MASK_0, 0); */
5912 /* REG_WR(bp, XSEM_REG_XSEM_INT_MASK_1, 0); */
5913 REG_WR(bp, USDM_REG_USDM_INT_MASK_0, 0);
5914 REG_WR(bp, USDM_REG_USDM_INT_MASK_1, 0);
5915 REG_WR(bp, UCM_REG_UCM_INT_MASK, 0);
5916 /* REG_WR(bp, USEM_REG_USEM_INT_MASK_0, 0); */
5917 /* REG_WR(bp, USEM_REG_USEM_INT_MASK_1, 0); */
5918 REG_WR(bp, GRCBASE_UPB + PB_REG_PB_INT_MASK, 0);
5919 REG_WR(bp, CSDM_REG_CSDM_INT_MASK_0, 0);
5920 REG_WR(bp, CSDM_REG_CSDM_INT_MASK_1, 0);
5921 REG_WR(bp, CCM_REG_CCM_INT_MASK, 0);
5922 /* REG_WR(bp, CSEM_REG_CSEM_INT_MASK_0, 0); */
5923 /* REG_WR(bp, CSEM_REG_CSEM_INT_MASK_1, 0); */
5924 if (CHIP_REV_IS_FPGA(bp))
5925 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x580000);
5927 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x480000);
5928 REG_WR(bp, TSDM_REG_TSDM_INT_MASK_0, 0);
5929 REG_WR(bp, TSDM_REG_TSDM_INT_MASK_1, 0);
5930 REG_WR(bp, TCM_REG_TCM_INT_MASK, 0);
5931 /* REG_WR(bp, TSEM_REG_TSEM_INT_MASK_0, 0); */
5932 /* REG_WR(bp, TSEM_REG_TSEM_INT_MASK_1, 0); */
5933 REG_WR(bp, CDU_REG_CDU_INT_MASK, 0);
5934 REG_WR(bp, DMAE_REG_DMAE_INT_MASK, 0);
5935 /* REG_WR(bp, MISC_REG_MISC_INT_MASK, 0); */
5936 REG_WR(bp, PBF_REG_PBF_INT_MASK, 0X18); /* bit 3,4 masked */
5937 }
5940 static void bnx2x_reset_common(struct bnx2x *bp)
5941 {
5942 /* reset_common */
5943 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
5944 0xd3ffff7f);
5945 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, 0x1403);
5946 }
5948 static void bnx2x_init_pxp(struct bnx2x *bp)
5949 {
5950 u16 devctl;
5951 int r_order, w_order;
5953 pci_read_config_word(bp->pdev,
5954 bp->pcie_cap + PCI_EXP_DEVCTL, &devctl);
5955 DP(NETIF_MSG_HW, "read 0x%x from devctl\n", devctl);
5956 w_order = ((devctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5);
5957 if (bp->mrrs == -1)
5958 r_order = ((devctl & PCI_EXP_DEVCTL_READRQ) >> 12);
5959 else {
5960 DP(NETIF_MSG_HW, "force read order to %d\n", bp->mrrs);
5961 r_order = bp->mrrs;
5962 }
5964 bnx2x_init_pxp_arb(bp, r_order, w_order);
5965 }
5967 static void bnx2x_setup_fan_failure_detection(struct bnx2x *bp)
5968 {
5969 int is_required = 0;
5970 u32 val;
5971 int port;
5973 val = SHMEM_RD(bp, dev_info.shared_hw_config.config2) &
5974 SHARED_HW_CFG_FAN_FAILURE_MASK;
5976 if (val == SHARED_HW_CFG_FAN_FAILURE_ENABLED)
5977 is_required = 1;
5979 /*
5980 * The fan failure mechanism is usually related to the PHY type since
5981 * the power consumption of the board is affected by the PHY. Currently,
5982 * fan is required for most designs with SFX7101, BCM8727 and BCM8481.
5983 */
5984 else if (val == SHARED_HW_CFG_FAN_FAILURE_PHY_TYPE)
5985 for (port = PORT_0; port < PORT_MAX; port++) {
5986 u32 phy_type =
5987 SHMEM_RD(bp, dev_info.port_hw_config[port].
5988 external_phy_config) &
5989 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
5990 is_required |=
5991 ((phy_type ==
5992 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101) ||
5993 (phy_type ==
5994 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727) ||
5995 (phy_type ==
5996 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481));
5997 }
5999 DP(NETIF_MSG_HW, "fan detection setting: %d\n", is_required);
6001 if (is_required == 0)
6002 return;
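/*
 * Fan failure is signalled on SPIO5: the pin is floated as an input,
 * latched active-low through MISC_REG_SPIO_INT, and routed to the IGU
 * via MISC_REG_SPIO_EVENT_EN so a stopped fan raises the attention
 * checked as AEU_INPUTS_ATTN_BITS_SPIO5 in bnx2x_nic_init().
 */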
6004 /* Fan failure is indicated by SPIO 5 */
6005 bnx2x_set_spio(bp, MISC_REGISTERS_SPIO_5,
6006 MISC_REGISTERS_SPIO_INPUT_HI_Z);
6008 /* set to active low mode */
6009 val = REG_RD(bp, MISC_REG_SPIO_INT);
6010 val |= ((1 << MISC_REGISTERS_SPIO_5) <<
6011 MISC_REGISTERS_SPIO_INT_OLD_SET_POS);
6012 REG_WR(bp, MISC_REG_SPIO_INT, val);
6014 /* enable interrupt to signal the IGU */
6015 val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN);
6016 val |= (1 << MISC_REGISTERS_SPIO_5);
6017 REG_WR(bp, MISC_REG_SPIO_EVENT_EN, val);
6018 }
6020 static int bnx2x_init_common(struct bnx2x *bp)
6021 {
6022 u32 val, i;
6024 DP(BNX2X_MSG_MCP, "starting common init func %d\n", BP_FUNC(bp));
6026 bnx2x_reset_common(bp);
6027 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0xffffffff);
6028 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, 0xfffc);
6030 bnx2x_init_block(bp, MISC_BLOCK, COMMON_STAGE);
6031 if (CHIP_IS_E1H(bp))
6032 REG_WR(bp, MISC_REG_E1HMF_MODE, IS_E1HMF(bp));
6034 REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x100);
6035 msleep(30);
6036 REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x0);
6038 bnx2x_init_block(bp, PXP_BLOCK, COMMON_STAGE);
6039 if (CHIP_IS_E1(bp)) {
6040 /* enable HW interrupt from PXP on USDM overflow
6041 bit 16 on INT_MASK_0 */
6042 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
6043 }
6045 bnx2x_init_block(bp, PXP2_BLOCK, COMMON_STAGE);
6048 #ifdef __BIG_ENDIAN
6049 REG_WR(bp, PXP2_REG_RQ_QM_ENDIAN_M, 1);
6050 REG_WR(bp, PXP2_REG_RQ_TM_ENDIAN_M, 1);
6051 REG_WR(bp, PXP2_REG_RQ_SRC_ENDIAN_M, 1);
6052 REG_WR(bp, PXP2_REG_RQ_CDU_ENDIAN_M, 1);
6053 REG_WR(bp, PXP2_REG_RQ_DBG_ENDIAN_M, 1);
6054 /* make sure this value is 0 */
6055 REG_WR(bp, PXP2_REG_RQ_HC_ENDIAN_M, 0);
6057 /* REG_WR(bp, PXP2_REG_RD_PBF_SWAP_MODE, 1); */
6058 REG_WR(bp, PXP2_REG_RD_QM_SWAP_MODE, 1);
6059 REG_WR(bp, PXP2_REG_RD_TM_SWAP_MODE, 1);
6060 REG_WR(bp, PXP2_REG_RD_SRC_SWAP_MODE, 1);
6061 REG_WR(bp, PXP2_REG_RD_CDURD_SWAP_MODE, 1);
6062 #endif
6064 REG_WR(bp, PXP2_REG_RQ_CDU_P_SIZE, 2);
6065 #ifdef BCM_ISCSI
6066 REG_WR(bp, PXP2_REG_RQ_TM_P_SIZE, 5);
6067 REG_WR(bp, PXP2_REG_RQ_QM_P_SIZE, 5);
6068 REG_WR(bp, PXP2_REG_RQ_SRC_P_SIZE, 5);
6069 #endif
6071 if (CHIP_REV_IS_FPGA(bp) && CHIP_IS_E1H(bp))
6072 REG_WR(bp, PXP2_REG_PGL_TAGS_LIMIT, 0x1);
6074 /* let the HW do it's magic ... */
6075 msleep(100);
6076 /* finish PXP init */
6077 val = REG_RD(bp, PXP2_REG_RQ_CFG_DONE);
6078 if (val != 1) {
6079 BNX2X_ERR("PXP2 CFG failed\n");
6080 return -EBUSY;
6081 }
6082 val = REG_RD(bp, PXP2_REG_RD_INIT_DONE);
6083 if (val != 1) {
6084 BNX2X_ERR("PXP2 RD_INIT failed\n");
6085 return -EBUSY;
6086 }
6088 REG_WR(bp, PXP2_REG_RQ_DISABLE_INPUTS, 0);
6089 REG_WR(bp, PXP2_REG_RD_DISABLE_INPUTS, 0);
6091 bnx2x_init_block(bp, DMAE_BLOCK, COMMON_STAGE);
6093 /* clean the DMAE memory */
6094 bp->dmae_ready = 1;
6095 bnx2x_init_fill(bp, TSEM_REG_PRAM, 0, 8);
6097 bnx2x_init_block(bp, TCM_BLOCK, COMMON_STAGE);
6098 bnx2x_init_block(bp, UCM_BLOCK, COMMON_STAGE);
6099 bnx2x_init_block(bp, CCM_BLOCK, COMMON_STAGE);
6100 bnx2x_init_block(bp, XCM_BLOCK, COMMON_STAGE);
6102 bnx2x_read_dmae(bp, XSEM_REG_PASSIVE_BUFFER, 3);
6103 bnx2x_read_dmae(bp, CSEM_REG_PASSIVE_BUFFER, 3);
6104 bnx2x_read_dmae(bp, TSEM_REG_PASSIVE_BUFFER, 3);
6105 bnx2x_read_dmae(bp, USEM_REG_PASSIVE_BUFFER, 3);
6107 bnx2x_init_block(bp, QM_BLOCK, COMMON_STAGE);
6108 /* soft reset pulse */
6109 REG_WR(bp, QM_REG_SOFT_RESET, 1);
6110 REG_WR(bp, QM_REG_SOFT_RESET, 0);
6112 #ifdef BCM_ISCSI
6113 bnx2x_init_block(bp, TIMERS_BLOCK, COMMON_STAGE);
6114 #endif
6116 bnx2x_init_block(bp, DQ_BLOCK, COMMON_STAGE);
6117 REG_WR(bp, DORQ_REG_DPM_CID_OFST, BCM_PAGE_SHIFT);
6118 if (!CHIP_REV_IS_SLOW(bp)) {
6119 /* enable hw interrupt from doorbell Q */
6120 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
6121 }
6123 bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
6124 bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
6125 REG_WR(bp, PRS_REG_A_PRSU_20, 0xf);
6126 /* set NIC mode */
6127 REG_WR(bp, PRS_REG_NIC_MODE, 1);
6128 if (CHIP_IS_E1H(bp))
6129 REG_WR(bp, PRS_REG_E1HOV_MODE, IS_E1HMF(bp));
6131 bnx2x_init_block(bp, TSDM_BLOCK, COMMON_STAGE);
6132 bnx2x_init_block(bp, CSDM_BLOCK, COMMON_STAGE);
6133 bnx2x_init_block(bp, USDM_BLOCK, COMMON_STAGE);
6134 bnx2x_init_block(bp, XSDM_BLOCK, COMMON_STAGE);
6136 bnx2x_init_fill(bp, TSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
6137 bnx2x_init_fill(bp, USEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
6138 bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
6139 bnx2x_init_fill(bp, XSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
6141 bnx2x_init_block(bp, TSEM_BLOCK, COMMON_STAGE);
6142 bnx2x_init_block(bp, USEM_BLOCK, COMMON_STAGE);
6143 bnx2x_init_block(bp, CSEM_BLOCK, COMMON_STAGE);
6144 bnx2x_init_block(bp, XSEM_BLOCK, COMMON_STAGE);
6146 /* sync semi rtc */
6147 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
6148 0x80000000);
6149 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
6150 0x80000000);
6152 bnx2x_init_block(bp, UPB_BLOCK, COMMON_STAGE);
6153 bnx2x_init_block(bp, XPB_BLOCK, COMMON_STAGE);
6154 bnx2x_init_block(bp, PBF_BLOCK, COMMON_STAGE);
6156 REG_WR(bp, SRC_REG_SOFT_RST, 1);
6157 for (i = SRC_REG_KEYRSS0_0; i <= SRC_REG_KEYRSS1_9; i += 4) {
6158 REG_WR(bp, i, 0xc0cac01a);
6159 /* TODO: replace with something meaningful */
6160 }
6161 bnx2x_init_block(bp, SRCH_BLOCK, COMMON_STAGE);
6162 REG_WR(bp, SRC_REG_SOFT_RST, 0);
6164 if (sizeof(union cdu_context) != 1024)
6165 /* we currently assume that a context is 1024 bytes */
6166 printk(KERN_ALERT PFX "please adjust the size of"
6167 " cdu_context(%ld)\n", (long)sizeof(union cdu_context));
6169 bnx2x_init_block(bp, CDU_BLOCK, COMMON_STAGE);
6170 val = (4 << 24) + (0 << 12) + 1024;
6171 REG_WR(bp, CDU_REG_CDU_GLOBAL_PARAMS, val);
6173 bnx2x_init_block(bp, CFC_BLOCK, COMMON_STAGE);
6174 REG_WR(bp, CFC_REG_INIT_REG, 0x7FF);
6175 /* enable context validation interrupt from CFC */
6176 REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
6178 /* set the thresholds to prevent CFC/CDU race */
6179 REG_WR(bp, CFC_REG_DEBUG0, 0x20020000);
6181 bnx2x_init_block(bp, HC_BLOCK, COMMON_STAGE);
6182 bnx2x_init_block(bp, MISC_AEU_BLOCK, COMMON_STAGE);
6184 bnx2x_init_block(bp, PXPCS_BLOCK, COMMON_STAGE);
6185 /* Reset PCIE errors for debug */
6186 REG_WR(bp, 0x2814, 0xffffffff);
6187 REG_WR(bp, 0x3820, 0xffffffff);
6189 bnx2x_init_block(bp, EMAC0_BLOCK, COMMON_STAGE);
6190 bnx2x_init_block(bp, EMAC1_BLOCK, COMMON_STAGE);
6191 bnx2x_init_block(bp, DBU_BLOCK, COMMON_STAGE);
6192 bnx2x_init_block(bp, DBG_BLOCK, COMMON_STAGE);
6194 bnx2x_init_block(bp, NIG_BLOCK, COMMON_STAGE);
6195 if (CHIP_IS_E1H(bp)) {
6196 REG_WR(bp, NIG_REG_LLH_MF_MODE, IS_E1HMF(bp));
6197 REG_WR(bp, NIG_REG_LLH_E1HOV_MODE, IS_E1HMF(bp));
6200 if (CHIP_REV_IS_SLOW(bp))
6201 msleep(200);
6203 /* finish CFC init */
6204 val = reg_poll(bp, CFC_REG_LL_INIT_DONE, 1, 100, 10);
6205 if (val != 1) {
6206 BNX2X_ERR("CFC LL_INIT failed\n");
6207 return -EBUSY;
6208 }
6209 val = reg_poll(bp, CFC_REG_AC_INIT_DONE, 1, 100, 10);
6210 if (val != 1) {
6211 BNX2X_ERR("CFC AC_INIT failed\n");
6212 return -EBUSY;
6213 }
6214 val = reg_poll(bp, CFC_REG_CAM_INIT_DONE, 1, 100, 10);
6215 if (val != 1) {
6216 BNX2X_ERR("CFC CAM_INIT failed\n");
6217 return -EBUSY;
6218 }
6219 REG_WR(bp, CFC_REG_DEBUG0, 0);
6221 /* read NIG statistic
6222 to see if this is our first up since powerup */
6223 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
6224 val = *bnx2x_sp(bp, wb_data[0]);
6226 /* do internal memory self test */
6227 if ((CHIP_IS_E1(bp)) && (val == 0) && bnx2x_int_mem_test(bp)) {
6228 BNX2X_ERR("internal mem self test failed\n");
6229 return -EBUSY;
6230 }
6232 switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
6233 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
6234 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
6235 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
6236 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
6237 bp->port.need_hw_lock = 1;
6238 break;
6240 default:
6241 break;
6242 }
6244 bnx2x_setup_fan_failure_detection(bp);
6246 /* clear PXP2 attentions */
6247 REG_RD(bp, PXP2_REG_PXP2_INT_STS_CLR_0);
6249 enable_blocks_attention(bp);
6251 if (!BP_NOMCP(bp)) {
6252 bnx2x_acquire_phy_lock(bp);
6253 bnx2x_common_init_phy(bp, bp->common.shmem_base);
6254 bnx2x_release_phy_lock(bp);
6255 } else
6256 BNX2X_ERR("Bootcode is missing - can not initialize link\n");
6258 return 0;
6259 }
6261 static int bnx2x_init_port(struct bnx2x *bp)
6262 {
6263 int port = BP_PORT(bp);
6264 int init_stage = port ? PORT1_STAGE : PORT0_STAGE;
6265 u32 low, high;
6266 u32 val;
6268 DP(BNX2X_MSG_MCP, "starting port init port %x\n", port);
6270 REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
6272 bnx2x_init_block(bp, PXP_BLOCK, init_stage);
6273 bnx2x_init_block(bp, PXP2_BLOCK, init_stage);
6275 bnx2x_init_block(bp, TCM_BLOCK, init_stage);
6276 bnx2x_init_block(bp, UCM_BLOCK, init_stage);
6277 bnx2x_init_block(bp, CCM_BLOCK, init_stage);
6282 wb_write[0] = ONCHIP_ADDR1(bp->timers_mapping);
6283 wb_write[1] = ONCHIP_ADDR2(bp->timers_mapping);
6284 REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
6285 REG_WR(bp, PXP2_REG_PSWRQ_TM0_L2P + func*4, PXP_ONE_ILT(i));
6290 wb_write[0] = ONCHIP_ADDR1(bp->qm_mapping);
6291 wb_write[1] = ONCHIP_ADDR2(bp->qm_mapping);
6292 REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
6293 REG_WR(bp, PXP2_REG_PSWRQ_QM0_L2P + func*4, PXP_ONE_ILT(i));
6298 wb_write[0] = ONCHIP_ADDR1(bp->t1_mapping);
6299 wb_write[1] = ONCHIP_ADDR2(bp->t1_mapping);
6300 REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
6301 REG_WR(bp, PXP2_REG_PSWRQ_SRC0_L2P + func*4, PXP_ONE_ILT(i));
6303 bnx2x_init_block(bp, XCM_BLOCK, init_stage);
6306 REG_WR(bp, TM_REG_LIN0_SCAN_TIME + func*4, 1024/64*20);
6307 REG_WR(bp, TM_REG_LIN0_MAX_ACTIVE_CID + func*4, 31);
6309 bnx2x_init_block(bp, TIMERS_BLOCK, init_stage);
6311 bnx2x_init_block(bp, DQ_BLOCK, init_stage);
6313 bnx2x_init_block(bp, BRB1_BLOCK, init_stage);
6314 if (CHIP_REV_IS_SLOW(bp) && !CHIP_IS_E1H(bp)) {
6315 /* no pause for emulation and FPGA */
6320 low = ((bp->flags & ONE_PORT_FLAG) ? 160 : 246);
6321 else if (bp->dev->mtu > 4096) {
6322 if (bp->flags & ONE_PORT_FLAG)
6326 /* (24*1024 + val*4)/256 */
6327 low = 96 + (val/64) + ((val % 64) ? 1 : 0);
6330 low = ((bp->flags & ONE_PORT_FLAG) ? 80 : 160);
6331 high = low + 56; /* 14*1024/256 */
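/* The BRB pause thresholds are programmed in 256-byte blocks:
 * low = 96 + val/64 (rounded up) is exactly (24*1024 + val*4)/256,
 * since 24*1024/256 = 96 and val*4/256 = val/64; high = low + 56
 * adds 14KB (14*1024/256 = 56). E.g. an illustrative val of 9216
 * gives low = 96 + 144 = 240 blocks (61440 bytes).
 */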
6333 REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_0 + port*4, low);
6334 REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_0 + port*4, high);
6337 bnx2x_init_block(bp, PRS_BLOCK, init_stage);
6339 bnx2x_init_block(bp, TSDM_BLOCK, init_stage);
6340 bnx2x_init_block(bp, CSDM_BLOCK, init_stage);
6341 bnx2x_init_block(bp, USDM_BLOCK, init_stage);
6342 bnx2x_init_block(bp, XSDM_BLOCK, init_stage);
6344 bnx2x_init_block(bp, TSEM_BLOCK, init_stage);
6345 bnx2x_init_block(bp, USEM_BLOCK, init_stage);
6346 bnx2x_init_block(bp, CSEM_BLOCK, init_stage);
6347 bnx2x_init_block(bp, XSEM_BLOCK, init_stage);
6349 bnx2x_init_block(bp, UPB_BLOCK, init_stage);
6350 bnx2x_init_block(bp, XPB_BLOCK, init_stage);
6352 bnx2x_init_block(bp, PBF_BLOCK, init_stage);
6354 /* configure PBF to work without PAUSE for mtu 9000 */
6355 REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 0);
6357 /* update threshold */
6358 REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, (9040/16));
6359 /* update init credit */
6360 REG_WR(bp, PBF_REG_P0_INIT_CRD + port*4, (9040/16) + 553 - 22);
6363 REG_WR(bp, PBF_REG_INIT_P0 + port*4, 1);
6365 REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0);
6368 /* tell the searcher where the T2 table is */
6369 REG_WR(bp, SRC_REG_COUNTFREE0 + func*4, 16*1024/64);
6371 wb_write[0] = U64_LO(bp->t2_mapping);
6372 wb_write[1] = U64_HI(bp->t2_mapping);
6373 REG_WR_DMAE(bp, SRC_REG_FIRSTFREE0 + func*4, wb_write, 2);
6374 wb_write[0] = U64_LO((u64)bp->t2_mapping + 16*1024 - 64);
6375 wb_write[1] = U64_HI((u64)bp->t2_mapping + 16*1024 - 64);
6376 REG_WR_DMAE(bp, SRC_REG_LASTFREE0 + func*4, wb_write, 2);
6378 REG_WR(bp, SRC_REG_NUMBER_HASH_BITS0 + func*4, 10);
6380 bnx2x_init_block(bp, CDU_BLOCK, init_stage);
6381 bnx2x_init_block(bp, CFC_BLOCK, init_stage);
6383 if (CHIP_IS_E1(bp)) {
6384 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
6385 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
6387 bnx2x_init_block(bp, HC_BLOCK, init_stage);
6389 bnx2x_init_block(bp, MISC_AEU_BLOCK, init_stage);
6390 /* init aeu_mask_attn_func_0/1:
6391 * - SF mode: bits 3-7 are masked. only bits 0-2 are in use
6392 * - MF mode: bit 3 is masked. bits 0-2 are in use as in SF
6393 * bits 4-7 are used for "per vn group attention" */
6394 REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4,
6395 (IS_E1HMF(bp) ? 0xF7 : 0x7));
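/* e.g. 0x7 = 0b00000111 leaves only bits 0-2 unmasked (SF mode),
 * while 0xF7 = 0b11110111 additionally opens bits 4-7 for the
 * per-VN group attentions and keeps bit 3 masked (MF mode).
 */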
6397 bnx2x_init_block(bp, PXPCS_BLOCK, init_stage);
6398 bnx2x_init_block(bp, EMAC0_BLOCK, init_stage);
6399 bnx2x_init_block(bp, EMAC1_BLOCK, init_stage);
6400 bnx2x_init_block(bp, DBU_BLOCK, init_stage);
6401 bnx2x_init_block(bp, DBG_BLOCK, init_stage);
6403 bnx2x_init_block(bp, NIG_BLOCK, init_stage);
6405 REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1);
6407 if (CHIP_IS_E1H(bp)) {
6408 /* 0x2 disable e1hov, 0x1 enable */
6409 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK_MF + port*4,
6410 (IS_E1HMF(bp) ? 0x1 : 0x2));
6413 REG_WR(bp, NIG_REG_LLFC_ENABLE_0 + port*4, 0);
6414 REG_WR(bp, NIG_REG_LLFC_OUT_EN_0 + port*4, 0);
6415 REG_WR(bp, NIG_REG_PAUSE_ENABLE_0 + port*4, 1);
6419 bnx2x_init_block(bp, MCP_BLOCK, init_stage);
6420 bnx2x_init_block(bp, DMAE_BLOCK, init_stage);
6422 switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
6423 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
6425 u32 swap_val, swap_override, aeu_gpio_mask, offset;
6427 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_3,
6428 MISC_REGISTERS_GPIO_INPUT_HI_Z, port);
6430 /* The GPIO should be swapped if the swap register is
6431 set and active */
6432 swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
6433 swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
6435 /* Select function upon port-swap configuration */
6437 offset = MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0;
6438 aeu_gpio_mask = (swap_val && swap_override) ?
6439 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1 :
6440 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0;
6442 offset = MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0;
6443 aeu_gpio_mask = (swap_val && swap_override) ?
6444 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 :
6445 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1;
6447 val = REG_RD(bp, offset);
6448 /* add GPIO3 to group */
6449 val |= aeu_gpio_mask;
6450 REG_WR(bp, offset, val);
6454 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
6455 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
6456 /* add SPIO 5 to group 0 */
6458 u32 reg_addr = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
6459 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
6460 val = REG_RD(bp, reg_addr);
6461 val |= AEU_INPUTS_ATTN_BITS_SPIO5;
6462 REG_WR(bp, reg_addr, val);
6470 bnx2x__link_reset(bp);
6475 #define ILT_PER_FUNC (768/2)
6476 #define FUNC_ILT_BASE(func) (func * ILT_PER_FUNC)
6477 /* the phys address is shifted right 12 bits and has a 1=valid
6478 bit added at the 53rd bit;
6479 then since this is a wide register(TM)
6480 we split it into two 32 bit writes
6481 */
6482 #define ONCHIP_ADDR1(x) ((u32)(((u64)x >> 12) & 0xFFFFFFFF))
6483 #define ONCHIP_ADDR2(x) ((u32)((1 << 20) | ((u64)x >> 44)))
6484 #define PXP_ONE_ILT(x) (((x) << 10) | x)
6485 #define PXP_ILT_RANGE(f, l) (((l) << 10) | f)
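/* Worked example (values derived from the macros above): for a DMA
 * address of 0x0000001234567000, ONCHIP_ADDR1() yields 0x01234567
 * (the address shifted right 12 bits) and ONCHIP_ADDR2() yields
 * 0x00100000 (the valid bit at position 20 of the high word, i.e.
 * the 53rd bit of the shifted value, with address bits 44+ below it).
 * PXP_ONE_ILT() packs the same line index into both fields of an ILT
 * range register; PXP_ILT_RANGE() packs a [first, last] pair with
 * last in bits 10+ and first in bits 0-9.
 */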
6487 #define CNIC_ILT_LINES 0
6489 static void bnx2x_ilt_wr(struct bnx2x *bp, u32 index, dma_addr_t addr)
6493 if (CHIP_IS_E1H(bp))
6494 reg = PXP2_REG_RQ_ONCHIP_AT_B0 + index*8;
6496 reg = PXP2_REG_RQ_ONCHIP_AT + index*8;
6498 bnx2x_wb_wr(bp, reg, ONCHIP_ADDR1(addr), ONCHIP_ADDR2(addr));
6501 static int bnx2x_init_func(struct bnx2x *bp)
6503 int port = BP_PORT(bp);
6504 int func = BP_FUNC(bp);
6508 DP(BNX2X_MSG_MCP, "starting func init func %x\n", func);
6510 /* set MSI reconfigure capability */
6511 addr = (port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0);
6512 val = REG_RD(bp, addr);
6513 val |= HC_CONFIG_0_REG_MSI_ATTN_EN_0;
6514 REG_WR(bp, addr, val);
6516 i = FUNC_ILT_BASE(func);
6518 bnx2x_ilt_wr(bp, i, bnx2x_sp_mapping(bp, context));
6519 if (CHIP_IS_E1H(bp)) {
6520 REG_WR(bp, PXP2_REG_RQ_CDU_FIRST_ILT, i);
6521 REG_WR(bp, PXP2_REG_RQ_CDU_LAST_ILT, i + CNIC_ILT_LINES);
6523 REG_WR(bp, PXP2_REG_PSWRQ_CDU0_L2P + func*4,
6524 PXP_ILT_RANGE(i, i + CNIC_ILT_LINES));
6527 if (CHIP_IS_E1H(bp)) {
6528 bnx2x_init_block(bp, MISC_BLOCK, FUNC0_STAGE + func);
6529 bnx2x_init_block(bp, TCM_BLOCK, FUNC0_STAGE + func);
6530 bnx2x_init_block(bp, UCM_BLOCK, FUNC0_STAGE + func);
6531 bnx2x_init_block(bp, CCM_BLOCK, FUNC0_STAGE + func);
6532 bnx2x_init_block(bp, XCM_BLOCK, FUNC0_STAGE + func);
6533 bnx2x_init_block(bp, TSEM_BLOCK, FUNC0_STAGE + func);
6534 bnx2x_init_block(bp, USEM_BLOCK, FUNC0_STAGE + func);
6535 bnx2x_init_block(bp, CSEM_BLOCK, FUNC0_STAGE + func);
6536 bnx2x_init_block(bp, XSEM_BLOCK, FUNC0_STAGE + func);
6538 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
6539 REG_WR(bp, NIG_REG_LLH0_FUNC_VLAN_ID + port*8, bp->e1hov);
6542 /* HC init per function */
6543 if (CHIP_IS_E1H(bp)) {
6544 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
6546 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
6547 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
6549 bnx2x_init_block(bp, HC_BLOCK, FUNC0_STAGE + func);
6551 /* Reset PCIE errors for debug */
6552 REG_WR(bp, 0x2114, 0xffffffff);
6553 REG_WR(bp, 0x2120, 0xffffffff);
6558 static int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
6562 DP(BNX2X_MSG_MCP, "function %d load_code %x\n",
6563 BP_FUNC(bp), load_code);
6566 mutex_init(&bp->dmae_mutex);
6567 rc = bnx2x_gunzip_init(bp);
6571 switch (load_code) {
6572 case FW_MSG_CODE_DRV_LOAD_COMMON:
6573 rc = bnx2x_init_common(bp);
6578 case FW_MSG_CODE_DRV_LOAD_PORT:
6580 rc = bnx2x_init_port(bp);
6585 case FW_MSG_CODE_DRV_LOAD_FUNCTION:
6587 rc = bnx2x_init_func(bp);
6593 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
6597 if (!BP_NOMCP(bp)) {
6598 int func = BP_FUNC(bp);
6600 bp->fw_drv_pulse_wr_seq =
6601 (SHMEM_RD(bp, func_mb[func].drv_pulse_mb) &
6602 DRV_PULSE_SEQ_MASK);
6603 DP(BNX2X_MSG_MCP, "drv_pulse 0x%x\n", bp->fw_drv_pulse_wr_seq);
6606 /* this needs to be done before gunzip end */
6607 bnx2x_zero_def_sb(bp);
6608 for_each_queue(bp, i)
6609 bnx2x_zero_sb(bp, BP_L_ID(bp) + i);
6612 bnx2x_gunzip_end(bp);
6617 static void bnx2x_free_mem(struct bnx2x *bp)
6620 #define BNX2X_PCI_FREE(x, y, size) \
6621 do { \
6622 if (x) { \
6623 pci_free_consistent(bp->pdev, size, x, y); \
6624 x = NULL; \
6625 } \
6626 } while (0)
6629 #define BNX2X_FREE(x) \
6630 do { \
6631 if (x) { \
6632 vfree(x); \
6633 x = NULL; \
6634 } \
6635 } while (0)
6641 for_each_queue(bp, i) {
6644 BNX2X_PCI_FREE(bnx2x_fp(bp, i, status_blk),
6645 bnx2x_fp(bp, i, status_blk_mapping),
6646 sizeof(struct host_status_block));
6649 for_each_rx_queue(bp, i) {
6651 /* fastpath rx rings: rx_buf rx_desc rx_comp */
6652 BNX2X_FREE(bnx2x_fp(bp, i, rx_buf_ring));
6653 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_desc_ring),
6654 bnx2x_fp(bp, i, rx_desc_mapping),
6655 sizeof(struct eth_rx_bd) * NUM_RX_BD);
6657 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_comp_ring),
6658 bnx2x_fp(bp, i, rx_comp_mapping),
6659 sizeof(struct eth_fast_path_rx_cqe) *
6663 BNX2X_FREE(bnx2x_fp(bp, i, rx_page_ring));
6664 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_sge_ring),
6665 bnx2x_fp(bp, i, rx_sge_mapping),
6666 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
6669 for_each_tx_queue(bp, i) {
6671 /* fastpath tx rings: tx_buf tx_desc */
6672 BNX2X_FREE(bnx2x_fp(bp, i, tx_buf_ring));
6673 BNX2X_PCI_FREE(bnx2x_fp(bp, i, tx_desc_ring),
6674 bnx2x_fp(bp, i, tx_desc_mapping),
6675 sizeof(union eth_tx_bd_types) * NUM_TX_BD);
6677 /* end of fastpath */
6679 BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping,
6680 sizeof(struct host_def_status_block));
6682 BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping,
6683 sizeof(struct bnx2x_slowpath));
6686 BNX2X_PCI_FREE(bp->t1, bp->t1_mapping, 64*1024);
6687 BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, 16*1024);
6688 BNX2X_PCI_FREE(bp->timers, bp->timers_mapping, 8*1024);
6689 BNX2X_PCI_FREE(bp->qm, bp->qm_mapping, 128*1024);
6691 BNX2X_PCI_FREE(bp->spq, bp->spq_mapping, BCM_PAGE_SIZE);
6693 #undef BNX2X_PCI_FREE
6697 static int bnx2x_alloc_mem(struct bnx2x *bp)
6700 #define BNX2X_PCI_ALLOC(x, y, size) \
6701 do { \
6702 x = pci_alloc_consistent(bp->pdev, size, y); \
6703 if (x == NULL) \
6704 goto alloc_mem_err; \
6705 memset(x, 0, size); \
6706 } while (0)
6708 #define BNX2X_ALLOC(x, size) \
6709 do { \
6710 x = vmalloc(size); \
6711 if (x == NULL) \
6712 goto alloc_mem_err; \
6713 memset(x, 0, size); \
6714 } while (0)
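/* The two helpers differ in where the memory lives: BNX2X_PCI_ALLOC
 * returns DMA-coherent memory that the chip reads/writes directly
 * (status blocks, descriptor rings), while BNX2X_ALLOC uses vmalloc
 * for host-only shadow arrays (the sw_rx_bd/sw_tx_bd bookkeeping)
 * that the device never touches.
 */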
6720 for_each_queue(bp, i) {
6721 bnx2x_fp(bp, i, bp) = bp;
6724 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, status_blk),
6725 &bnx2x_fp(bp, i, status_blk_mapping),
6726 sizeof(struct host_status_block));
6729 for_each_rx_queue(bp, i) {
6731 /* fastpath rx rings: rx_buf rx_desc rx_comp */
6732 BNX2X_ALLOC(bnx2x_fp(bp, i, rx_buf_ring),
6733 sizeof(struct sw_rx_bd) * NUM_RX_BD);
6734 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_desc_ring),
6735 &bnx2x_fp(bp, i, rx_desc_mapping),
6736 sizeof(struct eth_rx_bd) * NUM_RX_BD);
6738 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_comp_ring),
6739 &bnx2x_fp(bp, i, rx_comp_mapping),
6740 sizeof(struct eth_fast_path_rx_cqe) *
6744 BNX2X_ALLOC(bnx2x_fp(bp, i, rx_page_ring),
6745 sizeof(struct sw_rx_page) * NUM_RX_SGE);
6746 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_sge_ring),
6747 &bnx2x_fp(bp, i, rx_sge_mapping),
6748 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
6751 for_each_tx_queue(bp, i) {
6753 /* fastpath tx rings: tx_buf tx_desc */
6754 BNX2X_ALLOC(bnx2x_fp(bp, i, tx_buf_ring),
6755 sizeof(struct sw_tx_bd) * NUM_TX_BD);
6756 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, tx_desc_ring),
6757 &bnx2x_fp(bp, i, tx_desc_mapping),
6758 sizeof(union eth_tx_bd_types) * NUM_TX_BD);
6760 /* end of fastpath */
6762 BNX2X_PCI_ALLOC(bp->def_status_blk, &bp->def_status_blk_mapping,
6763 sizeof(struct host_def_status_block));
6765 BNX2X_PCI_ALLOC(bp->slowpath, &bp->slowpath_mapping,
6766 sizeof(struct bnx2x_slowpath));
6769 BNX2X_PCI_ALLOC(bp->t1, &bp->t1_mapping, 64*1024);
6772 for (i = 0; i < 64*1024; i += 64) {
6773 *(u64 *)((char *)bp->t1 + i + 56) = 0x0UL;
6774 *(u64 *)((char *)bp->t1 + i + 3) = 0x0UL;
6777 /* allocate searcher T2 table
6778 we allocate 1/4 of alloc num for T2
6779 (which is not entered into the ILT) */
6780 BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, 16*1024);
6783 for (i = 0; i < 16*1024; i += 64)
6784 *(u64 *)((char *)bp->t2 + i + 56) = bp->t2_mapping + i + 64;
6786 /* now fixup the last line in the block to point to the next block */
6787 *(u64 *)((char *)bp->t2 + 1024*16-8) = bp->t2_mapping;
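/* The loop above turns T2 into a linked free list for the searcher:
 * the last 8 bytes of each 64-byte element hold the DMA address of
 * the next element, and the final write wraps the last element back
 * to the base of the table, making the list circular.
 */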
6789 /* Timer block array (MAX_CONN*8) phys uncached for now 1024 conns */
6790 BNX2X_PCI_ALLOC(bp->timers, &bp->timers_mapping, 8*1024);
6792 /* QM queues (128*MAX_CONN) */
6793 BNX2X_PCI_ALLOC(bp->qm, &bp->qm_mapping, 128*1024);
6796 /* Slow path ring */
6797 BNX2X_PCI_ALLOC(bp->spq, &bp->spq_mapping, BCM_PAGE_SIZE);
6805 #undef BNX2X_PCI_ALLOC
6809 static void bnx2x_free_tx_skbs(struct bnx2x *bp)
6813 for_each_tx_queue(bp, i) {
6814 struct bnx2x_fastpath *fp = &bp->fp[i];
6816 u16 bd_cons = fp->tx_bd_cons;
6817 u16 sw_prod = fp->tx_pkt_prod;
6818 u16 sw_cons = fp->tx_pkt_cons;
6820 while (sw_cons != sw_prod) {
6821 bd_cons = bnx2x_free_tx_pkt(bp, fp, TX_BD(sw_cons));
6827 static void bnx2x_free_rx_skbs(struct bnx2x *bp)
6831 for_each_rx_queue(bp, j) {
6832 struct bnx2x_fastpath *fp = &bp->fp[j];
6834 for (i = 0; i < NUM_RX_BD; i++) {
6835 struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
6836 struct sk_buff *skb = rx_buf->skb;
6841 pci_unmap_single(bp->pdev,
6842 pci_unmap_addr(rx_buf, mapping),
6843 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
6848 if (!fp->disable_tpa)
6849 bnx2x_free_tpa_pool(bp, fp, CHIP_IS_E1(bp) ?
6850 ETH_MAX_AGGREGATION_QUEUES_E1 :
6851 ETH_MAX_AGGREGATION_QUEUES_E1H);
6855 static void bnx2x_free_skbs(struct bnx2x *bp)
6857 bnx2x_free_tx_skbs(bp);
6858 bnx2x_free_rx_skbs(bp);
6861 static void bnx2x_free_msix_irqs(struct bnx2x *bp)
6865 free_irq(bp->msix_table[0].vector, bp->dev);
6866 DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
6867 bp->msix_table[0].vector);
6869 for_each_queue(bp, i) {
6870 DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq "
6871 "state %x\n", i, bp->msix_table[i + offset].vector,
6872 bnx2x_fp(bp, i, state));
6874 free_irq(bp->msix_table[i + offset].vector, &bp->fp[i]);
6878 static void bnx2x_free_irq(struct bnx2x *bp)
6880 if (bp->flags & USING_MSIX_FLAG) {
6881 bnx2x_free_msix_irqs(bp);
6882 pci_disable_msix(bp->pdev);
6883 bp->flags &= ~USING_MSIX_FLAG;
6885 } else if (bp->flags & USING_MSI_FLAG) {
6886 free_irq(bp->pdev->irq, bp->dev);
6887 pci_disable_msi(bp->pdev);
6888 bp->flags &= ~USING_MSI_FLAG;
6891 free_irq(bp->pdev->irq, bp->dev);
6894 static int bnx2x_enable_msix(struct bnx2x *bp)
6896 int i, rc, offset = 1;
6899 bp->msix_table[0].entry = igu_vec;
6900 DP(NETIF_MSG_IFUP, "msix_table[0].entry = %d (slowpath)\n", igu_vec);
6902 for_each_queue(bp, i) {
6903 igu_vec = BP_L_ID(bp) + offset + i;
6904 bp->msix_table[i + offset].entry = igu_vec;
6905 DP(NETIF_MSG_IFUP, "msix_table[%d].entry = %d "
6906 "(fastpath #%u)\n", i + offset, igu_vec, i);
6909 rc = pci_enable_msix(bp->pdev, &bp->msix_table[0],
6910 BNX2X_NUM_QUEUES(bp) + offset);
6912 DP(NETIF_MSG_IFUP, "MSI-X is not attainable rc %d\n", rc);
6916 bp->flags |= USING_MSIX_FLAG;
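/* Resulting vector layout: msix_table[0] carries the slowpath
 * (default status block) interrupt, and entries 1..BNX2X_NUM_QUEUES(bp)
 * carry the fastpath queues, each tagged with its IGU vector
 * (BP_L_ID() + offset + queue index) as programmed above.
 */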
6921 static int bnx2x_req_msix_irqs(struct bnx2x *bp)
6923 int i, rc, offset = 1;
6925 rc = request_irq(bp->msix_table[0].vector, bnx2x_msix_sp_int, 0,
6926 bp->dev->name, bp->dev);
6928 BNX2X_ERR("request sp irq failed\n");
6932 for_each_queue(bp, i) {
6933 struct bnx2x_fastpath *fp = &bp->fp[i];
6935 if (i < bp->num_rx_queues)
6936 sprintf(fp->name, "%s-rx-%d", bp->dev->name, i);
6938 sprintf(fp->name, "%s-tx-%d",
6939 bp->dev->name, i - bp->num_rx_queues);
6941 rc = request_irq(bp->msix_table[i + offset].vector,
6942 bnx2x_msix_fp_int, 0, fp->name, fp);
6944 BNX2X_ERR("request fp #%d irq failed rc %d\n", i, rc);
6945 bnx2x_free_msix_irqs(bp);
6949 fp->state = BNX2X_FP_STATE_IRQ;
6952 i = BNX2X_NUM_QUEUES(bp);
6953 printk(KERN_INFO PFX "%s: using MSI-X IRQs: sp %d fp[%d] %d"
6954 " ... fp[%d] %d\n",
6955 bp->dev->name, bp->msix_table[0].vector,
6956 0, bp->msix_table[offset].vector,
6957 i - 1, bp->msix_table[offset + i - 1].vector);
6962 static int bnx2x_enable_msi(struct bnx2x *bp)
6966 rc = pci_enable_msi(bp->pdev);
6968 DP(NETIF_MSG_IFUP, "MSI is not attainable\n");
6971 bp->flags |= USING_MSI_FLAG;
6976 static int bnx2x_req_irq(struct bnx2x *bp)
6978 unsigned long flags;
6981 if (bp->flags & USING_MSI_FLAG)
6982 flags = 0;
6983 else
6984 flags = IRQF_SHARED;
6986 rc = request_irq(bp->pdev->irq, bnx2x_interrupt, flags,
6987 bp->dev->name, bp->dev);
6988 if (!rc)
6989 bnx2x_fp(bp, 0, state) = BNX2X_FP_STATE_IRQ;
6994 static void bnx2x_napi_enable(struct bnx2x *bp)
6998 for_each_rx_queue(bp, i)
6999 napi_enable(&bnx2x_fp(bp, i, napi));
7002 static void bnx2x_napi_disable(struct bnx2x *bp)
7006 for_each_rx_queue(bp, i)
7007 napi_disable(&bnx2x_fp(bp, i, napi));
7010 static void bnx2x_netif_start(struct bnx2x *bp)
7014 intr_sem = atomic_dec_and_test(&bp->intr_sem);
7015 smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */
7017 if (intr_sem) {
7018 if (netif_running(bp->dev)) {
7019 bnx2x_napi_enable(bp);
7020 bnx2x_int_enable(bp);
7021 if (bp->state == BNX2X_STATE_OPEN)
7022 netif_tx_wake_all_queues(bp->dev);
7027 static void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
7029 bnx2x_int_disable_sync(bp, disable_hw);
7030 bnx2x_napi_disable(bp);
7031 netif_tx_disable(bp->dev);
7032 bp->dev->trans_start = jiffies; /* prevent tx timeout */
7035 /*
7036 * Init service functions
7037 */
7039 static void bnx2x_set_mac_addr_e1(struct bnx2x *bp, int set)
7041 struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
7042 int port = BP_PORT(bp);
7044 /* CAM allocation
7045 * unicasts 0-31:port0 32-63:port1
7046 * multicast 64-127:port0 128-191:port1
7047 */
7048 config->hdr.length = 2;
7049 config->hdr.offset = port ? 32 : 0;
7050 config->hdr.client_id = bp->fp->cl_id;
7051 config->hdr.reserved1 = 0;
7054 config->config_table[0].cam_entry.msb_mac_addr =
7055 swab16(*(u16 *)&bp->dev->dev_addr[0]);
7056 config->config_table[0].cam_entry.middle_mac_addr =
7057 swab16(*(u16 *)&bp->dev->dev_addr[2]);
7058 config->config_table[0].cam_entry.lsb_mac_addr =
7059 swab16(*(u16 *)&bp->dev->dev_addr[4]);
7060 config->config_table[0].cam_entry.flags = cpu_to_le16(port);
7062 config->config_table[0].target_table_entry.flags = 0;
7064 CAM_INVALIDATE(config->config_table[0]);
7065 config->config_table[0].target_table_entry.clients_bit_vector =
7066 cpu_to_le32(1 << BP_L_ID(bp));
7067 config->config_table[0].target_table_entry.vlan_id = 0;
7069 DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x)\n",
7070 (set ? "setting" : "clearing"),
7071 config->config_table[0].cam_entry.msb_mac_addr,
7072 config->config_table[0].cam_entry.middle_mac_addr,
7073 config->config_table[0].cam_entry.lsb_mac_addr);
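/* Example of the halfword packing above (little-endian host,
 * illustrative address 00:11:22:33:44:55): *(u16 *)&dev_addr[0]
 * loads 0x1100, swab16() gives 0x0011, so the CAM entry holds
 * 0x0011/0x2233/0x4455 - the MAC in network byte order, two bytes
 * per halfword.
 */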
7076 config->config_table[1].cam_entry.msb_mac_addr = cpu_to_le16(0xffff);
7077 config->config_table[1].cam_entry.middle_mac_addr = cpu_to_le16(0xffff);
7078 config->config_table[1].cam_entry.lsb_mac_addr = cpu_to_le16(0xffff);
7079 config->config_table[1].cam_entry.flags = cpu_to_le16(port);
7081 config->config_table[1].target_table_entry.flags =
7082 TSTORM_CAM_TARGET_TABLE_ENTRY_BROADCAST;
7084 CAM_INVALIDATE(config->config_table[1]);
7085 config->config_table[1].target_table_entry.clients_bit_vector =
7086 cpu_to_le32(1 << BP_L_ID(bp));
7087 config->config_table[1].target_table_entry.vlan_id = 0;
7089 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
7090 U64_HI(bnx2x_sp_mapping(bp, mac_config)),
7091 U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
7094 static void bnx2x_set_mac_addr_e1h(struct bnx2x *bp, int set)
7096 struct mac_configuration_cmd_e1h *config =
7097 (struct mac_configuration_cmd_e1h *)bnx2x_sp(bp, mac_config);
7099 /* CAM allocation for E1H
7100 * unicasts: by func number
7101 * multicast: 20+FUNC*20, 20 each
7102 */
7103 config->hdr.length = 1;
7104 config->hdr.offset = BP_FUNC(bp);
7105 config->hdr.client_id = bp->fp->cl_id;
7106 config->hdr.reserved1 = 0;
7109 config->config_table[0].msb_mac_addr =
7110 swab16(*(u16 *)&bp->dev->dev_addr[0]);
7111 config->config_table[0].middle_mac_addr =
7112 swab16(*(u16 *)&bp->dev->dev_addr[2]);
7113 config->config_table[0].lsb_mac_addr =
7114 swab16(*(u16 *)&bp->dev->dev_addr[4]);
7115 config->config_table[0].clients_bit_vector =
7116 cpu_to_le32(1 << BP_L_ID(bp));
7117 config->config_table[0].vlan_id = 0;
7118 config->config_table[0].e1hov_id = cpu_to_le16(bp->e1hov);
7120 config->config_table[0].flags = BP_PORT(bp);
7122 config->config_table[0].flags =
7123 MAC_CONFIGURATION_ENTRY_E1H_ACTION_TYPE;
7125 DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x) E1HOV %d CLID %d\n",
7126 (set ? "setting" : "clearing"),
7127 config->config_table[0].msb_mac_addr,
7128 config->config_table[0].middle_mac_addr,
7129 config->config_table[0].lsb_mac_addr, bp->e1hov, BP_L_ID(bp));
7131 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
7132 U64_HI(bnx2x_sp_mapping(bp, mac_config)),
7133 U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
7136 static int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
7137 int *state_p, int poll)
7139 /* can take a while if any port is running */
7142 DP(NETIF_MSG_IFUP, "%s for state to become %x on IDX [%d]\n",
7143 poll ? "polling" : "waiting", state, idx);
7148 bnx2x_rx_int(bp->fp, 10);
7149 /* if index is different from 0
7150 * the reply for some commands will
7151 * be on the non default queue
7152 */
7153 if (idx)
7154 bnx2x_rx_int(&bp->fp[idx], 10);
7157 mb(); /* state is changed by bnx2x_sp_event() */
7158 if (*state_p == state) {
7159 #ifdef BNX2X_STOP_ON_ERROR
7160 DP(NETIF_MSG_IFUP, "exit (cnt %d)\n", 5000 - cnt);
7172 BNX2X_ERR("timeout %s for state %x on IDX [%d]\n",
7173 poll ? "polling" : "waiting", state, idx);
7174 #ifdef BNX2X_STOP_ON_ERROR
7181 static int bnx2x_setup_leading(struct bnx2x *bp)
7185 /* reset IGU state */
7186 bnx2x_ack_sb(bp, bp->fp[0].sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
7189 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_SETUP, 0, 0, 0, 0);
7191 /* Wait for completion */
7192 rc = bnx2x_wait_ramrod(bp, BNX2X_STATE_OPEN, 0, &(bp->state), 0);
7197 static int bnx2x_setup_multi(struct bnx2x *bp, int index)
7199 struct bnx2x_fastpath *fp = &bp->fp[index];
7201 /* reset IGU state */
7202 bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
7205 fp->state = BNX2X_FP_STATE_OPENING;
7206 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CLIENT_SETUP, index, 0,
7209 /* Wait for completion */
7210 return bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_OPEN, index,
7214 static int bnx2x_poll(struct napi_struct *napi, int budget);
7216 static void bnx2x_set_int_mode_msix(struct bnx2x *bp, int *num_rx_queues_out,
7217 int *num_tx_queues_out)
7219 int _num_rx_queues = 0, _num_tx_queues = 0;
7221 switch (bp->multi_mode) {
7222 case ETH_RSS_MODE_DISABLED:
7227 case ETH_RSS_MODE_REGULAR:
7228 if (num_rx_queues)
7229 _num_rx_queues = min_t(u32, num_rx_queues,
7230 BNX2X_MAX_QUEUES(bp));
7231 else
7232 _num_rx_queues = min_t(u32, num_online_cpus(),
7233 BNX2X_MAX_QUEUES(bp));
7235 if (num_tx_queues)
7236 _num_tx_queues = min_t(u32, num_tx_queues,
7237 BNX2X_MAX_QUEUES(bp));
7238 else
7239 _num_tx_queues = min_t(u32, num_online_cpus(),
7240 BNX2X_MAX_QUEUES(bp));
7242 /* There must not be more Tx queues than Rx queues */
7243 if (_num_tx_queues > _num_rx_queues) {
7244 BNX2X_ERR("number of tx queues (%d) > "
7245 "number of rx queues (%d)"
7246 " defaulting to %d\n",
7247 _num_tx_queues, _num_rx_queues,
7248 _num_rx_queues);
7249 _num_tx_queues = _num_rx_queues;
7260 *num_rx_queues_out = _num_rx_queues;
7261 *num_tx_queues_out = _num_tx_queues;
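/* Net effect: the num_rx_queues/num_tx_queues module parameters
 * override the default of num_online_cpus(), both are clamped to
 * BNX2X_MAX_QUEUES(bp), and Tx is additionally clamped to Rx.
 */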
7264 static int bnx2x_set_int_mode(struct bnx2x *bp)
7271 bp->num_rx_queues = 1;
7272 bp->num_tx_queues = 1;
7273 DP(NETIF_MSG_IFUP, "set number of queues to 1\n");
7278 /* Set interrupt mode according to bp->multi_mode value */
7279 bnx2x_set_int_mode_msix(bp, &bp->num_rx_queues,
7280 &bp->num_tx_queues);
7282 DP(NETIF_MSG_IFUP, "set number of queues to: rx %d tx %d\n",
7283 bp->num_rx_queues, bp->num_tx_queues);
7285 /* if we can't use MSI-X we only need one fp,
7286 * so try to enable MSI-X with the requested number of fp's
7287 * and fall back to MSI or legacy INTx with one fp
7288 */
7289 rc = bnx2x_enable_msix(bp);
7291 /* failed to enable MSI-X */
7293 BNX2X_ERR("Multi requested but failed to "
7294 "enable MSI-X (rx %d tx %d), "
7295 "set number of queues to 1\n",
7296 bp->num_rx_queues, bp->num_tx_queues);
7297 bp->num_rx_queues = 1;
7298 bp->num_tx_queues = 1;
7302 bp->dev->real_num_tx_queues = bp->num_tx_queues;
7307 /* must be called with rtnl_lock */
7308 static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
7313 #ifdef BNX2X_STOP_ON_ERROR
7314 if (unlikely(bp->panic))
7318 bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;
7320 rc = bnx2x_set_int_mode(bp);
7322 if (bnx2x_alloc_mem(bp))
7325 for_each_rx_queue(bp, i)
7326 bnx2x_fp(bp, i, disable_tpa) =
7327 ((bp->flags & TPA_ENABLE_FLAG) == 0);
7329 for_each_rx_queue(bp, i)
7330 netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
7333 bnx2x_napi_enable(bp);
7335 if (bp->flags & USING_MSIX_FLAG) {
7336 rc = bnx2x_req_msix_irqs(bp);
7338 pci_disable_msix(bp->pdev);
7342 /* Fall back to INTx if we failed to enable MSI-X due to lack of
7343 memory (in bnx2x_set_int_mode()) */
7344 if ((rc != -ENOMEM) && (int_mode != INT_MODE_INTx))
7345 bnx2x_enable_msi(bp);
7347 rc = bnx2x_req_irq(bp);
7349 BNX2X_ERR("IRQ request failed rc %d, aborting\n", rc);
7350 if (bp->flags & USING_MSI_FLAG)
7351 pci_disable_msi(bp->pdev);
7354 if (bp->flags & USING_MSI_FLAG) {
7355 bp->dev->irq = bp->pdev->irq;
7356 printk(KERN_INFO PFX "%s: using MSI IRQ %d\n",
7357 bp->dev->name, bp->pdev->irq);
7361 /* Send LOAD_REQUEST command to MCP.
7362 Returns the type of LOAD command: if it is the first port
7363 to be initialized, common blocks should be initialized,
7364 otherwise - not.
7365 */
7366 if (!BP_NOMCP(bp)) {
7367 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ);
7369 BNX2X_ERR("MCP response failure, aborting\n");
7373 if (load_code == FW_MSG_CODE_DRV_LOAD_REFUSED) {
7374 rc = -EBUSY; /* other port in diagnostic mode */
7379 int port = BP_PORT(bp);
7381 DP(NETIF_MSG_IFUP, "NO MCP - load counts %d, %d, %d\n",
7382 load_count[0], load_count[1], load_count[2]);
7384 load_count[1 + port]++;
7385 DP(NETIF_MSG_IFUP, "NO MCP - new load counts %d, %d, %d\n",
7386 load_count[0], load_count[1], load_count[2]);
7387 if (load_count[0] == 1)
7388 load_code = FW_MSG_CODE_DRV_LOAD_COMMON;
7389 else if (load_count[1 + port] == 1)
7390 load_code = FW_MSG_CODE_DRV_LOAD_PORT;
7392 load_code = FW_MSG_CODE_DRV_LOAD_FUNCTION;
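/* Without an MCP to arbitrate, load_count[] takes its role:
 * index 0 counts loads on the whole device, index 1 + port per
 * port. The very first function up sees load_count[0] == 1 and
 * runs the COMMON init; the first function on a port sees
 * load_count[1 + port] == 1 and runs the PORT init; anyone else
 * initializes only its own FUNCTION.
 */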
7395 if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
7396 (load_code == FW_MSG_CODE_DRV_LOAD_PORT))
7400 DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
7403 rc = bnx2x_init_hw(bp, load_code);
7405 BNX2X_ERR("HW init failed, aborting\n");
7409 /* Setup NIC internals and enable interrupts */
7410 bnx2x_nic_init(bp, load_code);
7412 if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) &&
7413 (bp->common.shmem2_base))
7414 SHMEM2_WR(bp, dcc_support,
7415 (SHMEM_DCC_SUPPORT_DISABLE_ENABLE_PF_TLV |
7416 SHMEM_DCC_SUPPORT_BANDWIDTH_ALLOCATION_TLV));
7418 /* Send LOAD_DONE command to MCP */
7419 if (!BP_NOMCP(bp)) {
7420 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE);
7422 BNX2X_ERR("MCP response failure, aborting\n");
7428 bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;
7430 rc = bnx2x_setup_leading(bp);
7432 BNX2X_ERR("Setup leading failed!\n");
7433 #ifndef BNX2X_STOP_ON_ERROR
7441 if (CHIP_IS_E1H(bp))
7442 if (bp->mf_config & FUNC_MF_CFG_FUNC_DISABLED) {
7443 DP(NETIF_MSG_IFUP, "mf_cfg function disabled\n");
7444 bp->state = BNX2X_STATE_DISABLED;
7447 if (bp->state == BNX2X_STATE_OPEN) {
7448 for_each_nondefault_queue(bp, i) {
7449 rc = bnx2x_setup_multi(bp, i);
7455 bnx2x_set_mac_addr_e1(bp, 1);
7457 bnx2x_set_mac_addr_e1h(bp, 1);
7461 bnx2x_initial_phy_init(bp, load_mode);
7463 /* Start fast path */
7464 switch (load_mode) {
7465 case LOAD_NORMAL:
7466 if (bp->state == BNX2X_STATE_OPEN) {
7467 /* Tx queues should only be re-enabled */
7468 netif_tx_wake_all_queues(bp->dev);
7469 }
7470 /* Initialize the receive filter. */
7471 bnx2x_set_rx_mode(bp->dev);
7472 break;
7474 case LOAD_OPEN:
7475 netif_tx_start_all_queues(bp->dev);
7476 if (bp->state != BNX2X_STATE_OPEN)
7477 netif_tx_disable(bp->dev);
7478 /* Initialize the receive filter. */
7479 bnx2x_set_rx_mode(bp->dev);
7480 break;
7482 case LOAD_DIAG:
7483 /* Initialize the receive filter. */
7484 bnx2x_set_rx_mode(bp->dev);
7485 bp->state = BNX2X_STATE_DIAG;
7486 break;
7493 bnx2x__link_status_update(bp);
7495 /* start the timer */
7496 mod_timer(&bp->timer, jiffies + bp->current_interval);
7502 bnx2x_int_disable_sync(bp, 1);
7503 if (!BP_NOMCP(bp)) {
7504 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP);
7505 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
7508 /* Free SKBs, SGEs, TPA pool and driver internals */
7509 bnx2x_free_skbs(bp);
7510 for_each_rx_queue(bp, i)
7511 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
7516 bnx2x_napi_disable(bp);
7517 for_each_rx_queue(bp, i)
7518 netif_napi_del(&bnx2x_fp(bp, i, napi));
7524 static int bnx2x_stop_multi(struct bnx2x *bp, int index)
7526 struct bnx2x_fastpath *fp = &bp->fp[index];
7529 /* halt the connection */
7530 fp->state = BNX2X_FP_STATE_HALTING;
7531 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, index, 0, fp->cl_id, 0);
7533 /* Wait for completion */
7534 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, index,
7536 if (rc) /* timeout */
7539 /* delete cfc entry */
7540 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CFC_DEL, index, 0, 0, 1);
7542 /* Wait for completion */
7543 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_CLOSED, index,
7548 static int bnx2x_stop_leading(struct bnx2x *bp)
7550 __le16 dsb_sp_prod_idx;
7551 /* if the other port is handling traffic,
7552 this can take a lot of time */
7558 /* Send HALT ramrod */
7559 bp->fp[0].state = BNX2X_FP_STATE_HALTING;
7560 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, 0, 0, bp->fp->cl_id, 0);
7562 /* Wait for completion */
7563 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, 0,
7564 &(bp->fp[0].state), 1);
7565 if (rc) /* timeout */
7568 dsb_sp_prod_idx = *bp->dsb_sp_prod;
7570 /* Send PORT_DELETE ramrod */
7571 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_DEL, 0, 0, 0, 1);
7573 /* Wait for completion to arrive on default status block.
7574 We are going to reset the chip anyway, so there is not
7575 much to do if this times out.
7576 */
7577 while (dsb_sp_prod_idx == *bp->dsb_sp_prod) {
7579 DP(NETIF_MSG_IFDOWN, "timeout waiting for port del "
7580 "dsb_sp_prod 0x%x != dsb_sp_prod_idx 0x%x\n",
7581 *bp->dsb_sp_prod, dsb_sp_prod_idx);
7582 #ifdef BNX2X_STOP_ON_ERROR
7590 rmb(); /* Refresh the dsb_sp_prod */
7592 bp->state = BNX2X_STATE_CLOSING_WAIT4_UNLOAD;
7593 bp->fp[0].state = BNX2X_FP_STATE_CLOSED;
7598 static void bnx2x_reset_func(struct bnx2x *bp)
7600 int port = BP_PORT(bp);
7601 int func = BP_FUNC(bp);
7605 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
7606 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
7609 base = FUNC_ILT_BASE(func);
7610 for (i = base; i < base + ILT_PER_FUNC; i++)
7611 bnx2x_ilt_wr(bp, i, 0);
7614 static void bnx2x_reset_port(struct bnx2x *bp)
7616 int port = BP_PORT(bp);
7619 REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
7621 /* Do not rcv packets to BRB */
7622 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK + port*4, 0x0);
7623 /* Do not direct rcv packets that are not for MCP to the BRB */
7624 REG_WR(bp, (port ? NIG_REG_LLH1_BRB1_NOT_MCP :
7625 NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
7628 REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, 0);
7631 /* Check for BRB port occupancy */
7632 val = REG_RD(bp, BRB1_REG_PORT_NUM_OCC_BLOCKS_0 + port*4);
7634 DP(NETIF_MSG_IFDOWN,
7635 "BRB1 is not empty, %d blocks are occupied\n", val);
7637 /* TODO: Close Doorbell port? */
7640 static void bnx2x_reset_chip(struct bnx2x *bp, u32 reset_code)
7642 DP(BNX2X_MSG_MCP, "function %d reset_code %x\n",
7643 BP_FUNC(bp), reset_code);
7645 switch (reset_code) {
7646 case FW_MSG_CODE_DRV_UNLOAD_COMMON:
7647 bnx2x_reset_port(bp);
7648 bnx2x_reset_func(bp);
7649 bnx2x_reset_common(bp);
7652 case FW_MSG_CODE_DRV_UNLOAD_PORT:
7653 bnx2x_reset_port(bp);
7654 bnx2x_reset_func(bp);
7657 case FW_MSG_CODE_DRV_UNLOAD_FUNCTION:
7658 bnx2x_reset_func(bp);
7662 BNX2X_ERR("Unknown reset_code (0x%x) from MCP\n", reset_code);
7667 /* must be called with rtnl_lock */
7668 static int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
7670 int port = BP_PORT(bp);
7674 bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
7676 /* Set "drop all" */
7677 bp->rx_mode = BNX2X_RX_MODE_NONE;
7678 bnx2x_set_storm_rx_mode(bp);
7680 /* Disable HW interrupts, NAPI and Tx */
7681 bnx2x_netif_stop(bp, 1);
7683 del_timer_sync(&bp->timer);
7684 SHMEM_WR(bp, func_mb[BP_FUNC(bp)].drv_pulse_mb,
7685 (DRV_PULSE_ALWAYS_ALIVE | bp->fw_drv_pulse_wr_seq));
7686 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
7691 /* Wait until tx fastpath tasks complete */
7692 for_each_tx_queue(bp, i) {
7693 struct bnx2x_fastpath *fp = &bp->fp[i];
7696 while (bnx2x_has_tx_work_unload(fp)) {
7700 BNX2X_ERR("timeout waiting for queue[%d]\n",
7702 #ifdef BNX2X_STOP_ON_ERROR
7713 /* Give HW time to discard old tx messages */
7716 if (CHIP_IS_E1(bp)) {
7717 struct mac_configuration_cmd *config =
7718 bnx2x_sp(bp, mcast_config);
7720 bnx2x_set_mac_addr_e1(bp, 0);
7722 for (i = 0; i < config->hdr.length; i++)
7723 CAM_INVALIDATE(config->config_table[i]);
7725 config->hdr.length = i;
7726 if (CHIP_REV_IS_SLOW(bp))
7727 config->hdr.offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
7729 config->hdr.offset = BNX2X_MAX_MULTICAST*(1 + port);
7730 config->hdr.client_id = bp->fp->cl_id;
7731 config->hdr.reserved1 = 0;
7733 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
7734 U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
7735 U64_LO(bnx2x_sp_mapping(bp, mcast_config)), 0);
7738 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
7740 bnx2x_set_mac_addr_e1h(bp, 0);
7742 for (i = 0; i < MC_HASH_SIZE; i++)
7743 REG_WR(bp, MC_HASH_OFFSET(bp, i), 0);
7745 REG_WR(bp, MISC_REG_E1HMF_MODE, 0);
7748 if (unload_mode == UNLOAD_NORMAL)
7749 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
7751 else if (bp->flags & NO_WOL_FLAG)
7752 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP;
7755 u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
7756 u8 *mac_addr = bp->dev->dev_addr;
7758 /* The mac address is written to entries 1-4 to
7759 preserve entry 0 which is used by the PMF */
7760 u8 entry = (BP_E1HVN(bp) + 1)*8;
7762 val = (mac_addr[0] << 8) | mac_addr[1];
7763 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry, val);
7765 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
7766 (mac_addr[4] << 8) | mac_addr[5];
7767 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry + 4, val);
7769 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN;
7772 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
7774 /* Close multi and leading connections
7775 Completions for ramrods are collected in a synchronous way */
7776 for_each_nondefault_queue(bp, i)
7777 if (bnx2x_stop_multi(bp, i))
7780 rc = bnx2x_stop_leading(bp);
7782 BNX2X_ERR("Stop leading failed!\n");
7783 #ifdef BNX2X_STOP_ON_ERROR
7792 reset_code = bnx2x_fw_command(bp, reset_code);
7794 DP(NETIF_MSG_IFDOWN, "NO MCP - load counts %d, %d, %d\n",
7795 load_count[0], load_count[1], load_count[2]);
7797 load_count[1 + port]--;
7798 DP(NETIF_MSG_IFDOWN, "NO MCP - new load counts %d, %d, %d\n",
7799 load_count[0], load_count[1], load_count[2]);
7800 if (load_count[0] == 0)
7801 reset_code = FW_MSG_CODE_DRV_UNLOAD_COMMON;
7802 else if (load_count[1 + port] == 0)
7803 reset_code = FW_MSG_CODE_DRV_UNLOAD_PORT;
7805 reset_code = FW_MSG_CODE_DRV_UNLOAD_FUNCTION;
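/* Mirror image of the load bookkeeping: the last function on a
 * port triggers the PORT unload, and the last one on the device
 * triggers the COMMON unload.
 */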
7808 if ((reset_code == FW_MSG_CODE_DRV_UNLOAD_COMMON) ||
7809 (reset_code == FW_MSG_CODE_DRV_UNLOAD_PORT))
7810 bnx2x__link_reset(bp);
7812 /* Reset the chip */
7813 bnx2x_reset_chip(bp, reset_code);
7815 /* Report UNLOAD_DONE to MCP */
7817 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
7821 /* Free SKBs, SGEs, TPA pool and driver internals */
7822 bnx2x_free_skbs(bp);
7823 for_each_rx_queue(bp, i)
7824 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
7825 for_each_rx_queue(bp, i)
7826 netif_napi_del(&bnx2x_fp(bp, i, napi));
7829 bp->state = BNX2X_STATE_CLOSED;
7831 netif_carrier_off(bp->dev);
7836 static void bnx2x_reset_task(struct work_struct *work)
7838 struct bnx2x *bp = container_of(work, struct bnx2x, reset_task);
7840 #ifdef BNX2X_STOP_ON_ERROR
7841 BNX2X_ERR("reset task called but STOP_ON_ERROR defined"
7842 " so reset not done to allow debug dump,\n"
7843 " you will need to reboot when done\n");
7849 if (!netif_running(bp->dev))
7850 goto reset_task_exit;
7852 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
7853 bnx2x_nic_load(bp, LOAD_NORMAL);
7859 /* end of nic load/unload */
7863 /*
7864 * Init service functions
7865 */
7867 static inline u32 bnx2x_get_pretend_reg(struct bnx2x *bp, int func)
7870 case 0: return PXP2_REG_PGL_PRETEND_FUNC_F0;
7871 case 1: return PXP2_REG_PGL_PRETEND_FUNC_F1;
7872 case 2: return PXP2_REG_PGL_PRETEND_FUNC_F2;
7873 case 3: return PXP2_REG_PGL_PRETEND_FUNC_F3;
7874 case 4: return PXP2_REG_PGL_PRETEND_FUNC_F4;
7875 case 5: return PXP2_REG_PGL_PRETEND_FUNC_F5;
7876 case 6: return PXP2_REG_PGL_PRETEND_FUNC_F6;
7877 case 7: return PXP2_REG_PGL_PRETEND_FUNC_F7;
7879 BNX2X_ERR("Unsupported function index: %d\n", func);
7884 static void bnx2x_undi_int_disable_e1h(struct bnx2x *bp, int orig_func)
7886 u32 reg = bnx2x_get_pretend_reg(bp, orig_func), new_val;
7888 /* Flush all outstanding writes */
7891 /* Pretend to be function 0 */
7892 REG_WR(bp, reg, 0);
7893 /* Flush the GRC transaction (in the chip) */
7894 new_val = REG_RD(bp, reg);
7895 if (new_val != 0)
7896 BNX2X_ERR("Hmmm... Pretend register wasn't updated: (0,%d)!\n",
7897 new_val);
7901 /* From now we are in the "like-E1" mode */
7902 bnx2x_int_disable(bp);
7904 /* Flush all outstanding writes */
7907 /* Restore the original function settings */
7908 REG_WR(bp, reg, orig_func);
7909 new_val = REG_RD(bp, reg);
7910 if (new_val != orig_func) {
7911 BNX2X_ERR("Hmmm... Pretend register wasn't updated: (%d,%d)!\n",
7912 orig_func, new_val);
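/* The "pretend" register makes subsequent GRC accesses by this
 * function appear to come from the function written into it: the
 * flow above pretends to be function 0, disables the interrupt
 * state in that context ("like-E1" mode), then restores the
 * original function id.
 */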
7917 static inline void bnx2x_undi_int_disable(struct bnx2x *bp, int func)
7919 if (CHIP_IS_E1H(bp))
7920 bnx2x_undi_int_disable_e1h(bp, func);
7922 bnx2x_int_disable(bp);
7925 static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
7929 /* Check if there is any driver already loaded */
7930 val = REG_RD(bp, MISC_REG_UNPREPARED);
7932 /* Check if it is the UNDI driver
7933 * UNDI driver initializes CID offset for normal bell to 0x7
7934 */
7935 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
7936 val = REG_RD(bp, DORQ_REG_NORM_CID_OFST);
7938 u32 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
7940 int func = BP_FUNC(bp);
7944 /* clear the UNDI indication */
7945 REG_WR(bp, DORQ_REG_NORM_CID_OFST, 0);
7947 BNX2X_DEV_INFO("UNDI is active! reset device\n");
7949 /* try unload UNDI on port 0 */
7951 bp->fw_seq =
7952 (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
7953 DRV_MSG_SEQ_NUMBER_MASK);
7954 reset_code = bnx2x_fw_command(bp, reset_code);
7956 /* if UNDI is loaded on the other port */
7957 if (reset_code != FW_MSG_CODE_DRV_UNLOAD_COMMON) {
7959 /* send "DONE" for previous unload */
7960 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
7962 /* unload UNDI on port 1 */
7964 bp->fw_seq =
7965 (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
7966 DRV_MSG_SEQ_NUMBER_MASK);
7967 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
7969 bnx2x_fw_command(bp, reset_code);
7972 /* now it's safe to release the lock */
7973 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
7975 bnx2x_undi_int_disable(bp, func);
7977 /* close input traffic and wait for it */
7978 /* Do not rcv packets to BRB */
7979 REG_WR(bp,
7980 (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_DRV_MASK :
7981 NIG_REG_LLH0_BRB1_DRV_MASK), 0x0);
7982 /* Do not direct rcv packets that are not for MCP to
7983 the BRB */
7984 REG_WR(bp,
7985 (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_NOT_MCP :
7986 NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
7988 REG_WR(bp,
7989 (BP_PORT(bp) ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
7990 MISC_REG_AEU_MASK_ATTN_FUNC_0), 0);
7993 /* save NIG port swap info */
7994 swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
7995 swap_en = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
7998 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
8001 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
8003 /* take the NIG out of reset and restore swap values */
8004 REG_WR(bp,
8005 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
8006 MISC_REGISTERS_RESET_REG_1_RST_NIG);
8007 REG_WR(bp, NIG_REG_PORT_SWAP, swap_val);
8008 REG_WR(bp, NIG_REG_STRAP_OVERRIDE, swap_en);
8010 /* send unload done to the MCP */
8011 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
8013 /* restore our func and fw_seq */
8014 bp->func = func;
8015 bp->fw_seq =
8016 (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
8017 DRV_MSG_SEQ_NUMBER_MASK);
8020 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
8024 static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
8026 u32 val, val2, val3, val4, id;
8029 /* Get the chip revision id and number. */
8030 /* chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3 */
8031 val = REG_RD(bp, MISC_REG_CHIP_NUM);
8032 id = ((val & 0xffff) << 16);
8033 val = REG_RD(bp, MISC_REG_CHIP_REV);
8034 id |= ((val & 0xf) << 12);
8035 val = REG_RD(bp, MISC_REG_CHIP_METAL);
8036 id |= ((val & 0xff) << 4);
8037 val = REG_RD(bp, MISC_REG_BOND_ID);
8039 bp->common.chip_id = id;
8040 bp->link_params.chip_id = bp->common.chip_id;
8041 BNX2X_DEV_INFO("chip ID is 0x%x\n", id);
8043 val = (REG_RD(bp, 0x2874) & 0x55);
8044 if ((bp->common.chip_id & 0x1) ||
8045 (CHIP_IS_E1(bp) && val) || (CHIP_IS_E1H(bp) && (val == 0x55))) {
8046 bp->flags |= ONE_PORT_FLAG;
8047 BNX2X_DEV_INFO("single port device\n");
8050 val = REG_RD(bp, MCP_REG_MCPR_NVM_CFG4);
8051 bp->common.flash_size = (NVRAM_1MB_SIZE <<
8052 (val & MCPR_NVM_CFG4_FLASH_SIZE));
8053 BNX2X_DEV_INFO("flash_size 0x%x (%d)\n",
8054 bp->common.flash_size, bp->common.flash_size);
8056 bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
8057 bp->common.shmem2_base = REG_RD(bp, MISC_REG_GENERIC_CR_0);
8058 bp->link_params.shmem_base = bp->common.shmem_base;
8059 BNX2X_DEV_INFO("shmem offset 0x%x shmem2 offset 0x%x\n",
8060 bp->common.shmem_base, bp->common.shmem2_base);
8062 if (!bp->common.shmem_base ||
8063 (bp->common.shmem_base < 0xA0000) ||
8064 (bp->common.shmem_base >= 0xC0000)) {
8065 BNX2X_DEV_INFO("MCP not active\n");
8066 bp->flags |= NO_MCP_FLAG;
8070 val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
8071 if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
8072 != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
8073 BNX2X_ERR("BAD MCP validity signature\n");
8075 bp->common.hw_config = SHMEM_RD(bp, dev_info.shared_hw_config.config);
8076 BNX2X_DEV_INFO("hw_config 0x%08x\n", bp->common.hw_config);
8078 bp->link_params.hw_led_mode = ((bp->common.hw_config &
8079 SHARED_HW_CFG_LED_MODE_MASK) >>
8080 SHARED_HW_CFG_LED_MODE_SHIFT);
8082 bp->link_params.feature_config_flags = 0;
8083 val = SHMEM_RD(bp, dev_info.shared_feature_config.config);
8084 if (val & SHARED_FEAT_CFG_OVERRIDE_PREEMPHASIS_CFG_ENABLED)
8085 bp->link_params.feature_config_flags |=
8086 FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
8088 bp->link_params.feature_config_flags &=
8089 ~FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
8091 val = SHMEM_RD(bp, dev_info.bc_rev) >> 8;
8092 bp->common.bc_ver = val;
8093 BNX2X_DEV_INFO("bc_ver %X\n", val);
8094 if (val < BNX2X_BC_VER) {
8095 /* for now only warn
8096 * later we might need to enforce this */
8097 BNX2X_ERR("This driver needs bc_ver %X but found %X,"
8098 " please upgrade BC\n", BNX2X_BC_VER, val);
8100 bp->link_params.feature_config_flags |=
8101 (val >= REQ_BC_VER_4_VRFY_OPT_MDL) ?
8102 FEATURE_CONFIG_BC_SUPPORTS_OPT_MDL_VRFY : 0;
8104 if (BP_E1HVN(bp) == 0) {
8105 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_PMC, &pmc);
8106 bp->flags |= (pmc & PCI_PM_CAP_PME_D3cold) ? 0 : NO_WOL_FLAG;
8108 /* no WOL capability for E1HVN != 0 */
8109 bp->flags |= NO_WOL_FLAG;
8111 BNX2X_DEV_INFO("%sWoL capable\n",
8112 (bp->flags & NO_WOL_FLAG) ? "not " : "");
8114 val = SHMEM_RD(bp, dev_info.shared_hw_config.part_num);
8115 val2 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[4]);
8116 val3 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[8]);
8117 val4 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[12]);
8119 printk(KERN_INFO PFX "part number %X-%X-%X-%X\n",
8120 val, val2, val3, val4);
8123 static void __devinit bnx2x_link_settings_supported(struct bnx2x *bp,
8126 int port = BP_PORT(bp);
8129 switch (switch_cfg) {
8131 BNX2X_DEV_INFO("switch_cfg 0x%x (1G)\n", switch_cfg);
8133 ext_phy_type =
8134 SERDES_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
8135 switch (ext_phy_type) {
8136 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT:
8137 BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
8140 bp->port.supported |= (SUPPORTED_10baseT_Half |
8141 SUPPORTED_10baseT_Full |
8142 SUPPORTED_100baseT_Half |
8143 SUPPORTED_100baseT_Full |
8144 SUPPORTED_1000baseT_Full |
8145 SUPPORTED_2500baseX_Full |
8150 SUPPORTED_Asym_Pause);
8153 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_BCM5482:
8154 BNX2X_DEV_INFO("ext_phy_type 0x%x (5482)\n",
8157 bp->port.supported |= (SUPPORTED_10baseT_Half |
8158 SUPPORTED_10baseT_Full |
8159 SUPPORTED_100baseT_Half |
8160 SUPPORTED_100baseT_Full |
8161 SUPPORTED_1000baseT_Full |
8166 SUPPORTED_Asym_Pause);
8170 BNX2X_ERR("NVRAM config error. "
8171 "BAD SerDes ext_phy_config 0x%x\n",
8172 bp->link_params.ext_phy_config);
8176 bp->port.phy_addr = REG_RD(bp, NIG_REG_SERDES0_CTRL_PHY_ADDR +
8178 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
8181 case SWITCH_CFG_10G:
8182 BNX2X_DEV_INFO("switch_cfg 0x%x (10G)\n", switch_cfg);
8184 ext_phy_type =
8185 XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
8186 switch (ext_phy_type) {
8187 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
8188 BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
8191 bp->port.supported |= (SUPPORTED_10baseT_Half |
8192 SUPPORTED_10baseT_Full |
8193 SUPPORTED_100baseT_Half |
8194 SUPPORTED_100baseT_Full |
8195 SUPPORTED_1000baseT_Full |
8196 SUPPORTED_2500baseX_Full |
8197 SUPPORTED_10000baseT_Full |
8202 SUPPORTED_Asym_Pause);
8205 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
8206 BNX2X_DEV_INFO("ext_phy_type 0x%x (8072)\n",
8209 bp->port.supported |= (SUPPORTED_10000baseT_Full |
8210 SUPPORTED_1000baseT_Full |
8214 SUPPORTED_Asym_Pause);
8217 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
8218 BNX2X_DEV_INFO("ext_phy_type 0x%x (8073)\n",
8221 bp->port.supported |= (SUPPORTED_10000baseT_Full |
8222 SUPPORTED_2500baseX_Full |
8223 SUPPORTED_1000baseT_Full |
8227 SUPPORTED_Asym_Pause);
8230 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
8231 BNX2X_DEV_INFO("ext_phy_type 0x%x (8705)\n",
8234 bp->port.supported |= (SUPPORTED_10000baseT_Full |
8237 SUPPORTED_Asym_Pause);
8240 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
8241 BNX2X_DEV_INFO("ext_phy_type 0x%x (8706)\n",
8244 bp->port.supported |= (SUPPORTED_10000baseT_Full |
8245 SUPPORTED_1000baseT_Full |
8248 SUPPORTED_Asym_Pause);
8251 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
8252 BNX2X_DEV_INFO("ext_phy_type 0x%x (8726)\n",
8255 bp->port.supported |= (SUPPORTED_10000baseT_Full |
8256 SUPPORTED_1000baseT_Full |
8260 SUPPORTED_Asym_Pause);
8263 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
8264 BNX2X_DEV_INFO("ext_phy_type 0x%x (8727)\n",
8267 bp->port.supported |= (SUPPORTED_10000baseT_Full |
8268 SUPPORTED_1000baseT_Full |
8272 SUPPORTED_Asym_Pause);
8275 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
8276 BNX2X_DEV_INFO("ext_phy_type 0x%x (SFX7101)\n",
8279 bp->port.supported |= (SUPPORTED_10000baseT_Full |
8283 SUPPORTED_Asym_Pause);
8286 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481:
8287 BNX2X_DEV_INFO("ext_phy_type 0x%x (BCM8481)\n",
8290 bp->port.supported |= (SUPPORTED_10baseT_Half |
8291 SUPPORTED_10baseT_Full |
8292 SUPPORTED_100baseT_Half |
8293 SUPPORTED_100baseT_Full |
8294 SUPPORTED_1000baseT_Full |
8295 SUPPORTED_10000baseT_Full |
8299 SUPPORTED_Asym_Pause);
8302 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
8303 BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
8304 bp->link_params.ext_phy_config);
8308 BNX2X_ERR("NVRAM config error. "
8309 "BAD XGXS ext_phy_config 0x%x\n",
8310 bp->link_params.ext_phy_config);
8314 bp->port.phy_addr = REG_RD(bp, NIG_REG_XGXS0_CTRL_PHY_ADDR +
8316 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
8321 BNX2X_ERR("BAD switch_cfg link_config 0x%x\n",
8322 bp->port.link_config);
8325 bp->link_params.phy_addr = bp->port.phy_addr;
8327 /* mask what we support according to speed_cap_mask */
8328 if (!(bp->link_params.speed_cap_mask &
8329 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF))
8330 bp->port.supported &= ~SUPPORTED_10baseT_Half;
8332 if (!(bp->link_params.speed_cap_mask &
8333 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL))
8334 bp->port.supported &= ~SUPPORTED_10baseT_Full;
8336 if (!(bp->link_params.speed_cap_mask &
8337 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF))
8338 bp->port.supported &= ~SUPPORTED_100baseT_Half;
8340 if (!(bp->link_params.speed_cap_mask &
8341 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL))
8342 bp->port.supported &= ~SUPPORTED_100baseT_Full;
8344 if (!(bp->link_params.speed_cap_mask &
8345 PORT_HW_CFG_SPEED_CAPABILITY_D0_1G))
8346 bp->port.supported &= ~(SUPPORTED_1000baseT_Half |
8347 SUPPORTED_1000baseT_Full);
8349 if (!(bp->link_params.speed_cap_mask &
8350 PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G))
8351 bp->port.supported &= ~SUPPORTED_2500baseX_Full;
8353 if (!(bp->link_params.speed_cap_mask &
8354 PORT_HW_CFG_SPEED_CAPABILITY_D0_10G))
8355 bp->port.supported &= ~SUPPORTED_10000baseT_Full;
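/* After this block bp->port.supported is the intersection of what
 * the PHY can do and what the NVRAM speed_capability_mask allows.
 */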
8357 BNX2X_DEV_INFO("supported 0x%x\n", bp->port.supported);
8360 static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
8362 bp->link_params.req_duplex = DUPLEX_FULL;
8364 switch (bp->port.link_config & PORT_FEATURE_LINK_SPEED_MASK) {
8365 case PORT_FEATURE_LINK_SPEED_AUTO:
8366 if (bp->port.supported & SUPPORTED_Autoneg) {
8367 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
8368 bp->port.advertising = bp->port.supported;
8369 } else {
8370 u32 ext_phy_type =
8371 XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
8373 if ((ext_phy_type ==
8374 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705) ||
8376 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706)) {
8377 /* force 10G, no AN */
8378 bp->link_params.req_line_speed = SPEED_10000;
8379 bp->port.advertising =
8380 (ADVERTISED_10000baseT_Full |
8384 BNX2X_ERR("NVRAM config error. "
8385 "Invalid link_config 0x%x"
8386 " Autoneg not supported\n",
8387 bp->port.link_config);
8392 case PORT_FEATURE_LINK_SPEED_10M_FULL:
8393 if (bp->port.supported & SUPPORTED_10baseT_Full) {
8394 bp->link_params.req_line_speed = SPEED_10;
8395 bp->port.advertising = (ADVERTISED_10baseT_Full |
8398 BNX2X_ERR("NVRAM config error. "
8399 "Invalid link_config 0x%x"
8400 " speed_cap_mask 0x%x\n",
8401 bp->port.link_config,
8402 bp->link_params.speed_cap_mask);
8407 case PORT_FEATURE_LINK_SPEED_10M_HALF:
8408 if (bp->port.supported & SUPPORTED_10baseT_Half) {
8409 bp->link_params.req_line_speed = SPEED_10;
8410 bp->link_params.req_duplex = DUPLEX_HALF;
8411 bp->port.advertising = (ADVERTISED_10baseT_Half |
8414 BNX2X_ERR("NVRAM config error. "
8415 "Invalid link_config 0x%x"
8416 " speed_cap_mask 0x%x\n",
8417 bp->port.link_config,
8418 bp->link_params.speed_cap_mask);
8423 case PORT_FEATURE_LINK_SPEED_100M_FULL:
8424 if (bp->port.supported & SUPPORTED_100baseT_Full) {
8425 bp->link_params.req_line_speed = SPEED_100;
8426 bp->port.advertising = (ADVERTISED_100baseT_Full |
8429 BNX2X_ERR("NVRAM config error. "
8430 "Invalid link_config 0x%x"
8431 " speed_cap_mask 0x%x\n",
8432 bp->port.link_config,
8433 bp->link_params.speed_cap_mask);
8438 case PORT_FEATURE_LINK_SPEED_100M_HALF:
8439 if (bp->port.supported & SUPPORTED_100baseT_Half) {
8440 bp->link_params.req_line_speed = SPEED_100;
8441 bp->link_params.req_duplex = DUPLEX_HALF;
8442 bp->port.advertising = (ADVERTISED_100baseT_Half |
8445 BNX2X_ERR("NVRAM config error. "
8446 "Invalid link_config 0x%x"
8447 " speed_cap_mask 0x%x\n",
8448 bp->port.link_config,
8449 bp->link_params.speed_cap_mask);
8454 case PORT_FEATURE_LINK_SPEED_1G:
8455 if (bp->port.supported & SUPPORTED_1000baseT_Full) {
8456 bp->link_params.req_line_speed = SPEED_1000;
8457 bp->port.advertising = (ADVERTISED_1000baseT_Full |
8460 BNX2X_ERR("NVRAM config error. "
8461 "Invalid link_config 0x%x"
8462 " speed_cap_mask 0x%x\n",
8463 bp->port.link_config,
8464 bp->link_params.speed_cap_mask);
8469 case PORT_FEATURE_LINK_SPEED_2_5G:
8470 if (bp->port.supported & SUPPORTED_2500baseX_Full) {
8471 bp->link_params.req_line_speed = SPEED_2500;
8472 bp->port.advertising = (ADVERTISED_2500baseX_Full |
8475 BNX2X_ERR("NVRAM config error. "
8476 "Invalid link_config 0x%x"
8477 " speed_cap_mask 0x%x\n",
8478 bp->port.link_config,
8479 bp->link_params.speed_cap_mask);
8484 case PORT_FEATURE_LINK_SPEED_10G_CX4:
8485 case PORT_FEATURE_LINK_SPEED_10G_KX4:
8486 case PORT_FEATURE_LINK_SPEED_10G_KR:
8487 if (bp->port.supported & SUPPORTED_10000baseT_Full) {
8488 bp->link_params.req_line_speed = SPEED_10000;
8489 bp->port.advertising = (ADVERTISED_10000baseT_Full |
8492 BNX2X_ERR("NVRAM config error. "
8493 "Invalid link_config 0x%x"
8494 " speed_cap_mask 0x%x\n",
8495 bp->port.link_config,
8496 bp->link_params.speed_cap_mask);
8502 BNX2X_ERR("NVRAM config error. "
8503 "BAD link speed link_config 0x%x\n",
8504 bp->port.link_config);
8505 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
8506 bp->port.advertising = bp->port.supported;
8510 bp->link_params.req_flow_ctrl = (bp->port.link_config &
8511 PORT_FEATURE_FLOW_CONTROL_MASK);
8512 if ((bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO) &&
8513 !(bp->port.supported & SUPPORTED_Autoneg))
8514 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_NONE;
8516 BNX2X_DEV_INFO("req_line_speed %d req_duplex %d req_flow_ctrl 0x%x"
8517 " advertising 0x%x\n",
8518 bp->link_params.req_line_speed,
8519 bp->link_params.req_duplex,
8520 bp->link_params.req_flow_ctrl, bp->port.advertising);
8523 static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp)
8525 int port = BP_PORT(bp);
8531 bp->link_params.bp = bp;
8532 bp->link_params.port = port;
8534 bp->link_params.lane_config =
8535 SHMEM_RD(bp, dev_info.port_hw_config[port].lane_config);
8536 bp->link_params.ext_phy_config =
8537 SHMEM_RD(bp,
8538 dev_info.port_hw_config[port].external_phy_config);
8539 /* BCM8727_NOC => BCM8727 no over current */
8540 if (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config) ==
8541 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727_NOC) {
8542 bp->link_params.ext_phy_config &=
8543 ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
8544 bp->link_params.ext_phy_config |=
8545 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727;
8546 bp->link_params.feature_config_flags |=
8547 FEATURE_CONFIG_BCM8727_NOC;
8550 bp->link_params.speed_cap_mask =
8551 SHMEM_RD(bp,
8552 dev_info.port_hw_config[port].speed_capability_mask);
8554 bp->port.link_config =
8555 SHMEM_RD(bp, dev_info.port_feature_config[port].link_config);
8557 /* Get the 4 lanes xgxs config rx and tx */
8558 for (i = 0; i < 2; i++) {
8559 val = SHMEM_RD(bp,
8560 dev_info.port_hw_config[port].xgxs_config_rx[i<<1]);
8561 bp->link_params.xgxs_config_rx[i << 1] = ((val>>16) & 0xffff);
8562 bp->link_params.xgxs_config_rx[(i << 1) + 1] = (val & 0xffff);
8564 val = SHMEM_RD(bp,
8565 dev_info.port_hw_config[port].xgxs_config_tx[i<<1]);
8566 bp->link_params.xgxs_config_tx[i << 1] = ((val>>16) & 0xffff);
8567 bp->link_params.xgxs_config_tx[(i << 1) + 1] = (val & 0xffff);
8570 /* If the device is capable of WoL, set the default state according
8571 * to the HW configuration
8572 */
8573 config = SHMEM_RD(bp, dev_info.port_feature_config[port].config);
8574 bp->wol = (!(bp->flags & NO_WOL_FLAG) &&
8575 (config & PORT_FEATURE_WOL_ENABLED));
8577 BNX2X_DEV_INFO("lane_config 0x%08x ext_phy_config 0x%08x"
8578 " speed_cap_mask 0x%08x link_config 0x%08x\n",
8579 bp->link_params.lane_config,
8580 bp->link_params.ext_phy_config,
8581 bp->link_params.speed_cap_mask, bp->port.link_config);
8583 bp->link_params.switch_cfg |= (bp->port.link_config &
8584 PORT_FEATURE_CONNECTED_SWITCH_MASK);
8585 bnx2x_link_settings_supported(bp, bp->link_params.switch_cfg);
8587 bnx2x_link_settings_requested(bp);
8589 /*
8590 * If connected directly, work with the internal PHY, otherwise, work
8591 * with the external PHY
8592 */
8593 ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
8594 if (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT)
8595 bp->mdio.prtad = bp->link_params.phy_addr;
8597 else if ((ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE) &&
8598 (ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN))
8600 XGXS_EXT_PHY_ADDR(bp->link_params.ext_phy_config);
8602 val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper);
8603 val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower);
8604 bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
8605 bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
8606 bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
8607 bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
8608 bp->dev->dev_addr[4] = (u8)(val >> 8 & 0xff);
8609 bp->dev->dev_addr[5] = (u8)(val & 0xff);
8610 memcpy(bp->link_params.mac_addr, bp->dev->dev_addr, ETH_ALEN);
8611 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
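/*
 * Illustrative sketch, not used by the driver: how the two shmem words
 * unpack into a 6-byte MAC address.  mac_upper carries bytes 0-1 in its
 * low 16 bits and mac_lower carries bytes 2-5, exactly as the assignments
 * above.  E.g. mac_upper = 0x00000a1b, mac_lower = 0x2c3d4e5f yields
 * 0a:1b:2c:3d:4e:5f.  The helper name is ours, for illustration only.
 */
static void __maybe_unused bnx2x_example_unpack_mac(u32 mac_upper,
						    u32 mac_lower, u8 *addr)
{
	addr[0] = (u8)(mac_upper >> 8);		/* 0x0a */
	addr[1] = (u8)mac_upper;		/* 0x1b */
	addr[2] = (u8)(mac_lower >> 24);	/* 0x2c */
	addr[3] = (u8)(mac_lower >> 16);	/* 0x3d */
	addr[4] = (u8)(mac_lower >> 8);		/* 0x4e */
	addr[5] = (u8)mac_lower;		/* 0x5f */
}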
static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);
	u32 val, val2;
	int rc = 0;

	bnx2x_get_common_hwinfo(bp);

	bp->e1hov = 0;
	bp->e1hmf = 0;
	if (CHIP_IS_E1H(bp)) {
		bp->mf_config =
			SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);

		val = (SHMEM_RD(bp, mf_cfg.func_mf_config[FUNC_0].e1hov_tag) &
		       FUNC_MF_CFG_E1HOV_TAG_MASK);
		if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT)
			bp->e1hmf = 1;
		BNX2X_DEV_INFO("%s function mode\n",
			       IS_E1HMF(bp) ? "multi" : "single");

		if (IS_E1HMF(bp)) {
			val = (SHMEM_RD(bp, mf_cfg.func_mf_config[func].
								e1hov_tag) &
			       FUNC_MF_CFG_E1HOV_TAG_MASK);
			if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
				bp->e1hov = val;
				BNX2X_DEV_INFO("E1HOV for func %d is %d "
					       "(0x%04x)\n",
					       func, bp->e1hov, bp->e1hov);
			} else {
				BNX2X_ERR("!!!  No valid E1HOV for func %d,"
					  "  aborting\n", func);
				rc = -EPERM;
			}
		} else {
			if (BP_E1HVN(bp)) {
				BNX2X_ERR("!!!  VN %d in single function mode,"
					  "  aborting\n", BP_E1HVN(bp));
				rc = -EPERM;
			}
		}
	}

	if (!BP_NOMCP(bp)) {
		bnx2x_get_port_hwinfo(bp);

		bp->fw_seq = (SHMEM_RD(bp, func_mb[func].drv_mb_header) &
			      DRV_MSG_SEQ_NUMBER_MASK);
		BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
	}

	if (IS_E1HMF(bp)) {
		val2 = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_upper);
		val = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_lower);
		if ((val2 != FUNC_MF_CFG_UPPERMAC_DEFAULT) &&
		    (val != FUNC_MF_CFG_LOWERMAC_DEFAULT)) {
			bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
			bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
			bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
			bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
			bp->dev->dev_addr[4] = (u8)(val >> 8 & 0xff);
			bp->dev->dev_addr[5] = (u8)(val & 0xff);
			memcpy(bp->link_params.mac_addr, bp->dev->dev_addr,
			       ETH_ALEN);
			memcpy(bp->dev->perm_addr, bp->dev->dev_addr,
			       ETH_ALEN);
		}

		return rc;
	}

	if (BP_NOMCP(bp)) {
		/* only supposed to happen on emulation/FPGA */
		BNX2X_ERR("warning: random MAC workaround active\n");
		random_ether_addr(bp->dev->dev_addr);
		memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
	}

	return rc;
}
static int __devinit bnx2x_init_bp(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);
	int timer_interval;
	int rc;

	/* Disable interrupt handling until HW is initialized */
	atomic_set(&bp->intr_sem, 1);
	smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */

	mutex_init(&bp->port.phy_mutex);

	INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task);
	INIT_WORK(&bp->reset_task, bnx2x_reset_task);

	rc = bnx2x_get_hwinfo(bp);

	/* need to reset chip if undi was active */
	if (!BP_NOMCP(bp))
		bnx2x_undi_unload(bp);

	if (CHIP_REV_IS_FPGA(bp))
		printk(KERN_ERR PFX "FPGA detected\n");

	if (BP_NOMCP(bp) && (func == 0))
		printk(KERN_ERR PFX
		       "MCP disabled, must load devices in order!\n");

	/* Set multi queue mode */
	if ((multi_mode != ETH_RSS_MODE_DISABLED) &&
	    ((int_mode == INT_MODE_INTx) || (int_mode == INT_MODE_MSI))) {
		printk(KERN_ERR PFX
		       "Multi disabled since int_mode requested is not MSI-X\n");
		multi_mode = ETH_RSS_MODE_DISABLED;
	}
	bp->multi_mode = multi_mode;

	/* Set TPA flags */
	if (disable_tpa) {
		bp->flags &= ~TPA_ENABLE_FLAG;
		bp->dev->features &= ~NETIF_F_LRO;
	} else {
		bp->flags |= TPA_ENABLE_FLAG;
		bp->dev->features |= NETIF_F_LRO;
	}

	if (CHIP_IS_E1(bp))
		bp->dropless_fc = 0;
	else
		bp->dropless_fc = dropless_fc;

	bp->mrrs = mrrs;

	bp->tx_ring_size = MAX_TX_AVAIL;
	bp->rx_ring_size = MAX_RX_AVAIL;

	bp->rx_csum = 1;

	bp->tx_ticks = 50;
	bp->rx_ticks = 25;

	timer_interval = (CHIP_REV_IS_SLOW(bp) ? 5*HZ : HZ);
	bp->current_interval = (poll ? poll : timer_interval);

	init_timer(&bp->timer);
	bp->timer.expires = jiffies + bp->current_interval;
	bp->timer.data = (unsigned long) bp;
	bp->timer.function = bnx2x_timer;

	return rc;
}

/*
 * ethtool service functions
 */

/* All ethtool functions called with rtnl_lock */
static int bnx2x_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct bnx2x *bp = netdev_priv(dev);

	cmd->supported = bp->port.supported;
	cmd->advertising = bp->port.advertising;

	if (netif_carrier_ok(dev)) {
		cmd->speed = bp->link_vars.line_speed;
		cmd->duplex = bp->link_vars.duplex;
	} else {
		cmd->speed = bp->link_params.req_line_speed;
		cmd->duplex = bp->link_params.req_duplex;
	}
	if (IS_E1HMF(bp)) {
		u16 vn_max_rate;

		vn_max_rate = ((bp->mf_config & FUNC_MF_CFG_MAX_BW_MASK) >>
			       FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
		if (vn_max_rate < cmd->speed)
			cmd->speed = vn_max_rate;
	}

	if (bp->link_params.switch_cfg == SWITCH_CFG_10G) {
		u32 ext_phy_type =
			XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);

		switch (ext_phy_type) {
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
			cmd->port = PORT_FIBRE;
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481:
			cmd->port = PORT_TP;
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
			BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
				  bp->link_params.ext_phy_config);
			break;

		default:
			DP(NETIF_MSG_LINK, "BAD XGXS ext_phy_config 0x%x\n",
			   bp->link_params.ext_phy_config);
			break;
		}
	} else
		cmd->port = PORT_TP;

	cmd->phy_address = bp->mdio.prtad;
	cmd->transceiver = XCVR_INTERNAL;

	if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
		cmd->autoneg = AUTONEG_ENABLE;
	else
		cmd->autoneg = AUTONEG_DISABLE;

	cmd->maxtxpkt = 0;
	cmd->maxrxpkt = 0;

	DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
	   DP_LEVEL "  supported 0x%x  advertising 0x%x  speed %d\n"
	   DP_LEVEL "  duplex %d  port %d  phy_address %d  transceiver %d\n"
	   DP_LEVEL "  autoneg %d  maxtxpkt %d  maxrxpkt %d\n",
	   cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
	   cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
	   cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);

	return 0;
}
static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct bnx2x *bp = netdev_priv(dev);
	u32 advertising;

	if (IS_E1HMF(bp))
		return 0;

	DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
	   DP_LEVEL "  supported 0x%x  advertising 0x%x  speed %d\n"
	   DP_LEVEL "  duplex %d  port %d  phy_address %d  transceiver %d\n"
	   DP_LEVEL "  autoneg %d  maxtxpkt %d  maxrxpkt %d\n",
	   cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
	   cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
	   cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);

	if (cmd->autoneg == AUTONEG_ENABLE) {
		if (!(bp->port.supported & SUPPORTED_Autoneg)) {
			DP(NETIF_MSG_LINK, "Autoneg not supported\n");
			return -EINVAL;
		}

		/* advertise the requested speed and duplex if supported */
		cmd->advertising &= bp->port.supported;

		bp->link_params.req_line_speed = SPEED_AUTO_NEG;
		bp->link_params.req_duplex = DUPLEX_FULL;
		bp->port.advertising |= (ADVERTISED_Autoneg |
					 cmd->advertising);

	} else { /* forced speed */
		/* advertise the requested speed and duplex if supported */
		switch (cmd->speed) {
		case SPEED_10:
			if (cmd->duplex == DUPLEX_FULL) {
				if (!(bp->port.supported &
				      SUPPORTED_10baseT_Full)) {
					DP(NETIF_MSG_LINK,
					   "10M full not supported\n");
					return -EINVAL;
				}

				advertising = (ADVERTISED_10baseT_Full |
					       ADVERTISED_TP);
			} else {
				if (!(bp->port.supported &
				      SUPPORTED_10baseT_Half)) {
					DP(NETIF_MSG_LINK,
					   "10M half not supported\n");
					return -EINVAL;
				}

				advertising = (ADVERTISED_10baseT_Half |
					       ADVERTISED_TP);
			}
			break;

		case SPEED_100:
			if (cmd->duplex == DUPLEX_FULL) {
				if (!(bp->port.supported &
				      SUPPORTED_100baseT_Full)) {
					DP(NETIF_MSG_LINK,
					   "100M full not supported\n");
					return -EINVAL;
				}

				advertising = (ADVERTISED_100baseT_Full |
					       ADVERTISED_TP);
			} else {
				if (!(bp->port.supported &
				      SUPPORTED_100baseT_Half)) {
					DP(NETIF_MSG_LINK,
					   "100M half not supported\n");
					return -EINVAL;
				}

				advertising = (ADVERTISED_100baseT_Half |
					       ADVERTISED_TP);
			}
			break;

		case SPEED_1000:
			if (cmd->duplex != DUPLEX_FULL) {
				DP(NETIF_MSG_LINK, "1G half not supported\n");
				return -EINVAL;
			}

			if (!(bp->port.supported & SUPPORTED_1000baseT_Full)) {
				DP(NETIF_MSG_LINK, "1G full not supported\n");
				return -EINVAL;
			}

			advertising = (ADVERTISED_1000baseT_Full |
				       ADVERTISED_TP);
			break;

		case SPEED_2500:
			if (cmd->duplex != DUPLEX_FULL) {
				DP(NETIF_MSG_LINK,
				   "2.5G half not supported\n");
				return -EINVAL;
			}

			if (!(bp->port.supported & SUPPORTED_2500baseX_Full)) {
				DP(NETIF_MSG_LINK,
				   "2.5G full not supported\n");
				return -EINVAL;
			}

			advertising = (ADVERTISED_2500baseX_Full |
				       ADVERTISED_TP);
			break;

		case SPEED_10000:
			if (cmd->duplex != DUPLEX_FULL) {
				DP(NETIF_MSG_LINK, "10G half not supported\n");
				return -EINVAL;
			}

			if (!(bp->port.supported & SUPPORTED_10000baseT_Full)) {
				DP(NETIF_MSG_LINK, "10G full not supported\n");
				return -EINVAL;
			}

			advertising = (ADVERTISED_10000baseT_Full |
				       ADVERTISED_FIBRE);
			break;

		default:
			DP(NETIF_MSG_LINK, "Unsupported speed\n");
			return -EINVAL;
		}

		bp->link_params.req_line_speed = cmd->speed;
		bp->link_params.req_duplex = cmd->duplex;
		bp->port.advertising = advertising;
	}

	DP(NETIF_MSG_LINK, "req_line_speed %d\n"
	   DP_LEVEL "  req_duplex %d  advertising 0x%x\n",
	   bp->link_params.req_line_speed, bp->link_params.req_duplex,
	   bp->port.advertising);

	if (netif_running(dev)) {
		bnx2x_stats_handle(bp, STATS_EVENT_STOP);
		bnx2x_link_set(bp);
	}

	return 0;
}
#define IS_E1_ONLINE(info)	(((info) & RI_E1_ONLINE) == RI_E1_ONLINE)
#define IS_E1H_ONLINE(info)	(((info) & RI_E1H_ONLINE) == RI_E1H_ONLINE)

static int bnx2x_get_regs_len(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);
	int regdump_len = 0;
	int i;

	if (CHIP_IS_E1(bp)) {
		for (i = 0; i < REGS_COUNT; i++)
			if (IS_E1_ONLINE(reg_addrs[i].info))
				regdump_len += reg_addrs[i].size;

		for (i = 0; i < WREGS_COUNT_E1; i++)
			if (IS_E1_ONLINE(wreg_addrs_e1[i].info))
				regdump_len += wreg_addrs_e1[i].size *
					(1 + wreg_addrs_e1[i].read_regs_count);

	} else { /* E1H */
		for (i = 0; i < REGS_COUNT; i++)
			if (IS_E1H_ONLINE(reg_addrs[i].info))
				regdump_len += reg_addrs[i].size;

		for (i = 0; i < WREGS_COUNT_E1H; i++)
			if (IS_E1H_ONLINE(wreg_addrs_e1h[i].info))
				regdump_len += wreg_addrs_e1h[i].size *
					(1 + wreg_addrs_e1h[i].read_regs_count);
	}
	regdump_len *= 4;
	regdump_len += sizeof(struct dump_hdr);

	return regdump_len;
}
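/*
 * Worked example (illustration only): a wide-register entry with size 2
 * and read_regs_count 3 contributes 2 * (1 + 3) = 8 dwords to the total,
 * since each of its two elements is dumped together with the 3 paging
 * registers that must be read alongside it.  The dword count is then
 * scaled to bytes and the dump header size is added on top.
 */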
static void bnx2x_get_regs(struct net_device *dev,
			   struct ethtool_regs *regs, void *_p)
{
	u32 *p = _p, i, j;
	struct bnx2x *bp = netdev_priv(dev);
	struct dump_hdr dump_hdr = {0};

	regs->version = 0;
	memset(p, 0, regs->len);

	if (!netif_running(bp->dev))
		return;

	dump_hdr.hdr_size = (sizeof(struct dump_hdr) / 4) - 1;
	dump_hdr.dump_sign = dump_sign_all;
	dump_hdr.xstorm_waitp = REG_RD(bp, XSTORM_WAITP_ADDR);
	dump_hdr.tstorm_waitp = REG_RD(bp, TSTORM_WAITP_ADDR);
	dump_hdr.ustorm_waitp = REG_RD(bp, USTORM_WAITP_ADDR);
	dump_hdr.cstorm_waitp = REG_RD(bp, CSTORM_WAITP_ADDR);
	dump_hdr.info = CHIP_IS_E1(bp) ? RI_E1_ONLINE : RI_E1H_ONLINE;

	memcpy(p, &dump_hdr, sizeof(struct dump_hdr));
	p += dump_hdr.hdr_size + 1;

	if (CHIP_IS_E1(bp)) {
		for (i = 0; i < REGS_COUNT; i++)
			if (IS_E1_ONLINE(reg_addrs[i].info))
				for (j = 0; j < reg_addrs[i].size; j++)
					*p++ = REG_RD(bp,
						      reg_addrs[i].addr + j*4);

	} else { /* E1H */
		for (i = 0; i < REGS_COUNT; i++)
			if (IS_E1H_ONLINE(reg_addrs[i].info))
				for (j = 0; j < reg_addrs[i].size; j++)
					*p++ = REG_RD(bp,
						      reg_addrs[i].addr + j*4);
	}
}
#define PHY_FW_VER_LEN			10

static void bnx2x_get_drvinfo(struct net_device *dev,
			      struct ethtool_drvinfo *info)
{
	struct bnx2x *bp = netdev_priv(dev);
	u8 phy_fw_ver[PHY_FW_VER_LEN];

	strcpy(info->driver, DRV_MODULE_NAME);
	strcpy(info->version, DRV_MODULE_VERSION);

	phy_fw_ver[0] = '\0';
	if (bp->port.pmf) {
		bnx2x_acquire_phy_lock(bp);
		bnx2x_get_ext_phy_fw_version(&bp->link_params,
					     (bp->state != BNX2X_STATE_CLOSED),
					     phy_fw_ver, PHY_FW_VER_LEN);
		bnx2x_release_phy_lock(bp);
	}

	snprintf(info->fw_version, 32, "BC:%d.%d.%d%s%s",
		 (bp->common.bc_ver & 0xff0000) >> 16,
		 (bp->common.bc_ver & 0xff00) >> 8,
		 (bp->common.bc_ver & 0xff),
		 ((phy_fw_ver[0] != '\0') ? " PHY:" : ""), phy_fw_ver);
	strcpy(info->bus_info, pci_name(bp->pdev));
	info->n_stats = BNX2X_NUM_STATS;
	info->testinfo_len = BNX2X_NUM_TESTS;
	info->eedump_len = bp->common.flash_size;
	info->regdump_len = bnx2x_get_regs_len(dev);
}
static void bnx2x_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct bnx2x *bp = netdev_priv(dev);

	if (bp->flags & NO_WOL_FLAG) {
		wol->supported = 0;
		wol->wolopts = 0;
	} else {
		wol->supported = WAKE_MAGIC;
		if (bp->wol)
			wol->wolopts = WAKE_MAGIC;
		else
			wol->wolopts = 0;
	}
	memset(&wol->sopass, 0, sizeof(wol->sopass));
}

static int bnx2x_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct bnx2x *bp = netdev_priv(dev);

	if (wol->wolopts & ~WAKE_MAGIC)
		return -EINVAL;

	if (wol->wolopts & WAKE_MAGIC) {
		if (bp->flags & NO_WOL_FLAG)
			return -EINVAL;

		bp->wol = 1;
	} else
		bp->wol = 0;

	return 0;
}
static u32 bnx2x_get_msglevel(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	return bp->msglevel;
}

static void bnx2x_set_msglevel(struct net_device *dev, u32 level)
{
	struct bnx2x *bp = netdev_priv(dev);

	if (capable(CAP_NET_ADMIN))
		bp->msglevel = level;
}

static int bnx2x_nway_reset(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	if (!bp->port.pmf)
		return 0;

	if (netif_running(dev)) {
		bnx2x_stats_handle(bp, STATS_EVENT_STOP);
		bnx2x_link_set(bp);
	}

	return 0;
}

static u32 bnx2x_get_link(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	return bp->link_vars.link_up;
}

static int bnx2x_get_eeprom_len(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	return bp->common.flash_size;
}
static int bnx2x_acquire_nvram_lock(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int count, i;
	u32 val = 0;

	/* adjust timeout for emulation/FPGA */
	count = NVRAM_TIMEOUT_COUNT;
	if (CHIP_REV_IS_SLOW(bp))
		count *= 100;

	/* request access to nvram interface */
	REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
	       (MCPR_NVM_SW_ARB_ARB_REQ_SET1 << port));

	for (i = 0; i < count*10; i++) {
		val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
		if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))
			break;

		udelay(5);
	}

	if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))) {
		DP(BNX2X_MSG_NVM, "cannot get access to nvram interface\n");
		return -EBUSY;
	}

	return 0;
}

static int bnx2x_release_nvram_lock(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int count, i;
	u32 val = 0;

	/* adjust timeout for emulation/FPGA */
	count = NVRAM_TIMEOUT_COUNT;
	if (CHIP_REV_IS_SLOW(bp))
		count *= 100;

	/* relinquish nvram interface */
	REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
	       (MCPR_NVM_SW_ARB_ARB_REQ_CLR1 << port));

	for (i = 0; i < count*10; i++) {
		val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
		if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)))
			break;

		udelay(5);
	}

	if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)) {
		DP(BNX2X_MSG_NVM, "cannot free access to nvram interface\n");
		return -EBUSY;
	}

	return 0;
}

static void bnx2x_enable_nvram_access(struct bnx2x *bp)
{
	u32 val;

	val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);

	/* enable both bits, even on read */
	REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
	       (val | MCPR_NVM_ACCESS_ENABLE_EN |
		      MCPR_NVM_ACCESS_ENABLE_WR_EN));
}

static void bnx2x_disable_nvram_access(struct bnx2x *bp)
{
	u32 val;

	val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);

	/* disable both bits, even after read */
	REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
	       (val & ~(MCPR_NVM_ACCESS_ENABLE_EN |
			MCPR_NVM_ACCESS_ENABLE_WR_EN)));
}
static int bnx2x_nvram_read_dword(struct bnx2x *bp, u32 offset, __be32 *ret_val,
				  u32 cmd_flags)
{
	int count, i, rc;
	u32 val;

	/* build the command word */
	cmd_flags |= MCPR_NVM_COMMAND_DOIT;

	/* need to clear DONE bit separately */
	REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);

	/* address of the NVRAM to read from */
	REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
	       (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));

	/* issue a read command */
	REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);

	/* adjust timeout for emulation/FPGA */
	count = NVRAM_TIMEOUT_COUNT;
	if (CHIP_REV_IS_SLOW(bp))
		count *= 100;

	/* wait for completion */
	*ret_val = 0;
	rc = -EBUSY;
	for (i = 0; i < count; i++) {
		udelay(5);
		val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);

		if (val & MCPR_NVM_COMMAND_DONE) {
			val = REG_RD(bp, MCP_REG_MCPR_NVM_READ);
			/* we read nvram data in cpu order
			 * but ethtool sees it as an array of bytes
			 * converting to big-endian will do the work */
			*ret_val = cpu_to_be32(val);
			rc = 0;
			break;
		}
	}

	return rc;
}
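/*
 * Worked example (illustration only) of the cpu_to_be32() above: a dword
 * read back as 0x12345678 on a little-endian CPU is stored to the caller's
 * buffer as the byte sequence 12 34 56 78, i.e. in the same order the
 * bytes sit in the flash, so ethtool -e shows the flash image
 * byte-for-byte regardless of host endianness.
 */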
static int bnx2x_nvram_read(struct bnx2x *bp, u32 offset, u8 *ret_buf,
			    int buf_size)
{
	int rc;
	u32 cmd_flags;
	__be32 val;

	if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
		DP(BNX2X_MSG_NVM,
		   "Invalid parameter: offset 0x%x  buf_size 0x%x\n",
		   offset, buf_size);
		return -EINVAL;
	}

	if (offset + buf_size > bp->common.flash_size) {
		DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
				  " buf_size (0x%x) > flash_size (0x%x)\n",
		   offset, buf_size, bp->common.flash_size);
		return -EINVAL;
	}

	/* request access to nvram interface */
	rc = bnx2x_acquire_nvram_lock(bp);
	if (rc)
		return rc;

	/* enable access to nvram interface */
	bnx2x_enable_nvram_access(bp);

	/* read the first word(s) */
	cmd_flags = MCPR_NVM_COMMAND_FIRST;
	while ((buf_size > sizeof(u32)) && (rc == 0)) {
		rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
		memcpy(ret_buf, &val, 4);

		/* advance to the next dword */
		offset += sizeof(u32);
		ret_buf += sizeof(u32);
		buf_size -= sizeof(u32);
		cmd_flags = 0;
	}

	if (rc == 0) {
		cmd_flags |= MCPR_NVM_COMMAND_LAST;
		rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
		memcpy(ret_buf, &val, 4);
	}

	/* disable access to nvram interface */
	bnx2x_disable_nvram_access(bp);
	bnx2x_release_nvram_lock(bp);

	return rc;
}
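/*
 * Usage sketch (illustration only, error handling elided): reading 12
 * dword-aligned bytes issues three dword reads flagged FIRST, 0 and LAST
 * respectively -- the loop above covers all but the final dword, which
 * gets MCPR_NVM_COMMAND_LAST so the controller can close the access.
 *
 *	u8 buf[12];
 *	rc = bnx2x_nvram_read(bp, 0, buf, sizeof(buf));
 */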
static int bnx2x_get_eeprom(struct net_device *dev,
			    struct ethtool_eeprom *eeprom, u8 *eebuf)
{
	struct bnx2x *bp = netdev_priv(dev);
	int rc;

	if (!netif_running(dev))
		return -EAGAIN;

	DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
	   DP_LEVEL "  magic 0x%x  offset 0x%x (%d)  len 0x%x (%d)\n",
	   eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
	   eeprom->len, eeprom->len);

	/* parameters already validated in ethtool_get_eeprom */

	rc = bnx2x_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);

	return rc;
}
static int bnx2x_nvram_write_dword(struct bnx2x *bp, u32 offset, u32 val,
				   u32 cmd_flags)
{
	int count, i, rc;

	/* build the command word */
	cmd_flags |= MCPR_NVM_COMMAND_DOIT | MCPR_NVM_COMMAND_WR;

	/* need to clear DONE bit separately */
	REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);

	/* write the data */
	REG_WR(bp, MCP_REG_MCPR_NVM_WRITE, val);

	/* address of the NVRAM to write to */
	REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
	       (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));

	/* issue the write command */
	REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);

	/* adjust timeout for emulation/FPGA */
	count = NVRAM_TIMEOUT_COUNT;
	if (CHIP_REV_IS_SLOW(bp))
		count *= 100;

	/* wait for completion */
	rc = -EBUSY;
	for (i = 0; i < count; i++) {
		udelay(5);
		val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
		if (val & MCPR_NVM_COMMAND_DONE) {
			rc = 0;
			break;
		}
	}

	return rc;
}
#define BYTE_OFFSET(offset)		(8 * (offset & 0x03))

static int bnx2x_nvram_write1(struct bnx2x *bp, u32 offset, u8 *data_buf,
			      int buf_size)
{
	int rc;
	u32 cmd_flags;
	u32 align_offset;
	__be32 val;

	if (offset + buf_size > bp->common.flash_size) {
		DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
				  " buf_size (0x%x) > flash_size (0x%x)\n",
		   offset, buf_size, bp->common.flash_size);
		return -EINVAL;
	}

	/* request access to nvram interface */
	rc = bnx2x_acquire_nvram_lock(bp);
	if (rc)
		return rc;

	/* enable access to nvram interface */
	bnx2x_enable_nvram_access(bp);

	cmd_flags = (MCPR_NVM_COMMAND_FIRST | MCPR_NVM_COMMAND_LAST);
	align_offset = (offset & ~0x03);
	rc = bnx2x_nvram_read_dword(bp, align_offset, &val, cmd_flags);

	if (rc == 0) {
		val &= ~(0xff << BYTE_OFFSET(offset));
		val |= (*data_buf << BYTE_OFFSET(offset));

		/* nvram data is returned as an array of bytes
		 * convert it back to cpu order */
		val = be32_to_cpu(val);

		rc = bnx2x_nvram_write_dword(bp, align_offset, val,
					     cmd_flags);
	}

	/* disable access to nvram interface */
	bnx2x_disable_nvram_access(bp);
	bnx2x_release_nvram_lock(bp);

	return rc;
}
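/*
 * Worked example (illustration only): writing the single byte 0xab at
 * flash offset 0x102.  align_offset = 0x100 and BYTE_OFFSET(0x102) =
 * 8 * 2 = 16, so the dword read from 0x100 has bits 23:16 cleared and
 * replaced with 0xab before being written back -- a read-modify-write
 * of one byte inside an aligned dword.
 */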
static int bnx2x_nvram_write(struct bnx2x *bp, u32 offset, u8 *data_buf,
			     int buf_size)
{
	int rc;
	u32 cmd_flags;
	u32 val;
	u32 written_so_far;

	if (buf_size == 1)	/* ethtool */
		return bnx2x_nvram_write1(bp, offset, data_buf, buf_size);

	if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
		DP(BNX2X_MSG_NVM,
		   "Invalid parameter: offset 0x%x  buf_size 0x%x\n",
		   offset, buf_size);
		return -EINVAL;
	}

	if (offset + buf_size > bp->common.flash_size) {
		DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
				  " buf_size (0x%x) > flash_size (0x%x)\n",
		   offset, buf_size, bp->common.flash_size);
		return -EINVAL;
	}

	/* request access to nvram interface */
	rc = bnx2x_acquire_nvram_lock(bp);
	if (rc)
		return rc;

	/* enable access to nvram interface */
	bnx2x_enable_nvram_access(bp);

	written_so_far = 0;
	cmd_flags = MCPR_NVM_COMMAND_FIRST;
	while ((written_so_far < buf_size) && (rc == 0)) {
		if (written_so_far == (buf_size - sizeof(u32)))
			cmd_flags |= MCPR_NVM_COMMAND_LAST;
		else if (((offset + 4) % NVRAM_PAGE_SIZE) == 0)
			cmd_flags |= MCPR_NVM_COMMAND_LAST;
		else if ((offset % NVRAM_PAGE_SIZE) == 0)
			cmd_flags |= MCPR_NVM_COMMAND_FIRST;

		memcpy(&val, data_buf, 4);

		rc = bnx2x_nvram_write_dword(bp, offset, val, cmd_flags);

		/* advance to the next dword */
		offset += sizeof(u32);
		data_buf += sizeof(u32);
		written_so_far += sizeof(u32);
		cmd_flags = 0;
	}

	/* disable access to nvram interface */
	bnx2x_disable_nvram_access(bp);
	bnx2x_release_nvram_lock(bp);

	return rc;
}
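/*
 * Worked example (illustration only, assuming NVRAM_PAGE_SIZE is 256):
 * a 16-byte write at offset 0xf8 crosses a page boundary, so the loop
 * above emits dwords at 0xf8 (FIRST, from the initial flags), 0xfc
 * (LAST, since (0xfc + 4) % 256 == 0), 0x100 (FIRST, start of a new
 * page) and 0x104 (LAST, end of the buffer).  Each flash page is thus
 * programmed inside its own FIRST/LAST bracket.
 */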
static int bnx2x_set_eeprom(struct net_device *dev,
			    struct ethtool_eeprom *eeprom, u8 *eebuf)
{
	struct bnx2x *bp = netdev_priv(dev);
	int port = BP_PORT(bp);
	int rc = 0;

	if (!netif_running(dev))
		return -EAGAIN;

	DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
	   DP_LEVEL "  magic 0x%x  offset 0x%x (%d)  len 0x%x (%d)\n",
	   eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
	   eeprom->len, eeprom->len);

	/* parameters already validated in ethtool_set_eeprom */

	/* PHY eeprom can be accessed only by the PMF */
	if ((eeprom->magic >= 0x50485900) && (eeprom->magic <= 0x504859FF) &&
	    !bp->port.pmf)
		return -EINVAL;

	if (eeprom->magic == 0x50485950) {
		/* 'PHYP' (0x50485950): prepare phy for FW upgrade */
		bnx2x_stats_handle(bp, STATS_EVENT_STOP);

		bnx2x_acquire_phy_lock(bp);
		rc |= bnx2x_link_reset(&bp->link_params,
				       &bp->link_vars, 0);
		if (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config) ==
					PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101)
			bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_0,
				       MISC_REGISTERS_GPIO_HIGH, port);
		bnx2x_release_phy_lock(bp);
		bnx2x_link_report(bp);

	} else if (eeprom->magic == 0x50485952) {
		/* 'PHYR' (0x50485952): re-init link after FW upgrade */
		if ((bp->state == BNX2X_STATE_OPEN) ||
		    (bp->state == BNX2X_STATE_DISABLED)) {
			bnx2x_acquire_phy_lock(bp);
			rc |= bnx2x_link_reset(&bp->link_params,
					       &bp->link_vars, 1);

			rc |= bnx2x_phy_init(&bp->link_params,
					     &bp->link_vars);
			bnx2x_release_phy_lock(bp);
			bnx2x_calc_fc_adv(bp);
		}
	} else if (eeprom->magic == 0x53985943) {
		/* 'PHYC' (0x53985943): PHY FW upgrade completed */
		if (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config) ==
				       PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101) {
			u8 ext_phy_addr =
			     XGXS_EXT_PHY_ADDR(bp->link_params.ext_phy_config);

			/* DSP Remove Download Mode */
			bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_0,
				       MISC_REGISTERS_GPIO_LOW, port);

			bnx2x_acquire_phy_lock(bp);

			bnx2x_sfx7101_sp_sw_reset(bp, port, ext_phy_addr);

			/* wait 0.5 sec to allow it to run */
			msleep(500);
			bnx2x_ext_phy_hw_reset(bp, port);
			msleep(500);
			bnx2x_release_phy_lock(bp);
		}
	} else
		rc = bnx2x_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);

	return rc;
}
static int bnx2x_get_coalesce(struct net_device *dev,
			      struct ethtool_coalesce *coal)
{
	struct bnx2x *bp = netdev_priv(dev);

	memset(coal, 0, sizeof(struct ethtool_coalesce));

	coal->rx_coalesce_usecs = bp->rx_ticks;
	coal->tx_coalesce_usecs = bp->tx_ticks;

	return 0;
}

#define BNX2X_MAX_COALES_TOUT  (0xf0*12) /* Maximal coalescing timeout in us */
static int bnx2x_set_coalesce(struct net_device *dev,
			      struct ethtool_coalesce *coal)
{
	struct bnx2x *bp = netdev_priv(dev);

	bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
	if (bp->rx_ticks > BNX2X_MAX_COALES_TOUT)
		bp->rx_ticks = BNX2X_MAX_COALES_TOUT;

	bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
	if (bp->tx_ticks > BNX2X_MAX_COALES_TOUT)
		bp->tx_ticks = BNX2X_MAX_COALES_TOUT;

	if (netif_running(dev))
		bnx2x_update_coalesce(bp);

	return 0;
}
static void bnx2x_get_ringparam(struct net_device *dev,
				struct ethtool_ringparam *ering)
{
	struct bnx2x *bp = netdev_priv(dev);

	ering->rx_max_pending = MAX_RX_AVAIL;
	ering->rx_mini_max_pending = 0;
	ering->rx_jumbo_max_pending = 0;

	ering->rx_pending = bp->rx_ring_size;
	ering->rx_mini_pending = 0;
	ering->rx_jumbo_pending = 0;

	ering->tx_max_pending = MAX_TX_AVAIL;
	ering->tx_pending = bp->tx_ring_size;
}

static int bnx2x_set_ringparam(struct net_device *dev,
			       struct ethtool_ringparam *ering)
{
	struct bnx2x *bp = netdev_priv(dev);
	int rc = 0;

	if ((ering->rx_pending > MAX_RX_AVAIL) ||
	    (ering->tx_pending > MAX_TX_AVAIL) ||
	    (ering->tx_pending <= MAX_SKB_FRAGS + 4))
		return -EINVAL;

	bp->rx_ring_size = ering->rx_pending;
	bp->tx_ring_size = ering->tx_pending;

	if (netif_running(dev)) {
		bnx2x_nic_unload(bp, UNLOAD_NORMAL);
		rc = bnx2x_nic_load(bp, LOAD_NORMAL);
	}

	return rc;
}
static void bnx2x_get_pauseparam(struct net_device *dev,
				 struct ethtool_pauseparam *epause)
{
	struct bnx2x *bp = netdev_priv(dev);

	epause->autoneg = (bp->link_params.req_flow_ctrl ==
			   BNX2X_FLOW_CTRL_AUTO) &&
			  (bp->link_params.req_line_speed == SPEED_AUTO_NEG);

	epause->rx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) ==
			    BNX2X_FLOW_CTRL_RX);
	epause->tx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX) ==
			    BNX2X_FLOW_CTRL_TX);

	DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
	   DP_LEVEL "  autoneg %d  rx_pause %d  tx_pause %d\n",
	   epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
}

static int bnx2x_set_pauseparam(struct net_device *dev,
				struct ethtool_pauseparam *epause)
{
	struct bnx2x *bp = netdev_priv(dev);

	if (IS_E1HMF(bp))
		return 0;

	DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
	   DP_LEVEL "  autoneg %d  rx_pause %d  tx_pause %d\n",
	   epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);

	bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO;

	if (epause->rx_pause)
		bp->link_params.req_flow_ctrl |= BNX2X_FLOW_CTRL_RX;

	if (epause->tx_pause)
		bp->link_params.req_flow_ctrl |= BNX2X_FLOW_CTRL_TX;

	if (bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO)
		bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_NONE;

	if (epause->autoneg) {
		if (!(bp->port.supported & SUPPORTED_Autoneg)) {
			DP(NETIF_MSG_LINK, "autoneg not supported\n");
			return -EINVAL;
		}

		if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
			bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO;
	}

	DP(NETIF_MSG_LINK,
	   "req_flow_ctrl 0x%x\n", bp->link_params.req_flow_ctrl);

	if (netif_running(dev)) {
		bnx2x_stats_handle(bp, STATS_EVENT_STOP);
		bnx2x_link_set(bp);
	}

	return 0;
}
static int bnx2x_set_flags(struct net_device *dev, u32 data)
{
	struct bnx2x *bp = netdev_priv(dev);
	int changed = 0;
	int rc = 0;

	/* TPA requires Rx CSUM offloading */
	if ((data & ETH_FLAG_LRO) && bp->rx_csum) {
		if (!(dev->features & NETIF_F_LRO)) {
			dev->features |= NETIF_F_LRO;
			bp->flags |= TPA_ENABLE_FLAG;
			changed = 1;
		}

	} else if (dev->features & NETIF_F_LRO) {
		dev->features &= ~NETIF_F_LRO;
		bp->flags &= ~TPA_ENABLE_FLAG;
		changed = 1;
	}

	if (changed && netif_running(dev)) {
		bnx2x_nic_unload(bp, UNLOAD_NORMAL);
		rc = bnx2x_nic_load(bp, LOAD_NORMAL);
	}

	return rc;
}

static u32 bnx2x_get_rx_csum(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	return bp->rx_csum;
}

static int bnx2x_set_rx_csum(struct net_device *dev, u32 data)
{
	struct bnx2x *bp = netdev_priv(dev);
	int rc = 0;

	bp->rx_csum = data;

	/* Disable TPA when Rx CSUM is disabled; otherwise all
	   TPA'ed packets will be discarded due to wrong TCP CSUM */
	if (!data) {
		u32 flags = ethtool_op_get_flags(dev);

		rc = bnx2x_set_flags(dev, (flags & ~ETH_FLAG_LRO));
	}

	return rc;
}

static int bnx2x_set_tso(struct net_device *dev, u32 data)
{
	if (data) {
		dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
		dev->features |= NETIF_F_TSO6;
	} else {
		dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO_ECN);
		dev->features &= ~NETIF_F_TSO6;
	}

	return 0;
}
static const struct {
	char string[ETH_GSTRING_LEN];
} bnx2x_tests_str_arr[BNX2X_NUM_TESTS] = {
	{ "register_test (offline)" },
	{ "memory_test (offline)" },
	{ "loopback_test (offline)" },
	{ "nvram_test (online)" },
	{ "interrupt_test (online)" },
	{ "link_test (online)" },
	{ "idle check (online)" }
};
static int bnx2x_test_registers(struct bnx2x *bp)
{
	int idx, i, rc = -ENODEV;
	u32 wr_val = 0;
	int port = BP_PORT(bp);
	static const struct {
		u32 offset0;
		u32 offset1;
		u32 mask;
	} reg_tbl[] = {
/* 0 */		{ BRB1_REG_PAUSE_LOW_THRESHOLD_0, 4, 0x000003ff },
		{ DORQ_REG_DB_ADDR0, 4, 0xffffffff },
		{ HC_REG_AGG_INT_0, 4, 0x000003ff },
		{ PBF_REG_MAC_IF0_ENABLE, 4, 0x00000001 },
		{ PBF_REG_P0_INIT_CRD, 4, 0x000007ff },
		{ PRS_REG_CID_PORT_0, 4, 0x00ffffff },
		{ PXP2_REG_PSWRQ_CDU0_L2P, 4, 0x000fffff },
		{ PXP2_REG_RQ_CDU0_EFIRST_MEM_ADDR, 8, 0x0003ffff },
		{ PXP2_REG_PSWRQ_TM0_L2P, 4, 0x000fffff },
		{ PXP2_REG_RQ_USDM0_EFIRST_MEM_ADDR, 8, 0x0003ffff },
/* 10 */	{ PXP2_REG_PSWRQ_TSDM0_L2P, 4, 0x000fffff },
		{ QM_REG_CONNNUM_0, 4, 0x000fffff },
		{ TM_REG_LIN0_MAX_ACTIVE_CID, 4, 0x0003ffff },
		{ SRC_REG_KEYRSS0_0, 40, 0xffffffff },
		{ SRC_REG_KEYRSS0_7, 40, 0xffffffff },
		{ XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD00, 4, 0x00000001 },
		{ XCM_REG_WU_DA_CNT_CMD00, 4, 0x00000003 },
		{ XCM_REG_GLB_DEL_ACK_MAX_CNT_0, 4, 0x000000ff },
		{ NIG_REG_LLH0_T_BIT, 4, 0x00000001 },
		{ NIG_REG_EMAC0_IN_EN, 4, 0x00000001 },
/* 20 */	{ NIG_REG_BMAC0_IN_EN, 4, 0x00000001 },
		{ NIG_REG_XCM0_OUT_EN, 4, 0x00000001 },
		{ NIG_REG_BRB0_OUT_EN, 4, 0x00000001 },
		{ NIG_REG_LLH0_XCM_MASK, 4, 0x00000007 },
		{ NIG_REG_LLH0_ACPI_PAT_6_LEN, 68, 0x000000ff },
		{ NIG_REG_LLH0_ACPI_PAT_0_CRC, 68, 0xffffffff },
		{ NIG_REG_LLH0_DEST_MAC_0_0, 160, 0xffffffff },
		{ NIG_REG_LLH0_DEST_IP_0_1, 160, 0xffffffff },
		{ NIG_REG_LLH0_IPV4_IPV6_0, 160, 0x00000001 },
		{ NIG_REG_LLH0_DEST_UDP_0, 160, 0x0000ffff },
/* 30 */	{ NIG_REG_LLH0_DEST_TCP_0, 160, 0x0000ffff },
		{ NIG_REG_LLH0_VLAN_ID_0, 160, 0x00000fff },
		{ NIG_REG_XGXS_SERDES0_MODE_SEL, 4, 0x00000001 },
		{ NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0, 4, 0x00000001 },
		{ NIG_REG_STATUS_INTERRUPT_PORT0, 4, 0x07ffffff },
		{ NIG_REG_XGXS0_CTRL_EXTREMOTEMDIOST, 24, 0x00000001 },
		{ NIG_REG_SERDES0_CTRL_PHY_ADDR, 16, 0x0000001f },

		{ 0xffffffff, 0, 0x00000000 }
	};

	if (!netif_running(bp->dev))
		return rc;

	/* Repeat the test twice:
	   First by writing 0x00000000, second by writing 0xffffffff */
	for (idx = 0; idx < 2; idx++) {

		switch (idx) {
		case 0:
			wr_val = 0;
			break;
		case 1:
			wr_val = 0xffffffff;
			break;
		}

		for (i = 0; reg_tbl[i].offset0 != 0xffffffff; i++) {
			u32 offset, mask, save_val, val;

			offset = reg_tbl[i].offset0 + port*reg_tbl[i].offset1;
			mask = reg_tbl[i].mask;

			save_val = REG_RD(bp, offset);

			REG_WR(bp, offset, wr_val);
			val = REG_RD(bp, offset);

			/* Restore the original register's value */
			REG_WR(bp, offset, save_val);

			/* verify the value is as expected */
			if ((val & mask) != (wr_val & mask))
				goto test_reg_exit;
		}
	}

	rc = 0;

test_reg_exit:
	return rc;
}
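/*
 * Worked example (illustration only): for { HC_REG_AGG_INT_0, 4, 0x000003ff }
 * on port 1 the test hits HC_REG_AGG_INT_0 + 4, writes 0xffffffff and
 * expects the ten writable mask bits to read back as ones; read-only or
 * reserved bits outside the mask are ignored by the comparison
 * (val & mask) != (wr_val & mask), so only the testable bits decide
 * pass/fail.
 */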
static int bnx2x_test_memory(struct bnx2x *bp)
{
	int i, j, rc = -ENODEV;
	u32 val;
	static const struct {
		u32 offset;
		int size;
	} mem_tbl[] = {
		{ CCM_REG_XX_DESCR_TABLE, CCM_REG_XX_DESCR_TABLE_SIZE },
		{ CFC_REG_ACTIVITY_COUNTER, CFC_REG_ACTIVITY_COUNTER_SIZE },
		{ CFC_REG_LINK_LIST, CFC_REG_LINK_LIST_SIZE },
		{ DMAE_REG_CMD_MEM, DMAE_REG_CMD_MEM_SIZE },
		{ TCM_REG_XX_DESCR_TABLE, TCM_REG_XX_DESCR_TABLE_SIZE },
		{ UCM_REG_XX_DESCR_TABLE, UCM_REG_XX_DESCR_TABLE_SIZE },
		{ XCM_REG_XX_DESCR_TABLE, XCM_REG_XX_DESCR_TABLE_SIZE },

		{ 0xffffffff, 0 }
	};
	static const struct {
		char *name;
		u32 offset;
		u32 e1_mask;
		u32 e1h_mask;
	} prty_tbl[] = {
		{ "CCM_PRTY_STS", CCM_REG_CCM_PRTY_STS, 0x3ffc0, 0 },
		{ "CFC_PRTY_STS", CFC_REG_CFC_PRTY_STS, 0x2, 0x2 },
		{ "DMAE_PRTY_STS", DMAE_REG_DMAE_PRTY_STS, 0, 0 },
		{ "TCM_PRTY_STS", TCM_REG_TCM_PRTY_STS, 0x3ffc0, 0 },
		{ "UCM_PRTY_STS", UCM_REG_UCM_PRTY_STS, 0x3ffc0, 0 },
		{ "XCM_PRTY_STS", XCM_REG_XCM_PRTY_STS, 0x3ffc1, 0 },

		{ NULL, 0xffffffff, 0, 0 }
	};

	if (!netif_running(bp->dev))
		return rc;

	/* Go through all the memories */
	for (i = 0; mem_tbl[i].offset != 0xffffffff; i++)
		for (j = 0; j < mem_tbl[i].size; j++)
			REG_RD(bp, mem_tbl[i].offset + j*4);

	/* Check the parity status */
	for (i = 0; prty_tbl[i].offset != 0xffffffff; i++) {
		val = REG_RD(bp, prty_tbl[i].offset);
		if ((CHIP_IS_E1(bp) && (val & ~(prty_tbl[i].e1_mask))) ||
		    (CHIP_IS_E1H(bp) && (val & ~(prty_tbl[i].e1h_mask)))) {
			DP(NETIF_MSG_HW,
			   "%s is 0x%x\n", prty_tbl[i].name, val);
			goto test_mem_exit;
		}
	}

	rc = 0;

test_mem_exit:
	return rc;
}
static void bnx2x_wait_for_link(struct bnx2x *bp, u8 link_up)
{
	int cnt = 1000;

	if (link_up)
		while (bnx2x_link_test(bp) && cnt--)
			msleep(10);
}
static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode, u8 link_up)
{
	unsigned int pkt_size, num_pkts, i;
	struct sk_buff *skb;
	unsigned char *packet;
	struct bnx2x_fastpath *fp_rx = &bp->fp[0];
	struct bnx2x_fastpath *fp_tx = &bp->fp[bp->num_rx_queues];
	u16 tx_start_idx, tx_idx;
	u16 rx_start_idx, rx_idx;
	u16 pkt_prod, bd_prod;
	struct sw_tx_bd *tx_buf;
	struct eth_tx_start_bd *tx_start_bd;
	struct eth_tx_parse_bd *pbd = NULL;
	dma_addr_t mapping;
	union eth_rx_cqe *cqe;
	u8 cqe_fp_flags;
	struct sw_rx_bd *rx_buf;
	u16 len;
	int rc = -ENODEV;

	/* check the loopback mode */
	switch (loopback_mode) {
	case BNX2X_PHY_LOOPBACK:
		if (bp->link_params.loopback_mode != LOOPBACK_XGXS_10)
			return -EINVAL;
		break;
	case BNX2X_MAC_LOOPBACK:
		bp->link_params.loopback_mode = LOOPBACK_BMAC;
		bnx2x_phy_init(&bp->link_params, &bp->link_vars);
		break;
	default:
		return -EINVAL;
	}

	/* prepare the loopback packet */
	pkt_size = (((bp->dev->mtu < ETH_MAX_PACKET_SIZE) ?
		     bp->dev->mtu : ETH_MAX_PACKET_SIZE) + ETH_HLEN);
	skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
	if (!skb) {
		rc = -ENOMEM;
		goto test_loopback_exit;
	}
	packet = skb_put(skb, pkt_size);
	memcpy(packet, bp->dev->dev_addr, ETH_ALEN);
	memset(packet + ETH_ALEN, 0, ETH_ALEN);
	memset(packet + 2*ETH_ALEN, 0x77, (ETH_HLEN - 2*ETH_ALEN));
	for (i = ETH_HLEN; i < pkt_size; i++)
		packet[i] = (unsigned char) (i & 0xff);

	/* send the loopback packet */
	num_pkts = 0;
	tx_start_idx = le16_to_cpu(*fp_tx->tx_cons_sb);
	rx_start_idx = le16_to_cpu(*fp_rx->rx_cons_sb);

	pkt_prod = fp_tx->tx_pkt_prod++;
	tx_buf = &fp_tx->tx_buf_ring[TX_BD(pkt_prod)];
	tx_buf->first_bd = fp_tx->tx_bd_prod;
	tx_buf->skb = skb;
	tx_buf->flags = 0;

	bd_prod = TX_BD(fp_tx->tx_bd_prod);
	tx_start_bd = &fp_tx->tx_desc_ring[bd_prod].start_bd;
	mapping = pci_map_single(bp->pdev, skb->data,
				 skb_headlen(skb), PCI_DMA_TODEVICE);
	tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
	tx_start_bd->nbd = cpu_to_le16(2); /* start + pbd */
	tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb));
	tx_start_bd->vlan = cpu_to_le16(pkt_prod);
	tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
	tx_start_bd->general_data = ((UNICAST_ADDRESS <<
				ETH_TX_START_BD_ETH_ADDR_TYPE_SHIFT) | 1);

	/* turn on parsing and get a BD */
	bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
	pbd = &fp_tx->tx_desc_ring[bd_prod].parse_bd;

	memset(pbd, 0, sizeof(struct eth_tx_parse_bd));

	wmb();

	fp_tx->tx_db.data.prod += 2;
	barrier();
	DOORBELL(bp, fp_tx->index - bp->num_rx_queues, fp_tx->tx_db.raw);

	mmiowb();

	num_pkts++;
	fp_tx->tx_bd_prod += 2; /* start + pbd */
	bp->dev->trans_start = jiffies;

	udelay(100);

	tx_idx = le16_to_cpu(*fp_tx->tx_cons_sb);
	if (tx_idx != tx_start_idx + num_pkts)
		goto test_loopback_exit;

	rx_idx = le16_to_cpu(*fp_rx->rx_cons_sb);
	if (rx_idx != rx_start_idx + num_pkts)
		goto test_loopback_exit;

	cqe = &fp_rx->rx_comp_ring[RCQ_BD(fp_rx->rx_comp_cons)];
	cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
	if (CQE_TYPE(cqe_fp_flags) || (cqe_fp_flags & ETH_RX_ERROR_FALGS))
		goto test_loopback_rx_exit;

	len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
	if (len != pkt_size)
		goto test_loopback_rx_exit;

	rx_buf = &fp_rx->rx_buf_ring[RX_BD(fp_rx->rx_bd_cons)];
	skb = rx_buf->skb;
	skb_reserve(skb, cqe->fast_path_cqe.placement_offset);
	for (i = ETH_HLEN; i < pkt_size; i++)
		if (*(skb->data + i) != (unsigned char) (i & 0xff))
			goto test_loopback_rx_exit;

	rc = 0;

test_loopback_rx_exit:

	fp_rx->rx_bd_cons = NEXT_RX_IDX(fp_rx->rx_bd_cons);
	fp_rx->rx_bd_prod = NEXT_RX_IDX(fp_rx->rx_bd_prod);
	fp_rx->rx_comp_cons = NEXT_RCQ_IDX(fp_rx->rx_comp_cons);
	fp_rx->rx_comp_prod = NEXT_RCQ_IDX(fp_rx->rx_comp_prod);

	/* Update producers */
	bnx2x_update_rx_prod(bp, fp_rx, fp_rx->rx_bd_prod, fp_rx->rx_comp_prod,
			     fp_rx->rx_sge_prod);

test_loopback_exit:
	bp->link_params.loopback_mode = LOOPBACK_NONE;

	return rc;
}
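/*
 * The pass criterion above, in short (illustration only): the frame
 * payload carries the pattern packet[i] = i & 0xff, so after the CQE
 * length check each received payload byte is compared against its own
 * index; any mismatch, error flag or stale tx/rx consumer index fails
 * the loopback run.
 */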
static int bnx2x_test_loopback(struct bnx2x *bp, u8 link_up)
{
	int rc = 0, res;

	if (!netif_running(bp->dev))
		return BNX2X_LOOPBACK_FAILED;

	bnx2x_netif_stop(bp, 1);
	bnx2x_acquire_phy_lock(bp);

	res = bnx2x_run_loopback(bp, BNX2X_PHY_LOOPBACK, link_up);
	if (res) {
		DP(NETIF_MSG_PROBE, "  PHY loopback failed  (res %d)\n", res);
		rc |= BNX2X_PHY_LOOPBACK_FAILED;
	}

	res = bnx2x_run_loopback(bp, BNX2X_MAC_LOOPBACK, link_up);
	if (res) {
		DP(NETIF_MSG_PROBE, "  MAC loopback failed  (res %d)\n", res);
		rc |= BNX2X_MAC_LOOPBACK_FAILED;
	}

	bnx2x_release_phy_lock(bp);
	bnx2x_netif_start(bp);

	return rc;
}
#define CRC32_RESIDUAL			0xdebb20e3

static int bnx2x_test_nvram(struct bnx2x *bp)
{
	static const struct {
		int offset;
		int size;
	} nvram_tbl[] = {
		{     0,  0x14 }, /* bootstrap */
		{  0x14,  0xec }, /* dir */
		{ 0x100, 0x350 }, /* manuf_info */
		{ 0x450,  0xf0 }, /* feature_info */
		{ 0x640,  0x64 }, /* upgrade_key_info */
		{ 0x6a4,  0x64 },
		{ 0x708,  0x70 }, /* manuf_key_info */
		{ 0x778,  0x70 },
		{     0,     0 }
	};
	__be32 buf[0x350 / 4];
	u8 *data = (u8 *)buf;
	int i, rc;
	u32 magic, crc;

	rc = bnx2x_nvram_read(bp, 0, data, 4);
	if (rc) {
		DP(NETIF_MSG_PROBE, "magic value read (rc %d)\n", rc);
		goto test_nvram_exit;
	}

	magic = be32_to_cpu(buf[0]);
	if (magic != 0x669955aa) {
		DP(NETIF_MSG_PROBE, "magic value (0x%08x)\n", magic);
		rc = -ENODEV;
		goto test_nvram_exit;
	}

	for (i = 0; nvram_tbl[i].size; i++) {

		rc = bnx2x_nvram_read(bp, nvram_tbl[i].offset, data,
				      nvram_tbl[i].size);
		if (rc) {
			DP(NETIF_MSG_PROBE,
			   "nvram_tbl[%d] read data (rc %d)\n", i, rc);
			goto test_nvram_exit;
		}

		crc = ether_crc_le(nvram_tbl[i].size, data);
		if (crc != CRC32_RESIDUAL) {
			DP(NETIF_MSG_PROBE,
			   "nvram_tbl[%d] crc value (0x%08x)\n", i, crc);
			rc = -ENODEV;
			goto test_nvram_exit;
		}
	}

test_nvram_exit:
	return rc;
}
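/*
 * Why the compare is against a constant (illustration only): each
 * nvram_tbl region stores its own little-endian CRC32 at the end.
 * Running ether_crc_le() over data-plus-CRC of an intact region always
 * yields the fixed CRC32 residual 0xdebb20e3, so no per-region expected
 * value needs to be kept -- any corruption changes the residual.
 */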
static int bnx2x_test_intr(struct bnx2x *bp)
{
	struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
	int i, rc;

	if (!netif_running(bp->dev))
		return -ENODEV;

	config->hdr.length = 0;
	if (CHIP_IS_E1(bp))
		config->hdr.offset = (BP_PORT(bp) ? 32 : 0);
	else
		config->hdr.offset = BP_FUNC(bp);
	config->hdr.client_id = bp->fp->cl_id;
	config->hdr.reserved1 = 0;

	rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
			   U64_HI(bnx2x_sp_mapping(bp, mac_config)),
			   U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
	if (rc == 0) {
		bp->set_mac_pending++;
		for (i = 0; i < 10; i++) {
			if (!bp->set_mac_pending)
				break;
			msleep_interruptible(10);
		}
		if (i == 10)
			rc = -ENODEV;
	}

	return rc;
}
static void bnx2x_self_test(struct net_device *dev,
			    struct ethtool_test *etest, u64 *buf)
{
	struct bnx2x *bp = netdev_priv(dev);

	memset(buf, 0, sizeof(u64) * BNX2X_NUM_TESTS);

	if (!netif_running(dev))
		return;

	/* offline tests are not supported in MF mode */
	if (IS_E1HMF(bp))
		etest->flags &= ~ETH_TEST_FL_OFFLINE;

	if (etest->flags & ETH_TEST_FL_OFFLINE) {
		int port = BP_PORT(bp);
		u32 val;
		u8 link_up;

		/* save current value of input enable for TX port IF */
		val = REG_RD(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4);
		/* disable input for TX port IF */
		REG_WR(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4, 0);

		link_up = bp->link_vars.link_up;
		bnx2x_nic_unload(bp, UNLOAD_NORMAL);
		bnx2x_nic_load(bp, LOAD_DIAG);
		/* wait until link state is restored */
		bnx2x_wait_for_link(bp, link_up);

		if (bnx2x_test_registers(bp) != 0) {
			buf[0] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}
		if (bnx2x_test_memory(bp) != 0) {
			buf[1] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}
		buf[2] = bnx2x_test_loopback(bp, link_up);
		if (buf[2] != 0)
			etest->flags |= ETH_TEST_FL_FAILED;

		bnx2x_nic_unload(bp, UNLOAD_NORMAL);

		/* restore input for TX port IF */
		REG_WR(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4, val);

		bnx2x_nic_load(bp, LOAD_NORMAL);
		/* wait until link state is restored */
		bnx2x_wait_for_link(bp, link_up);
	}
	if (bnx2x_test_nvram(bp) != 0) {
		buf[3] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}
	if (bnx2x_test_intr(bp) != 0) {
		buf[4] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}
	if (bp->port.pmf)
		if (bnx2x_link_test(bp) != 0) {
			buf[5] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}

#ifdef BNX2X_EXTRA_DEBUG
	bnx2x_panic_dump(bp);
#endif
}
static const struct {
	long offset;
	int size;
	u8 string[ETH_GSTRING_LEN];
} bnx2x_q_stats_arr[BNX2X_NUM_Q_STATS] = {
/* 1 */	{ Q_STATS_OFFSET32(total_bytes_received_hi), 8, "[%d]: rx_bytes" },
	{ Q_STATS_OFFSET32(error_bytes_received_hi),
						8, "[%d]: rx_error_bytes" },
	{ Q_STATS_OFFSET32(total_unicast_packets_received_hi),
						8, "[%d]: rx_ucast_packets" },
	{ Q_STATS_OFFSET32(total_multicast_packets_received_hi),
						8, "[%d]: rx_mcast_packets" },
	{ Q_STATS_OFFSET32(total_broadcast_packets_received_hi),
						8, "[%d]: rx_bcast_packets" },
	{ Q_STATS_OFFSET32(no_buff_discard_hi),	8, "[%d]: rx_discards" },
	{ Q_STATS_OFFSET32(rx_err_discard_pkt),
					4, "[%d]: rx_phy_ip_err_discards"},
	{ Q_STATS_OFFSET32(rx_skb_alloc_failed),
					4, "[%d]: rx_skb_alloc_discard" },
	{ Q_STATS_OFFSET32(hw_csum_err), 4, "[%d]: rx_csum_offload_errors" },

/* 10 */{ Q_STATS_OFFSET32(total_bytes_transmitted_hi),	8, "[%d]: tx_bytes" },
	{ Q_STATS_OFFSET32(total_unicast_packets_transmitted_hi),
						8, "[%d]: tx_packets" }
};
static const struct {
	long offset;
	int size;
	u32 flags;
#define STATS_FLAGS_PORT		1
#define STATS_FLAGS_FUNC		2
#define STATS_FLAGS_BOTH		(STATS_FLAGS_FUNC | STATS_FLAGS_PORT)
	u8 string[ETH_GSTRING_LEN];
} bnx2x_stats_arr[BNX2X_NUM_STATS] = {
/* 1 */	{ STATS_OFFSET32(total_bytes_received_hi),
				8, STATS_FLAGS_BOTH, "rx_bytes" },
	{ STATS_OFFSET32(error_bytes_received_hi),
				8, STATS_FLAGS_BOTH, "rx_error_bytes" },
	{ STATS_OFFSET32(total_unicast_packets_received_hi),
				8, STATS_FLAGS_BOTH, "rx_ucast_packets" },
	{ STATS_OFFSET32(total_multicast_packets_received_hi),
				8, STATS_FLAGS_BOTH, "rx_mcast_packets" },
	{ STATS_OFFSET32(total_broadcast_packets_received_hi),
				8, STATS_FLAGS_BOTH, "rx_bcast_packets" },
	{ STATS_OFFSET32(rx_stat_dot3statsfcserrors_hi),
				8, STATS_FLAGS_PORT, "rx_crc_errors" },
	{ STATS_OFFSET32(rx_stat_dot3statsalignmenterrors_hi),
				8, STATS_FLAGS_PORT, "rx_align_errors" },
	{ STATS_OFFSET32(rx_stat_etherstatsundersizepkts_hi),
				8, STATS_FLAGS_PORT, "rx_undersize_packets" },
	{ STATS_OFFSET32(etherstatsoverrsizepkts_hi),
				8, STATS_FLAGS_PORT, "rx_oversize_packets" },
/* 10 */{ STATS_OFFSET32(rx_stat_etherstatsfragments_hi),
				8, STATS_FLAGS_PORT, "rx_fragments" },
	{ STATS_OFFSET32(rx_stat_etherstatsjabbers_hi),
				8, STATS_FLAGS_PORT, "rx_jabbers" },
	{ STATS_OFFSET32(no_buff_discard_hi),
				8, STATS_FLAGS_BOTH, "rx_discards" },
	{ STATS_OFFSET32(mac_filter_discard),
				4, STATS_FLAGS_PORT, "rx_filtered_packets" },
	{ STATS_OFFSET32(xxoverflow_discard),
				4, STATS_FLAGS_PORT, "rx_fw_discards" },
	{ STATS_OFFSET32(brb_drop_hi),
				8, STATS_FLAGS_PORT, "rx_brb_discard" },
	{ STATS_OFFSET32(brb_truncate_hi),
				8, STATS_FLAGS_PORT, "rx_brb_truncate" },
	{ STATS_OFFSET32(pause_frames_received_hi),
				8, STATS_FLAGS_PORT, "rx_pause_frames" },
	{ STATS_OFFSET32(rx_stat_maccontrolframesreceived_hi),
				8, STATS_FLAGS_PORT, "rx_mac_ctrl_frames" },
	{ STATS_OFFSET32(nig_timer_max),
			4, STATS_FLAGS_PORT, "rx_constant_pause_events" },
/* 20 */{ STATS_OFFSET32(rx_err_discard_pkt),
				4, STATS_FLAGS_BOTH, "rx_phy_ip_err_discards"},
	{ STATS_OFFSET32(rx_skb_alloc_failed),
				4, STATS_FLAGS_BOTH, "rx_skb_alloc_discard" },
	{ STATS_OFFSET32(hw_csum_err),
				4, STATS_FLAGS_BOTH, "rx_csum_offload_errors" },

	{ STATS_OFFSET32(total_bytes_transmitted_hi),
				8, STATS_FLAGS_BOTH, "tx_bytes" },
	{ STATS_OFFSET32(tx_stat_ifhcoutbadoctets_hi),
				8, STATS_FLAGS_PORT, "tx_error_bytes" },
	{ STATS_OFFSET32(total_unicast_packets_transmitted_hi),
				8, STATS_FLAGS_BOTH, "tx_packets" },
	{ STATS_OFFSET32(tx_stat_dot3statsinternalmactransmiterrors_hi),
				8, STATS_FLAGS_PORT, "tx_mac_errors" },
	{ STATS_OFFSET32(rx_stat_dot3statscarriersenseerrors_hi),
				8, STATS_FLAGS_PORT, "tx_carrier_errors" },
	{ STATS_OFFSET32(tx_stat_dot3statssinglecollisionframes_hi),
				8, STATS_FLAGS_PORT, "tx_single_collisions" },
	{ STATS_OFFSET32(tx_stat_dot3statsmultiplecollisionframes_hi),
				8, STATS_FLAGS_PORT, "tx_multi_collisions" },
/* 30 */{ STATS_OFFSET32(tx_stat_dot3statsdeferredtransmissions_hi),
				8, STATS_FLAGS_PORT, "tx_deferred" },
	{ STATS_OFFSET32(tx_stat_dot3statsexcessivecollisions_hi),
				8, STATS_FLAGS_PORT, "tx_excess_collisions" },
	{ STATS_OFFSET32(tx_stat_dot3statslatecollisions_hi),
				8, STATS_FLAGS_PORT, "tx_late_collisions" },
	{ STATS_OFFSET32(tx_stat_etherstatscollisions_hi),
				8, STATS_FLAGS_PORT, "tx_total_collisions" },
	{ STATS_OFFSET32(tx_stat_etherstatspkts64octets_hi),
				8, STATS_FLAGS_PORT, "tx_64_byte_packets" },
	{ STATS_OFFSET32(tx_stat_etherstatspkts65octetsto127octets_hi),
			8, STATS_FLAGS_PORT, "tx_65_to_127_byte_packets" },
	{ STATS_OFFSET32(tx_stat_etherstatspkts128octetsto255octets_hi),
			8, STATS_FLAGS_PORT, "tx_128_to_255_byte_packets" },
	{ STATS_OFFSET32(tx_stat_etherstatspkts256octetsto511octets_hi),
			8, STATS_FLAGS_PORT, "tx_256_to_511_byte_packets" },
	{ STATS_OFFSET32(tx_stat_etherstatspkts512octetsto1023octets_hi),
			8, STATS_FLAGS_PORT, "tx_512_to_1023_byte_packets" },
	{ STATS_OFFSET32(etherstatspkts1024octetsto1522octets_hi),
			8, STATS_FLAGS_PORT, "tx_1024_to_1522_byte_packets" },
/* 40 */{ STATS_OFFSET32(etherstatspktsover1522octets_hi),
			8, STATS_FLAGS_PORT, "tx_1523_to_9022_byte_packets" },
	{ STATS_OFFSET32(pause_frames_sent_hi),
				8, STATS_FLAGS_PORT, "tx_pause_frames" }
};

#define IS_PORT_STAT(i) \
	((bnx2x_stats_arr[i].flags & STATS_FLAGS_BOTH) == STATS_FLAGS_PORT)
#define IS_FUNC_STAT(i)		(bnx2x_stats_arr[i].flags & STATS_FLAGS_FUNC)
#define IS_E1HMF_MODE_STAT(bp) \
			(IS_E1HMF(bp) && !(bp->msglevel & BNX2X_MSG_STATS))
static int bnx2x_get_sset_count(struct net_device *dev, int stringset)
{
	struct bnx2x *bp = netdev_priv(dev);
	int i, num_stats;

	switch (stringset) {
	case ETH_SS_STATS:
		if (is_multi(bp)) {
			num_stats = BNX2X_NUM_Q_STATS * bp->num_rx_queues;
			if (!IS_E1HMF_MODE_STAT(bp))
				num_stats += BNX2X_NUM_STATS;
		} else {
			if (IS_E1HMF_MODE_STAT(bp)) {
				num_stats = 0;
				for (i = 0; i < BNX2X_NUM_STATS; i++)
					if (IS_FUNC_STAT(i))
						num_stats++;
			} else
				num_stats = BNX2X_NUM_STATS;
		}
		return num_stats;

	case ETH_SS_TEST:
		return BNX2X_NUM_TESTS;

	default:
		return -EINVAL;
	}
}
static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
{
	struct bnx2x *bp = netdev_priv(dev);
	int i, j, k;

	switch (stringset) {
	case ETH_SS_STATS:
		if (is_multi(bp)) {
			k = 0;
			for_each_rx_queue(bp, i) {
				for (j = 0; j < BNX2X_NUM_Q_STATS; j++)
					sprintf(buf + (k + j)*ETH_GSTRING_LEN,
						bnx2x_q_stats_arr[j].string, i);
				k += BNX2X_NUM_Q_STATS;
			}
			if (IS_E1HMF_MODE_STAT(bp))
				break;
			for (j = 0; j < BNX2X_NUM_STATS; j++)
				strcpy(buf + (k + j)*ETH_GSTRING_LEN,
				       bnx2x_stats_arr[j].string);
		} else {
			for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
				if (IS_E1HMF_MODE_STAT(bp) && IS_PORT_STAT(i))
					continue;
				strcpy(buf + j*ETH_GSTRING_LEN,
				       bnx2x_stats_arr[i].string);
				j++;
			}
		}
		break;

	case ETH_SS_TEST:
		memcpy(buf, bnx2x_tests_str_arr, sizeof(bnx2x_tests_str_arr));
		break;
	}
}
static void bnx2x_get_ethtool_stats(struct net_device *dev,
				    struct ethtool_stats *stats, u64 *buf)
{
	struct bnx2x *bp = netdev_priv(dev);
	u32 *hw_stats, *offset;
	int i, j, k;

	if (is_multi(bp)) {
		k = 0;
		for_each_rx_queue(bp, i) {
			hw_stats = (u32 *)&bp->fp[i].eth_q_stats;
			for (j = 0; j < BNX2X_NUM_Q_STATS; j++) {
				if (bnx2x_q_stats_arr[j].size == 0) {
					/* skip this counter */
					buf[k + j] = 0;
					continue;
				}
				offset = (hw_stats +
					  bnx2x_q_stats_arr[j].offset);
				if (bnx2x_q_stats_arr[j].size == 4) {
					/* 4-byte counter */
					buf[k + j] = (u64) *offset;
					continue;
				}
				/* 8-byte counter */
				buf[k + j] = HILO_U64(*offset, *(offset + 1));
			}
			k += BNX2X_NUM_Q_STATS;
		}
		if (IS_E1HMF_MODE_STAT(bp))
			return;
		hw_stats = (u32 *)&bp->eth_stats;
		for (j = 0; j < BNX2X_NUM_STATS; j++) {
			if (bnx2x_stats_arr[j].size == 0) {
				/* skip this counter */
				buf[k + j] = 0;
				continue;
			}
			offset = (hw_stats + bnx2x_stats_arr[j].offset);
			if (bnx2x_stats_arr[j].size == 4) {
				/* 4-byte counter */
				buf[k + j] = (u64) *offset;
				continue;
			}
			/* 8-byte counter */
			buf[k + j] = HILO_U64(*offset, *(offset + 1));
		}
	} else {
		hw_stats = (u32 *)&bp->eth_stats;
		for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
			if (IS_E1HMF_MODE_STAT(bp) && IS_PORT_STAT(i))
				continue;
			if (bnx2x_stats_arr[i].size == 0) {
				/* skip this counter */
				buf[j] = 0;
				j++;
				continue;
			}
			offset = (hw_stats + bnx2x_stats_arr[i].offset);
			if (bnx2x_stats_arr[i].size == 4) {
				/* 4-byte counter */
				buf[j] = (u64) *offset;
				j++;
				continue;
			}
			/* 8-byte counter */
			buf[j] = HILO_U64(*offset, *(offset + 1));
			j++;
		}
	}
}
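/*
 * Illustrative sketch, not used by the driver: the 8-byte counters above
 * are stored as a {hi, lo} pair of adjacent 32-bit words, and HILO_U64
 * glues them together.  E.g. hi = 0x00000001, lo = 0x00000002 yields
 * 0x0000000100000002.  The helper name is ours, for illustration only.
 */
static u64 __maybe_unused bnx2x_example_hilo(u32 hi, u32 lo)
{
	return ((u64)hi << 32) | lo;	/* same result as HILO_U64(hi, lo) */
}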
static int bnx2x_phys_id(struct net_device *dev, u32 data)
{
	struct bnx2x *bp = netdev_priv(dev);
	int port = BP_PORT(bp);
	int i;

	if (!netif_running(dev))
		return 0;

	if (!bp->port.pmf)
		return 0;

	if (data == 0)
		data = 2;

	for (i = 0; i < (data * 2); i++) {
		if ((i % 2) == 0)
			bnx2x_set_led(bp, port, LED_MODE_OPER, SPEED_1000,
				      bp->link_params.hw_led_mode,
				      bp->link_params.chip_id);
		else
			bnx2x_set_led(bp, port, LED_MODE_OFF, 0,
				      bp->link_params.hw_led_mode,
				      bp->link_params.chip_id);

		msleep_interruptible(500);
		if (signal_pending(current))
			break;
	}

	if (bp->link_vars.link_up)
		bnx2x_set_led(bp, port, LED_MODE_OPER,
			      bp->link_vars.line_speed,
			      bp->link_params.hw_led_mode,
			      bp->link_params.chip_id);

	return 0;
}
static const struct ethtool_ops bnx2x_ethtool_ops = {
	.get_settings		= bnx2x_get_settings,
	.set_settings		= bnx2x_set_settings,
	.get_drvinfo		= bnx2x_get_drvinfo,
	.get_regs_len		= bnx2x_get_regs_len,
	.get_regs		= bnx2x_get_regs,
	.get_wol		= bnx2x_get_wol,
	.set_wol		= bnx2x_set_wol,
	.get_msglevel		= bnx2x_get_msglevel,
	.set_msglevel		= bnx2x_set_msglevel,
	.nway_reset		= bnx2x_nway_reset,
	.get_link		= bnx2x_get_link,
	.get_eeprom_len		= bnx2x_get_eeprom_len,
	.get_eeprom		= bnx2x_get_eeprom,
	.set_eeprom		= bnx2x_set_eeprom,
	.get_coalesce		= bnx2x_get_coalesce,
	.set_coalesce		= bnx2x_set_coalesce,
	.get_ringparam		= bnx2x_get_ringparam,
	.set_ringparam		= bnx2x_set_ringparam,
	.get_pauseparam		= bnx2x_get_pauseparam,
	.set_pauseparam		= bnx2x_set_pauseparam,
	.get_rx_csum		= bnx2x_get_rx_csum,
	.set_rx_csum		= bnx2x_set_rx_csum,
	.get_tx_csum		= ethtool_op_get_tx_csum,
	.set_tx_csum		= ethtool_op_set_tx_hw_csum,
	.set_flags		= bnx2x_set_flags,
	.get_flags		= ethtool_op_get_flags,
	.get_sg			= ethtool_op_get_sg,
	.set_sg			= ethtool_op_set_sg,
	.get_tso		= ethtool_op_get_tso,
	.set_tso		= bnx2x_set_tso,
	.self_test		= bnx2x_self_test,
	.get_sset_count		= bnx2x_get_sset_count,
	.get_strings		= bnx2x_get_strings,
	.phys_id		= bnx2x_phys_id,
	.get_ethtool_stats	= bnx2x_get_ethtool_stats,
};

/* end of ethtool_ops */

/****************************************************************************
* General service functions
****************************************************************************/
static int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
{
	u16 pmcsr;

	pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);

	switch (state) {
	case PCI_D0:
		pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
				      ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
				       PCI_PM_CTRL_PME_STATUS));

		if (pmcsr & PCI_PM_CTRL_STATE_MASK)
			/* delay required during transition out of D3hot */
			msleep(20);
		break;

	case PCI_D3hot:
		pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
		pmcsr |= 3;

		if (bp->wol)
			pmcsr |= PCI_PM_CTRL_PME_ENABLE;

		pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
				      pmcsr);

		/* No more memory access after this point until
		 * device is brought back to D0.
		 */
		break;

	default:
		return -EINVAL;
	}
	return 0;
}
static inline int bnx2x_has_rx_work(struct bnx2x_fastpath *fp)
{
	u16 rx_cons_sb;

	/* Tell compiler that status block fields can change */
	barrier();
	rx_cons_sb = le16_to_cpu(*fp->rx_cons_sb);
	if ((rx_cons_sb & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
		rx_cons_sb++;
	return (fp->rx_comp_cons != rx_cons_sb);
}
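/*
 * Worked example (illustration only, assuming MAX_RCQ_DESC_CNT is 127):
 * each RCQ page holds 127 usable completion entries plus one "next page"
 * link descriptor.  When the status-block consumer lands on such a link
 * slot (low bits equal to 127) it is bumped by one so the comparison
 * against the driver's rx_comp_cons never counts the link entry as
 * pending work.
 */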
/*
 * net_device service functions
 */

static int bnx2x_poll(struct napi_struct *napi, int budget)
{
	struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
						 napi);
	struct bnx2x *bp = fp->bp;
	int work_done = 0;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		goto poll_panic;
#endif

	prefetch(fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)].skb);
	prefetch((char *)(fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)].skb) + 256);

	bnx2x_update_fpsb_idx(fp);

	if (bnx2x_has_rx_work(fp)) {
		work_done = bnx2x_rx_int(fp, budget);

		/* must not complete if we consumed full budget */
		if (work_done >= budget)
			goto poll_again;
	}

	/* bnx2x_has_rx_work() reads the status block, thus we need to
	 * ensure that status block indices have been actually read
	 * (bnx2x_update_fpsb_idx) prior to this check (bnx2x_has_rx_work)
	 * so that we won't write the "newer" value of the status block to IGU
	 * (if there was a DMA right after bnx2x_has_rx_work and
	 * if there is no rmb, the memory reading (bnx2x_update_fpsb_idx)
	 * may be postponed to right before bnx2x_ack_sb). In this case
	 * there will never be another interrupt until there is another update
	 * of the status block, while there is still unhandled work.
	 */
	rmb();

	if (!bnx2x_has_rx_work(fp)) {
#ifdef BNX2X_STOP_ON_ERROR
poll_panic:
#endif
		napi_complete(napi);

		bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID,
			     le16_to_cpu(fp->fp_u_idx), IGU_INT_NOP, 1);
		bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID,
			     le16_to_cpu(fp->fp_c_idx), IGU_INT_ENABLE, 1);
	}

poll_again:
	return work_done;
}
/* we split the first BD into headers and data BDs
 * to ease the pain of our fellow microcode engineers
 * we use one mapping for both BDs
 * So far this has only been observed to happen
 * in Other Operating Systems(TM)
 */
static noinline u16 bnx2x_tx_split(struct bnx2x *bp,
				   struct bnx2x_fastpath *fp,
				   struct sw_tx_bd *tx_buf,
				   struct eth_tx_start_bd **tx_bd, u16 hlen,
				   u16 bd_prod, int nbd)
{
	struct eth_tx_start_bd *h_tx_bd = *tx_bd;
	struct eth_tx_bd *d_tx_bd;
	dma_addr_t mapping;
	int old_len = le16_to_cpu(h_tx_bd->nbytes);

	/* first fix first BD */
	h_tx_bd->nbd = cpu_to_le16(nbd);
	h_tx_bd->nbytes = cpu_to_le16(hlen);

	DP(NETIF_MSG_TX_QUEUED,	"TSO split header size is %d "
	   "(%x:%x) nbd %d\n", h_tx_bd->nbytes, h_tx_bd->addr_hi,
	   h_tx_bd->addr_lo, h_tx_bd->nbd);

	/* now get a new data BD
	 * (after the pbd) and fill it */
	bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
	d_tx_bd = &fp->tx_desc_ring[bd_prod].reg_bd;

	mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi),
			   le32_to_cpu(h_tx_bd->addr_lo)) + hlen;

	d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
	d_tx_bd->nbytes = cpu_to_le16(old_len - hlen);

	/* this marks the BD as one that has no individual mapping */
	tx_buf->flags |= BNX2X_TSO_SPLIT_BD;

	DP(NETIF_MSG_TX_QUEUED,
	   "TSO split data size is %d (%x:%x)\n",
	   d_tx_bd->nbytes, d_tx_bd->addr_hi, d_tx_bd->addr_lo);

	/* update tx_bd */
	*tx_bd = (struct eth_tx_start_bd *)d_tx_bd;

	return bd_prod;
}
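/*
 * Worked example (illustration only): for a 1514-byte frame with 54 bytes
 * of headers (hlen = 54), the start BD keeps its address and is trimmed
 * to nbytes = 54, while the new data BD points at the same DMA mapping
 * plus 54 and carries the remaining 1460 bytes -- one pci_map_single()
 * mapping backing two BDs, which is why the buffer is flagged
 * BNX2X_TSO_SPLIT_BD so that unmapping is done only once.
 */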
static inline u16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
{
	if (fix > 0)
		csum = (u16) ~csum_fold(csum_sub(csum,
				csum_partial(t_header - fix, fix, 0)));

	else if (fix < 0)
		csum = (u16) ~csum_fold(csum_add(csum,
				csum_partial(t_header, -fix, 0)));

	return swab16(csum);
}
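/*
 * Worked example (illustration only): fix > 0 means `fix` bytes that
 * precede the transport header were wrongly included in the running sum,
 * so their partial checksum is subtracted; fix < 0 means `-fix` bytes
 * starting at the header were missed and are added back.  The corrected
 * 32-bit sum is then folded to 16 bits, inverted and byte-swapped into
 * the checksum format the hardware expects.
 */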
10841 static inline u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
10842 {
10843 u32 rc = XMIT_PLAIN;
10845 if (skb->ip_summed != CHECKSUM_PARTIAL)
10846 return XMIT_PLAIN;
10849 if (skb->protocol == htons(ETH_P_IPV6)) {
10850 rc |= XMIT_CSUM_V6;
10851 if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
10852 rc |= XMIT_CSUM_TCP;
10854 } else {
10855 rc |= XMIT_CSUM_V4;
10856 if (ip_hdr(skb)->protocol == IPPROTO_TCP)
10857 rc |= XMIT_CSUM_TCP;
10858 }
10861 if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
10862 rc |= XMIT_GSO_V4;
10864 else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
10865 rc |= XMIT_GSO_V6;
10867 return rc;
10868 }
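/* Assumed flag layout (inferred from how the bits are tested in this file):
 * xmit_type combines XMIT_CSUM_V4/XMIT_CSUM_V6, XMIT_CSUM_TCP and
 * XMIT_GSO_V4/XMIT_GSO_V6; XMIT_CSUM and XMIT_GSO used below are taken to be
 * the unions of the respective pairs, as defined in bnx2x.h. */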
10870 #if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
10871 /* check if packet requires linearization (packet is too fragmented)
10872 no need to check fragmentation if page size > 8K (there will be no
10873 violation of the FW restrictions) */
10874 static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
10875 u32 xmit_type)
10876 {
10877 int to_copy = 0;
10878 int hlen = 0;
10879 int first_bd_sz = 0;
10881 /* 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */
10882 if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - 3)) {
10884 if (xmit_type & XMIT_GSO) {
10885 unsigned short lso_mss = skb_shinfo(skb)->gso_size;
10886 /* Check if LSO packet needs to be copied:
10887 3 = 1 (for headers BD) + 2 (for PBD and last BD) */
10888 int wnd_size = MAX_FETCH_BD - 3;
10889 /* Number of windows to check */
10890 int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size;
10891 int wnd_idx = 0;
10892 int frag_idx = 0;
10893 u32 wnd_sum = 0;
10895 /* Headers length */
10896 hlen = (int)(skb_transport_header(skb) - skb->data) +
10897 tcp_hdrlen(skb);
10899 /* Amount of data (w/o headers) on linear part of SKB*/
10900 first_bd_sz = skb_headlen(skb) - hlen;
10902 wnd_sum = first_bd_sz;
10904 /* Calculate the first sum - it's special */
10905 for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++)
10906 wnd_sum +=
10907 skb_shinfo(skb)->frags[frag_idx].size;
10909 /* If there was data on linear skb data - check it */
10910 if (first_bd_sz > 0) {
10911 if (unlikely(wnd_sum < lso_mss)) {
10912 to_copy = 1;
10913 goto exit_lbl;
10914 }
10916 wnd_sum -= first_bd_sz;
10917 }
10919 /* Others are easier: run through the frag list and
10920 check all windows */
10921 for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) {
10922 wnd_sum +=
10923 skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1].size;
10925 if (unlikely(wnd_sum < lso_mss)) {
10926 to_copy = 1;
10927 break;
10928 }
10929 wnd_sum -=
10930 skb_shinfo(skb)->frags[wnd_idx].size;
10931 }
10932 } else {
10933 /* in non-LSO too fragmented packet should always
10934 be linearized */
10935 to_copy = 1;
10936 }
10937 }
10939 exit_lbl:
10940 if (unlikely(to_copy))
10941 DP(NETIF_MSG_TX_QUEUED,
10942 "Linearization IS REQUIRED for %s packet. "
10943 "num_frags %d hlen %d first_bd_sz %d\n",
10944 (xmit_type & XMIT_GSO) ? "LSO" : "non-LSO",
10945 skb_shinfo(skb)->nr_frags, hlen, first_bd_sz);
10947 return to_copy;
10948 }
10949 #endif
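/* Worked example for the window check (all numbers illustrative): if
 * MAX_FETCH_BD were 13, wnd_size would be 10, so every run of 10 consecutive
 * BDs must carry at least gso_size bytes. A packet built from many 128-byte
 * frags with gso_size = 1460 would fail (10 * 128 = 1280 < 1460) and be
 * linearized. */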
10951 /* called with netif_tx_lock
10952 * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
10953 * netif_wake_queue()
10954 */
10955 static netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
10956 {
10957 struct bnx2x *bp = netdev_priv(dev);
10958 struct bnx2x_fastpath *fp, *fp_stat;
10959 struct netdev_queue *txq;
10960 struct sw_tx_bd *tx_buf;
10961 struct eth_tx_start_bd *tx_start_bd;
10962 struct eth_tx_bd *tx_data_bd, *total_pkt_bd = NULL;
10963 struct eth_tx_parse_bd *pbd = NULL;
10964 u16 pkt_prod, bd_prod;
10965 int nbd, fp_index;
10966 dma_addr_t mapping;
10967 u32 xmit_type = bnx2x_xmit_type(bp, skb);
10968 int i;
10969 u8 hlen = 0;
10970 __le16 pkt_size = 0;
10972 #ifdef BNX2X_STOP_ON_ERROR
10973 if (unlikely(bp->panic))
10974 return NETDEV_TX_BUSY;
10975 #endif
10977 fp_index = skb_get_queue_mapping(skb);
10978 txq = netdev_get_tx_queue(dev, fp_index);
10980 fp = &bp->fp[fp_index + bp->num_rx_queues];
10981 fp_stat = &bp->fp[fp_index];
10983 if (unlikely(bnx2x_tx_avail(fp) < (skb_shinfo(skb)->nr_frags + 3))) {
10984 fp_stat->eth_q_stats.driver_xoff++;
10985 netif_tx_stop_queue(txq);
10986 BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
10987 return NETDEV_TX_BUSY;
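/* NB: the nr_frags + 3 headroom above mirrors the budget used elsewhere in
 * this file: one BD for the linear data plus two for the parsing BD and the
 * last data BD; bnx2x_tx_split() may add one more, still covered by the
 * MAX_SKB_FRAGS + 3 margin re-checked after queuing. */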
10990 DP(NETIF_MSG_TX_QUEUED, "SKB: summed %x protocol %x protocol(%x,%x)"
10991 " gso type %x xmit_type %x\n",
10992 skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
10993 ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type);
10995 #if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
10996 /* First, check if we need to linearize the skb (due to FW
10997 restrictions). No need to check fragmentation if page size > 8K
10998 (there will be no violation of the FW restrictions) */
10999 if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) {
11000 /* Statistics of linearization */
11001 bp->lin_cnt++;
11002 if (skb_linearize(skb) != 0) {
11003 DP(NETIF_MSG_TX_QUEUED, "SKB linearization failed - "
11004 "silently dropping this SKB\n");
11005 dev_kfree_skb_any(skb);
11006 return NETDEV_TX_OK;
11007 }
11008 }
11009 #endif
11011 /*
11012 Please read carefully. First we use one BD which we mark as start,
11013 then we have a parsing info BD (used for TSO or xsum),
11014 and only then we have the rest of the TSO BDs.
11015 (don't forget to mark the last one as last,
11016 and to unmap only AFTER you write to the BD ...)
11017 And above all, all PBD sizes are in words - NOT DWORDS!
11018 */
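/* Sketch of the BD chain this builds for one packet (my summary of the
 * comment above):
 *
 *   start BD  - flags ETH_TX_BD_FLAGS_START_BD, nbytes = linear part
 *   parse BD  - header lengths in 16-bit words, csum/LSO parameters
 *   data BDs  - one per page frag
 *
 * so nbd = nr_frags + 2, plus one more if bnx2x_tx_split() separates the
 * headers into their own BD. */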
11020 pkt_prod = fp->tx_pkt_prod++;
11021 bd_prod = TX_BD(fp->tx_bd_prod);
11023 /* get a tx_buf and first BD */
11024 tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)];
11025 tx_start_bd = &fp->tx_desc_ring[bd_prod].start_bd;
11027 tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
11028 tx_start_bd->general_data = (UNICAST_ADDRESS <<
11029 ETH_TX_START_BD_ETH_ADDR_TYPE_SHIFT);
11031 tx_start_bd->general_data |= (1 << ETH_TX_START_BD_HDR_NBDS_SHIFT);
11033 /* remember the first BD of the packet */
11034 tx_buf->first_bd = fp->tx_bd_prod;
11035 tx_buf->skb = skb;
11036 tx_buf->flags = 0;
11038 DP(NETIF_MSG_TX_QUEUED,
11039 "sending pkt %u @%p next_idx %u bd %u @%p\n",
11040 pkt_prod, tx_buf, fp->tx_pkt_prod, bd_prod, tx_start_bd);
11042 #ifdef BCM_VLAN
11043 if ((bp->vlgrp != NULL) && vlan_tx_tag_present(skb) &&
11044 (bp->flags & HW_VLAN_TX_FLAG)) {
11045 tx_start_bd->vlan = cpu_to_le16(vlan_tx_tag_get(skb));
11046 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_VLAN_TAG;
11047 } else
11048 #endif
11049 tx_start_bd->vlan = cpu_to_le16(pkt_prod);
11051 /* turn on parsing and get a BD */
11052 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
11053 pbd = &fp->tx_desc_ring[bd_prod].parse_bd;
11055 memset(pbd, 0, sizeof(struct eth_tx_parse_bd));
11057 if (xmit_type & XMIT_CSUM) {
11058 hlen = (skb_network_header(skb) - skb->data) / 2;
11060 /* for now NS flag is not used in Linux */
11061 pbd->global_data =
11062 (hlen | ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
11063 ETH_TX_PARSE_BD_LLC_SNAP_EN_SHIFT));
11065 pbd->ip_hlen = (skb_transport_header(skb) -
11066 skb_network_header(skb)) / 2;
11068 hlen += pbd->ip_hlen + tcp_hdrlen(skb) / 2;
11070 pbd->total_hlen = cpu_to_le16(hlen);
11071 hlen = hlen*2;
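/* Worked example of the words-not-bytes rule (illustrative): untagged
 * Ethernet + IPv4 + TCP without options gives 14/2 + 20/2 + 20/2 = 27 words
 * in pbd->total_hlen; hlen = hlen*2 then restores 54 bytes for the
 * skb_headlen() comparison in the GSO path below. */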
11073 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_L4_CSUM;
11075 if (xmit_type & XMIT_CSUM_V4)
11076 tx_start_bd->bd_flags.as_bitfield |=
11077 ETH_TX_BD_FLAGS_IP_CSUM;
11079 tx_start_bd->bd_flags.as_bitfield |=
11080 ETH_TX_BD_FLAGS_IPV6;
11082 if (xmit_type & XMIT_CSUM_TCP) {
11083 pbd->tcp_pseudo_csum = swab16(tcp_hdr(skb)->check);
11085 } else {
11086 s8 fix = SKB_CS_OFF(skb); /* signed! */
11088 pbd->global_data |= ETH_TX_PARSE_BD_UDP_CS_FLG;
11090 DP(NETIF_MSG_TX_QUEUED,
11091 "hlen %d fix %d csum before fix %x\n",
11092 le16_to_cpu(pbd->total_hlen), fix, SKB_CS(skb));
11094 /* HW bug: fixup the CSUM */
11095 pbd->tcp_pseudo_csum =
11096 bnx2x_csum_fix(skb_transport_header(skb),
11097 SKB_CS(skb), fix);
11099 DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n",
11100 pbd->tcp_pseudo_csum);
11104 mapping = pci_map_single(bp->pdev, skb->data,
11105 skb_headlen(skb), PCI_DMA_TODEVICE);
11107 tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
11108 tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
11109 nbd = skb_shinfo(skb)->nr_frags + 2; /* start_bd + pbd + frags */
11110 tx_start_bd->nbd = cpu_to_le16(nbd);
11111 tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb));
11112 pkt_size = tx_start_bd->nbytes;
11114 DP(NETIF_MSG_TX_QUEUED, "first bd @%p addr (%x:%x) nbd %d"
11115 " nbytes %d flags %x vlan %x\n",
11116 tx_start_bd, tx_start_bd->addr_hi, tx_start_bd->addr_lo,
11117 le16_to_cpu(tx_start_bd->nbd), le16_to_cpu(tx_start_bd->nbytes),
11118 tx_start_bd->bd_flags.as_bitfield, le16_to_cpu(tx_start_bd->vlan));
11120 if (xmit_type & XMIT_GSO) {
11122 DP(NETIF_MSG_TX_QUEUED,
11123 "TSO packet len %d hlen %d total len %d tso size %d\n",
11124 skb->len, hlen, skb_headlen(skb),
11125 skb_shinfo(skb)->gso_size);
11127 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;
11129 if (unlikely(skb_headlen(skb) > hlen))
11130 bd_prod = bnx2x_tx_split(bp, fp, tx_buf, &tx_start_bd,
11131 hlen, bd_prod, ++nbd);
11133 pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
11134 pbd->tcp_send_seq = swab32(tcp_hdr(skb)->seq);
11135 pbd->tcp_flags = pbd_tcp_flags(skb);
11137 if (xmit_type & XMIT_GSO_V4) {
11138 pbd->ip_id = swab16(ip_hdr(skb)->id);
11139 pbd->tcp_pseudo_csum =
11140 swab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
11141 ip_hdr(skb)->daddr,
11142 0, IPPROTO_TCP, 0));
11144 } else
11145 pbd->tcp_pseudo_csum =
11146 swab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
11147 &ipv6_hdr(skb)->daddr,
11148 0, IPPROTO_TCP, 0));
11150 pbd->global_data |= ETH_TX_PARSE_BD_PSEUDO_CS_WITHOUT_LEN;
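/* My reading of PSEUDO_CS_WITHOUT_LEN: the pseudo checksum seeded above is
 * computed with a zero length field (csum_tcpudp_magic(..., 0, ...)), and
 * this flag appears to ask the chip to fold the real per-segment length in
 * while it rewrites each TCP header during LSO. */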
11151 }
11152 tx_data_bd = (struct eth_tx_bd *)tx_start_bd;
11154 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
11155 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
11157 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
11158 tx_data_bd = &fp->tx_desc_ring[bd_prod].reg_bd;
11159 if (total_pkt_bd == NULL)
11160 total_pkt_bd = &fp->tx_desc_ring[bd_prod].reg_bd;
11162 mapping = pci_map_page(bp->pdev, frag->page, frag->page_offset,
11163 frag->size, PCI_DMA_TODEVICE);
11165 tx_data_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
11166 tx_data_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
11167 tx_data_bd->nbytes = cpu_to_le16(frag->size);
11168 le16_add_cpu(&pkt_size, frag->size);
11170 DP(NETIF_MSG_TX_QUEUED,
11171 "frag %d bd @%p addr (%x:%x) nbytes %d\n",
11172 i, tx_data_bd, tx_data_bd->addr_hi, tx_data_bd->addr_lo,
11173 le16_to_cpu(tx_data_bd->nbytes));
11176 DP(NETIF_MSG_TX_QUEUED, "last bd @%p\n", tx_data_bd);
11178 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
11180 /* now send a tx doorbell, counting the next BD
11181 * if the packet contains or ends with it
11182 */
11183 if (TX_BD_POFF(bd_prod) < nbd)
11184 nbd++;
11186 if (total_pkt_bd != NULL)
11187 total_pkt_bd->total_pkt_bytes = pkt_size;
11190 DP(NETIF_MSG_TX_QUEUED,
11191 "PBD @%p ip_data %x ip_hlen %u ip_id %u lso_mss %u"
11192 " tcp_flags %x xsum %x seq %u hlen %u\n",
11193 pbd, pbd->global_data, pbd->ip_hlen, pbd->ip_id,
11194 pbd->lso_mss, pbd->tcp_flags, pbd->tcp_pseudo_csum,
11195 pbd->tcp_send_seq, le16_to_cpu(pbd->total_hlen));
11197 DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d bd %u\n", nbd, bd_prod);
11199 /*
11200 * Make sure that the BD data is updated before updating the producer
11201 * since FW might read the BD right after the producer is updated.
11202 * This is only applicable for weak-ordered memory model archs such
11203 * as IA-64. The following barrier is also mandatory since FW will
11204 * assume packets must have BDs.
11205 */
11206 wmb();
11208 fp->tx_db.data.prod += nbd;
11210 DOORBELL(bp, fp->index - bp->num_rx_queues, fp->tx_db.raw);
11214 fp->tx_bd_prod += nbd;
11216 if (unlikely(bnx2x_tx_avail(fp) < MAX_SKB_FRAGS + 3)) {
11217 netif_tx_stop_queue(txq);
11218 /* We want bnx2x_tx_int to "see" the updated tx_bd_prod
11219 if we put Tx into XOFF state. */
11220 smp_mb();
11221 fp_stat->eth_q_stats.driver_xoff++;
11222 if (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3)
11223 netif_tx_wake_queue(txq);
11227 return NETDEV_TX_OK;
11230 /* called with rtnl_lock */
11231 static int bnx2x_open(struct net_device *dev)
11233 struct bnx2x *bp = netdev_priv(dev);
11235 netif_carrier_off(dev);
11237 bnx2x_set_power_state(bp, PCI_D0);
11239 return bnx2x_nic_load(bp, LOAD_OPEN);
11242 /* called with rtnl_lock */
11243 static int bnx2x_close(struct net_device *dev)
11245 struct bnx2x *bp = netdev_priv(dev);
11247 /* Unload the driver, release IRQs */
11248 bnx2x_nic_unload(bp, UNLOAD_CLOSE);
11249 if (atomic_read(&bp->pdev->enable_cnt) == 1)
11250 if (!CHIP_REV_IS_SLOW(bp))
11251 bnx2x_set_power_state(bp, PCI_D3hot);
11253 return 0;
11254 }
11256 /* called with netif_tx_lock from dev_mcast.c */
11257 static void bnx2x_set_rx_mode(struct net_device *dev)
11259 struct bnx2x *bp = netdev_priv(dev);
11260 u32 rx_mode = BNX2X_RX_MODE_NORMAL;
11261 int port = BP_PORT(bp);
11263 if (bp->state != BNX2X_STATE_OPEN) {
11264 DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
11265 return;
11266 }
11268 DP(NETIF_MSG_IFUP, "dev->flags = %x\n", dev->flags);
11270 if (dev->flags & IFF_PROMISC)
11271 rx_mode = BNX2X_RX_MODE_PROMISC;
11273 else if ((dev->flags & IFF_ALLMULTI) ||
11274 ((dev->mc_count > BNX2X_MAX_MULTICAST) && CHIP_IS_E1(bp)))
11275 rx_mode = BNX2X_RX_MODE_ALLMULTI;
11277 else { /* some multicasts */
11278 if (CHIP_IS_E1(bp)) {
11279 int i, old, offset;
11280 struct dev_mc_list *mclist;
11281 struct mac_configuration_cmd *config =
11282 bnx2x_sp(bp, mcast_config);
11284 for (i = 0, mclist = dev->mc_list;
11285 mclist && (i < dev->mc_count);
11286 i++, mclist = mclist->next) {
11288 config->config_table[i].
11289 cam_entry.msb_mac_addr =
11290 swab16(*(u16 *)&mclist->dmi_addr[0]);
11291 config->config_table[i].
11292 cam_entry.middle_mac_addr =
11293 swab16(*(u16 *)&mclist->dmi_addr[2]);
11294 config->config_table[i].
11295 cam_entry.lsb_mac_addr =
11296 swab16(*(u16 *)&mclist->dmi_addr[4]);
11297 config->config_table[i].cam_entry.flags =
11298 cpu_to_le16(port);
11299 config->config_table[i].
11300 target_table_entry.flags = 0;
11301 config->config_table[i].target_table_entry.
11302 clients_bit_vector =
11303 cpu_to_le32(1 << BP_L_ID(bp));
11304 config->config_table[i].
11305 target_table_entry.vlan_id = 0;
11308 "setting MCAST[%d] (%04x:%04x:%04x)\n", i,
11309 config->config_table[i].
11310 cam_entry.msb_mac_addr,
11311 config->config_table[i].
11312 cam_entry.middle_mac_addr,
11313 config->config_table[i].
11314 cam_entry.lsb_mac_addr);
11316 old = config->hdr.length;
11318 for (; i < old; i++) {
11319 if (CAM_IS_INVALID(config->
11320 config_table[i])) {
11321 /* already invalidated */
11322 break;
11323 }
11324 /* invalidate */
11325 CAM_INVALIDATE(config->
11326 config_table[i]);
11327 }
11330 if (CHIP_REV_IS_SLOW(bp))
11331 offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
11332 else
11333 offset = BNX2X_MAX_MULTICAST*(1 + port);
11335 config->hdr.length = i;
11336 config->hdr.offset = offset;
11337 config->hdr.client_id = bp->fp->cl_id;
11338 config->hdr.reserved1 = 0;
11340 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
11341 U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
11342 U64_LO(bnx2x_sp_mapping(bp, mcast_config)),
11343 0);
11344 } else {
11345 /* Accept one or more multicasts */
11346 struct dev_mc_list *mclist;
11347 u32 mc_filter[MC_HASH_SIZE];
11348 u32 crc, bit, regidx;
11349 int i;
11351 memset(mc_filter, 0, 4 * MC_HASH_SIZE);
11353 for (i = 0, mclist = dev->mc_list;
11354 mclist && (i < dev->mc_count);
11355 i++, mclist = mclist->next) {
11357 DP(NETIF_MSG_IFUP, "Adding mcast MAC: %pM\n",
11360 crc = crc32c_le(0, mclist->dmi_addr, ETH_ALEN);
11361 bit = (crc >> 24) & 0xff;
11364 mc_filter[regidx] |= (1 << bit);
11367 for (i = 0; i < MC_HASH_SIZE; i++)
11368 REG_WR(bp, MC_HASH_OFFSET(bp, i),
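/* A minimal sketch of the hash placement (illustration only, values made
 * up; kept under #if 0 on purpose): */
#if 0
{
	u8 mac[ETH_ALEN] = {0x01, 0x00, 0x5e, 0x00, 0x00, 0x01};
	u32 crc = crc32c_le(0, mac, ETH_ALEN);	/* say 0x4a3b2c1d */
	u32 bit = (crc >> 24) & 0xff;		/* 0x4a = 74 */
	u32 regidx = bit >> 5;			/* 74 / 32 = 2 */

	bit &= 0x1f;				/* 74 % 32 = 10 */
	/* -> set bit 10 in MC_HASH register 2 of the 256-bit table */
}
#endif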
11373 bp->rx_mode = rx_mode;
11374 bnx2x_set_storm_rx_mode(bp);
11377 /* called with rtnl_lock */
11378 static int bnx2x_change_mac_addr(struct net_device *dev, void *p)
11380 struct sockaddr *addr = p;
11381 struct bnx2x *bp = netdev_priv(dev);
11383 if (!is_valid_ether_addr((u8 *)(addr->sa_data)))
11384 return -EINVAL;
11386 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
11387 if (netif_running(dev)) {
11388 if (CHIP_IS_E1(bp))
11389 bnx2x_set_mac_addr_e1(bp, 1);
11390 else
11391 bnx2x_set_mac_addr_e1h(bp, 1);
11392 }
11394 return 0;
11395 }
11397 /* called with rtnl_lock */
11398 static int bnx2x_mdio_read(struct net_device *netdev, int prtad,
11399 int devad, u16 addr)
11401 struct bnx2x *bp = netdev_priv(netdev);
11402 int rc;
11403 u16 value;
11404 u32 phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
11406 DP(NETIF_MSG_LINK, "mdio_read: prtad 0x%x, devad 0x%x, addr 0x%x\n",
11407 prtad, devad, addr);
11409 if (prtad != bp->mdio.prtad) {
11410 DP(NETIF_MSG_LINK, "prtad missmatch (cmd:0x%x != bp:0x%x)\n",
11411 prtad, bp->mdio.prtad);
11415 /* The HW expects different devad if CL22 is used */
11416 devad = (devad == MDIO_DEVAD_NONE) ? DEFAULT_PHY_DEV_ADDR : devad;
11418 bnx2x_acquire_phy_lock(bp);
11419 rc = bnx2x_cl45_read(bp, BP_PORT(bp), phy_type, prtad,
11420 devad, addr, &value);
11421 bnx2x_release_phy_lock(bp);
11422 DP(NETIF_MSG_LINK, "mdio_read_val 0x%x rc = 0x%x\n", value, rc);
11429 /* called with rtnl_lock */
11430 static int bnx2x_mdio_write(struct net_device *netdev, int prtad, int devad,
11431 u16 addr, u16 value)
11433 struct bnx2x *bp = netdev_priv(netdev);
11434 u32 ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
11435 int rc;
11437 DP(NETIF_MSG_LINK, "mdio_write: prtad 0x%x, devad 0x%x, addr 0x%x,"
11438 " value 0x%x\n", prtad, devad, addr, value);
11440 if (prtad != bp->mdio.prtad) {
11441 DP(NETIF_MSG_LINK, "prtad missmatch (cmd:0x%x != bp:0x%x)\n",
11442 prtad, bp->mdio.prtad);
11446 /* The HW expects different devad if CL22 is used */
11447 devad = (devad == MDIO_DEVAD_NONE) ? DEFAULT_PHY_DEV_ADDR : devad;
11449 bnx2x_acquire_phy_lock(bp);
11450 rc = bnx2x_cl45_write(bp, BP_PORT(bp), ext_phy_type, prtad,
11451 devad, addr, value);
11452 bnx2x_release_phy_lock(bp);
11454 return rc;
11455 }
11456 /* called with rtnl_lock */
11457 static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
11459 struct bnx2x *bp = netdev_priv(dev);
11460 struct mii_ioctl_data *mdio = if_mii(ifr);
11462 DP(NETIF_MSG_LINK, "ioctl: phy id 0x%x, reg 0x%x, val_in 0x%x\n",
11463 mdio->phy_id, mdio->reg_num, mdio->val_in);
11465 if (!netif_running(dev))
11466 return -EAGAIN;
11468 return mdio_mii_ioctl(&bp->mdio, mdio, cmd);
11471 /* called with rtnl_lock */
11472 static int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
11474 struct bnx2x *bp = netdev_priv(dev);
11475 int rc = 0;
11477 if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
11478 ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE))
11479 return -EINVAL;
11481 /* This does not race with packet allocation
11482 * because the actual alloc size is
11483 * only updated as part of load
11484 */
11485 dev->mtu = new_mtu;
11487 if (netif_running(dev)) {
11488 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
11489 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
11490 }
11492 return rc;
11493 }
11495 static void bnx2x_tx_timeout(struct net_device *dev)
11497 struct bnx2x *bp = netdev_priv(dev);
11499 #ifdef BNX2X_STOP_ON_ERROR
11500 if (!bp->panic)
11501 bnx2x_panic();
11502 #endif
11503 /* This allows the netif to be shutdown gracefully before resetting */
11504 schedule_work(&bp->reset_task);
11508 /* called with rtnl_lock */
11509 static void bnx2x_vlan_rx_register(struct net_device *dev,
11510 struct vlan_group *vlgrp)
11512 struct bnx2x *bp = netdev_priv(dev);
11514 bp->vlgrp = vlgrp;
11516 /* Set flags according to the required capabilities */
11517 bp->flags &= ~(HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG);
11519 if (dev->features & NETIF_F_HW_VLAN_TX)
11520 bp->flags |= HW_VLAN_TX_FLAG;
11522 if (dev->features & NETIF_F_HW_VLAN_RX)
11523 bp->flags |= HW_VLAN_RX_FLAG;
11525 if (netif_running(dev))
11526 bnx2x_set_client_config(bp);
11531 #if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
11532 static void poll_bnx2x(struct net_device *dev)
11534 struct bnx2x *bp = netdev_priv(dev);
11536 disable_irq(bp->pdev->irq);
11537 bnx2x_interrupt(bp->pdev->irq, dev);
11538 enable_irq(bp->pdev->irq);
11539 }
11540 #endif
11542 static const struct net_device_ops bnx2x_netdev_ops = {
11543 .ndo_open = bnx2x_open,
11544 .ndo_stop = bnx2x_close,
11545 .ndo_start_xmit = bnx2x_start_xmit,
11546 .ndo_set_multicast_list = bnx2x_set_rx_mode,
11547 .ndo_set_mac_address = bnx2x_change_mac_addr,
11548 .ndo_validate_addr = eth_validate_addr,
11549 .ndo_do_ioctl = bnx2x_ioctl,
11550 .ndo_change_mtu = bnx2x_change_mtu,
11551 .ndo_tx_timeout = bnx2x_tx_timeout,
11552 #ifdef BCM_VLAN
11553 .ndo_vlan_rx_register = bnx2x_vlan_rx_register,
11554 #endif
11555 #if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
11556 .ndo_poll_controller = poll_bnx2x,
11557 #endif
11558 };
11560 static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
11561 struct net_device *dev)
11562 {
11563 struct bnx2x *bp;
11564 int rc;
11566 SET_NETDEV_DEV(dev, &pdev->dev);
11567 bp = netdev_priv(dev);
11569 bp->dev = dev;
11570 bp->pdev = pdev;
11571 bp->flags = 0;
11572 bp->func = PCI_FUNC(pdev->devfn);
11574 rc = pci_enable_device(pdev);
11576 printk(KERN_ERR PFX "Cannot enable PCI device, aborting\n");
11580 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
11581 printk(KERN_ERR PFX "Cannot find PCI device base address,"
11584 goto err_out_disable;
11587 if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
11588 printk(KERN_ERR PFX "Cannot find second PCI device"
11589 " base address, aborting\n");
11591 goto err_out_disable;
11594 if (atomic_read(&pdev->enable_cnt) == 1) {
11595 rc = pci_request_regions(pdev, DRV_MODULE_NAME);
11597 printk(KERN_ERR PFX "Cannot obtain PCI resources,"
11599 goto err_out_disable;
11602 pci_set_master(pdev);
11603 pci_save_state(pdev);
11606 bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
11607 if (bp->pm_cap == 0) {
11608 printk(KERN_ERR PFX "Cannot find power management"
11609 " capability, aborting\n");
11611 goto err_out_release;
11614 bp->pcie_cap = pci_find_capability(pdev, PCI_CAP_ID_EXP);
11615 if (bp->pcie_cap == 0) {
11616 printk(KERN_ERR PFX "Cannot find PCI Express capability,"
11619 goto err_out_release;
11622 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) == 0) {
11623 bp->flags |= USING_DAC_FLAG;
11624 if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)) != 0) {
11625 printk(KERN_ERR PFX "pci_set_consistent_dma_mask"
11626 " failed, aborting\n");
11628 goto err_out_release;
11631 } else if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) {
11632 printk(KERN_ERR PFX "System does not support DMA,"
11635 goto err_out_release;
11638 dev->mem_start = pci_resource_start(pdev, 0);
11639 dev->base_addr = dev->mem_start;
11640 dev->mem_end = pci_resource_end(pdev, 0);
11642 dev->irq = pdev->irq;
11644 bp->regview = pci_ioremap_bar(pdev, 0);
11645 if (!bp->regview) {
11646 printk(KERN_ERR PFX "Cannot map register space, aborting\n");
11648 goto err_out_release;
11651 bp->doorbells = ioremap_nocache(pci_resource_start(pdev, 2),
11652 min_t(u64, BNX2X_DB_SIZE,
11653 pci_resource_len(pdev, 2)));
11654 if (!bp->doorbells) {
11655 printk(KERN_ERR PFX "Cannot map doorbell space, aborting\n");
11657 goto err_out_unmap;
11660 bnx2x_set_power_state(bp, PCI_D0);
11662 /* clean indirect addresses */
11663 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
11664 PCICFG_VENDOR_ID_OFFSET);
11665 REG_WR(bp, PXP2_REG_PGL_ADDR_88_F0 + BP_PORT(bp)*16, 0);
11666 REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F0 + BP_PORT(bp)*16, 0);
11667 REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0 + BP_PORT(bp)*16, 0);
11668 REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0 + BP_PORT(bp)*16, 0);
11670 dev->watchdog_timeo = TX_TIMEOUT;
11672 dev->netdev_ops = &bnx2x_netdev_ops;
11673 dev->ethtool_ops = &bnx2x_ethtool_ops;
11674 dev->features |= NETIF_F_SG;
11675 dev->features |= NETIF_F_HW_CSUM;
11676 if (bp->flags & USING_DAC_FLAG)
11677 dev->features |= NETIF_F_HIGHDMA;
11678 dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
11679 dev->features |= NETIF_F_TSO6;
11680 #ifdef BCM_VLAN
11681 dev->features |= (NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX);
11682 bp->flags |= (HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG);
11684 dev->vlan_features |= NETIF_F_SG;
11685 dev->vlan_features |= NETIF_F_HW_CSUM;
11686 if (bp->flags & USING_DAC_FLAG)
11687 dev->vlan_features |= NETIF_F_HIGHDMA;
11688 dev->vlan_features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
11689 dev->vlan_features |= NETIF_F_TSO6;
11690 #endif
11692 /* get_port_hwinfo() will set prtad and mmds properly */
11693 bp->mdio.prtad = MDIO_PRTAD_NONE;
11694 bp->mdio.mmds = 0;
11695 bp->mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22;
11696 bp->mdio.dev = dev;
11697 bp->mdio.mdio_read = bnx2x_mdio_read;
11698 bp->mdio.mdio_write = bnx2x_mdio_write;
11700 return 0;
11702 err_out_unmap:
11703 if (bp->regview) {
11704 iounmap(bp->regview);
11705 bp->regview = NULL;
11706 }
11707 if (bp->doorbells) {
11708 iounmap(bp->doorbells);
11709 bp->doorbells = NULL;
11710 }
11712 err_out_release:
11713 if (atomic_read(&pdev->enable_cnt) == 1)
11714 pci_release_regions(pdev);
11716 err_out_disable:
11717 pci_disable_device(pdev);
11718 pci_set_drvdata(pdev, NULL);
11720 err_out:
11721 return rc;
11722 }
11724 static void __devinit bnx2x_get_pcie_width_speed(struct bnx2x *bp,
11725 int *width, int *speed)
11727 u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);
11729 *width = (val & PCICFG_LINK_WIDTH) >> PCICFG_LINK_WIDTH_SHIFT;
11731 /* return value of 1=2.5GHz 2=5GHz */
11732 *speed = (val & PCICFG_LINK_SPEED) >> PCICFG_LINK_SPEED_SHIFT;
11733 }
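/* NB: the link-control word above is read through the PCICFG window rather
 * than the PCIe capability; *width is the negotiated lane count and *speed
 * the encoded generation, which bnx2x_init_one() below prints as "2.5GHz"
 * or "5GHz (Gen2)". */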
11735 static int __devinit bnx2x_check_firmware(struct bnx2x *bp)
11737 const struct firmware *firmware = bp->firmware;
11738 struct bnx2x_fw_file_hdr *fw_hdr;
11739 struct bnx2x_fw_file_section *sections;
11740 u32 offset, len, num_ops;
11741 u16 *ops_offsets;
11742 int i;
11743 const u8 *fw_ver;
11745 if (firmware->size < sizeof(struct bnx2x_fw_file_hdr))
11746 return -EINVAL;
11748 fw_hdr = (struct bnx2x_fw_file_hdr *)firmware->data;
11749 sections = (struct bnx2x_fw_file_section *)fw_hdr;
11751 /* Make sure none of the offsets and sizes make us read beyond
11752 * the end of the firmware data */
11753 for (i = 0; i < sizeof(*fw_hdr) / sizeof(*sections); i++) {
11754 offset = be32_to_cpu(sections[i].offset);
11755 len = be32_to_cpu(sections[i].len);
11756 if (offset + len > firmware->size) {
11757 printk(KERN_ERR PFX "Section %d length is out of "
11763 /* Likewise for the init_ops offsets */
11764 offset = be32_to_cpu(fw_hdr->init_ops_offsets.offset);
11765 ops_offsets = (u16 *)(firmware->data + offset);
11766 num_ops = be32_to_cpu(fw_hdr->init_ops.len) / sizeof(struct raw_op);
11768 for (i = 0; i < be32_to_cpu(fw_hdr->init_ops_offsets.len) / 2; i++) {
11769 if (be16_to_cpu(ops_offsets[i]) > num_ops) {
11770 printk(KERN_ERR PFX "Section offset %d is out of "
11776 /* Check FW version */
11777 offset = be32_to_cpu(fw_hdr->fw_version.offset);
11778 fw_ver = firmware->data + offset;
11779 if ((fw_ver[0] != BCM_5710_FW_MAJOR_VERSION) ||
11780 (fw_ver[1] != BCM_5710_FW_MINOR_VERSION) ||
11781 (fw_ver[2] != BCM_5710_FW_REVISION_VERSION) ||
11782 (fw_ver[3] != BCM_5710_FW_ENGINEERING_VERSION)) {
11783 printk(KERN_ERR PFX "Bad FW version:%d.%d.%d.%d."
11784 " Should be %d.%d.%d.%d\n",
11785 fw_ver[0], fw_ver[1], fw_ver[2],
11786 fw_ver[3], BCM_5710_FW_MAJOR_VERSION,
11787 BCM_5710_FW_MINOR_VERSION,
11788 BCM_5710_FW_REVISION_VERSION,
11789 BCM_5710_FW_ENGINEERING_VERSION);
11790 return -EINVAL;
11791 }
11793 return 0;
11794 }
11796 static inline void be32_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
11797 {
11798 const __be32 *source = (const __be32 *)_source;
11799 u32 *target = (u32 *)_target;
11800 u32 i;
11802 for (i = 0; i < n/4; i++)
11803 target[i] = be32_to_cpu(source[i]);
11804 }
11806 /*
11807 Ops array is stored in the following format:
11808 {op(8bit), offset(24bit, big endian), data(32bit, big endian)}
11809 */
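/* Decoding sketch for one 8-byte record (bytes invented for illustration,
 * kept under #if 0): */
#if 0
{
	__be32 src[2] = { cpu_to_be32(0x02003000), cpu_to_be32(0x00000010) };
	u32 tmp = be32_to_cpu(src[0]);		/* 0x02003000 */
	u8 op = (tmp >> 24) & 0xff;		/* 0x02 */
	u32 offset = tmp & 0xffffff;		/* 0x003000 */
	u32 data = be32_to_cpu(src[1]);		/* 0x00000010 */
}
#endif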
11810 static inline void bnx2x_prep_ops(const u8 *_source, u8 *_target, u32 n)
11811 {
11812 const __be32 *source = (const __be32 *)_source;
11813 struct raw_op *target = (struct raw_op *)_target;
11814 u32 i, j, tmp;
11816 for (i = 0, j = 0; i < n/8; i++, j += 2) {
11817 tmp = be32_to_cpu(source[j]);
11818 target[i].op = (tmp >> 24) & 0xff;
11819 target[i].offset = tmp & 0xffffff;
11820 target[i].raw_data = be32_to_cpu(source[j+1]);
11821 }
11822 }
11824 static inline void be16_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
11825 {
11826 const __be16 *source = (const __be16 *)_source;
11827 u16 *target = (u16 *)_target;
11828 u32 i;
11830 for (i = 0; i < n/2; i++)
11831 target[i] = be16_to_cpu(source[i]);
11832 }
11834 #define BNX2X_ALLOC_AND_SET(arr, lbl, func) \
11835 do { \
11836 u32 len = be32_to_cpu(fw_hdr->arr.len); \
11837 bp->arr = kmalloc(len, GFP_KERNEL); \
11838 if (!bp->arr) { \
11839 printk(KERN_ERR PFX "Failed to allocate %d bytes " \
11840 "for "#arr"\n", len); \
11841 goto lbl; \
11842 } \
11843 func(bp->firmware->data + be32_to_cpu(fw_hdr->arr.offset), \
11844 (u8 *)bp->arr, len); \
11845 } while (0)
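/* NB: the macro takes a caller-supplied error label so that
 * bnx2x_init_firmware() can unwind only the allocations that already
 * succeeded - see the kfree() ladder at init_offsets_alloc_err /
 * init_ops_alloc_err / request_firmware_exit below. */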
11847 static int __devinit bnx2x_init_firmware(struct bnx2x *bp, struct device *dev)
11849 char fw_file_name[40] = {0};
11850 struct bnx2x_fw_file_hdr *fw_hdr;
11851 int rc, offset;
11853 /* Create a FW file name */
11854 if (CHIP_IS_E1(bp))
11855 offset = sprintf(fw_file_name, FW_FILE_PREFIX_E1);
11857 offset = sprintf(fw_file_name, FW_FILE_PREFIX_E1H);
11859 sprintf(fw_file_name + offset, "%d.%d.%d.%d.fw",
11860 BCM_5710_FW_MAJOR_VERSION,
11861 BCM_5710_FW_MINOR_VERSION,
11862 BCM_5710_FW_REVISION_VERSION,
11863 BCM_5710_FW_ENGINEERING_VERSION);
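/* For example (illustrative), an E1H device with firmware version M.m.r.e
 * ends up requesting "bnx2x-e1h-M.m.r.e.fw", where the four numbers are the
 * BCM_5710_FW_* constants compiled into the driver. */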
11865 printk(KERN_INFO PFX "Loading %s\n", fw_file_name);
11867 rc = request_firmware(&bp->firmware, fw_file_name, dev);
11869 printk(KERN_ERR PFX "Can't load firmware file %s\n",
11871 goto request_firmware_exit;
11874 rc = bnx2x_check_firmware(bp);
11876 printk(KERN_ERR PFX "Corrupt firmware file %s\n", fw_file_name);
11877 goto request_firmware_exit;
11880 fw_hdr = (struct bnx2x_fw_file_hdr *)bp->firmware->data;
11882 /* Initialize the pointers to the init arrays */
11884 BNX2X_ALLOC_AND_SET(init_data, request_firmware_exit, be32_to_cpu_n);
11887 BNX2X_ALLOC_AND_SET(init_ops, init_ops_alloc_err, bnx2x_prep_ops);
11890 BNX2X_ALLOC_AND_SET(init_ops_offsets, init_offsets_alloc_err,
11891 be16_to_cpu_n);
11893 /* STORMs firmware */
11894 INIT_TSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
11895 be32_to_cpu(fw_hdr->tsem_int_table_data.offset);
11896 INIT_TSEM_PRAM_DATA(bp) = bp->firmware->data +
11897 be32_to_cpu(fw_hdr->tsem_pram_data.offset);
11898 INIT_USEM_INT_TABLE_DATA(bp) = bp->firmware->data +
11899 be32_to_cpu(fw_hdr->usem_int_table_data.offset);
11900 INIT_USEM_PRAM_DATA(bp) = bp->firmware->data +
11901 be32_to_cpu(fw_hdr->usem_pram_data.offset);
11902 INIT_XSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
11903 be32_to_cpu(fw_hdr->xsem_int_table_data.offset);
11904 INIT_XSEM_PRAM_DATA(bp) = bp->firmware->data +
11905 be32_to_cpu(fw_hdr->xsem_pram_data.offset);
11906 INIT_CSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
11907 be32_to_cpu(fw_hdr->csem_int_table_data.offset);
11908 INIT_CSEM_PRAM_DATA(bp) = bp->firmware->data +
11909 be32_to_cpu(fw_hdr->csem_pram_data.offset);
11911 return 0;
11913 init_offsets_alloc_err:
11914 kfree(bp->init_ops);
11915 init_ops_alloc_err:
11916 kfree(bp->init_data);
11917 request_firmware_exit:
11918 release_firmware(bp->firmware);
11920 return rc;
11921 }
11924 static int __devinit bnx2x_init_one(struct pci_dev *pdev,
11925 const struct pci_device_id *ent)
11927 struct net_device *dev = NULL;
11928 struct bnx2x *bp;
11929 int pcie_width, pcie_speed;
11930 int rc;
11932 /* dev zeroed in init_etherdev */
11933 dev = alloc_etherdev_mq(sizeof(*bp), MAX_CONTEXT);
11934 if (!dev) {
11935 printk(KERN_ERR PFX "Cannot allocate net device\n");
11936 return -ENOMEM;
11937 }
11939 bp = netdev_priv(dev);
11940 bp->msglevel = debug;
11942 pci_set_drvdata(pdev, dev);
11944 rc = bnx2x_init_dev(pdev, dev);
11945 if (rc)
11946 goto init_one_exit;
11950 rc = bnx2x_init_bp(bp);
11951 if (rc)
11952 goto init_one_exit;
11954 /* Set init arrays */
11955 rc = bnx2x_init_firmware(bp, &pdev->dev);
11956 if (rc) {
11957 printk(KERN_ERR PFX "Error loading firmware\n");
11958 goto init_one_exit;
11959 }
11961 rc = register_netdev(dev);
11962 if (rc) {
11963 dev_err(&pdev->dev, "Cannot register net device\n");
11964 goto init_one_exit;
11965 }
11967 bnx2x_get_pcie_width_speed(bp, &pcie_width, &pcie_speed);
11968 printk(KERN_INFO "%s: %s (%c%d) PCI-E x%d %s found at mem %lx,"
11969 " IRQ %d, ", dev->name, board_info[ent->driver_data].name,
11970 (CHIP_REV(bp) >> 12) + 'A', (CHIP_METAL(bp) >> 4),
11971 pcie_width, (pcie_speed == 2) ? "5GHz (Gen2)" : "2.5GHz",
11972 dev->base_addr, bp->pdev->irq);
11973 printk(KERN_CONT "node addr %pM\n", dev->dev_addr);
11979 iounmap(bp->regview);
11982 iounmap(bp->doorbells);
11986 if (atomic_read(&pdev->enable_cnt) == 1)
11987 pci_release_regions(pdev);
11989 pci_disable_device(pdev);
11990 pci_set_drvdata(pdev, NULL);
11992 return rc;
11993 }
11995 static void __devexit bnx2x_remove_one(struct pci_dev *pdev)
11997 struct net_device *dev = pci_get_drvdata(pdev);
11998 struct bnx2x *bp;
12000 if (!dev) {
12001 printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
12002 return;
12003 }
12004 bp = netdev_priv(dev);
12006 unregister_netdev(dev);
12008 kfree(bp->init_ops_offsets);
12009 kfree(bp->init_ops);
12010 kfree(bp->init_data);
12011 release_firmware(bp->firmware);
12013 if (bp->regview)
12014 iounmap(bp->regview);
12016 if (bp->doorbells)
12017 iounmap(bp->doorbells);
12019 free_netdev(dev);
12021 if (atomic_read(&pdev->enable_cnt) == 1)
12022 pci_release_regions(pdev);
12024 pci_disable_device(pdev);
12025 pci_set_drvdata(pdev, NULL);
12028 static int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
12030 struct net_device *dev = pci_get_drvdata(pdev);
12031 struct bnx2x *bp;
12033 if (!dev) {
12034 printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
12035 return -ENODEV;
12036 }
12037 bp = netdev_priv(dev);
12039 rtnl_lock();
12041 pci_save_state(pdev);
12043 if (!netif_running(dev)) {
12044 rtnl_unlock();
12045 return 0;
12046 }
12048 netif_device_detach(dev);
12050 bnx2x_nic_unload(bp, UNLOAD_CLOSE);
12052 bnx2x_set_power_state(bp, pci_choose_state(pdev, state));
12054 rtnl_unlock();
12056 return 0;
12057 }
12059 static int bnx2x_resume(struct pci_dev *pdev)
12061 struct net_device *dev = pci_get_drvdata(pdev);
12062 struct bnx2x *bp;
12063 int rc;
12065 if (!dev) {
12066 printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
12067 return -ENODEV;
12068 }
12069 bp = netdev_priv(dev);
12071 rtnl_lock();
12073 pci_restore_state(pdev);
12075 if (!netif_running(dev)) {
12076 rtnl_unlock();
12077 return 0;
12078 }
12080 bnx2x_set_power_state(bp, PCI_D0);
12081 netif_device_attach(dev);
12083 rc = bnx2x_nic_load(bp, LOAD_OPEN);
12085 rtnl_unlock();
12087 return rc;
12088 }
12090 static int bnx2x_eeh_nic_unload(struct bnx2x *bp)
12091 {
12092 int i;
12094 bp->state = BNX2X_STATE_ERROR;
12096 bp->rx_mode = BNX2X_RX_MODE_NONE;
12098 bnx2x_netif_stop(bp, 0);
12100 del_timer_sync(&bp->timer);
12101 bp->stats_state = STATS_STATE_DISABLED;
12102 DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");
12105 bnx2x_free_irq(bp);
12107 if (CHIP_IS_E1(bp)) {
12108 struct mac_configuration_cmd *config =
12109 bnx2x_sp(bp, mcast_config);
12111 for (i = 0; i < config->hdr.length; i++)
12112 CAM_INVALIDATE(config->config_table[i]);
12115 /* Free SKBs, SGEs, TPA pool and driver internals */
12116 bnx2x_free_skbs(bp);
12117 for_each_rx_queue(bp, i)
12118 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
12119 for_each_rx_queue(bp, i)
12120 netif_napi_del(&bnx2x_fp(bp, i, napi));
12121 bnx2x_free_mem(bp);
12123 bp->state = BNX2X_STATE_CLOSED;
12125 netif_carrier_off(bp->dev);
12127 return 0;
12128 }
12130 static void bnx2x_eeh_recover(struct bnx2x *bp)
12131 {
12132 u32 val;
12134 mutex_init(&bp->port.phy_mutex);
12136 bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
12137 bp->link_params.shmem_base = bp->common.shmem_base;
12138 BNX2X_DEV_INFO("shmem offset is 0x%x\n", bp->common.shmem_base);
12140 if (!bp->common.shmem_base ||
12141 (bp->common.shmem_base < 0xA0000) ||
12142 (bp->common.shmem_base >= 0xC0000)) {
12143 BNX2X_DEV_INFO("MCP not active\n");
12144 bp->flags |= NO_MCP_FLAG;
12145 return;
12146 }
12148 val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
12149 if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
12150 != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
12151 BNX2X_ERR("BAD MCP validity signature\n");
12153 if (!BP_NOMCP(bp)) {
12154 bp->fw_seq = (SHMEM_RD(bp, func_mb[BP_FUNC(bp)].drv_mb_header)
12155 & DRV_MSG_SEQ_NUMBER_MASK);
12156 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
12157 }
12158 }
12160 /**
12161 * bnx2x_io_error_detected - called when PCI error is detected
12162 * @pdev: Pointer to PCI device
12163 * @state: The current pci connection state
12165 * This function is called after a PCI bus error affecting
12166 * this device has been detected.
12167 */
12168 static pci_ers_result_t bnx2x_io_error_detected(struct pci_dev *pdev,
12169 pci_channel_state_t state)
12171 struct net_device *dev = pci_get_drvdata(pdev);
12172 struct bnx2x *bp = netdev_priv(dev);
12174 rtnl_lock();
12176 netif_device_detach(dev);
12178 if (state == pci_channel_io_perm_failure) {
12179 rtnl_unlock();
12180 return PCI_ERS_RESULT_DISCONNECT;
12181 }
12183 if (netif_running(dev))
12184 bnx2x_eeh_nic_unload(bp);
12186 pci_disable_device(pdev);
12188 rtnl_unlock();
12190 /* Request a slot reset */
12191 return PCI_ERS_RESULT_NEED_RESET;
12192 }
12194 /**
12195 * bnx2x_io_slot_reset - called after the PCI bus has been reset
12196 * @pdev: Pointer to PCI device
12198 * Restart the card from scratch, as if from a cold-boot.
12199 */
12200 static pci_ers_result_t bnx2x_io_slot_reset(struct pci_dev *pdev)
12202 struct net_device *dev = pci_get_drvdata(pdev);
12203 struct bnx2x *bp = netdev_priv(dev);
12205 rtnl_lock();
12207 if (pci_enable_device(pdev)) {
12208 dev_err(&pdev->dev,
12209 "Cannot re-enable PCI device after reset\n");
12210 rtnl_unlock();
12211 return PCI_ERS_RESULT_DISCONNECT;
12212 }
12214 pci_set_master(pdev);
12215 pci_restore_state(pdev);
12217 if (netif_running(dev))
12218 bnx2x_set_power_state(bp, PCI_D0);
12220 rtnl_unlock();
12222 return PCI_ERS_RESULT_RECOVERED;
12223 }
12225 /**
12226 * bnx2x_io_resume - called when traffic can start flowing again
12227 * @pdev: Pointer to PCI device
12229 * This callback is called when the error recovery driver tells us that
12230 * it's OK to resume normal operation.
12231 */
12232 static void bnx2x_io_resume(struct pci_dev *pdev)
12234 struct net_device *dev = pci_get_drvdata(pdev);
12235 struct bnx2x *bp = netdev_priv(dev);
12237 rtnl_lock();
12239 bnx2x_eeh_recover(bp);
12241 if (netif_running(dev))
12242 bnx2x_nic_load(bp, LOAD_NORMAL);
12244 netif_device_attach(dev);
12246 rtnl_unlock();
12247 }
12249 static struct pci_error_handlers bnx2x_err_handler = {
12250 .error_detected = bnx2x_io_error_detected,
12251 .slot_reset = bnx2x_io_slot_reset,
12252 .resume = bnx2x_io_resume,
12255 static struct pci_driver bnx2x_pci_driver = {
12256 .name = DRV_MODULE_NAME,
12257 .id_table = bnx2x_pci_tbl,
12258 .probe = bnx2x_init_one,
12259 .remove = __devexit_p(bnx2x_remove_one),
12260 .suspend = bnx2x_suspend,
12261 .resume = bnx2x_resume,
12262 .err_handler = &bnx2x_err_handler,
12265 static int __init bnx2x_init(void)
12266 {
12267 int ret;
12269 printk(KERN_INFO "%s", version);
12271 bnx2x_wq = create_singlethread_workqueue("bnx2x");
12272 if (bnx2x_wq == NULL) {
12273 printk(KERN_ERR PFX "Cannot create workqueue\n");
12277 ret = pci_register_driver(&bnx2x_pci_driver);
12279 printk(KERN_ERR PFX "Cannot register driver\n");
12280 destroy_workqueue(bnx2x_wq);
12285 static void __exit bnx2x_cleanup(void)
12286 {
12287 pci_unregister_driver(&bnx2x_pci_driver);
12289 destroy_workqueue(bnx2x_wq);
12290 }
12292 module_init(bnx2x_init);
12293 module_exit(bnx2x_cleanup);