/* bnx2x_main.c: Broadcom Everest network driver.
 *
 * Copyright (c) 2007-2009 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
 * Written by: Eliezer Tamir
 * Based on code from Michael Chan's bnx2 driver
 * UDP CSUM errata workaround by Arik Gendelman
 * Slowpath and fastpath rework by Vladislav Zolotarov
 * Statistics and Link management by Yitchak Gertner
 */
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/device.h>  /* for dev_info() */
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/dma-mapping.h>
#include <linux/bitops.h>
#include <linux/irq.h>
#include <linux/delay.h>
#include <asm/byteorder.h>
#include <linux/time.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/if_vlan.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <linux/workqueue.h>
#include <linux/crc32.h>
#include <linux/crc32c.h>
#include <linux/prefetch.h>
#include <linux/zlib.h>
#include <linux/io.h>

#include "bnx2x.h"
#include "bnx2x_init.h"
#include "bnx2x_init_ops.h"
#include "bnx2x_dump.h"
#define DRV_MODULE_VERSION	"1.52.1"
#define DRV_MODULE_RELDATE	"2009/08/12"
#define BNX2X_BC_VER		0x040200
#include <linux/firmware.h>
#include "bnx2x_fw_file_hdr.h"

#define FW_FILE_PREFIX_E1	"bnx2x-e1-"
#define FW_FILE_PREFIX_E1H	"bnx2x-e1h-"
/* Time in jiffies before concluding the transmitter is hung */
#define TX_TIMEOUT		(5*HZ)

static char version[] __devinitdata =
	"Broadcom NetXtreme II 5771x 10Gigabit Ethernet Driver "
	DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
MODULE_AUTHOR("Eliezer Tamir");
MODULE_DESCRIPTION("Broadcom NetXtreme II BCM57710/57711/57711E Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
static int multi_mode = 1;
module_param(multi_mode, int, 0);
MODULE_PARM_DESC(multi_mode, " Multi queue mode "
			     "(0 Disable; 1 Enable (default))");

static int num_rx_queues;
module_param(num_rx_queues, int, 0);
MODULE_PARM_DESC(num_rx_queues, " Number of Rx queues for multi_mode=1"
				" (default is half number of CPUs)");

static int num_tx_queues;
module_param(num_tx_queues, int, 0);
MODULE_PARM_DESC(num_tx_queues, " Number of Tx queues for multi_mode=1"
				" (default is half number of CPUs)");

static int disable_tpa;
module_param(disable_tpa, int, 0);
MODULE_PARM_DESC(disable_tpa, " Disable the TPA (LRO) feature");

static int int_mode;
module_param(int_mode, int, 0);
MODULE_PARM_DESC(int_mode, " Force interrupt mode (1 INT#x; 2 MSI)");

static int dropless_fc;
module_param(dropless_fc, int, 0);
MODULE_PARM_DESC(dropless_fc, " Pause on exhausted host ring");

static int poll;
module_param(poll, int, 0);
MODULE_PARM_DESC(poll, " Use polling (for debug)");

static int mrrs = -1;
module_param(mrrs, int, 0);
MODULE_PARM_DESC(mrrs, " Force Max Read Req Size (0..3) (for debug)");

static int debug;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, " Default debug msglevel");
static int load_count[3]; /* 0-common, 1-port0, 2-port1 */

static struct workqueue_struct *bnx2x_wq;
enum bnx2x_board_type {
	BCM57710 = 0,
	BCM57711,
	BCM57711E,
};

/* indexed by board_type, above */
static struct {
	char *name;
} board_info[] __devinitdata = {
	{ "Broadcom NetXtreme II BCM57710 XGb" },
	{ "Broadcom NetXtreme II BCM57711 XGb" },
	{ "Broadcom NetXtreme II BCM57711E XGb" }
};
static const struct pci_device_id bnx2x_pci_tbl[] = {
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57710), BCM57710 },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711), BCM57711 },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711E), BCM57711E },
	{ 0 }
};

MODULE_DEVICE_TABLE(pci, bnx2x_pci_tbl);
/****************************************************************************
* General service functions
****************************************************************************/

/* used only at init
 * locking is done by mcp
 */
void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val)
{
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_DATA, val);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);
}
static u32 bnx2x_reg_rd_ind(struct bnx2x *bp, u32 addr)
{
	u32 val;

	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
	pci_read_config_dword(bp->pdev, PCICFG_GRC_DATA, &val);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);

	return val;
}
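
/* Illustrative usage of the indirect access helpers above (SOME_GRC_ADDR
 * and SOME_BIT are placeholders, not real defines): a read-modify-write
 * through the PCI config window looks like
 *
 *	u32 val = bnx2x_reg_rd_ind(bp, SOME_GRC_ADDR);
 *	bnx2x_reg_wr_ind(bp, SOME_GRC_ADDR, val | SOME_BIT);
 *
 * Each helper restores PCICFG_GRC_ADDRESS to PCICFG_VENDOR_ID_OFFSET so a
 * later plain config access does not hit the GRC window by accident.
 */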
static const u32 dmae_reg_go_c[] = {
	DMAE_REG_GO_C0, DMAE_REG_GO_C1, DMAE_REG_GO_C2, DMAE_REG_GO_C3,
	DMAE_REG_GO_C4, DMAE_REG_GO_C5, DMAE_REG_GO_C6, DMAE_REG_GO_C7,
	DMAE_REG_GO_C8, DMAE_REG_GO_C9, DMAE_REG_GO_C10, DMAE_REG_GO_C11,
	DMAE_REG_GO_C12, DMAE_REG_GO_C13, DMAE_REG_GO_C14, DMAE_REG_GO_C15
};
/* copy command into DMAE command memory and set DMAE command go */
static void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae,
			    int idx)
{
	u32 cmd_offset;
	int i;

	cmd_offset = (DMAE_REG_CMD_MEM + sizeof(struct dmae_command) * idx);
	for (i = 0; i < (sizeof(struct dmae_command)/4); i++) {
		REG_WR(bp, cmd_offset + i*4, *(((u32 *)dmae) + i));

		DP(BNX2X_MSG_OFF, "DMAE cmd[%d].%d (0x%08x) : 0x%08x\n",
		   idx, i, cmd_offset + i*4, *(((u32 *)dmae) + i));
	}
	REG_WR(bp, dmae_reg_go_c[idx], 1);
}
void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
		      u32 len32)
{
	struct dmae_command dmae;
	u32 *wb_comp = bnx2x_sp(bp, wb_comp);
	int cnt = 200;

	if (!bp->dmae_ready) {
		u32 *data = bnx2x_sp(bp, wb_data[0]);

		DP(BNX2X_MSG_OFF, "DMAE is not ready (dst_addr %08x len32 %d)"
		   " using indirect\n", dst_addr, len32);
		bnx2x_init_ind_wr(bp, dst_addr, data, len32);
		return;
	}

	memset(&dmae, 0, sizeof(struct dmae_command));

	dmae.opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
		       DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
		       DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
		       DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
		       DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
		       (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
		       (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
	dmae.src_addr_lo = U64_LO(dma_addr);
	dmae.src_addr_hi = U64_HI(dma_addr);
	dmae.dst_addr_lo = dst_addr >> 2;
	dmae.dst_addr_hi = 0;
	dmae.len = len32;
	dmae.comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
	dmae.comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
	dmae.comp_val = DMAE_COMP_VAL;

	DP(BNX2X_MSG_OFF, "DMAE: opcode 0x%08x\n"
	   DP_LEVEL "src_addr [%x:%08x] len [%d *4] "
		    "dst_addr [%x:%08x (%08x)]\n"
	   DP_LEVEL "comp_addr [%x:%08x] comp_val 0x%08x\n",
	   dmae.opcode, dmae.src_addr_hi, dmae.src_addr_lo,
	   dmae.len, dmae.dst_addr_hi, dmae.dst_addr_lo, dst_addr,
	   dmae.comp_addr_hi, dmae.comp_addr_lo, dmae.comp_val);
	DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
	   bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
	   bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);

	mutex_lock(&bp->dmae_mutex);

	*wb_comp = 0;

	bnx2x_post_dmae(bp, &dmae, INIT_DMAE_C(bp));

	udelay(5);

	while (*wb_comp != DMAE_COMP_VAL) {
		DP(BNX2X_MSG_OFF, "wb_comp 0x%08x\n", *wb_comp);

		if (!cnt) {
			BNX2X_ERR("DMAE timeout!\n");
			break;
		}
		cnt--;
		/* adjust delay for emulation/FPGA */
		if (CHIP_REV_IS_SLOW(bp))
			msleep(100);
		else
			udelay(5);
	}

	mutex_unlock(&bp->dmae_mutex);
}
void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
{
	struct dmae_command dmae;
	u32 *wb_comp = bnx2x_sp(bp, wb_comp);
	int cnt = 200;

	if (!bp->dmae_ready) {
		u32 *data = bnx2x_sp(bp, wb_data[0]);
		int i;

		DP(BNX2X_MSG_OFF, "DMAE is not ready (src_addr %08x len32 %d)"
		   " using indirect\n", src_addr, len32);
		for (i = 0; i < len32; i++)
			data[i] = bnx2x_reg_rd_ind(bp, src_addr + i*4);
		return;
	}

	memset(&dmae, 0, sizeof(struct dmae_command));

	dmae.opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
		       DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
		       DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
		       DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
		       DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
		       (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
		       (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
	dmae.src_addr_lo = src_addr >> 2;
	dmae.src_addr_hi = 0;
	dmae.dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_data));
	dmae.dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_data));
	dmae.len = len32;
	dmae.comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
	dmae.comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
	dmae.comp_val = DMAE_COMP_VAL;

	DP(BNX2X_MSG_OFF, "DMAE: opcode 0x%08x\n"
	   DP_LEVEL "src_addr [%x:%08x] len [%d *4] "
		    "dst_addr [%x:%08x (%08x)]\n"
	   DP_LEVEL "comp_addr [%x:%08x] comp_val 0x%08x\n",
	   dmae.opcode, dmae.src_addr_hi, dmae.src_addr_lo,
	   dmae.len, dmae.dst_addr_hi, dmae.dst_addr_lo, src_addr,
	   dmae.comp_addr_hi, dmae.comp_addr_lo, dmae.comp_val);

	mutex_lock(&bp->dmae_mutex);

	memset(bnx2x_sp(bp, wb_data[0]), 0, sizeof(u32) * 4);
	*wb_comp = 0;

	bnx2x_post_dmae(bp, &dmae, INIT_DMAE_C(bp));

	udelay(5);

	while (*wb_comp != DMAE_COMP_VAL) {

		if (!cnt) {
			BNX2X_ERR("DMAE timeout!\n");
			break;
		}
		cnt--;
		/* adjust delay for emulation/FPGA */
		if (CHIP_REV_IS_SLOW(bp))
			msleep(100);
		else
			udelay(5);
	}
	DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
	   bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
	   bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);

	mutex_unlock(&bp->dmae_mutex);
}
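
/* Both DMA engine helpers above follow the same pattern: build a
 * struct dmae_command, post it via bnx2x_post_dmae() under dmae_mutex,
 * then poll the wb_comp word in the slowpath area until the engine
 * writes DMAE_COMP_VAL there (or the ~200-iteration retry budget runs
 * out and a DMAE timeout is reported).
 */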
void bnx2x_write_dmae_phys_len(struct bnx2x *bp, dma_addr_t phys_addr,
			       u32 addr, u32 len)
{
	int offset = 0;

	while (len > DMAE_LEN32_WR_MAX) {
		bnx2x_write_dmae(bp, phys_addr + offset,
				 addr + offset, DMAE_LEN32_WR_MAX);
		offset += DMAE_LEN32_WR_MAX * 4;
		len -= DMAE_LEN32_WR_MAX;
	}

	bnx2x_write_dmae(bp, phys_addr + offset, addr + offset, len);
}
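
/* Sketch of the chunking done above, with an assumed (illustrative)
 * DMAE_LEN32_WR_MAX of 0x400 dwords: a 0x500-dword buffer goes out as one
 * 0x400-dword DMAE transaction followed by one 0x100-dword transaction;
 * offset advances in bytes (dwords * 4) while len counts dwords.
 */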
/* used only for slowpath so not inlined */
static void bnx2x_wb_wr(struct bnx2x *bp, int reg, u32 val_hi, u32 val_lo)
{
	u32 wb_write[2];

	wb_write[0] = val_hi;
	wb_write[1] = val_lo;
	REG_WR_DMAE(bp, reg, wb_write, 2);
}

static u64 bnx2x_wb_rd(struct bnx2x *bp, int reg)
{
	u32 wb_data[2];

	REG_RD_DMAE(bp, reg, wb_data, 2);

	return HILO_U64(wb_data[0], wb_data[1]);
}
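
/* Wide-bus registers are 64 bits and must go through the DMA engine as
 * two dwords; bnx2x_wb_rd() folds them back together, e.g.
 * HILO_U64(0x00000001, 0x00000002) == 0x0000000100000002ULL.
 */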
static int bnx2x_mc_assert(struct bnx2x *bp)
{
	char last_idx;
	int i, rc = 0;
	u32 row0, row1, row2, row3;

	/* XSTORM */
	last_idx = REG_RD8(bp, BAR_XSTRORM_INTMEM +
			   XSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("XSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("XSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* TSTORM */
	last_idx = REG_RD8(bp, BAR_TSTRORM_INTMEM +
			   TSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("TSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("TSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* CSTORM */
	last_idx = REG_RD8(bp, BAR_CSTRORM_INTMEM +
			   CSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("CSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("CSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* USTORM */
	last_idx = REG_RD8(bp, BAR_USTRORM_INTMEM +
			   USTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("USTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("USTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	return rc;
}
static void bnx2x_fw_dump(struct bnx2x *bp)
{
	u32 mark, offset;
	__be32 data[9];
	int word;

	mark = REG_RD(bp, MCP_REG_MCPR_SCRATCH + 0xf104);
	mark = ((mark + 0x3) & ~0x3);
	printk(KERN_ERR PFX "begin fw dump (mark 0x%x)\n", mark);

	printk(KERN_ERR PFX);
	for (offset = mark - 0x08000000; offset <= 0xF900; offset += 0x8*4) {
		for (word = 0; word < 8; word++)
			data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
						  offset + 4*word));
		data[8] = 0x0;
		printk(KERN_CONT "%s", (char *)data);
	}
	for (offset = 0xF108; offset <= mark - 0x08000000; offset += 0x8*4) {
		for (word = 0; word < 8; word++)
			data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
						  offset + 4*word));
		data[8] = 0x0;
		printk(KERN_CONT "%s", (char *)data);
	}
	printk(KERN_ERR PFX "end of fw dump\n");
}
static void bnx2x_panic_dump(struct bnx2x *bp)
{
	int i;
	u16 j, start, end;

	bp->stats_state = STATS_STATE_DISABLED;
	DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");

	BNX2X_ERR("begin crash dump -----------------\n");

	/* Indices */
	/* Common */
	BNX2X_ERR("def_c_idx(%u) def_u_idx(%u) def_x_idx(%u)"
		  " def_t_idx(%u) def_att_idx(%u) attn_state(%u)"
		  " spq_prod_idx(%u)\n",
		  bp->def_c_idx, bp->def_u_idx, bp->def_x_idx, bp->def_t_idx,
		  bp->def_att_idx, bp->attn_state, bp->spq_prod_idx);

	/* Rx */
	for_each_rx_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		BNX2X_ERR("fp%d: rx_bd_prod(%x) rx_bd_cons(%x)"
			  " *rx_bd_cons_sb(%x) rx_comp_prod(%x)"
			  " rx_comp_cons(%x) *rx_cons_sb(%x)\n",
			  i, fp->rx_bd_prod, fp->rx_bd_cons,
			  le16_to_cpu(*fp->rx_bd_cons_sb), fp->rx_comp_prod,
			  fp->rx_comp_cons, le16_to_cpu(*fp->rx_cons_sb));
		BNX2X_ERR("      rx_sge_prod(%x) last_max_sge(%x)"
			  " fp_u_idx(%x) *sb_u_idx(%x)\n",
			  fp->rx_sge_prod, fp->last_max_sge,
			  le16_to_cpu(fp->fp_u_idx),
			  fp->status_blk->u_status_block.status_block_index);
	}

	/* Tx */
	for_each_tx_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		BNX2X_ERR("fp%d: tx_pkt_prod(%x) tx_pkt_cons(%x)"
			  " tx_bd_prod(%x) tx_bd_cons(%x) *tx_cons_sb(%x)\n",
			  i, fp->tx_pkt_prod, fp->tx_pkt_cons, fp->tx_bd_prod,
			  fp->tx_bd_cons, le16_to_cpu(*fp->tx_cons_sb));
		BNX2X_ERR("      fp_c_idx(%x) *sb_c_idx(%x)"
			  " tx_db_prod(%x)\n", le16_to_cpu(fp->fp_c_idx),
			  fp->status_blk->c_status_block.status_block_index,
			  fp->tx_db.data.prod);
	}

	/* Rings */
	/* Rx */
	for_each_rx_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10);
		end = RX_BD(le16_to_cpu(*fp->rx_cons_sb) + 503);
		for (j = start; j != end; j = RX_BD(j + 1)) {
			u32 *rx_bd = (u32 *)&fp->rx_desc_ring[j];
			struct sw_rx_bd *sw_bd = &fp->rx_buf_ring[j];

			BNX2X_ERR("fp%d: rx_bd[%x]=[%x:%x] sw_bd=[%p]\n",
				  i, j, rx_bd[1], rx_bd[0], sw_bd->skb);
		}

		start = RX_SGE(fp->rx_sge_prod);
		end = RX_SGE(fp->last_max_sge);
		for (j = start; j != end; j = RX_SGE(j + 1)) {
			u32 *rx_sge = (u32 *)&fp->rx_sge_ring[j];
			struct sw_rx_page *sw_page = &fp->rx_page_ring[j];

			BNX2X_ERR("fp%d: rx_sge[%x]=[%x:%x] sw_page=[%p]\n",
				  i, j, rx_sge[1], rx_sge[0], sw_page->page);
		}

		start = RCQ_BD(fp->rx_comp_cons - 10);
		end = RCQ_BD(fp->rx_comp_cons + 503);
		for (j = start; j != end; j = RCQ_BD(j + 1)) {
			u32 *cqe = (u32 *)&fp->rx_comp_ring[j];

			BNX2X_ERR("fp%d: cqe[%x]=[%x:%x:%x:%x]\n",
				  i, j, cqe[0], cqe[1], cqe[2], cqe[3]);
		}
	}

	/* Tx */
	for_each_tx_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		start = TX_BD(le16_to_cpu(*fp->tx_cons_sb) - 10);
		end = TX_BD(le16_to_cpu(*fp->tx_cons_sb) + 245);
		for (j = start; j != end; j = TX_BD(j + 1)) {
			struct sw_tx_bd *sw_bd = &fp->tx_buf_ring[j];

			BNX2X_ERR("fp%d: packet[%x]=[%p,%x]\n",
				  i, j, sw_bd->skb, sw_bd->first_bd);
		}

		start = TX_BD(fp->tx_bd_cons - 10);
		end = TX_BD(fp->tx_bd_cons + 254);
		for (j = start; j != end; j = TX_BD(j + 1)) {
			u32 *tx_bd = (u32 *)&fp->tx_desc_ring[j];

			BNX2X_ERR("fp%d: tx_bd[%x]=[%x:%x:%x:%x]\n",
				  i, j, tx_bd[0], tx_bd[1], tx_bd[2], tx_bd[3]);
		}
	}

	bnx2x_fw_dump(bp);
	bnx2x_mc_assert(bp);
	BNX2X_ERR("end crash dump -----------------\n");
}
static void bnx2x_int_enable(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
	u32 val = REG_RD(bp, addr);
	int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
	int msi = (bp->flags & USING_MSI_FLAG) ? 1 : 0;

	if (msix) {
		val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			 HC_CONFIG_0_REG_INT_LINE_EN_0);
		val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);
	} else if (msi) {
		val &= ~HC_CONFIG_0_REG_INT_LINE_EN_0;
		val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);
	} else {
		val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_INT_LINE_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);

		DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
		   val, port, addr);

		REG_WR(bp, addr, val);

		val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0;
	}

	DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x) mode %s\n",
	   val, port, addr, (msix ? "MSI-X" : (msi ? "MSI" : "INTx")));

	REG_WR(bp, addr, val);
	/*
	 * Ensure that HC_CONFIG is written before leading/trailing edge config
	 */
	mmiowb();
	barrier();

	if (CHIP_IS_E1H(bp)) {
		/* init leading/trailing edge */
		if (IS_E1HMF(bp)) {
			val = (0xee0f | (1 << (BP_E1HVN(bp) + 4)));
			if (bp->port.pmf)
				/* enable nig and gpio3 attention */
				val |= 0x1100;
		} else
			val = 0xffff;

		REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
		REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
	}

	/* Make sure that interrupts are indeed enabled from here on */
	mmiowb();
}
static void bnx2x_int_disable(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
	u32 val = REG_RD(bp, addr);

	val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
		 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
		 HC_CONFIG_0_REG_INT_LINE_EN_0 |
		 HC_CONFIG_0_REG_ATTN_BIT_EN_0);

	DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
	   val, port, addr);

	/* flush all outstanding writes */
	mmiowb();

	REG_WR(bp, addr, val);
	if (REG_RD(bp, addr) != val)
		BNX2X_ERR("BUG! proper val not read from IGU!\n");
}
static void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw)
{
	int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
	int i, offset;

	/* disable interrupt handling */
	atomic_inc(&bp->intr_sem);
	smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */

	if (disable_hw)
		/* prevent the HW from sending interrupts */
		bnx2x_int_disable(bp);

	/* make sure all ISRs are done */
	if (msix) {
		synchronize_irq(bp->msix_table[0].vector);
		offset = 1;
		for_each_queue(bp, i)
			synchronize_irq(bp->msix_table[i + offset].vector);
	} else
		synchronize_irq(bp->pdev->irq);

	/* make sure sp_task is not running */
	cancel_delayed_work(&bp->sp_task);
	flush_workqueue(bnx2x_wq);
}
/* fast path */

/*
 * General service functions
 */

static inline void bnx2x_ack_sb(struct bnx2x *bp, u8 sb_id,
				u8 storm, u16 index, u8 op, u8 update)
{
	u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
		       COMMAND_REG_INT_ACK);
	struct igu_ack_register igu_ack;

	igu_ack.status_block_index = index;
	igu_ack.sb_id_and_flags =
			((sb_id << IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT) |
			 (storm << IGU_ACK_REGISTER_STORM_ID_SHIFT) |
			 (update << IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT) |
			 (op << IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT));

	DP(BNX2X_MSG_OFF, "write 0x%08x to HC addr 0x%x\n",
	   (*(u32 *)&igu_ack), hc_addr);
	REG_WR(bp, hc_addr, (*(u32 *)&igu_ack));

	/* Make sure that ACK is written */
	mmiowb();
	barrier();
}
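
/* The ack is a single dword write that packs the status block index
 * together with the sb/storm/update/op fields.  Acking with
 * IGU_INT_ENABLE re-arms the interrupt line, while IGU_INT_DISABLE (as
 * used in the MSI-X fast-path ISR below) masks it until NAPI completes.
 */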
static inline u16 bnx2x_update_fpsb_idx(struct bnx2x_fastpath *fp)
{
	struct host_status_block *fpsb = fp->status_blk;
	u16 rc = 0;

	barrier(); /* status block is written to by the chip */
	if (fp->fp_c_idx != fpsb->c_status_block.status_block_index) {
		fp->fp_c_idx = fpsb->c_status_block.status_block_index;
		rc |= 1;
	}
	if (fp->fp_u_idx != fpsb->u_status_block.status_block_index) {
		fp->fp_u_idx = fpsb->u_status_block.status_block_index;
		rc |= 2;
	}
	return rc;
}
static u16 bnx2x_ack_int(struct bnx2x *bp)
{
	u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
		       COMMAND_REG_SIMD_MASK);
	u32 result = REG_RD(bp, hc_addr);

	DP(BNX2X_MSG_OFF, "read 0x%08x from HC addr 0x%x\n",
	   result, hc_addr);

	return result;
}
/*
 * fast path service functions
 */

static inline int bnx2x_has_tx_work_unload(struct bnx2x_fastpath *fp)
{
	/* Tell compiler that consumer and producer can change */
	barrier();
	return (fp->tx_pkt_prod != fp->tx_pkt_cons);
}
/* free skb in the packet ring at pos idx
 * return idx of last bd freed
 */
static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			     u16 idx)
{
	struct sw_tx_bd *tx_buf = &fp->tx_buf_ring[idx];
	struct eth_tx_start_bd *tx_start_bd;
	struct eth_tx_bd *tx_data_bd;
	struct sk_buff *skb = tx_buf->skb;
	u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
	int nbd;

	DP(BNX2X_MSG_OFF, "pkt_idx %d buff @(%p)->skb %p\n",
	   idx, tx_buf, skb);

	/* unmap first bd */
	DP(BNX2X_MSG_OFF, "free bd_idx %d\n", bd_idx);
	tx_start_bd = &fp->tx_desc_ring[bd_idx].start_bd;
	pci_unmap_single(bp->pdev, BD_UNMAP_ADDR(tx_start_bd),
			 BD_UNMAP_LEN(tx_start_bd), PCI_DMA_TODEVICE);

	nbd = le16_to_cpu(tx_start_bd->nbd) - 1;
#ifdef BNX2X_STOP_ON_ERROR
	if ((nbd - 1) > (MAX_SKB_FRAGS + 2)) {
		BNX2X_ERR("BAD nbd!\n");
		bnx2x_panic();
	}
#endif
	new_cons = nbd + tx_buf->first_bd;

	/* Get the next bd */
	bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));

	/* Skip a parse bd... */
	--nbd;
	bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));

	/* ...and the TSO split header bd since they have no mapping */
	if (tx_buf->flags & BNX2X_TSO_SPLIT_BD) {
		--nbd;
		bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
	}

	/* now free frags */
	while (nbd > 0) {

		DP(BNX2X_MSG_OFF, "free frag bd_idx %d\n", bd_idx);
		tx_data_bd = &fp->tx_desc_ring[bd_idx].reg_bd;
		pci_unmap_page(bp->pdev, BD_UNMAP_ADDR(tx_data_bd),
			       BD_UNMAP_LEN(tx_data_bd), PCI_DMA_TODEVICE);
		if (--nbd)
			bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
	}

	/* release skb */
	WARN_ON(!skb);
	dev_kfree_skb_any(skb);
	tx_buf->first_bd = 0;
	tx_buf->skb = NULL;

	return new_cons;
}
static inline u16 bnx2x_tx_avail(struct bnx2x_fastpath *fp)
{
	s16 used;
	u16 prod;
	u16 cons;

	barrier(); /* Tell compiler that prod and cons can change */
	prod = fp->tx_bd_prod;
	cons = fp->tx_bd_cons;

	/* NUM_TX_RINGS = number of "next-page" entries
	   It will be used as a threshold */
	used = SUB_S16(prod, cons) + (s16)NUM_TX_RINGS;

#ifdef BNX2X_STOP_ON_ERROR
	WARN_ON(used < 0);
	WARN_ON(used > fp->bp->tx_ring_size);
	WARN_ON((fp->bp->tx_ring_size - used) > MAX_TX_AVAIL);
#endif

	return (s16)(fp->bp->tx_ring_size) - used;
}
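
/* Ring-occupancy arithmetic, for illustration: with prod = 0x0005 and
 * cons = 0xfffd, SUB_S16() yields 8 BDs in flight despite the 16-bit
 * wrap; NUM_TX_RINGS is added because the "next page" BDs at the end of
 * each ring page can never carry data.
 */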
static void bnx2x_tx_int(struct bnx2x_fastpath *fp)
{
	struct bnx2x *bp = fp->bp;
	struct netdev_queue *txq;
	u16 hw_cons, sw_cons, bd_cons = fp->tx_bd_cons;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return;
#endif

	txq = netdev_get_tx_queue(bp->dev, fp->index - bp->num_rx_queues);
	hw_cons = le16_to_cpu(*fp->tx_cons_sb);
	sw_cons = fp->tx_pkt_cons;

	while (sw_cons != hw_cons) {
		u16 pkt_cons;

		pkt_cons = TX_BD(sw_cons);

		/* prefetch(bp->tx_buf_ring[pkt_cons].skb); */

		DP(NETIF_MSG_TX_DONE, "hw_cons %u sw_cons %u pkt_cons %u\n",
		   hw_cons, sw_cons, pkt_cons);

/*		if (NEXT_TX_IDX(sw_cons) != hw_cons) {
			rmb();
			prefetch(fp->tx_buf_ring[NEXT_TX_IDX(sw_cons)].skb);
		}
*/
		bd_cons = bnx2x_free_tx_pkt(bp, fp, pkt_cons);
		sw_cons++;
	}

	fp->tx_pkt_cons = sw_cons;
	fp->tx_bd_cons = bd_cons;

	/* TBD need a thresh? */
	if (unlikely(netif_tx_queue_stopped(txq))) {

		/* Need to make the tx_bd_cons update visible to start_xmit()
		 * before checking for netif_tx_queue_stopped().  Without the
		 * memory barrier, there is a small possibility that
		 * start_xmit() will miss it and cause the queue to be stopped
		 * forever.
		 */
		smp_mb();

		if ((netif_tx_queue_stopped(txq)) &&
		    (bp->state == BNX2X_STATE_OPEN) &&
		    (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3))
			netif_tx_wake_queue(txq);
	}
}
static void bnx2x_sp_event(struct bnx2x_fastpath *fp,
			   union eth_rx_cqe *rr_cqe)
{
	struct bnx2x *bp = fp->bp;
	int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data);
	int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data);

	DP(BNX2X_MSG_SP,
	   "fp %d cid %d got ramrod #%d state is %x type is %d\n",
	   fp->index, cid, command, bp->state,
	   rr_cqe->ramrod_cqe.ramrod_type);

	bp->spq_left++;

	if (fp->index) {
		switch (command | fp->state) {
		case (RAMROD_CMD_ID_ETH_CLIENT_SETUP |
						BNX2X_FP_STATE_OPENING):
			DP(NETIF_MSG_IFUP, "got MULTI[%d] setup ramrod\n",
			   cid);
			fp->state = BNX2X_FP_STATE_OPEN;
			break;

		case (RAMROD_CMD_ID_ETH_HALT | BNX2X_FP_STATE_HALTING):
			DP(NETIF_MSG_IFDOWN, "got MULTI[%d] halt ramrod\n",
			   cid);
			fp->state = BNX2X_FP_STATE_HALTED;
			break;

		default:
			BNX2X_ERR("unexpected MC reply (%d) "
				  "fp->state is %x\n", command, fp->state);
			break;
		}
		mb(); /* force bnx2x_wait_ramrod() to see the change */
		return;
	}

	switch (command | bp->state) {
	case (RAMROD_CMD_ID_ETH_PORT_SETUP | BNX2X_STATE_OPENING_WAIT4_PORT):
		DP(NETIF_MSG_IFUP, "got setup ramrod\n");
		bp->state = BNX2X_STATE_OPEN;
		break;

	case (RAMROD_CMD_ID_ETH_HALT | BNX2X_STATE_CLOSING_WAIT4_HALT):
		DP(NETIF_MSG_IFDOWN, "got halt ramrod\n");
		bp->state = BNX2X_STATE_CLOSING_WAIT4_DELETE;
		fp->state = BNX2X_FP_STATE_HALTED;
		break;

	case (RAMROD_CMD_ID_ETH_CFC_DEL | BNX2X_STATE_CLOSING_WAIT4_HALT):
		DP(NETIF_MSG_IFDOWN, "got delete ramrod for MULTI[%d]\n", cid);
		bnx2x_fp(bp, cid, state) = BNX2X_FP_STATE_CLOSED;
		break;

	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_OPEN):
	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_DIAG):
		DP(NETIF_MSG_IFUP, "got set mac ramrod\n");
		bp->set_mac_pending--;
		smp_wmb();
		break;

	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_CLOSING_WAIT4_HALT):
	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_DISABLED):
		DP(NETIF_MSG_IFDOWN, "got (un)set mac ramrod\n");
		bp->set_mac_pending--;
		smp_wmb();
		break;

	default:
		BNX2X_ERR("unexpected MC reply (%d) bp->state is %x\n",
			  command, bp->state);
		break;
	}
	mb(); /* force bnx2x_wait_ramrod() to see the change */
}
static inline void bnx2x_free_rx_sge(struct bnx2x *bp,
				     struct bnx2x_fastpath *fp, u16 index)
{
	struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
	struct page *page = sw_buf->page;
	struct eth_rx_sge *sge = &fp->rx_sge_ring[index];

	/* Skip "next page" elements */
	if (!page)
		return;

	pci_unmap_page(bp->pdev, pci_unmap_addr(sw_buf, mapping),
		       SGE_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);
	__free_pages(page, PAGES_PER_SGE_SHIFT);

	sw_buf->page = NULL;
	sge->addr_hi = 0;
	sge->addr_lo = 0;
}

static inline void bnx2x_free_rx_sge_range(struct bnx2x *bp,
					   struct bnx2x_fastpath *fp, int last)
{
	int i;

	for (i = 0; i < last; i++)
		bnx2x_free_rx_sge(bp, fp, i);
}
static inline int bnx2x_alloc_rx_sge(struct bnx2x *bp,
				     struct bnx2x_fastpath *fp, u16 index)
{
	struct page *page = alloc_pages(GFP_ATOMIC, PAGES_PER_SGE_SHIFT);
	struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
	struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
	dma_addr_t mapping;

	if (unlikely(page == NULL))
		return -ENOMEM;

	mapping = pci_map_page(bp->pdev, page, 0, SGE_PAGE_SIZE*PAGES_PER_SGE,
			       PCI_DMA_FROMDEVICE);
	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
		__free_pages(page, PAGES_PER_SGE_SHIFT);
		return -ENOMEM;
	}

	sw_buf->page = page;
	pci_unmap_addr_set(sw_buf, mapping, mapping);

	sge->addr_hi = cpu_to_le32(U64_HI(mapping));
	sge->addr_lo = cpu_to_le32(U64_LO(mapping));

	return 0;
}
static inline int bnx2x_alloc_rx_skb(struct bnx2x *bp,
				     struct bnx2x_fastpath *fp, u16 index)
{
	struct sk_buff *skb;
	struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
	struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
	dma_addr_t mapping;

	skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
	if (unlikely(skb == NULL))
		return -ENOMEM;

	mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_size,
				 PCI_DMA_FROMDEVICE);
	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
		dev_kfree_skb(skb);
		return -ENOMEM;
	}

	rx_buf->skb = skb;
	pci_unmap_addr_set(rx_buf, mapping, mapping);

	rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

	return 0;
}
/* note that we are not allocating a new skb,
 * we are just moving one from cons to prod
 * we are not creating a new mapping,
 * so there is no need to check for dma_mapping_error().
 */
static void bnx2x_reuse_rx_skb(struct bnx2x_fastpath *fp,
			       struct sk_buff *skb, u16 cons, u16 prod)
{
	struct bnx2x *bp = fp->bp;
	struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
	struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
	struct eth_rx_bd *cons_bd = &fp->rx_desc_ring[cons];
	struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];

	pci_dma_sync_single_for_device(bp->pdev,
				       pci_unmap_addr(cons_rx_buf, mapping),
				       RX_COPY_THRESH, PCI_DMA_FROMDEVICE);

	prod_rx_buf->skb = cons_rx_buf->skb;
	pci_unmap_addr_set(prod_rx_buf, mapping,
			   pci_unmap_addr(cons_rx_buf, mapping));
	*prod_bd = *cons_bd;
}
static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
					     u16 idx)
{
	u16 last_max = fp->last_max_sge;

	if (SUB_S16(idx, last_max) > 0)
		fp->last_max_sge = idx;
}

static void bnx2x_clear_sge_mask_next_elems(struct bnx2x_fastpath *fp)
{
	int i, j;

	for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
		int idx = RX_SGE_CNT * i - 1;

		for (j = 0; j < 2; j++) {
			SGE_MASK_CLEAR_BIT(fp, idx);
			idx--;
		}
	}
}
static void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
				  struct eth_fast_path_rx_cqe *fp_cqe)
{
	struct bnx2x *bp = fp->bp;
	u16 sge_len = SGE_PAGE_ALIGN(le16_to_cpu(fp_cqe->pkt_len) -
				     le16_to_cpu(fp_cqe->len_on_bd)) >>
		      SGE_PAGE_SHIFT;
	u16 last_max, last_elem, first_elem;
	u16 delta = 0;
	u16 i;

	if (!sge_len)
		return;

	/* First mark all used pages */
	for (i = 0; i < sge_len; i++)
		SGE_MASK_CLEAR_BIT(fp, RX_SGE(le16_to_cpu(fp_cqe->sgl[i])));

	DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
	   sge_len - 1, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));

	/* Here we assume that the last SGE index is the biggest */
	prefetch((void *)(fp->sge_mask));
	bnx2x_update_last_max_sge(fp, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));

	last_max = RX_SGE(fp->last_max_sge);
	last_elem = last_max >> RX_SGE_MASK_ELEM_SHIFT;
	first_elem = RX_SGE(fp->rx_sge_prod) >> RX_SGE_MASK_ELEM_SHIFT;

	/* If ring is not full */
	if (last_elem + 1 != first_elem)
		last_elem++;

	/* Now update the prod */
	for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
		if (likely(fp->sge_mask[i]))
			break;

		fp->sge_mask[i] = RX_SGE_MASK_ELEM_ONE_MASK;
		delta += RX_SGE_MASK_ELEM_SZ;
	}

	if (delta > 0) {
		fp->rx_sge_prod += delta;
		/* clear page-end entries */
		bnx2x_clear_sge_mask_next_elems(fp);
	}

	DP(NETIF_MSG_RX_STATUS,
	   "fp->last_max_sge = %d fp->rx_sge_prod = %d\n",
	   fp->last_max_sge, fp->rx_sge_prod);
}
static inline void bnx2x_init_sge_ring_bit_mask(struct bnx2x_fastpath *fp)
{
	/* Set the mask to all 1-s: it's faster to compare to 0 than to 0xf-s */
	memset(fp->sge_mask, 0xff,
	       (NUM_RX_SGE >> RX_SGE_MASK_ELEM_SHIFT)*sizeof(u64));

	/* Clear the two last indices in the page to 1:
	   these are the indices that correspond to the "next" element,
	   hence will never be indicated and should be removed from
	   the calculations. */
	bnx2x_clear_sge_mask_next_elems(fp);
}
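
/* The sge_mask thus works as follows: one bit per SGE entry, grouped into
 * 64-bit words.  bnx2x_update_sge_prod() clears the bit of each completed
 * entry; only when a whole word reaches zero is it refilled to
 * RX_SGE_MASK_ELEM_ONE_MASK and rx_sge_prod advanced over it, so the
 * producer moves forward in whole-word strides.
 */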
static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
			    struct sk_buff *skb, u16 cons, u16 prod)
{
	struct bnx2x *bp = fp->bp;
	struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
	struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
	struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
	dma_addr_t mapping;

	/* move empty skb from pool to prod and map it */
	prod_rx_buf->skb = fp->tpa_pool[queue].skb;
	mapping = pci_map_single(bp->pdev, fp->tpa_pool[queue].skb->data,
				 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
	pci_unmap_addr_set(prod_rx_buf, mapping, mapping);

	/* move partial skb from cons to pool (don't unmap yet) */
	fp->tpa_pool[queue] = *cons_rx_buf;

	/* mark bin state as start - print error if current state != stop */
	if (fp->tpa_state[queue] != BNX2X_TPA_STOP)
		BNX2X_ERR("start of bin not in stop [%d]\n", queue);

	fp->tpa_state[queue] = BNX2X_TPA_START;

	/* point prod_bd to new skb */
	prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

#ifdef BNX2X_STOP_ON_ERROR
	fp->tpa_queue_used |= (1 << queue);
#ifdef __powerpc64__
	DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n",
#else
	DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
#endif
	   fp->tpa_queue_used);
#endif
}
static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			       struct sk_buff *skb,
			       struct eth_fast_path_rx_cqe *fp_cqe,
			       u16 cqe_idx)
{
	struct sw_rx_page *rx_pg, old_rx_pg;
	u16 len_on_bd = le16_to_cpu(fp_cqe->len_on_bd);
	u32 i, frag_len, frag_size, pages;
	int err;
	int j;

	frag_size = le16_to_cpu(fp_cqe->pkt_len) - len_on_bd;
	pages = SGE_PAGE_ALIGN(frag_size) >> SGE_PAGE_SHIFT;

	/* This is needed in order to enable forwarding support */
	if (frag_size)
		skb_shinfo(skb)->gso_size = min((u32)SGE_PAGE_SIZE,
					       max(frag_size, (u32)len_on_bd));

#ifdef BNX2X_STOP_ON_ERROR
	if (pages >
	    min((u32)8, (u32)MAX_SKB_FRAGS) * SGE_PAGE_SIZE * PAGES_PER_SGE) {
		BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
			  pages, cqe_idx);
		BNX2X_ERR("fp_cqe->pkt_len = %d fp_cqe->len_on_bd = %d\n",
			  fp_cqe->pkt_len, len_on_bd);
		bnx2x_panic();
		return -EINVAL;
	}
#endif

	/* Run through the SGL and compose the fragmented skb */
	for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
		u16 sge_idx = RX_SGE(le16_to_cpu(fp_cqe->sgl[j]));

		/* FW gives the indices of the SGE as if the ring is an array
		   (meaning that "next" element will consume 2 indices) */
		frag_len = min(frag_size, (u32)(SGE_PAGE_SIZE*PAGES_PER_SGE));
		rx_pg = &fp->rx_page_ring[sge_idx];
		old_rx_pg = *rx_pg;

		/* If we fail to allocate a substitute page, we simply stop
		   where we are and drop the whole packet */
		err = bnx2x_alloc_rx_sge(bp, fp, sge_idx);
		if (unlikely(err)) {
			fp->eth_q_stats.rx_skb_alloc_failed++;
			return err;
		}

		/* Unmap the page as we are going to pass it to the stack */
		pci_unmap_page(bp->pdev, pci_unmap_addr(&old_rx_pg, mapping),
			      SGE_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);

		/* Add one frag and update the appropriate fields in the skb */
		skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);

		skb->data_len += frag_len;
		skb->truesize += frag_len;
		skb->len += frag_len;

		frag_size -= frag_len;
	}

	return 0;
}
static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			   u16 queue, int pad, int len, union eth_rx_cqe *cqe,
			   u16 cqe_idx)
{
	struct sw_rx_bd *rx_buf = &fp->tpa_pool[queue];
	struct sk_buff *skb = rx_buf->skb;
	/* alloc new skb */
	struct sk_buff *new_skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);

	/* Unmap skb in the pool anyway, as we are going to change
	   pool entry status to BNX2X_TPA_STOP even if new skb allocation
	   fails. */
	pci_unmap_single(bp->pdev, pci_unmap_addr(rx_buf, mapping),
			 bp->rx_buf_size, PCI_DMA_FROMDEVICE);

	if (likely(new_skb)) {
		/* fix ip xsum and give it to the stack */
		/* (no need to map the new skb) */
#ifdef BCM_VLAN
		int is_vlan_cqe =
			(le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
			 PARSING_FLAGS_VLAN);
		int is_not_hwaccel_vlan_cqe =
			(is_vlan_cqe && (!(bp->flags & HW_VLAN_RX_FLAG)));
#endif

		prefetch(skb);
		prefetch(((char *)(skb)) + 128);

#ifdef BNX2X_STOP_ON_ERROR
		if (pad + len > bp->rx_buf_size) {
			BNX2X_ERR("skb_put is about to fail... "
				  "pad %d len %d rx_buf_size %d\n",
				  pad, len, bp->rx_buf_size);
			bnx2x_panic();
			return;
		}
#endif

		skb_reserve(skb, pad);
		skb_put(skb, len);

		skb->protocol = eth_type_trans(skb, bp->dev);
		skb->ip_summed = CHECKSUM_UNNECESSARY;

		{
			struct iphdr *iph;

			iph = (struct iphdr *)skb->data;
#ifdef BCM_VLAN
			/* If there is no Rx VLAN offloading -
			   take VLAN tag into an account */
			if (unlikely(is_not_hwaccel_vlan_cqe))
				iph = (struct iphdr *)((u8 *)iph + VLAN_HLEN);
#endif
			iph->check = 0;
			iph->check = ip_fast_csum((u8 *)iph, iph->ihl);
		}

		if (!bnx2x_fill_frag_skb(bp, fp, skb,
					 &cqe->fast_path_cqe, cqe_idx)) {
#ifdef BCM_VLAN
			if ((bp->vlgrp != NULL) && is_vlan_cqe &&
			    (!is_not_hwaccel_vlan_cqe))
				vlan_hwaccel_receive_skb(skb, bp->vlgrp,
						le16_to_cpu(cqe->fast_path_cqe.
							    vlan_tag));
			else
#endif
				netif_receive_skb(skb);
		} else {
			DP(NETIF_MSG_RX_STATUS, "Failed to allocate new pages"
			   " - dropping packet!\n");
			dev_kfree_skb(skb);
		}

		/* put new skb in bin */
		fp->tpa_pool[queue].skb = new_skb;

	} else {
		/* else drop the packet and keep the buffer in the bin */
		DP(NETIF_MSG_RX_STATUS,
		   "Failed to allocate new skb - dropping packet!\n");
		fp->eth_q_stats.rx_skb_alloc_failed++;
	}

	fp->tpa_state[queue] = BNX2X_TPA_STOP;
}
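
/* TPA lifecycle recap: bnx2x_tpa_start() parks the partially-filled skb
 * in tpa_pool[queue] and gives the ring a fresh buffer; bnx2x_tpa_stop()
 * above unmaps the pooled skb, recomputes the IP checksum (the aggregated
 * length differs from any per-frame value), attaches the SGE pages via
 * bnx2x_fill_frag_skb() and hands the skb to the stack.
 */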
static inline void bnx2x_update_rx_prod(struct bnx2x *bp,
					struct bnx2x_fastpath *fp,
					u16 bd_prod, u16 rx_comp_prod,
					u16 rx_sge_prod)
{
	struct ustorm_eth_rx_producers rx_prods = {0};
	int i;

	/* Update producers */
	rx_prods.bd_prod = bd_prod;
	rx_prods.cqe_prod = rx_comp_prod;
	rx_prods.sge_prod = rx_sge_prod;

	/*
	 * Make sure that the BD and SGE data is updated before updating the
	 * producers since FW might read the BD/SGE right after the producer
	 * is updated.
	 * This is only applicable for weak-ordered memory model archs such
	 * as IA-64. The following barrier is also mandatory since FW will
	 * assume BDs must have buffers.
	 */
	wmb();

	for (i = 0; i < sizeof(struct ustorm_eth_rx_producers)/4; i++)
		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_RX_PRODS_OFFSET(BP_PORT(bp), fp->cl_id) + i*4,
		       ((u32 *)&rx_prods)[i]);

	mmiowb(); /* keep prod updates ordered */

	DP(NETIF_MSG_RX_STATUS,
	   "queue[%d]: wrote bd_prod %u cqe_prod %u sge_prod %u\n",
	   fp->index, bd_prod, rx_comp_prod, rx_sge_prod);
}
static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
{
	struct bnx2x *bp = fp->bp;
	u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
	u16 hw_comp_cons, sw_comp_cons, sw_comp_prod;
	int rx_pkt = 0;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return 0;
#endif

	/* CQ "next element" is of the size of the regular element,
	   that's why it's ok here */
	hw_comp_cons = le16_to_cpu(*fp->rx_cons_sb);
	if ((hw_comp_cons & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
		hw_comp_cons++;

	bd_cons = fp->rx_bd_cons;
	bd_prod = fp->rx_bd_prod;
	bd_prod_fw = bd_prod;
	sw_comp_cons = fp->rx_comp_cons;
	sw_comp_prod = fp->rx_comp_prod;

	/* Memory barrier necessary as speculative reads of the rx
	 * buffer can be ahead of the index in the status block
	 */
	rmb();

	DP(NETIF_MSG_RX_STATUS,
	   "queue[%d]: hw_comp_cons %u sw_comp_cons %u\n",
	   fp->index, hw_comp_cons, sw_comp_cons);

	while (sw_comp_cons != hw_comp_cons) {
		struct sw_rx_bd *rx_buf = NULL;
		struct sk_buff *skb;
		union eth_rx_cqe *cqe;
		u8 cqe_fp_flags;
		u16 len, pad;

		comp_ring_cons = RCQ_BD(sw_comp_cons);
		bd_prod = RX_BD(bd_prod);
		bd_cons = RX_BD(bd_cons);

		/* Prefetch the page containing the BD descriptor
		   at producer's index. It will be needed when new skb is
		   allocated */
		prefetch((void *)(PAGE_ALIGN((unsigned long)
					     (&fp->rx_desc_ring[bd_prod])) -
				  PAGE_SIZE + 1));

		cqe = &fp->rx_comp_ring[comp_ring_cons];
		cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;

		DP(NETIF_MSG_RX_STATUS, "CQE type %x err %x status %x"
		   " queue %x vlan %x len %u\n", CQE_TYPE(cqe_fp_flags),
		   cqe_fp_flags, cqe->fast_path_cqe.status_flags,
		   le32_to_cpu(cqe->fast_path_cqe.rss_hash_result),
		   le16_to_cpu(cqe->fast_path_cqe.vlan_tag),
		   le16_to_cpu(cqe->fast_path_cqe.pkt_len));

		/* is this a slowpath msg? */
		if (unlikely(CQE_TYPE(cqe_fp_flags))) {
			bnx2x_sp_event(fp, cqe);
			goto next_cqe;

		/* this is an rx packet */
		} else {
			rx_buf = &fp->rx_buf_ring[bd_cons];
			skb = rx_buf->skb;
			len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
			pad = cqe->fast_path_cqe.placement_offset;

			/* If CQE is marked both TPA_START and TPA_END
			   it is a non-TPA CQE */
			if ((!fp->disable_tpa) &&
			    (TPA_TYPE(cqe_fp_flags) !=
					(TPA_TYPE_START | TPA_TYPE_END))) {
				u16 queue = cqe->fast_path_cqe.queue_index;

				if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_START) {
					DP(NETIF_MSG_RX_STATUS,
					   "calling tpa_start on queue %d\n",
					   queue);

					bnx2x_tpa_start(fp, queue, skb,
							bd_cons, bd_prod);
					goto next_rx;
				}

				if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_END) {
					DP(NETIF_MSG_RX_STATUS,
					   "calling tpa_stop on queue %d\n",
					   queue);

					if (!BNX2X_RX_SUM_FIX(cqe))
						BNX2X_ERR("STOP on none TCP "
							  "data\n");

					/* This is a size of the linear data
					   on this skb */
					len = le16_to_cpu(cqe->fast_path_cqe.
								len_on_bd);
					bnx2x_tpa_stop(bp, fp, queue, pad,
						    len, cqe, comp_ring_cons);
#ifdef BNX2X_STOP_ON_ERROR
					if (bp->panic)
						return 0;
#endif

					bnx2x_update_sge_prod(fp,
							&cqe->fast_path_cqe);
					goto next_cqe;
				}
			}

			pci_dma_sync_single_for_device(bp->pdev,
					pci_unmap_addr(rx_buf, mapping),
						       pad + RX_COPY_THRESH,
						       PCI_DMA_FROMDEVICE);
			prefetch(skb);
			prefetch(((char *)(skb)) + 128);

			/* is this an error packet? */
			if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
				DP(NETIF_MSG_RX_ERR,
				   "ERROR flags %x rx packet %u\n",
				   cqe_fp_flags, sw_comp_cons);
				fp->eth_q_stats.rx_err_discard_pkt++;
				goto reuse_rx;
			}

			/* Since we don't have a jumbo ring
			 * copy small packets if mtu > 1500
			 */
			if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
			    (len <= RX_COPY_THRESH)) {
				struct sk_buff *new_skb;

				new_skb = netdev_alloc_skb(bp->dev,
							   len + pad);
				if (new_skb == NULL) {
					DP(NETIF_MSG_RX_ERR,
					   "ERROR packet dropped "
					   "because of alloc failure\n");
					fp->eth_q_stats.rx_skb_alloc_failed++;
					goto reuse_rx;
				}

				/* aligned copy */
				skb_copy_from_linear_data_offset(skb, pad,
						    new_skb->data + pad, len);
				skb_reserve(new_skb, pad);
				skb_put(new_skb, len);

				bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);

				skb = new_skb;

			} else
			if (likely(bnx2x_alloc_rx_skb(bp, fp, bd_prod) == 0)) {
				pci_unmap_single(bp->pdev,
					pci_unmap_addr(rx_buf, mapping),
						 bp->rx_buf_size,
						 PCI_DMA_FROMDEVICE);
				skb_reserve(skb, pad);
				skb_put(skb, len);

			} else {
				DP(NETIF_MSG_RX_ERR,
				   "ERROR packet dropped because "
				   "of alloc failure\n");
				fp->eth_q_stats.rx_skb_alloc_failed++;
reuse_rx:
				bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
				goto next_rx;
			}

			skb->protocol = eth_type_trans(skb, bp->dev);

			skb->ip_summed = CHECKSUM_NONE;
			if (bp->rx_csum) {
				if (likely(BNX2X_RX_CSUM_OK(cqe)))
					skb->ip_summed = CHECKSUM_UNNECESSARY;
				else
					fp->eth_q_stats.hw_csum_err++;
			}
		}

		skb_record_rx_queue(skb, fp->index);

#ifdef BCM_VLAN
		if ((bp->vlgrp != NULL) && (bp->flags & HW_VLAN_RX_FLAG) &&
		    (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
		     PARSING_FLAGS_VLAN))
			vlan_hwaccel_receive_skb(skb, bp->vlgrp,
				le16_to_cpu(cqe->fast_path_cqe.vlan_tag));
		else
#endif
			netif_receive_skb(skb);

next_rx:
		rx_buf->skb = NULL;

		bd_cons = NEXT_RX_IDX(bd_cons);
		bd_prod = NEXT_RX_IDX(bd_prod);
		bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
		rx_pkt++;
next_cqe:
		sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
		sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);

		if (rx_pkt == budget)
			break;
	} /* while */

	fp->rx_bd_cons = bd_cons;
	fp->rx_bd_prod = bd_prod_fw;
	fp->rx_comp_cons = sw_comp_cons;
	fp->rx_comp_prod = sw_comp_prod;

	/* Update producers */
	bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
			     fp->rx_sge_prod);

	fp->rx_pkt += rx_pkt;
	fp->rx_calls++;

	return rx_pkt;
}
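
/* Note the two producer/consumer pairs above: bd_cons/bd_prod walk the
 * RX BD ring (skipping "next page" BDs via NEXT_RX_IDX), while
 * sw_comp_cons/sw_comp_prod walk the completion queue; bd_prod_fw is the
 * value reported to the FW through bnx2x_update_rx_prod().
 */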
static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
{
	struct bnx2x_fastpath *fp = fp_cookie;
	struct bnx2x *bp = fp->bp;

	/* Return here if interrupt is disabled */
	if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
		DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
		return IRQ_HANDLED;
	}

	DP(BNX2X_MSG_FP, "got an MSI-X interrupt on IDX:SB [%d:%d]\n",
	   fp->index, fp->sb_id);
	bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return IRQ_HANDLED;
#endif

	/* Handle Rx or Tx according to MSI-X vector */
	if (fp->is_rx_queue) {
		prefetch(fp->rx_cons_sb);
		prefetch(&fp->status_blk->u_status_block.status_block_index);

		napi_schedule(&bnx2x_fp(bp, fp->index, napi));

	} else {
		prefetch(fp->tx_cons_sb);
		prefetch(&fp->status_blk->c_status_block.status_block_index);

		bnx2x_update_fpsb_idx(fp);
		rmb();
		bnx2x_tx_int(fp);

		/* Re-enable interrupts */
		bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID,
			     le16_to_cpu(fp->fp_u_idx), IGU_INT_NOP, 1);
		bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID,
			     le16_to_cpu(fp->fp_c_idx), IGU_INT_ENABLE, 1);
	}

	return IRQ_HANDLED;
}
static irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
{
	struct bnx2x *bp = netdev_priv(dev_instance);
	u16 status = bnx2x_ack_int(bp);
	u16 mask;
	int i;

	/* Return here if interrupt is shared and it's not for us */
	if (unlikely(status == 0)) {
		DP(NETIF_MSG_INTR, "not our interrupt!\n");
		return IRQ_NONE;
	}
	DP(NETIF_MSG_INTR, "got an interrupt status 0x%x\n", status);

	/* Return here if interrupt is disabled */
	if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
		DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
		return IRQ_HANDLED;
	}

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return IRQ_HANDLED;
#endif

	for (i = 0; i < BNX2X_NUM_QUEUES(bp); i++) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		mask = 0x2 << fp->sb_id;
		if (status & mask) {
			/* Handle Rx or Tx according to SB id */
			if (fp->is_rx_queue) {
				prefetch(fp->rx_cons_sb);
				prefetch(&fp->status_blk->u_status_block.
							status_block_index);

				napi_schedule(&bnx2x_fp(bp, fp->index, napi));

			} else {
				prefetch(fp->tx_cons_sb);
				prefetch(&fp->status_blk->c_status_block.
							status_block_index);

				bnx2x_update_fpsb_idx(fp);
				rmb();
				bnx2x_tx_int(fp);

				/* Re-enable interrupts */
				bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID,
					     le16_to_cpu(fp->fp_u_idx),
					     IGU_INT_NOP, 1);
				bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID,
					     le16_to_cpu(fp->fp_c_idx),
					     IGU_INT_ENABLE, 1);
			}
			status &= ~mask;
		}
	}

	if (unlikely(status & 0x1)) {
		queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);

		status &= ~0x1;
		if (!status)
			return IRQ_HANDLED;
	}

	if (status)
		DP(NETIF_MSG_INTR, "got an unknown interrupt! (status %u)\n",
		   status);

	return IRQ_HANDLED;
}
/* end of fast path */

static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event);

/* Link */

/*
 * General service functions
 */
static int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource)
{
	u32 lock_status;
	u32 resource_bit = (1 << resource);
	int func = BP_FUNC(bp);
	u32 hw_lock_control_reg;
	int cnt;

	/* Validating that the resource is within range */
	if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
		DP(NETIF_MSG_HW,
		   "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
		   resource, HW_LOCK_MAX_RESOURCE_VALUE);
		return -EINVAL;
	}

	if (func <= 5) {
		hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
	} else {
		hw_lock_control_reg =
				(MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
	}

	/* Validating that the resource is not already taken */
	lock_status = REG_RD(bp, hw_lock_control_reg);
	if (lock_status & resource_bit) {
		DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n",
		   lock_status, resource_bit);
		return -EEXIST;
	}

	/* Try for 5 second every 5ms */
	for (cnt = 0; cnt < 1000; cnt++) {
		/* Try to acquire the lock */
		REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
		lock_status = REG_RD(bp, hw_lock_control_reg);
		if (lock_status & resource_bit)
			return 0;

		msleep(5);
	}
	DP(NETIF_MSG_HW, "Timeout\n");
	return -EAGAIN;
}
static int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource)
{
	u32 lock_status;
	u32 resource_bit = (1 << resource);
	int func = BP_FUNC(bp);
	u32 hw_lock_control_reg;

	/* Validating that the resource is within range */
	if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
		DP(NETIF_MSG_HW,
		   "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
		   resource, HW_LOCK_MAX_RESOURCE_VALUE);
		return -EINVAL;
	}

	if (func <= 5) {
		hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
	} else {
		hw_lock_control_reg =
				(MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
	}

	/* Validating that the resource is currently taken */
	lock_status = REG_RD(bp, hw_lock_control_reg);
	if (!(lock_status & resource_bit)) {
		DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n",
		   lock_status, resource_bit);
		return -EFAULT;
	}

	REG_WR(bp, hw_lock_control_reg, resource_bit);
	return 0;
}
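
/* Illustrative pairing of the HW lock helpers (the resource id must be
 * one of the HW_LOCK_RESOURCE_* values):
 *
 *	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
 *	... touch the shared GPIO register ...
 *	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
 *
 * The set register at hw_lock_control_reg + 4 and the clear register at
 * hw_lock_control_reg make each attempt atomic from the driver's side.
 */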
/* HW Lock for shared dual port PHYs */
static void bnx2x_acquire_phy_lock(struct bnx2x *bp)
{
	mutex_lock(&bp->port.phy_mutex);

	if (bp->port.need_hw_lock)
		bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
}

static void bnx2x_release_phy_lock(struct bnx2x *bp)
{
	if (bp->port.need_hw_lock)
		bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);

	mutex_unlock(&bp->port.phy_mutex);
}
int bnx2x_get_gpio(struct bnx2x *bp, int gpio_num, u8 port)
{
	/* The GPIO should be swapped if swap register is set and active */
	int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
			 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
	int gpio_shift = gpio_num +
			(gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
	u32 gpio_mask = (1 << gpio_shift);
	u32 gpio_reg;
	int value;

	if (gpio_num > MISC_REGISTERS_GPIO_3) {
		BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
		return -EINVAL;
	}

	/* read GPIO value */
	gpio_reg = REG_RD(bp, MISC_REG_GPIO);

	/* get the requested pin value */
	if ((gpio_reg & gpio_mask) == gpio_mask)
		value = 1;
	else
		value = 0;

	DP(NETIF_MSG_LINK, "pin %d value 0x%x\n", gpio_num, value);

	return value;
}
int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
{
	/* The GPIO should be swapped if swap register is set and active */
	int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
			 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
	int gpio_shift = gpio_num +
			(gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
	u32 gpio_mask = (1 << gpio_shift);
	u32 gpio_reg;

	if (gpio_num > MISC_REGISTERS_GPIO_3) {
		BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
		return -EINVAL;
	}

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
	/* read GPIO and mask except the float bits */
	gpio_reg = (REG_RD(bp, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT);

	switch (mode) {
	case MISC_REGISTERS_GPIO_OUTPUT_LOW:
		DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output low\n",
		   gpio_num, gpio_shift);
		/* clear FLOAT and set CLR */
		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
		gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS);
		break;

	case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
		DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output high\n",
		   gpio_num, gpio_shift);
		/* clear FLOAT and set SET */
		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
		gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_SET_POS);
		break;

	case MISC_REGISTERS_GPIO_INPUT_HI_Z:
		DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> input\n",
		   gpio_num, gpio_shift);
		/* set FLOAT */
		gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
		break;

	default:
		break;
	}

	REG_WR(bp, MISC_REG_GPIO, gpio_reg);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);

	return 0;
}
int bnx2x_set_gpio_int(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
{
	/* The GPIO should be swapped if swap register is set and active */
	int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
			 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
	int gpio_shift = gpio_num +
			(gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
	u32 gpio_mask = (1 << gpio_shift);
	u32 gpio_reg;

	if (gpio_num > MISC_REGISTERS_GPIO_3) {
		BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
		return -EINVAL;
	}

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
	/* read GPIO int */
	gpio_reg = REG_RD(bp, MISC_REG_GPIO_INT);

	switch (mode) {
	case MISC_REGISTERS_GPIO_INT_OUTPUT_CLR:
		DP(NETIF_MSG_LINK, "Clear GPIO INT %d (shift %d) -> "
				   "output low\n", gpio_num, gpio_shift);
		/* clear SET and set CLR */
		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
		gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
		break;

	case MISC_REGISTERS_GPIO_INT_OUTPUT_SET:
		DP(NETIF_MSG_LINK, "Set GPIO INT %d (shift %d) -> "
				   "output high\n", gpio_num, gpio_shift);
		/* clear CLR and set SET */
		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
		gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
		break;

	default:
		break;
	}

	REG_WR(bp, MISC_REG_GPIO_INT, gpio_reg);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);

	return 0;
}
static int bnx2x_set_spio(struct bnx2x *bp, int spio_num, u32 mode)
{
	u32 spio_mask = (1 << spio_num);
	u32 spio_reg;

	if ((spio_num < MISC_REGISTERS_SPIO_4) ||
	    (spio_num > MISC_REGISTERS_SPIO_7)) {
		BNX2X_ERR("Invalid SPIO %d\n", spio_num);
		return -EINVAL;
	}

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
	/* read SPIO and mask except the float bits */
	spio_reg = (REG_RD(bp, MISC_REG_SPIO) & MISC_REGISTERS_SPIO_FLOAT);

	switch (mode) {
	case MISC_REGISTERS_SPIO_OUTPUT_LOW:
		DP(NETIF_MSG_LINK, "Set SPIO %d -> output low\n", spio_num);
		/* clear FLOAT and set CLR */
		spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
		spio_reg |=  (spio_mask << MISC_REGISTERS_SPIO_CLR_POS);
		break;

	case MISC_REGISTERS_SPIO_OUTPUT_HIGH:
		DP(NETIF_MSG_LINK, "Set SPIO %d -> output high\n", spio_num);
		/* clear FLOAT and set SET */
		spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
		spio_reg |=  (spio_mask << MISC_REGISTERS_SPIO_SET_POS);
		break;

	case MISC_REGISTERS_SPIO_INPUT_HI_Z:
		DP(NETIF_MSG_LINK, "Set SPIO %d -> input\n", spio_num);
		/* set FLOAT */
		spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
		break;

	default:
		break;
	}

	REG_WR(bp, MISC_REG_SPIO, spio_reg);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);

	return 0;
}
static void bnx2x_calc_fc_adv(struct bnx2x *bp)
{
	switch (bp->link_vars.ieee_fc &
		MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK) {
	case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE:
		bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
					  ADVERTISED_Pause);
		break;

	case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH:
		bp->port.advertising |= (ADVERTISED_Asym_Pause |
					 ADVERTISED_Pause);
		break;

	case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC:
		bp->port.advertising |= ADVERTISED_Asym_Pause;
		break;

	default:
		bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
					  ADVERTISED_Pause);
		break;
	}
}
static void bnx2x_link_report(struct bnx2x *bp)
{
	if (bp->state == BNX2X_STATE_DISABLED) {
		netif_carrier_off(bp->dev);
		printk(KERN_ERR PFX "%s NIC Link is Down\n", bp->dev->name);
		return;
	}

	if (bp->link_vars.link_up) {
		if (bp->state == BNX2X_STATE_OPEN)
			netif_carrier_on(bp->dev);
		printk(KERN_INFO PFX "%s NIC Link is Up, ", bp->dev->name);

		printk("%d Mbps ", bp->link_vars.line_speed);

		if (bp->link_vars.duplex == DUPLEX_FULL)
			printk("full duplex");
		else
			printk("half duplex");

		if (bp->link_vars.flow_ctrl != BNX2X_FLOW_CTRL_NONE) {
			if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) {
				printk(", receive ");
				if (bp->link_vars.flow_ctrl &
				    BNX2X_FLOW_CTRL_TX)
					printk("& transmit ");
			} else {
				printk(", transmit ");
			}
			printk("flow control ON");
		}
		printk("\n");

	} else { /* link_down */
		netif_carrier_off(bp->dev);
		printk(KERN_ERR PFX "%s NIC Link is Down\n", bp->dev->name);
	}
}
static u8 bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode)
{
	if (!BP_NOMCP(bp)) {
		u8 rc;

		/* Initialize link parameters structure variables */
		/* It is recommended to turn off RX FC for jumbo frames
		   for better performance */
		if (bp->dev->mtu > 5000)
			bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_TX;
		else
			bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;

		bnx2x_acquire_phy_lock(bp);

		if (load_mode == LOAD_DIAG)
			bp->link_params.loopback_mode = LOOPBACK_XGXS_10;

		rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars);

		bnx2x_release_phy_lock(bp);

		bnx2x_calc_fc_adv(bp);

		if (CHIP_REV_IS_SLOW(bp) && bp->link_vars.link_up) {
			bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
			bnx2x_link_report(bp);
		}

		return rc;
	}
	BNX2X_ERR("Bootcode is missing - can not initialize link\n");
	return -EINVAL;
}
static void bnx2x_link_set(struct bnx2x *bp)
{
	if (!BP_NOMCP(bp)) {
		bnx2x_acquire_phy_lock(bp);
		bnx2x_phy_init(&bp->link_params, &bp->link_vars);
		bnx2x_release_phy_lock(bp);

		bnx2x_calc_fc_adv(bp);
	} else
		BNX2X_ERR("Bootcode is missing - can not set link\n");
}

static void bnx2x__link_reset(struct bnx2x *bp)
{
	if (!BP_NOMCP(bp)) {
		bnx2x_acquire_phy_lock(bp);
		bnx2x_link_reset(&bp->link_params, &bp->link_vars, 1);
		bnx2x_release_phy_lock(bp);
	} else
		BNX2X_ERR("Bootcode is missing - can not reset link\n");
}

static u8 bnx2x_link_test(struct bnx2x *bp)
{
	u8 rc = 0;

	bnx2x_acquire_phy_lock(bp);
	rc = bnx2x_test_link(&bp->link_params, &bp->link_vars);
	bnx2x_release_phy_lock(bp);

	return rc;
}
2239 static void bnx2x_init_port_minmax(struct bnx2x *bp)
2241 u32 r_param = bp->link_vars.line_speed / 8;
2242 u32 fair_periodic_timeout_usec;
2245 memset(&(bp->cmng.rs_vars), 0,
2246 sizeof(struct rate_shaping_vars_per_port));
2247 memset(&(bp->cmng.fair_vars), 0, sizeof(struct fairness_vars_per_port));
2249 /* 100 usec in SDM ticks = 25 since each tick is 4 usec */
2250 bp->cmng.rs_vars.rs_periodic_timeout = RS_PERIODIC_TIMEOUT_USEC / 4;
2252 /* this is the threshold below which no timer arming will occur
2253 1.25 coefficient is for the threshold to be a little bigger
2254 than the real time, to compensate for timer in-accuracy */
2255 bp->cmng.rs_vars.rs_threshold =
2256 (RS_PERIODIC_TIMEOUT_USEC * r_param * 5) / 4;
2258 /* resolution of fairness timer */
2259 fair_periodic_timeout_usec = QM_ARB_BYTES / r_param;
2260 /* for 10G it is 1000 usec; for 1G it is 10000 usec */
2261 t_fair = T_FAIR_COEF / bp->link_vars.line_speed;
2263 /* this is the threshold below which we won't arm the timer anymore */
2264 bp->cmng.fair_vars.fair_threshold = QM_ARB_BYTES;
2266 /* we multiply by 1e3/8 to get bytes/msec.
2267 We don't want the credits to exceed a credit
2268 of t_fair*FAIR_MEM (the algorithm resolution) */
2269 bp->cmng.fair_vars.upper_bound = r_param * t_fair * FAIR_MEM;
2270 /* since each tick is 4 usec */
2271 bp->cmng.fair_vars.fairness_timeout = fair_periodic_timeout_usec / 4;
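/*
 * Editor's illustration (not driver code): the arithmetic above, worked for
 * a 10000 Mbps link. The constants are inferred from this function's own
 * comments (RS_PERIODIC_TIMEOUT_USEC = 100 usec, and T_FAIR_COEF = 10000000
 * so that t_fair is 1000 usec at 10G); QM_ARB_BYTES is not shown here, so it
 * stays a parameter. Assumes line_speed_mbps > 0.
 */
#include <stdint.h>

struct minmax_example {
	uint32_t rs_periodic_timeout;	/* in 4-usec SDM ticks */
	uint32_t rs_threshold;		/* bytes */
	uint32_t t_fair;		/* usec */
	uint32_t fairness_timeout;	/* in 4-usec SDM ticks */
};

static struct minmax_example minmax_example(uint32_t line_speed_mbps,
					    uint32_t qm_arb_bytes)
{
	struct minmax_example ex;
	uint32_t r_param = line_speed_mbps / 8;	/* bytes per usec */

	ex.rs_periodic_timeout = 100 / 4;		/* 25 ticks */
	ex.rs_threshold = (100 * r_param * 5) / 4;	/* 1.25 * 100us * rate
							   = 156250 B at 10G */
	ex.t_fair = 10000000 / line_speed_mbps;		/* 1000 usec at 10G */
	ex.fairness_timeout = (qm_arb_bytes / r_param) / 4;
	return ex;
}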
2274 /* Calculates the sum of vn_min_rates.
2275 It's needed for further normalizing of the min_rates.
2276 Returns:
2277 sum of vn_min_rates.
2278 or
2279 0 - if all the min_rates are 0.
2280 In the latter case the fairness algorithm should be deactivated.
2281 If not all min_rates are zero then those that are zero will be set to 1.
2282 */
2283 static void bnx2x_calc_vn_weight_sum(struct bnx2x *bp)
2285 int all_zero = 1;
2286 int port = BP_PORT(bp);
2287 int vn;
2289 bp->vn_weight_sum = 0;
2290 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
2291 int func = 2*vn + port;
2292 u32 vn_cfg = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
2293 u32 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
2294 FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
2296 /* Skip hidden vns */
2297 if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE)
2298 continue;
2300 /* If min rate is zero - set it to 1 */
2301 if (!vn_min_rate)
2302 vn_min_rate = DEF_MIN_RATE;
2303 else
2304 all_zero = 0;
2306 bp->vn_weight_sum += vn_min_rate;
2309 /* ... only if all min rates are zeros - disable fairness */
2310 if (all_zero)
2311 bp->vn_weight_sum = 0;
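/*
 * Editor's sketch (not driver code): the min-rate normalization rule above,
 * reduced to plain arrays. A hidden vn contributes nothing; a zero min rate
 * is bumped to a default (EX_DEF_MIN_RATE is a placeholder for DEF_MIN_RATE,
 * whose value is not shown here); if every rate was zero the sum is forced
 * to 0 so the fairness algorithm gets disabled.
 */
#include <stdbool.h>
#include <stdint.h>

#define EX_DEF_MIN_RATE 100	/* assumed stand-in for DEF_MIN_RATE */

static uint32_t vn_weight_sum_example(const uint32_t *min_rate,
				      const bool *hidden, int nvn)
{
	uint32_t sum = 0;
	bool all_zero = true;
	int vn;

	for (vn = 0; vn < nvn; vn++) {
		uint32_t rate = min_rate[vn];

		if (hidden[vn])
			continue;
		if (rate == 0)
			rate = EX_DEF_MIN_RATE;
		else
			all_zero = false;
		sum += rate;
	}
	/* only if all min rates were zero - disable fairness */
	return all_zero ? 0 : sum;
}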
2314 static void bnx2x_init_vn_minmax(struct bnx2x *bp, int func)
2316 struct rate_shaping_vars_per_vn m_rs_vn;
2317 struct fairness_vars_per_vn m_fair_vn;
2318 u32 vn_cfg = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
2319 u16 vn_min_rate, vn_max_rate;
2322 /* If function is hidden - set min and max to zeroes */
2323 if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) {
2324 vn_min_rate = 0;
2325 vn_max_rate = 0;
2327 } else {
2328 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
2329 FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
2330 /* If fairness is enabled (not all min rates are zeroes) and
2331 if current min rate is zero - set it to 1.
2332 This is a requirement of the algorithm. */
2333 if (bp->vn_weight_sum && (vn_min_rate == 0))
2334 vn_min_rate = DEF_MIN_RATE;
2335 vn_max_rate = ((vn_cfg & FUNC_MF_CFG_MAX_BW_MASK) >>
2336 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
2340 "func %d: vn_min_rate=%d vn_max_rate=%d vn_weight_sum=%d\n",
2341 func, vn_min_rate, vn_max_rate, bp->vn_weight_sum);
2343 memset(&m_rs_vn, 0, sizeof(struct rate_shaping_vars_per_vn));
2344 memset(&m_fair_vn, 0, sizeof(struct fairness_vars_per_vn));
2346 /* global vn counter - maximal Mbps for this vn */
2347 m_rs_vn.vn_counter.rate = vn_max_rate;
2349 /* quota - number of bytes transmitted in this period */
2350 m_rs_vn.vn_counter.quota =
2351 (vn_max_rate * RS_PERIODIC_TIMEOUT_USEC) / 8;
2353 if (bp->vn_weight_sum) {
2354 /* credit for each period of the fairness algorithm:
2355 number of bytes in T_FAIR (the vn share the port rate).
2356 vn_weight_sum should not be larger than 10000, thus
2357 T_FAIR_COEF / (8 * vn_weight_sum) will always be greater
2358 than zero */
2359 m_fair_vn.vn_credit_delta =
2360 max((u32)(vn_min_rate * (T_FAIR_COEF /
2361 (8 * bp->vn_weight_sum))),
2362 (u32)(bp->cmng.fair_vars.fair_threshold * 2));
2363 DP(NETIF_MSG_IFUP, "m_fair_vn.vn_credit_delta=%d\n",
2364 m_fair_vn.vn_credit_delta);
2367 /* Store it to internal memory */
2368 for (i = 0; i < sizeof(struct rate_shaping_vars_per_vn)/4; i++)
2369 REG_WR(bp, BAR_XSTRORM_INTMEM +
2370 XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func) + i * 4,
2371 ((u32 *)(&m_rs_vn))[i]);
2373 for (i = 0; i < sizeof(struct fairness_vars_per_vn)/4; i++)
2374 REG_WR(bp, BAR_XSTRORM_INTMEM +
2375 XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func) + i * 4,
2376 ((u32 *)(&m_fair_vn))[i]);
2380 /* This function is called upon link interrupt */
2381 static void bnx2x_link_attn(struct bnx2x *bp)
2383 /* Make sure that we are synced with the current statistics */
2384 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2386 bnx2x_link_update(&bp->link_params, &bp->link_vars);
2388 if (bp->link_vars.link_up) {
2390 /* dropless flow control */
2391 if (CHIP_IS_E1H(bp) && bp->dropless_fc) {
2392 int port = BP_PORT(bp);
2393 u32 pause_enabled = 0;
2395 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
2396 pause_enabled = 1;
2398 REG_WR(bp, BAR_USTRORM_INTMEM +
2399 USTORM_ETH_PAUSE_ENABLED_OFFSET(port),
2400 pause_enabled);
2403 if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
2404 struct host_port_stats *pstats;
2406 pstats = bnx2x_sp(bp, port_stats);
2407 /* reset old bmac stats */
2408 memset(&(pstats->mac_stx[0]), 0,
2409 sizeof(struct mac_stx));
2411 if ((bp->state == BNX2X_STATE_OPEN) ||
2412 (bp->state == BNX2X_STATE_DISABLED))
2413 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2416 /* indicate link status */
2417 bnx2x_link_report(bp);
2420 int port = BP_PORT(bp);
2424 /* Set the attention towards other drivers on the same port */
2425 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
2426 if (vn == BP_E1HVN(bp))
2427 continue;
2429 func = ((vn << 1) | port);
2430 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
2431 (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
2434 if (bp->link_vars.link_up) {
2437 /* Init rate shaping and fairness contexts */
2438 bnx2x_init_port_minmax(bp);
2440 for (vn = VN_0; vn < E1HVN_MAX; vn++)
2441 bnx2x_init_vn_minmax(bp, 2*vn + port);
2443 /* Store it to internal memory */
2444 for (i = 0;
2445 i < sizeof(struct cmng_struct_per_port) / 4; i++)
2446 REG_WR(bp, BAR_XSTRORM_INTMEM +
2447 XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i*4,
2448 ((u32 *)(&bp->cmng))[i]);
2453 static void bnx2x__link_status_update(struct bnx2x *bp)
2455 int func = BP_FUNC(bp);
2457 if (bp->state != BNX2X_STATE_OPEN)
2458 return;
2460 bnx2x_link_status_update(&bp->link_params, &bp->link_vars);
2462 if (bp->link_vars.link_up)
2463 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2465 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2467 bp->mf_config = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
2468 bnx2x_calc_vn_weight_sum(bp);
2470 /* indicate link status */
2471 bnx2x_link_report(bp);
2474 static void bnx2x_pmf_update(struct bnx2x *bp)
2476 int port = BP_PORT(bp);
2477 u32 val;
2479 bp->port.pmf = 1;
2480 DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
2482 /* enable nig attention */
2483 val = (0xff0f | (1 << (BP_E1HVN(bp) + 4)));
2484 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
2485 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
2487 bnx2x_stats_handle(bp, STATS_EVENT_PMF);
2494 /*
2495 * General service functions
2496 */
2498 /* send the MCP a request, block until there is a reply */
2499 u32 bnx2x_fw_command(struct bnx2x *bp, u32 command)
2501 int func = BP_FUNC(bp);
2502 u32 seq = ++bp->fw_seq;
2503 u32 rc = 0;
2504 u32 cnt = 1;
2505 u8 delay = CHIP_REV_IS_SLOW(bp) ? 100 : 10;
2507 SHMEM_WR(bp, func_mb[func].drv_mb_header, (command | seq));
2508 DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB\n", (command | seq));
2510 do {
2511 /* let the FW do its magic ... */
2512 msleep(delay);
2514 rc = SHMEM_RD(bp, func_mb[func].fw_mb_header);
2516 /* Give the FW up to 2 seconds (200*10ms) */
2517 } while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 200));
2519 DP(BNX2X_MSG_MCP, "[after %d ms] read (%x) seq is (%x) from FW MB\n",
2520 cnt*delay, rc, seq);
2522 /* is this a reply to our command? */
2523 if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK))
2524 rc &= FW_MSG_CODE_MASK;
2527 BNX2X_ERR("FW failed to respond!\n");
2535 static void bnx2x_set_storm_rx_mode(struct bnx2x *bp);
2536 static void bnx2x_set_eth_mac_addr_e1h(struct bnx2x *bp, int set);
2537 static void bnx2x_set_rx_mode(struct net_device *dev);
2539 static void bnx2x_e1h_disable(struct bnx2x *bp)
2541 int port = BP_PORT(bp);
2544 bp->rx_mode = BNX2X_RX_MODE_NONE;
2545 bnx2x_set_storm_rx_mode(bp);
2547 netif_tx_disable(bp->dev);
2548 bp->dev->trans_start = jiffies; /* prevent tx timeout */
2550 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
2552 bnx2x_set_eth_mac_addr_e1h(bp, 0);
2554 for (i = 0; i < MC_HASH_SIZE; i++)
2555 REG_WR(bp, MC_HASH_OFFSET(bp, i), 0);
2557 netif_carrier_off(bp->dev);
2560 static void bnx2x_e1h_enable(struct bnx2x *bp)
2562 int port = BP_PORT(bp);
2564 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
2566 bnx2x_set_eth_mac_addr_e1h(bp, 1);
2568 /* Tx queues should only be re-enabled */
2569 netif_tx_wake_all_queues(bp->dev);
2571 /* Initialize the receive filter. */
2572 bnx2x_set_rx_mode(bp->dev);
2575 static void bnx2x_update_min_max(struct bnx2x *bp)
2577 int port = BP_PORT(bp);
2580 /* Init rate shaping and fairness contexts */
2581 bnx2x_init_port_minmax(bp);
2583 bnx2x_calc_vn_weight_sum(bp);
2585 for (vn = VN_0; vn < E1HVN_MAX; vn++)
2586 bnx2x_init_vn_minmax(bp, 2*vn + port);
2591 /* Set the attention towards other drivers on the same port */
2592 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
2593 if (vn == BP_E1HVN(bp))
2594 continue;
2596 func = ((vn << 1) | port);
2597 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
2598 (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
2601 /* Store it to internal memory */
2602 for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++)
2603 REG_WR(bp, BAR_XSTRORM_INTMEM +
2604 XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i*4,
2605 ((u32 *)(&bp->cmng))[i]);
2609 static void bnx2x_dcc_event(struct bnx2x *bp, u32 dcc_event)
2611 int func = BP_FUNC(bp);
2613 DP(BNX2X_MSG_MCP, "dcc_event 0x%x\n", dcc_event);
2614 bp->mf_config = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
2616 if (dcc_event & DRV_STATUS_DCC_DISABLE_ENABLE_PF) {
2618 if (bp->mf_config & FUNC_MF_CFG_FUNC_DISABLED) {
2619 DP(NETIF_MSG_IFDOWN, "mf_cfg function disabled\n");
2620 bp->state = BNX2X_STATE_DISABLED;
2622 bnx2x_e1h_disable(bp);
2624 DP(NETIF_MSG_IFUP, "mf_cfg function enabled\n");
2625 bp->state = BNX2X_STATE_OPEN;
2627 bnx2x_e1h_enable(bp);
2629 dcc_event &= ~DRV_STATUS_DCC_DISABLE_ENABLE_PF;
2631 if (dcc_event & DRV_STATUS_DCC_BANDWIDTH_ALLOCATION) {
2633 bnx2x_update_min_max(bp);
2634 dcc_event &= ~DRV_STATUS_DCC_BANDWIDTH_ALLOCATION;
2637 /* Report results to MCP */
2638 if (dcc_event)
2639 bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_FAILURE);
2640 else
2641 bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_OK);
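/*
 * Editor's sketch (not driver code): the handled-bits bookkeeping used in
 * bnx2x_dcc_event(), generalized. Each recognized sub-event handler clears
 * its bit from the event mask; a non-zero residue at the end means an
 * unsupported request, which selects the failure ack to the MCP.
 */
static u32 handle_events_example(u32 events, u32 supported_mask)
{
	u32 residue = events & ~supported_mask;	/* bits nobody will clear */

	/* ... per-bit handlers would run here, clearing bits they own ... */
	return residue;	/* 0 -> ack OK, else ack FAILURE */
}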
2644 /* must be called under the spq lock */
2645 static inline struct eth_spe *bnx2x_sp_get_next(struct bnx2x *bp)
2647 struct eth_spe *next_spe = bp->spq_prod_bd;
2649 if (bp->spq_prod_bd == bp->spq_last_bd) {
2650 bp->spq_prod_bd = bp->spq;
2651 bp->spq_prod_idx = 0;
2652 DP(NETIF_MSG_TIMER, "end of spq\n");
2653 } else {
2654 bp->spq_prod_bd++;
2655 bp->spq_prod_idx++;
2656 }
2658 return next_spe;
2659 }
2660 /* must be called under the spq lock */
2661 static inline void bnx2x_sp_prod_update(struct bnx2x *bp)
2663 int func = BP_FUNC(bp);
2665 /* Make sure that BD data is updated before writing the producer */
2666 wmb();
2668 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func),
2669 bp->spq_prod_idx);
2671 mmiowb();
2672 }
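/*
 * Editor's sketch (not driver code): the publish pattern enforced above.
 * The descriptor contents must be globally visible before the producer
 * index is, otherwise the chip could fetch a stale descriptor. Types and
 * names here are illustrative; wmb()/writel() are the usual kernel
 * primitives.
 */
struct ex_ring {
	struct eth_spe *bd;	/* descriptor array */
	u16 prod;		/* producer index */
};

static void ex_publish(struct ex_ring *r, void __iomem *doorbell,
		       const struct eth_spe *spe)
{
	r->bd[r->prod] = *spe;		/* 1. fill the descriptor */
	wmb();				/* 2. descriptor before index */
	writel(++r->prod, doorbell);	/* 3. tell the hardware */
}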
2673 /* the slow path queue is odd since completions arrive on the fastpath ring */
2674 static int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
2675 u32 data_hi, u32 data_lo, int common)
2677 struct eth_spe *spe;
2679 DP(BNX2X_MSG_SP/*NETIF_MSG_TIMER*/,
2680 "SPQE (%x:%x) command %d hw_cid %x data (%x:%x) left %x\n",
2681 (u32)U64_HI(bp->spq_mapping), (u32)(U64_LO(bp->spq_mapping) +
2682 (void *)bp->spq_prod_bd - (void *)bp->spq), command,
2683 HW_CID(bp, cid), data_hi, data_lo, bp->spq_left);
2685 #ifdef BNX2X_STOP_ON_ERROR
2686 if (unlikely(bp->panic))
2687 return -EIO;
2688 #endif
2690 spin_lock_bh(&bp->spq_lock);
2692 if (!bp->spq_left) {
2693 BNX2X_ERR("BUG! SPQ ring full!\n");
2694 spin_unlock_bh(&bp->spq_lock);
2695 return -EBUSY;
2696 }
2699 spe = bnx2x_sp_get_next(bp);
2701 /* CID needs port number to be encoded in it */
2702 spe->hdr.conn_and_cmd_data =
2703 cpu_to_le32(((command << SPE_HDR_CMD_ID_SHIFT) |
2704 HW_CID(bp, cid)));
2705 spe->hdr.type = cpu_to_le16(ETH_CONNECTION_TYPE);
2706 if (common)
2707 spe->hdr.type |=
2708 cpu_to_le16((1 << SPE_HDR_COMMON_RAMROD_SHIFT));
2710 spe->data.mac_config_addr.hi = cpu_to_le32(data_hi);
2711 spe->data.mac_config_addr.lo = cpu_to_le32(data_lo);
2713 bp->spq_left--;
2715 bnx2x_sp_prod_update(bp);
2716 spin_unlock_bh(&bp->spq_lock);
2717 return 0;
2718 }
2720 /* acquire split MCP access lock register */
2721 static int bnx2x_acquire_alr(struct bnx2x *bp)
2723 u32 i, j, val;
2724 int rc = 0;
2726 might_sleep();
2727 i = 100;
2728 for (j = 0; j < i*10; j++) {
2729 val = (1L << 31);
2730 REG_WR(bp, GRCBASE_MCP + 0x9c, val);
2731 val = REG_RD(bp, GRCBASE_MCP + 0x9c);
2732 if (val & (1L << 31))
2733 break;
2735 msleep(5);
2736 }
2737 if (!(val & (1L << 31))) {
2738 BNX2X_ERR("Cannot acquire MCP access lock register\n");
2739 rc = -EBUSY;
2740 }
2742 return rc;
2743 }
2745 /* release split MCP access lock register */
2746 static void bnx2x_release_alr(struct bnx2x *bp)
2748 u32 val = 0;
2750 REG_WR(bp, GRCBASE_MCP + 0x9c, val);
2751 }
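/*
 * Editor's sketch (not driver code): the split-lock protocol used by the
 * two helpers above. Bit 31 of the GRC register at GRCBASE_MCP + 0x9c acts
 * as a hardware test-and-set: writing it requests the lock, reading it back
 * tells you whether you got it, and writing 0 releases it.
 */
static int ex_try_lock(struct bnx2x *bp)
{
	u32 val;

	REG_WR(bp, GRCBASE_MCP + 0x9c, 1L << 31);	/* request */
	val = REG_RD(bp, GRCBASE_MCP + 0x9c);		/* did we win? */
	return (val & (1L << 31)) != 0;
}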
2753 static inline u16 bnx2x_update_dsb_idx(struct bnx2x *bp)
2755 struct host_def_status_block *def_sb = bp->def_status_blk;
2756 u16 rc = 0;
2758 barrier(); /* status block is written to by the chip */
2759 if (bp->def_att_idx != def_sb->atten_status_block.attn_bits_index) {
2760 bp->def_att_idx = def_sb->atten_status_block.attn_bits_index;
2761 rc |= 1;
2762 }
2763 if (bp->def_c_idx != def_sb->c_def_status_block.status_block_index) {
2764 bp->def_c_idx = def_sb->c_def_status_block.status_block_index;
2765 rc |= 2;
2766 }
2767 if (bp->def_u_idx != def_sb->u_def_status_block.status_block_index) {
2768 bp->def_u_idx = def_sb->u_def_status_block.status_block_index;
2769 rc |= 4;
2770 }
2771 if (bp->def_x_idx != def_sb->x_def_status_block.status_block_index) {
2772 bp->def_x_idx = def_sb->x_def_status_block.status_block_index;
2773 rc |= 8;
2774 }
2775 if (bp->def_t_idx != def_sb->t_def_status_block.status_block_index) {
2776 bp->def_t_idx = def_sb->t_def_status_block.status_block_index;
2777 rc |= 16;
2778 }
2780 return rc;
2781 }
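/*
 * Editor's sketch (not driver code): the pattern above compresses "which
 * status-block indices moved since we last looked" into one bitmask, so the
 * slowpath task can cheaply test per-storm bits. Generalized:
 */
static u16 ex_collect_updates(u16 *cached, const u16 *current, int n)
{
	u16 rc = 0;
	int i;

	for (i = 0; i < n; i++) {
		if (cached[i] != current[i]) {
			cached[i] = current[i];
			rc |= 1 << i;
		}
	}
	return rc;
}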
2782 /*
2783 * slow path service functions
2784 */
2786 static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
2788 int port = BP_PORT(bp);
2789 u32 hc_addr = (HC_REG_COMMAND_REG + port*32 +
2790 COMMAND_REG_ATTN_BITS_SET);
2791 u32 aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
2792 MISC_REG_AEU_MASK_ATTN_FUNC_0;
2793 u32 nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 :
2794 NIG_REG_MASK_INTERRUPT_PORT0;
2795 u32 aeu_mask;
2796 u32 nig_mask = 0;
2798 if (bp->attn_state & asserted)
2799 BNX2X_ERR("IGU ERROR\n");
2801 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2802 aeu_mask = REG_RD(bp, aeu_addr);
2804 DP(NETIF_MSG_HW, "aeu_mask %x newly asserted %x\n",
2805 aeu_mask, asserted);
2806 aeu_mask &= ~(asserted & 0xff);
2807 DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
2809 REG_WR(bp, aeu_addr, aeu_mask);
2810 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2812 DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
2813 bp->attn_state |= asserted;
2814 DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
2816 if (asserted & ATTN_HARD_WIRED_MASK) {
2817 if (asserted & ATTN_NIG_FOR_FUNC) {
2819 bnx2x_acquire_phy_lock(bp);
2821 /* save nig interrupt mask */
2822 nig_mask = REG_RD(bp, nig_int_mask_addr);
2823 REG_WR(bp, nig_int_mask_addr, 0);
2825 bnx2x_link_attn(bp);
2827 /* handle unicore attn? */
2829 if (asserted & ATTN_SW_TIMER_4_FUNC)
2830 DP(NETIF_MSG_HW, "ATTN_SW_TIMER_4_FUNC!\n");
2832 if (asserted & GPIO_2_FUNC)
2833 DP(NETIF_MSG_HW, "GPIO_2_FUNC!\n");
2835 if (asserted & GPIO_3_FUNC)
2836 DP(NETIF_MSG_HW, "GPIO_3_FUNC!\n");
2838 if (asserted & GPIO_4_FUNC)
2839 DP(NETIF_MSG_HW, "GPIO_4_FUNC!\n");
2842 if (asserted & ATTN_GENERAL_ATTN_1) {
2843 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_1!\n");
2844 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_1, 0x0);
2846 if (asserted & ATTN_GENERAL_ATTN_2) {
2847 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_2!\n");
2848 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_2, 0x0);
2850 if (asserted & ATTN_GENERAL_ATTN_3) {
2851 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_3!\n");
2852 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_3, 0x0);
2855 if (asserted & ATTN_GENERAL_ATTN_4) {
2856 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_4!\n");
2857 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_4, 0x0);
2859 if (asserted & ATTN_GENERAL_ATTN_5) {
2860 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_5!\n");
2861 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_5, 0x0);
2863 if (asserted & ATTN_GENERAL_ATTN_6) {
2864 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_6!\n");
2865 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_6, 0x0);
2869 } /* if hardwired */
2871 DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
2872 asserted, hc_addr);
2873 REG_WR(bp, hc_addr, asserted);
2875 /* now set back the mask */
2876 if (asserted & ATTN_NIG_FOR_FUNC) {
2877 REG_WR(bp, nig_int_mask_addr, nig_mask);
2878 bnx2x_release_phy_lock(bp);
2882 static inline void bnx2x_fan_failure(struct bnx2x *bp)
2884 int port = BP_PORT(bp);
2886 /* mark the failure */
2887 bp->link_params.ext_phy_config &= ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
2888 bp->link_params.ext_phy_config |= PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE;
2889 SHMEM_WR(bp, dev_info.port_hw_config[port].external_phy_config,
2890 bp->link_params.ext_phy_config);
2892 /* log the failure */
2893 printk(KERN_ERR PFX "Fan Failure on Network Controller %s has caused"
2894 " the driver to shutdown the card to prevent permanent"
2895 " damage. Please contact Dell Support for assistance\n",
2899 static inline void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
2901 int port = BP_PORT(bp);
2903 u32 val, swap_val, swap_override;
2905 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
2906 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
2908 if (attn & AEU_INPUTS_ATTN_BITS_SPIO5) {
2910 val = REG_RD(bp, reg_offset);
2911 val &= ~AEU_INPUTS_ATTN_BITS_SPIO5;
2912 REG_WR(bp, reg_offset, val);
2914 BNX2X_ERR("SPIO5 hw attention\n");
2916 /* Fan failure attention */
2917 switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
2918 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
2919 /* Low power mode is controlled by GPIO 2 */
2920 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
2921 MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
2922 /* The PHY reset is controlled by GPIO 1 */
2923 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
2924 MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
2927 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
2928 /* The PHY reset is controlled by GPIO 1 */
2929 /* fake the port number to cancel the swap done in
2930 set_gpio() */
2931 swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
2932 swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
2933 port = (swap_val && swap_override) ^ 1;
2934 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
2935 MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
2941 bnx2x_fan_failure(bp);
2944 if (attn & (AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 |
2945 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1)) {
2946 bnx2x_acquire_phy_lock(bp);
2947 bnx2x_handle_module_detect_int(&bp->link_params);
2948 bnx2x_release_phy_lock(bp);
2951 if (attn & HW_INTERRUT_ASSERT_SET_0) {
2953 val = REG_RD(bp, reg_offset);
2954 val &= ~(attn & HW_INTERRUT_ASSERT_SET_0);
2955 REG_WR(bp, reg_offset, val);
2957 BNX2X_ERR("FATAL HW block attention set0 0x%x\n",
2958 (u32)(attn & HW_INTERRUT_ASSERT_SET_0));
2959 bnx2x_panic();
2963 static inline void bnx2x_attn_int_deasserted1(struct bnx2x *bp, u32 attn)
2967 if (attn & AEU_INPUTS_ATTN_BITS_DOORBELLQ_HW_INTERRUPT) {
2969 val = REG_RD(bp, DORQ_REG_DORQ_INT_STS_CLR);
2970 BNX2X_ERR("DB hw attention 0x%x\n", val);
2971 /* DORQ discard attention */
2973 BNX2X_ERR("FATAL error from DORQ\n");
2976 if (attn & HW_INTERRUT_ASSERT_SET_1) {
2978 int port = BP_PORT(bp);
2981 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 :
2982 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1);
2984 val = REG_RD(bp, reg_offset);
2985 val &= ~(attn & HW_INTERRUT_ASSERT_SET_1);
2986 REG_WR(bp, reg_offset, val);
2988 BNX2X_ERR("FATAL HW block attention set1 0x%x\n",
2989 (u32)(attn & HW_INTERRUT_ASSERT_SET_1));
2990 bnx2x_panic();
2994 static inline void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn)
2998 if (attn & AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) {
3000 val = REG_RD(bp, CFC_REG_CFC_INT_STS_CLR);
3001 BNX2X_ERR("CFC hw attention 0x%x\n", val);
3002 /* CFC error attention */
3004 BNX2X_ERR("FATAL error from CFC\n");
3007 if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) {
3009 val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_0);
3010 BNX2X_ERR("PXP hw attention 0x%x\n", val);
3011 /* RQ_USDMDP_FIFO_OVERFLOW */
3013 BNX2X_ERR("FATAL error from PXP\n");
3016 if (attn & HW_INTERRUT_ASSERT_SET_2) {
3018 int port = BP_PORT(bp);
3021 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 :
3022 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2);
3024 val = REG_RD(bp, reg_offset);
3025 val &= ~(attn & HW_INTERRUT_ASSERT_SET_2);
3026 REG_WR(bp, reg_offset, val);
3028 BNX2X_ERR("FATAL HW block attention set2 0x%x\n",
3029 (u32)(attn & HW_INTERRUT_ASSERT_SET_2));
3030 bnx2x_panic();
3034 static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
3038 if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) {
3040 if (attn & BNX2X_PMF_LINK_ASSERT) {
3041 int func = BP_FUNC(bp);
3043 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
3044 val = SHMEM_RD(bp, func_mb[func].drv_status);
3045 if (val & DRV_STATUS_DCC_EVENT_MASK)
3046 bnx2x_dcc_event(bp,
3047 (val & DRV_STATUS_DCC_EVENT_MASK));
3048 bnx2x__link_status_update(bp);
3049 if ((bp->port.pmf == 0) && (val & DRV_STATUS_PMF))
3050 bnx2x_pmf_update(bp);
3052 } else if (attn & BNX2X_MC_ASSERT_BITS) {
3054 BNX2X_ERR("MC assert!\n");
3055 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_10, 0);
3056 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_9, 0);
3057 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_8, 0);
3058 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_7, 0);
3059 bnx2x_panic();
3061 } else if (attn & BNX2X_MCP_ASSERT) {
3063 BNX2X_ERR("MCP assert!\n");
3064 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_11, 0);
3065 bnx2x_fw_dump(bp);
3068 BNX2X_ERR("Unknown HW assert! (attn 0x%x)\n", attn);
3071 if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) {
3072 BNX2X_ERR("LATCHED attention 0x%08x (masked)\n", attn);
3073 if (attn & BNX2X_GRC_TIMEOUT) {
3074 val = CHIP_IS_E1H(bp) ?
3075 REG_RD(bp, MISC_REG_GRC_TIMEOUT_ATTN) : 0;
3076 BNX2X_ERR("GRC time-out 0x%08x\n", val);
3078 if (attn & BNX2X_GRC_RSV) {
3079 val = CHIP_IS_E1H(bp) ?
3080 REG_RD(bp, MISC_REG_GRC_RSV_ATTN) : 0;
3081 BNX2X_ERR("GRC reserved 0x%08x\n", val);
3083 REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff);
3087 static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
3089 struct attn_route attn;
3090 struct attn_route group_mask;
3091 int port = BP_PORT(bp);
3097 /* need to take HW lock because MCP or other port might also
3098 try to handle this event */
3099 bnx2x_acquire_alr(bp);
3101 attn.sig[0] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
3102 attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
3103 attn.sig[2] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
3104 attn.sig[3] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
3105 DP(NETIF_MSG_HW, "attn: %08x %08x %08x %08x\n",
3106 attn.sig[0], attn.sig[1], attn.sig[2], attn.sig[3]);
3108 for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
3109 if (deasserted & (1 << index)) {
3110 group_mask = bp->attn_group[index];
3112 DP(NETIF_MSG_HW, "group[%d]: %08x %08x %08x %08x\n",
3113 index, group_mask.sig[0], group_mask.sig[1],
3114 group_mask.sig[2], group_mask.sig[3]);
3116 bnx2x_attn_int_deasserted3(bp,
3117 attn.sig[3] & group_mask.sig[3]);
3118 bnx2x_attn_int_deasserted1(bp,
3119 attn.sig[1] & group_mask.sig[1]);
3120 bnx2x_attn_int_deasserted2(bp,
3121 attn.sig[2] & group_mask.sig[2]);
3122 bnx2x_attn_int_deasserted0(bp,
3123 attn.sig[0] & group_mask.sig[0]);
3125 if ((attn.sig[0] & group_mask.sig[0] &
3126 HW_PRTY_ASSERT_SET_0) ||
3127 (attn.sig[1] & group_mask.sig[1] &
3128 HW_PRTY_ASSERT_SET_1) ||
3129 (attn.sig[2] & group_mask.sig[2] &
3130 HW_PRTY_ASSERT_SET_2))
3131 BNX2X_ERR("FATAL HW block parity attention\n");
3135 bnx2x_release_alr(bp);
3137 reg_addr = (HC_REG_COMMAND_REG + port*32 + COMMAND_REG_ATTN_BITS_CLR);
3139 val = ~deasserted;
3140 DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
3141 val, reg_addr);
3142 REG_WR(bp, reg_addr, val);
3144 if (~bp->attn_state & deasserted)
3145 BNX2X_ERR("IGU ERROR\n");
3147 reg_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
3148 MISC_REG_AEU_MASK_ATTN_FUNC_0;
3150 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
3151 aeu_mask = REG_RD(bp, reg_addr);
3153 DP(NETIF_MSG_HW, "aeu_mask %x newly deasserted %x\n",
3154 aeu_mask, deasserted);
3155 aeu_mask |= (deasserted & 0xff);
3156 DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
3158 REG_WR(bp, reg_addr, aeu_mask);
3159 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
3161 DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
3162 bp->attn_state &= ~deasserted;
3163 DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
3166 static void bnx2x_attn_int(struct bnx2x *bp)
3168 /* read local copy of bits */
3169 u32 attn_bits = le32_to_cpu(bp->def_status_blk->atten_status_block.
3170 attn_bits);
3171 u32 attn_ack = le32_to_cpu(bp->def_status_blk->atten_status_block.
3172 attn_bits_ack);
3173 u32 attn_state = bp->attn_state;
3175 /* look for changed bits */
3176 u32 asserted = attn_bits & ~attn_ack & ~attn_state;
3177 u32 deasserted = ~attn_bits & attn_ack & attn_state;
3180 "attn_bits %x attn_ack %x asserted %x deasserted %x\n",
3181 attn_bits, attn_ack, asserted, deasserted);
3183 if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state))
3184 BNX2X_ERR("BAD attention state\n");
3186 /* handle bits that were raised */
3187 if (asserted)
3188 bnx2x_attn_int_asserted(bp, asserted);
3190 if (deasserted)
3191 bnx2x_attn_int_deasserted(bp, deasserted);
3192 }
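/*
 * Editor's illustration (not driver code) of the attention edge detection
 * above. A bit is "asserted" when it is raised and neither acked nor
 * already tracked; "deasserted" when it dropped but is still acked and
 * tracked. Worked example:
 *	attn_bits  = 0b0110
 *	attn_ack   = 0b0011
 *	attn_state = 0b0011
 * asserted   = 0b0110 & ~0b0011 & ~0b0011 = 0b0100 (bit 2 newly raised)
 * deasserted = ~0b0110 & 0b0011 & 0b0011 = 0b0001 (bit 0 newly dropped)
 */
static inline u32 ex_asserted(u32 bits, u32 ack, u32 state)
{
	return bits & ~ack & ~state;
}

static inline u32 ex_deasserted(u32 bits, u32 ack, u32 state)
{
	return ~bits & ack & state;
}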
3194 static void bnx2x_sp_task(struct work_struct *work)
3196 struct bnx2x *bp = container_of(work, struct bnx2x, sp_task.work);
3197 u16 status;
3200 /* Return here if interrupt is disabled */
3201 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
3202 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
3203 return;
3204 }
3206 status = bnx2x_update_dsb_idx(bp);
3207 /* if (status == 0) */
3208 /* BNX2X_ERR("spurious slowpath interrupt!\n"); */
3210 DP(NETIF_MSG_INTR, "got a slowpath interrupt (updated %x)\n", status);
3212 /* HW attentions */
3213 if (status & 0x1)
3214 bnx2x_attn_int(bp);
3216 bnx2x_ack_sb(bp, DEF_SB_ID, ATTENTION_ID, le16_to_cpu(bp->def_att_idx),
3217 IGU_INT_NOP, 1);
3218 bnx2x_ack_sb(bp, DEF_SB_ID, USTORM_ID, le16_to_cpu(bp->def_u_idx),
3219 IGU_INT_NOP, 1);
3220 bnx2x_ack_sb(bp, DEF_SB_ID, CSTORM_ID, le16_to_cpu(bp->def_c_idx),
3221 IGU_INT_NOP, 1);
3222 bnx2x_ack_sb(bp, DEF_SB_ID, XSTORM_ID, le16_to_cpu(bp->def_x_idx),
3223 IGU_INT_NOP, 1);
3224 bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, le16_to_cpu(bp->def_t_idx),
3225 IGU_INT_ENABLE, 1);
3229 static irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
3231 struct net_device *dev = dev_instance;
3232 struct bnx2x *bp = netdev_priv(dev);
3234 /* Return here if interrupt is disabled */
3235 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
3236 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
3237 return IRQ_HANDLED;
3238 }
3240 bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, 0, IGU_INT_DISABLE, 0);
3242 #ifdef BNX2X_STOP_ON_ERROR
3243 if (unlikely(bp->panic))
3244 return IRQ_HANDLED;
3245 #endif
3247 queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
3249 return IRQ_HANDLED;
3250 }
3252 /* end of slow path */
3256 /****************************************************************************
3257 * Macros
3258 ****************************************************************************/
3260 /* sum[hi:lo] += add[hi:lo] */
3261 #define ADD_64(s_hi, a_hi, s_lo, a_lo) \
3262 do { \
3263 s_lo += a_lo; \
3264 s_hi += a_hi + ((s_lo < a_lo) ? 1 : 0); \
3265 } while (0)
3267 /* difference = minuend - subtrahend */
3268 #define DIFF_64(d_hi, m_hi, s_hi, d_lo, m_lo, s_lo) \
3269 do { \
3270 if (m_lo < s_lo) { \
3271 /* underflow */ \
3272 d_hi = m_hi - s_hi; \
3273 if (d_hi > 0) { \
3274 /* we can 'loan' 1 */ \
3275 d_hi--; \
3276 d_lo = m_lo + (UINT_MAX - s_lo) + 1; \
3277 } else { \
3278 /* m_hi <= s_hi */ \
3279 d_hi = 0; \
3280 d_lo = 0; \
3281 } \
3282 } else { \
3283 /* m_lo >= s_lo */ \
3284 if (m_hi < s_hi) { \
3285 d_hi = 0; \
3286 d_lo = 0; \
3287 } else { \
3288 /* m_hi >= s_hi */ \
3289 d_hi = m_hi - s_hi; \
3290 d_lo = m_lo - s_lo; \
3291 } \
3292 } \
3293 } while (0)
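/*
 * Editor's demo (not driver code): the split hi/lo arithmetic that
 * ADD_64/DIFF_64 implement, checked against native 64-bit math. Compiles
 * standalone with a C99 compiler.
 */
#include <assert.h>
#include <stdint.h>

static void add64(uint32_t *s_hi, uint32_t a_hi, uint32_t *s_lo, uint32_t a_lo)
{
	*s_lo += a_lo;
	*s_hi += a_hi + (*s_lo < a_lo ? 1 : 0);	/* carry out of the low word */
}

static void add64_demo(void)
{
	uint32_t hi = 0, lo = 0xffffffffu;

	add64(&hi, 0, &lo, 1);	/* low word wraps, carry into high word */
	assert(hi == 1 && lo == 0);
	assert((((uint64_t)hi << 32) | lo) == 0x100000000ull);
}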
3295 #define UPDATE_STAT64(s, t) \
3297 DIFF_64(diff.hi, new->s##_hi, pstats->mac_stx[0].t##_hi, \
3298 diff.lo, new->s##_lo, pstats->mac_stx[0].t##_lo); \
3299 pstats->mac_stx[0].t##_hi = new->s##_hi; \
3300 pstats->mac_stx[0].t##_lo = new->s##_lo; \
3301 ADD_64(pstats->mac_stx[1].t##_hi, diff.hi, \
3302 pstats->mac_stx[1].t##_lo, diff.lo); \
3305 #define UPDATE_STAT64_NIG(s, t) \
3307 DIFF_64(diff.hi, new->s##_hi, old->s##_hi, \
3308 diff.lo, new->s##_lo, old->s##_lo); \
3309 ADD_64(estats->t##_hi, diff.hi, \
3310 estats->t##_lo, diff.lo); \
3313 /* sum[hi:lo] += add */
3314 #define ADD_EXTEND_64(s_hi, s_lo, a) \
3315 do { \
3316 s_lo += a; \
3317 s_hi += (s_lo < a) ? 1 : 0; \
3318 } while (0)
3320 #define UPDATE_EXTEND_STAT(s) \
3321 do { \
3322 ADD_EXTEND_64(pstats->mac_stx[1].s##_hi, \
3323 pstats->mac_stx[1].s##_lo, \
3324 new->s); \
3325 } while (0)
3327 #define UPDATE_EXTEND_TSTAT(s, t) \
3329 diff = le32_to_cpu(tclient->s) - le32_to_cpu(old_tclient->s); \
3330 old_tclient->s = tclient->s; \
3331 ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3334 #define UPDATE_EXTEND_USTAT(s, t) \
3336 diff = le32_to_cpu(uclient->s) - le32_to_cpu(old_uclient->s); \
3337 old_uclient->s = uclient->s; \
3338 ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3341 #define UPDATE_EXTEND_XSTAT(s, t) \
3343 diff = le32_to_cpu(xclient->s) - le32_to_cpu(old_xclient->s); \
3344 old_xclient->s = xclient->s; \
3345 ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3348 /* minuend -= subtrahend */
3349 #define SUB_64(m_hi, s_hi, m_lo, s_lo) \
3351 DIFF_64(m_hi, m_hi, s_hi, m_lo, m_lo, s_lo); \
3354 /* minuend[hi:lo] -= subtrahend */
3355 #define SUB_EXTEND_64(m_hi, m_lo, s) \
3357 SUB_64(m_hi, 0, m_lo, s); \
3360 #define SUB_EXTEND_USTAT(s, t) \
3362 diff = le32_to_cpu(uclient->s) - le32_to_cpu(old_uclient->s); \
3363 SUB_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3366 /*
3367 * General service functions
3368 */
3370 static inline long bnx2x_hilo(u32 *hiref)
3372 u32 lo = *(hiref + 1);
3373 #if (BITS_PER_LONG == 64)
3374 u32 hi = *hiref;
3376 return HILO_U64(hi, lo);
3377 #else
3378 return lo;
3379 #endif
3380 }
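/*
 * Editor's note (not driver code): the statistics blocks store 64-bit
 * counters as adjacent u32s with the high word first, which is why
 * bnx2x_hilo() takes a pointer to the _hi field and reads the low word at
 * hiref + 1. Minimal model:
 */
static inline u64 ex_hilo(const u32 *hiref)
{
	return ((u64)hiref[0] << 32) | hiref[1];
}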
3382 /*
3383 * Init service functions
3384 */
3386 static void bnx2x_storm_stats_post(struct bnx2x *bp)
3388 if (!bp->stats_pending) {
3389 struct eth_query_ramrod_data ramrod_data = {0};
3392 ramrod_data.drv_counter = bp->stats_counter++;
3393 ramrod_data.collect_port = bp->port.pmf ? 1 : 0;
3394 for_each_queue(bp, i)
3395 ramrod_data.ctr_id_vector |= (1 << bp->fp[i].cl_id);
3397 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_STAT_QUERY, 0,
3398 ((u32 *)&ramrod_data)[1],
3399 ((u32 *)&ramrod_data)[0], 0);
3401 /* stats ramrod has its own slot on the spq */
3402 if (rc == 0)
3403 bp->stats_pending = 1;
3408 static void bnx2x_hw_stats_post(struct bnx2x *bp)
3410 struct dmae_command *dmae = &bp->stats_dmae;
3411 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3413 *stats_comp = DMAE_COMP_VAL;
3414 if (CHIP_REV_IS_SLOW(bp))
3415 return;
3418 if (bp->executer_idx) {
3419 int loader_idx = PMF_DMAE_C(bp);
3421 memset(dmae, 0, sizeof(struct dmae_command));
3423 dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3424 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3425 DMAE_CMD_DST_RESET |
3426 #ifdef __BIG_ENDIAN
3427 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3428 #else
3429 DMAE_CMD_ENDIANITY_DW_SWAP |
3430 #endif
3431 (BP_PORT(bp) ? DMAE_CMD_PORT_1 :
3433 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3434 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, dmae[0]));
3435 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, dmae[0]));
3436 dmae->dst_addr_lo = (DMAE_REG_CMD_MEM +
3437 sizeof(struct dmae_command) *
3438 (loader_idx + 1)) >> 2;
3439 dmae->dst_addr_hi = 0;
3440 dmae->len = sizeof(struct dmae_command) >> 2;
3441 if (CHIP_IS_E1(bp))
3442 dmae->len--;
3443 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx + 1] >> 2;
3444 dmae->comp_addr_hi = 0;
3448 bnx2x_post_dmae(bp, dmae, loader_idx);
3450 } else if (bp->func_stx) {
3452 bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
3456 static int bnx2x_stats_comp(struct bnx2x *bp)
3458 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3459 int cnt = 10;
3461 might_sleep();
3462 while (*stats_comp != DMAE_COMP_VAL) {
3463 if (!cnt) {
3464 BNX2X_ERR("timeout waiting for stats finished\n");
3465 break;
3466 }
3467 cnt--;
3468 msleep(1);
3469 }
3470 return 1;
3471 }
3473 /*
3474 * Statistics service functions
3475 */
3477 static void bnx2x_stats_pmf_update(struct bnx2x *bp)
3479 struct dmae_command *dmae;
3480 u32 opcode;
3481 int loader_idx = PMF_DMAE_C(bp);
3482 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3485 if (!IS_E1HMF(bp) || !bp->port.pmf || !bp->port.port_stx) {
3486 BNX2X_ERR("BUG!\n");
3490 bp->executer_idx = 0;
3492 opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3494 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3495 #ifdef __BIG_ENDIAN
3496 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3497 #else
3498 DMAE_CMD_ENDIANITY_DW_SWAP |
3499 #endif
3500 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3501 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3503 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3504 dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
3505 dmae->src_addr_lo = bp->port.port_stx >> 2;
3506 dmae->src_addr_hi = 0;
3507 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3508 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3509 dmae->len = DMAE_LEN32_RD_MAX;
3510 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3511 dmae->comp_addr_hi = 0;
3514 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3515 dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
3516 dmae->src_addr_lo = (bp->port.port_stx >> 2) + DMAE_LEN32_RD_MAX;
3517 dmae->src_addr_hi = 0;
3518 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats) +
3519 DMAE_LEN32_RD_MAX * 4);
3520 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats) +
3521 DMAE_LEN32_RD_MAX * 4);
3522 dmae->len = (sizeof(struct host_port_stats) >> 2) - DMAE_LEN32_RD_MAX;
3523 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3524 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3525 dmae->comp_val = DMAE_COMP_VAL;
3528 bnx2x_hw_stats_post(bp);
3529 bnx2x_stats_comp(bp);
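/*
 * Editor's sketch (not driver code): why the port-stats read above is
 * split in two. A single DMAE transfer is capped at DMAE_LEN32_RD_MAX
 * dwords, so a larger block is fetched as a full-size chunk plus a
 * remainder, each with its own source/destination offsets; only the last
 * chunk carries the completion value the caller then polls for.
 */
static int ex_num_chunks(int len_dw, int max_dw)
{
	return (len_dw + max_dw - 1) / max_dw;	/* ceiling division */
}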
3532 static void bnx2x_port_stats_init(struct bnx2x *bp)
3534 struct dmae_command *dmae;
3535 int port = BP_PORT(bp);
3536 int vn = BP_E1HVN(bp);
3537 u32 opcode;
3538 int loader_idx = PMF_DMAE_C(bp);
3539 u32 mac_addr;
3540 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3543 if (!bp->link_vars.link_up || !bp->port.pmf) {
3544 BNX2X_ERR("BUG!\n");
3548 bp->executer_idx = 0;
3551 opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3552 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3553 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3554 #ifdef __BIG_ENDIAN
3555 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3556 #else
3557 DMAE_CMD_ENDIANITY_DW_SWAP |
3558 #endif
3559 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3560 (vn << DMAE_CMD_E1HVN_SHIFT));
3562 if (bp->port.port_stx) {
3564 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3565 dmae->opcode = opcode;
3566 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3567 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3568 dmae->dst_addr_lo = bp->port.port_stx >> 2;
3569 dmae->dst_addr_hi = 0;
3570 dmae->len = sizeof(struct host_port_stats) >> 2;
3571 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3572 dmae->comp_addr_hi = 0;
3573 dmae->comp_val = 1;
3574 }
3576 if (bp->func_stx) {
3578 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3579 dmae->opcode = opcode;
3580 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
3581 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
3582 dmae->dst_addr_lo = bp->func_stx >> 2;
3583 dmae->dst_addr_hi = 0;
3584 dmae->len = sizeof(struct host_func_stats) >> 2;
3585 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3586 dmae->comp_addr_hi = 0;
3587 dmae->comp_val = 1;
3588 }
3591 opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3592 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3593 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3594 #ifdef __BIG_ENDIAN
3595 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3596 #else
3597 DMAE_CMD_ENDIANITY_DW_SWAP |
3598 #endif
3599 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3600 (vn << DMAE_CMD_E1HVN_SHIFT));
3602 if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
3604 mac_addr = (port ? NIG_REG_INGRESS_BMAC1_MEM :
3605 NIG_REG_INGRESS_BMAC0_MEM);
3607 /* BIGMAC_REGISTER_TX_STAT_GTPKT ..
3608 BIGMAC_REGISTER_TX_STAT_GTBYT */
3609 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3610 dmae->opcode = opcode;
3611 dmae->src_addr_lo = (mac_addr +
3612 BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
3613 dmae->src_addr_hi = 0;
3614 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
3615 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
3616 dmae->len = (8 + BIGMAC_REGISTER_TX_STAT_GTBYT -
3617 BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
3618 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3619 dmae->comp_addr_hi = 0;
3622 /* BIGMAC_REGISTER_RX_STAT_GR64 ..
3623 BIGMAC_REGISTER_RX_STAT_GRIPJ */
3624 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3625 dmae->opcode = opcode;
3626 dmae->src_addr_lo = (mac_addr +
3627 BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
3628 dmae->src_addr_hi = 0;
3629 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
3630 offsetof(struct bmac_stats, rx_stat_gr64_lo));
3631 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
3632 offsetof(struct bmac_stats, rx_stat_gr64_lo));
3633 dmae->len = (8 + BIGMAC_REGISTER_RX_STAT_GRIPJ -
3634 BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
3635 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3636 dmae->comp_addr_hi = 0;
3639 } else if (bp->link_vars.mac_type == MAC_TYPE_EMAC) {
3641 mac_addr = (port ? GRCBASE_EMAC1 : GRCBASE_EMAC0);
3643 /* EMAC_REG_EMAC_RX_STAT_AC (EMAC_REG_EMAC_RX_STAT_AC_COUNT)*/
3644 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3645 dmae->opcode = opcode;
3646 dmae->src_addr_lo = (mac_addr +
3647 EMAC_REG_EMAC_RX_STAT_AC) >> 2;
3648 dmae->src_addr_hi = 0;
3649 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
3650 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
3651 dmae->len = EMAC_REG_EMAC_RX_STAT_AC_COUNT;
3652 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3653 dmae->comp_addr_hi = 0;
3656 /* EMAC_REG_EMAC_RX_STAT_AC_28 */
3657 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3658 dmae->opcode = opcode;
3659 dmae->src_addr_lo = (mac_addr +
3660 EMAC_REG_EMAC_RX_STAT_AC_28) >> 2;
3661 dmae->src_addr_hi = 0;
3662 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
3663 offsetof(struct emac_stats, rx_stat_falsecarriererrors));
3664 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
3665 offsetof(struct emac_stats, rx_stat_falsecarriererrors));
3666 dmae->len = 1;
3667 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3668 dmae->comp_addr_hi = 0;
3671 /* EMAC_REG_EMAC_TX_STAT_AC (EMAC_REG_EMAC_TX_STAT_AC_COUNT)*/
3672 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3673 dmae->opcode = opcode;
3674 dmae->src_addr_lo = (mac_addr +
3675 EMAC_REG_EMAC_TX_STAT_AC) >> 2;
3676 dmae->src_addr_hi = 0;
3677 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
3678 offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
3679 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
3680 offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
3681 dmae->len = EMAC_REG_EMAC_TX_STAT_AC_COUNT;
3682 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3683 dmae->comp_addr_hi = 0;
3688 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3689 dmae->opcode = opcode;
3690 dmae->src_addr_lo = (port ? NIG_REG_STAT1_BRB_DISCARD :
3691 NIG_REG_STAT0_BRB_DISCARD) >> 2;
3692 dmae->src_addr_hi = 0;
3693 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats));
3694 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats));
3695 dmae->len = (sizeof(struct nig_stats) - 4*sizeof(u32)) >> 2;
3696 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3697 dmae->comp_addr_hi = 0;
3700 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3701 dmae->opcode = opcode;
3702 dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT0 :
3703 NIG_REG_STAT0_EGRESS_MAC_PKT0) >> 2;
3704 dmae->src_addr_hi = 0;
3705 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
3706 offsetof(struct nig_stats, egress_mac_pkt0_lo));
3707 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
3708 offsetof(struct nig_stats, egress_mac_pkt0_lo));
3709 dmae->len = (2*sizeof(u32)) >> 2;
3710 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3711 dmae->comp_addr_hi = 0;
3714 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3715 dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3716 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
3717 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3718 #ifdef __BIG_ENDIAN
3719 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3720 #else
3721 DMAE_CMD_ENDIANITY_DW_SWAP |
3722 #endif
3723 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3724 (vn << DMAE_CMD_E1HVN_SHIFT));
3725 dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT1 :
3726 NIG_REG_STAT0_EGRESS_MAC_PKT1) >> 2;
3727 dmae->src_addr_hi = 0;
3728 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
3729 offsetof(struct nig_stats, egress_mac_pkt1_lo));
3730 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
3731 offsetof(struct nig_stats, egress_mac_pkt1_lo));
3732 dmae->len = (2*sizeof(u32)) >> 2;
3733 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3734 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3735 dmae->comp_val = DMAE_COMP_VAL;
3740 static void bnx2x_func_stats_init(struct bnx2x *bp)
3742 struct dmae_command *dmae = &bp->stats_dmae;
3743 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3746 if (!bp->func_stx) {
3747 BNX2X_ERR("BUG!\n");
3751 bp->executer_idx = 0;
3752 memset(dmae, 0, sizeof(struct dmae_command));
3754 dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3755 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
3756 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3757 #ifdef __BIG_ENDIAN
3758 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3759 #else
3760 DMAE_CMD_ENDIANITY_DW_SWAP |
3761 #endif
3762 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3763 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3764 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
3765 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
3766 dmae->dst_addr_lo = bp->func_stx >> 2;
3767 dmae->dst_addr_hi = 0;
3768 dmae->len = sizeof(struct host_func_stats) >> 2;
3769 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3770 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3771 dmae->comp_val = DMAE_COMP_VAL;
3776 static void bnx2x_stats_start(struct bnx2x *bp)
3778 if (bp->port.pmf)
3779 bnx2x_port_stats_init(bp);
3781 else if (bp->func_stx)
3782 bnx2x_func_stats_init(bp);
3784 bnx2x_hw_stats_post(bp);
3785 bnx2x_storm_stats_post(bp);
3788 static void bnx2x_stats_pmf_start(struct bnx2x *bp)
3790 bnx2x_stats_comp(bp);
3791 bnx2x_stats_pmf_update(bp);
3792 bnx2x_stats_start(bp);
3795 static void bnx2x_stats_restart(struct bnx2x *bp)
3797 bnx2x_stats_comp(bp);
3798 bnx2x_stats_start(bp);
3801 static void bnx2x_bmac_stats_update(struct bnx2x *bp)
3803 struct bmac_stats *new = bnx2x_sp(bp, mac_stats.bmac_stats);
3804 struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3805 struct bnx2x_eth_stats *estats = &bp->eth_stats;
3811 UPDATE_STAT64(rx_stat_grerb, rx_stat_ifhcinbadoctets);
3812 UPDATE_STAT64(rx_stat_grfcs, rx_stat_dot3statsfcserrors);
3813 UPDATE_STAT64(rx_stat_grund, rx_stat_etherstatsundersizepkts);
3814 UPDATE_STAT64(rx_stat_grovr, rx_stat_dot3statsframestoolong);
3815 UPDATE_STAT64(rx_stat_grfrg, rx_stat_etherstatsfragments);
3816 UPDATE_STAT64(rx_stat_grjbr, rx_stat_etherstatsjabbers);
3817 UPDATE_STAT64(rx_stat_grxcf, rx_stat_maccontrolframesreceived);
3818 UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffstateentered);
3819 UPDATE_STAT64(rx_stat_grxpf, rx_stat_bmac_xpf);
3820 UPDATE_STAT64(tx_stat_gtxpf, tx_stat_outxoffsent);
3821 UPDATE_STAT64(tx_stat_gtxpf, tx_stat_flowcontroldone);
3822 UPDATE_STAT64(tx_stat_gt64, tx_stat_etherstatspkts64octets);
3823 UPDATE_STAT64(tx_stat_gt127,
3824 tx_stat_etherstatspkts65octetsto127octets);
3825 UPDATE_STAT64(tx_stat_gt255,
3826 tx_stat_etherstatspkts128octetsto255octets);
3827 UPDATE_STAT64(tx_stat_gt511,
3828 tx_stat_etherstatspkts256octetsto511octets);
3829 UPDATE_STAT64(tx_stat_gt1023,
3830 tx_stat_etherstatspkts512octetsto1023octets);
3831 UPDATE_STAT64(tx_stat_gt1518,
3832 tx_stat_etherstatspkts1024octetsto1522octets);
3833 UPDATE_STAT64(tx_stat_gt2047, tx_stat_bmac_2047);
3834 UPDATE_STAT64(tx_stat_gt4095, tx_stat_bmac_4095);
3835 UPDATE_STAT64(tx_stat_gt9216, tx_stat_bmac_9216);
3836 UPDATE_STAT64(tx_stat_gt16383, tx_stat_bmac_16383);
3837 UPDATE_STAT64(tx_stat_gterr,
3838 tx_stat_dot3statsinternalmactransmiterrors);
3839 UPDATE_STAT64(tx_stat_gtufl, tx_stat_bmac_ufl);
3841 estats->pause_frames_received_hi =
3842 pstats->mac_stx[1].rx_stat_bmac_xpf_hi;
3843 estats->pause_frames_received_lo =
3844 pstats->mac_stx[1].rx_stat_bmac_xpf_lo;
3846 estats->pause_frames_sent_hi =
3847 pstats->mac_stx[1].tx_stat_outxoffsent_hi;
3848 estats->pause_frames_sent_lo =
3849 pstats->mac_stx[1].tx_stat_outxoffsent_lo;
3852 static void bnx2x_emac_stats_update(struct bnx2x *bp)
3854 struct emac_stats *new = bnx2x_sp(bp, mac_stats.emac_stats);
3855 struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3856 struct bnx2x_eth_stats *estats = &bp->eth_stats;
3858 UPDATE_EXTEND_STAT(rx_stat_ifhcinbadoctets);
3859 UPDATE_EXTEND_STAT(tx_stat_ifhcoutbadoctets);
3860 UPDATE_EXTEND_STAT(rx_stat_dot3statsfcserrors);
3861 UPDATE_EXTEND_STAT(rx_stat_dot3statsalignmenterrors);
3862 UPDATE_EXTEND_STAT(rx_stat_dot3statscarriersenseerrors);
3863 UPDATE_EXTEND_STAT(rx_stat_falsecarriererrors);
3864 UPDATE_EXTEND_STAT(rx_stat_etherstatsundersizepkts);
3865 UPDATE_EXTEND_STAT(rx_stat_dot3statsframestoolong);
3866 UPDATE_EXTEND_STAT(rx_stat_etherstatsfragments);
3867 UPDATE_EXTEND_STAT(rx_stat_etherstatsjabbers);
3868 UPDATE_EXTEND_STAT(rx_stat_maccontrolframesreceived);
3869 UPDATE_EXTEND_STAT(rx_stat_xoffstateentered);
3870 UPDATE_EXTEND_STAT(rx_stat_xonpauseframesreceived);
3871 UPDATE_EXTEND_STAT(rx_stat_xoffpauseframesreceived);
3872 UPDATE_EXTEND_STAT(tx_stat_outxonsent);
3873 UPDATE_EXTEND_STAT(tx_stat_outxoffsent);
3874 UPDATE_EXTEND_STAT(tx_stat_flowcontroldone);
3875 UPDATE_EXTEND_STAT(tx_stat_etherstatscollisions);
3876 UPDATE_EXTEND_STAT(tx_stat_dot3statssinglecollisionframes);
3877 UPDATE_EXTEND_STAT(tx_stat_dot3statsmultiplecollisionframes);
3878 UPDATE_EXTEND_STAT(tx_stat_dot3statsdeferredtransmissions);
3879 UPDATE_EXTEND_STAT(tx_stat_dot3statsexcessivecollisions);
3880 UPDATE_EXTEND_STAT(tx_stat_dot3statslatecollisions);
3881 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts64octets);
3882 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts65octetsto127octets);
3883 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts128octetsto255octets);
3884 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts256octetsto511octets);
3885 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts512octetsto1023octets);
3886 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts1024octetsto1522octets);
3887 UPDATE_EXTEND_STAT(tx_stat_etherstatspktsover1522octets);
3888 UPDATE_EXTEND_STAT(tx_stat_dot3statsinternalmactransmiterrors);
3890 estats->pause_frames_received_hi =
3891 pstats->mac_stx[1].rx_stat_xonpauseframesreceived_hi;
3892 estats->pause_frames_received_lo =
3893 pstats->mac_stx[1].rx_stat_xonpauseframesreceived_lo;
3894 ADD_64(estats->pause_frames_received_hi,
3895 pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_hi,
3896 estats->pause_frames_received_lo,
3897 pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_lo);
3899 estats->pause_frames_sent_hi =
3900 pstats->mac_stx[1].tx_stat_outxonsent_hi;
3901 estats->pause_frames_sent_lo =
3902 pstats->mac_stx[1].tx_stat_outxonsent_lo;
3903 ADD_64(estats->pause_frames_sent_hi,
3904 pstats->mac_stx[1].tx_stat_outxoffsent_hi,
3905 estats->pause_frames_sent_lo,
3906 pstats->mac_stx[1].tx_stat_outxoffsent_lo);
3909 static int bnx2x_hw_stats_update(struct bnx2x *bp)
3911 struct nig_stats *new = bnx2x_sp(bp, nig_stats);
3912 struct nig_stats *old = &(bp->port.old_nig_stats);
3913 struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3914 struct bnx2x_eth_stats *estats = &bp->eth_stats;
3921 if (bp->link_vars.mac_type == MAC_TYPE_BMAC)
3922 bnx2x_bmac_stats_update(bp);
3924 else if (bp->link_vars.mac_type == MAC_TYPE_EMAC)
3925 bnx2x_emac_stats_update(bp);
3927 else { /* unreached */
3928 BNX2X_ERR("stats updated by DMAE but no MAC active\n");
3932 ADD_EXTEND_64(pstats->brb_drop_hi, pstats->brb_drop_lo,
3933 new->brb_discard - old->brb_discard);
3934 ADD_EXTEND_64(estats->brb_truncate_hi, estats->brb_truncate_lo,
3935 new->brb_truncate - old->brb_truncate);
3937 UPDATE_STAT64_NIG(egress_mac_pkt0,
3938 etherstatspkts1024octetsto1522octets);
3939 UPDATE_STAT64_NIG(egress_mac_pkt1, etherstatspktsover1522octets);
3941 memcpy(old, new, sizeof(struct nig_stats));
3943 memcpy(&(estats->rx_stat_ifhcinbadoctets_hi), &(pstats->mac_stx[1]),
3944 sizeof(struct mac_stx));
3945 estats->brb_drop_hi = pstats->brb_drop_hi;
3946 estats->brb_drop_lo = pstats->brb_drop_lo;
3948 pstats->host_port_stats_start = ++pstats->host_port_stats_end;
3950 nig_timer_max = SHMEM_RD(bp, port_mb[BP_PORT(bp)].stat_nig_timer);
3951 if (nig_timer_max != estats->nig_timer_max) {
3952 estats->nig_timer_max = nig_timer_max;
3953 BNX2X_ERR("NIG timer max (%u)\n", estats->nig_timer_max);
3959 static int bnx2x_storm_stats_update(struct bnx2x *bp)
3961 struct eth_stats_query *stats = bnx2x_sp(bp, fw_stats);
3962 struct tstorm_per_port_stats *tport =
3963 &stats->tstorm_common.port_statistics;
3964 struct host_func_stats *fstats = bnx2x_sp(bp, func_stats);
3965 struct bnx2x_eth_stats *estats = &bp->eth_stats;
3968 memcpy(&(fstats->total_bytes_received_hi),
3969 &(bnx2x_sp(bp, func_stats_base)->total_bytes_received_hi),
3970 sizeof(struct host_func_stats) - 2*sizeof(u32));
3971 estats->error_bytes_received_hi = 0;
3972 estats->error_bytes_received_lo = 0;
3973 estats->etherstatsoverrsizepkts_hi = 0;
3974 estats->etherstatsoverrsizepkts_lo = 0;
3975 estats->no_buff_discard_hi = 0;
3976 estats->no_buff_discard_lo = 0;
3978 for_each_rx_queue(bp, i) {
3979 struct bnx2x_fastpath *fp = &bp->fp[i];
3980 int cl_id = fp->cl_id;
3981 struct tstorm_per_client_stats *tclient =
3982 &stats->tstorm_common.client_statistics[cl_id];
3983 struct tstorm_per_client_stats *old_tclient = &fp->old_tclient;
3984 struct ustorm_per_client_stats *uclient =
3985 &stats->ustorm_common.client_statistics[cl_id];
3986 struct ustorm_per_client_stats *old_uclient = &fp->old_uclient;
3987 struct xstorm_per_client_stats *xclient =
3988 &stats->xstorm_common.client_statistics[cl_id];
3989 struct xstorm_per_client_stats *old_xclient = &fp->old_xclient;
3990 struct bnx2x_eth_q_stats *qstats = &fp->eth_q_stats;
3993 /* are storm stats valid? */
3994 if ((u16)(le16_to_cpu(xclient->stats_counter) + 1) !=
3995 bp->stats_counter) {
3996 DP(BNX2X_MSG_STATS, "[%d] stats not updated by xstorm"
3997 " xstorm counter (%d) != stats_counter (%d)\n",
3998 i, xclient->stats_counter, bp->stats_counter);
3999 return -1;
4000 }
4001 if ((u16)(le16_to_cpu(tclient->stats_counter) + 1) !=
4002 bp->stats_counter) {
4003 DP(BNX2X_MSG_STATS, "[%d] stats not updated by tstorm"
4004 " tstorm counter (%d) != stats_counter (%d)\n",
4005 i, tclient->stats_counter, bp->stats_counter);
4006 return -1;
4007 }
4008 if ((u16)(le16_to_cpu(uclient->stats_counter) + 1) !=
4009 bp->stats_counter) {
4010 DP(BNX2X_MSG_STATS, "[%d] stats not updated by ustorm"
4011 " ustorm counter (%d) != stats_counter (%d)\n",
4012 i, uclient->stats_counter, bp->stats_counter);
4013 return -1;
4014 }
4016 qstats->total_bytes_received_hi =
4017 le32_to_cpu(tclient->rcv_broadcast_bytes.hi);
4018 qstats->total_bytes_received_lo =
4019 le32_to_cpu(tclient->rcv_broadcast_bytes.lo);
4021 ADD_64(qstats->total_bytes_received_hi,
4022 le32_to_cpu(tclient->rcv_multicast_bytes.hi),
4023 qstats->total_bytes_received_lo,
4024 le32_to_cpu(tclient->rcv_multicast_bytes.lo));
4026 ADD_64(qstats->total_bytes_received_hi,
4027 le32_to_cpu(tclient->rcv_unicast_bytes.hi),
4028 qstats->total_bytes_received_lo,
4029 le32_to_cpu(tclient->rcv_unicast_bytes.lo));
4031 qstats->valid_bytes_received_hi =
4032 qstats->total_bytes_received_hi;
4033 qstats->valid_bytes_received_lo =
4034 qstats->total_bytes_received_lo;
4036 qstats->error_bytes_received_hi =
4037 le32_to_cpu(tclient->rcv_error_bytes.hi);
4038 qstats->error_bytes_received_lo =
4039 le32_to_cpu(tclient->rcv_error_bytes.lo);
4041 ADD_64(qstats->total_bytes_received_hi,
4042 qstats->error_bytes_received_hi,
4043 qstats->total_bytes_received_lo,
4044 qstats->error_bytes_received_lo);
4046 UPDATE_EXTEND_TSTAT(rcv_unicast_pkts,
4047 total_unicast_packets_received);
4048 UPDATE_EXTEND_TSTAT(rcv_multicast_pkts,
4049 total_multicast_packets_received);
4050 UPDATE_EXTEND_TSTAT(rcv_broadcast_pkts,
4051 total_broadcast_packets_received);
4052 UPDATE_EXTEND_TSTAT(packets_too_big_discard,
4053 etherstatsoverrsizepkts);
4054 UPDATE_EXTEND_TSTAT(no_buff_discard, no_buff_discard);
4056 SUB_EXTEND_USTAT(ucast_no_buff_pkts,
4057 total_unicast_packets_received);
4058 SUB_EXTEND_USTAT(mcast_no_buff_pkts,
4059 total_multicast_packets_received);
4060 SUB_EXTEND_USTAT(bcast_no_buff_pkts,
4061 total_broadcast_packets_received);
4062 UPDATE_EXTEND_USTAT(ucast_no_buff_pkts, no_buff_discard);
4063 UPDATE_EXTEND_USTAT(mcast_no_buff_pkts, no_buff_discard);
4064 UPDATE_EXTEND_USTAT(bcast_no_buff_pkts, no_buff_discard);
4066 qstats->total_bytes_transmitted_hi =
4067 le32_to_cpu(xclient->unicast_bytes_sent.hi);
4068 qstats->total_bytes_transmitted_lo =
4069 le32_to_cpu(xclient->unicast_bytes_sent.lo);
4071 ADD_64(qstats->total_bytes_transmitted_hi,
4072 le32_to_cpu(xclient->multicast_bytes_sent.hi),
4073 qstats->total_bytes_transmitted_lo,
4074 le32_to_cpu(xclient->multicast_bytes_sent.lo));
4076 ADD_64(qstats->total_bytes_transmitted_hi,
4077 le32_to_cpu(xclient->broadcast_bytes_sent.hi),
4078 qstats->total_bytes_transmitted_lo,
4079 le32_to_cpu(xclient->broadcast_bytes_sent.lo));
4081 UPDATE_EXTEND_XSTAT(unicast_pkts_sent,
4082 total_unicast_packets_transmitted);
4083 UPDATE_EXTEND_XSTAT(multicast_pkts_sent,
4084 total_multicast_packets_transmitted);
4085 UPDATE_EXTEND_XSTAT(broadcast_pkts_sent,
4086 total_broadcast_packets_transmitted);
4088 old_tclient->checksum_discard = tclient->checksum_discard;
4089 old_tclient->ttl0_discard = tclient->ttl0_discard;
4091 ADD_64(fstats->total_bytes_received_hi,
4092 qstats->total_bytes_received_hi,
4093 fstats->total_bytes_received_lo,
4094 qstats->total_bytes_received_lo);
4095 ADD_64(fstats->total_bytes_transmitted_hi,
4096 qstats->total_bytes_transmitted_hi,
4097 fstats->total_bytes_transmitted_lo,
4098 qstats->total_bytes_transmitted_lo);
4099 ADD_64(fstats->total_unicast_packets_received_hi,
4100 qstats->total_unicast_packets_received_hi,
4101 fstats->total_unicast_packets_received_lo,
4102 qstats->total_unicast_packets_received_lo);
4103 ADD_64(fstats->total_multicast_packets_received_hi,
4104 qstats->total_multicast_packets_received_hi,
4105 fstats->total_multicast_packets_received_lo,
4106 qstats->total_multicast_packets_received_lo);
4107 ADD_64(fstats->total_broadcast_packets_received_hi,
4108 qstats->total_broadcast_packets_received_hi,
4109 fstats->total_broadcast_packets_received_lo,
4110 qstats->total_broadcast_packets_received_lo);
4111 ADD_64(fstats->total_unicast_packets_transmitted_hi,
4112 qstats->total_unicast_packets_transmitted_hi,
4113 fstats->total_unicast_packets_transmitted_lo,
4114 qstats->total_unicast_packets_transmitted_lo);
4115 ADD_64(fstats->total_multicast_packets_transmitted_hi,
4116 qstats->total_multicast_packets_transmitted_hi,
4117 fstats->total_multicast_packets_transmitted_lo,
4118 qstats->total_multicast_packets_transmitted_lo);
4119 ADD_64(fstats->total_broadcast_packets_transmitted_hi,
4120 qstats->total_broadcast_packets_transmitted_hi,
4121 fstats->total_broadcast_packets_transmitted_lo,
4122 qstats->total_broadcast_packets_transmitted_lo);
4123 ADD_64(fstats->valid_bytes_received_hi,
4124 qstats->valid_bytes_received_hi,
4125 fstats->valid_bytes_received_lo,
4126 qstats->valid_bytes_received_lo);
4128 ADD_64(estats->error_bytes_received_hi,
4129 qstats->error_bytes_received_hi,
4130 estats->error_bytes_received_lo,
4131 qstats->error_bytes_received_lo);
4132 ADD_64(estats->etherstatsoverrsizepkts_hi,
4133 qstats->etherstatsoverrsizepkts_hi,
4134 estats->etherstatsoverrsizepkts_lo,
4135 qstats->etherstatsoverrsizepkts_lo);
4136 ADD_64(estats->no_buff_discard_hi, qstats->no_buff_discard_hi,
4137 estats->no_buff_discard_lo, qstats->no_buff_discard_lo);
4140 ADD_64(fstats->total_bytes_received_hi,
4141 estats->rx_stat_ifhcinbadoctets_hi,
4142 fstats->total_bytes_received_lo,
4143 estats->rx_stat_ifhcinbadoctets_lo);
4145 memcpy(estats, &(fstats->total_bytes_received_hi),
4146 sizeof(struct host_func_stats) - 2*sizeof(u32));
4148 ADD_64(estats->etherstatsoverrsizepkts_hi,
4149 estats->rx_stat_dot3statsframestoolong_hi,
4150 estats->etherstatsoverrsizepkts_lo,
4151 estats->rx_stat_dot3statsframestoolong_lo);
4152 ADD_64(estats->error_bytes_received_hi,
4153 estats->rx_stat_ifhcinbadoctets_hi,
4154 estats->error_bytes_received_lo,
4155 estats->rx_stat_ifhcinbadoctets_lo);
4158 estats->mac_filter_discard =
4159 le32_to_cpu(tport->mac_filter_discard);
4160 estats->xxoverflow_discard =
4161 le32_to_cpu(tport->xxoverflow_discard);
4162 estats->brb_truncate_discard =
4163 le32_to_cpu(tport->brb_truncate_discard);
4164 estats->mac_discard = le32_to_cpu(tport->mac_discard);
4167 fstats->host_func_stats_start = ++fstats->host_func_stats_end;
4169 bp->stats_pending = 0;
4171 return 0;
4172 }
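/*
 * Editor's sketch (not driver code): the validity test used for each storm
 * above. The driver stamps every statistics ramrod with bp->stats_counter;
 * the storms echo the stamp they last serviced. A queue's numbers are fresh
 * only when the echoed stamp plus one equals the next stamp to be issued.
 */
static inline int ex_stats_fresh(u16 echoed, u16 next_stamp)
{
	return (u16)(echoed + 1) == next_stamp;
}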
4174 static void bnx2x_net_stats_update(struct bnx2x *bp)
4176 struct bnx2x_eth_stats *estats = &bp->eth_stats;
4177 struct net_device_stats *nstats = &bp->dev->stats;
4178 int i;
4180 nstats->rx_packets =
4181 bnx2x_hilo(&estats->total_unicast_packets_received_hi) +
4182 bnx2x_hilo(&estats->total_multicast_packets_received_hi) +
4183 bnx2x_hilo(&estats->total_broadcast_packets_received_hi);
4185 nstats->tx_packets =
4186 bnx2x_hilo(&estats->total_unicast_packets_transmitted_hi) +
4187 bnx2x_hilo(&estats->total_multicast_packets_transmitted_hi) +
4188 bnx2x_hilo(&estats->total_broadcast_packets_transmitted_hi);
4190 nstats->rx_bytes = bnx2x_hilo(&estats->total_bytes_received_hi);
4192 nstats->tx_bytes = bnx2x_hilo(&estats->total_bytes_transmitted_hi);
4194 nstats->rx_dropped = estats->mac_discard;
4195 for_each_rx_queue(bp, i)
4196 nstats->rx_dropped +=
4197 le32_to_cpu(bp->fp[i].old_tclient.checksum_discard);
4199 nstats->tx_dropped = 0;
4202 bnx2x_hilo(&estats->total_multicast_packets_received_hi);
4204 nstats->collisions =
4205 bnx2x_hilo(&estats->tx_stat_etherstatscollisions_hi);
4207 nstats->rx_length_errors =
4208 bnx2x_hilo(&estats->rx_stat_etherstatsundersizepkts_hi) +
4209 bnx2x_hilo(&estats->etherstatsoverrsizepkts_hi);
4210 nstats->rx_over_errors = bnx2x_hilo(&estats->brb_drop_hi) +
4211 bnx2x_hilo(&estats->brb_truncate_hi);
4212 nstats->rx_crc_errors =
4213 bnx2x_hilo(&estats->rx_stat_dot3statsfcserrors_hi);
4214 nstats->rx_frame_errors =
4215 bnx2x_hilo(&estats->rx_stat_dot3statsalignmenterrors_hi);
4216 nstats->rx_fifo_errors = bnx2x_hilo(&estats->no_buff_discard_hi);
4217 nstats->rx_missed_errors = estats->xxoverflow_discard;
4219 nstats->rx_errors = nstats->rx_length_errors +
4220 nstats->rx_over_errors +
4221 nstats->rx_crc_errors +
4222 nstats->rx_frame_errors +
4223 nstats->rx_fifo_errors +
4224 nstats->rx_missed_errors;
4226 nstats->tx_aborted_errors =
4227 bnx2x_hilo(&estats->tx_stat_dot3statslatecollisions_hi) +
4228 bnx2x_hilo(&estats->tx_stat_dot3statsexcessivecollisions_hi);
4229 nstats->tx_carrier_errors =
4230 bnx2x_hilo(&estats->rx_stat_dot3statscarriersenseerrors_hi);
4231 nstats->tx_fifo_errors = 0;
4232 nstats->tx_heartbeat_errors = 0;
4233 nstats->tx_window_errors = 0;
4235 nstats->tx_errors = nstats->tx_aborted_errors +
4236 nstats->tx_carrier_errors +
4237 bnx2x_hilo(&estats->tx_stat_dot3statsinternalmactransmiterrors_hi);
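/*
 * Fold the driver-maintained per-queue counters (Xoff events, error
 * discards, skb allocation failures, HW checksum errors) into the global
 * eth_stats.  The totals are zeroed and recomputed from scratch on every
 * pass rather than incremented, so no per-queue deltas need tracking.
 */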
4240 static void bnx2x_drv_stats_update(struct bnx2x *bp)
4242 struct bnx2x_eth_stats *estats = &bp->eth_stats;
4243 int i;
4245 estats->driver_xoff = 0;
4246 estats->rx_err_discard_pkt = 0;
4247 estats->rx_skb_alloc_failed = 0;
4248 estats->hw_csum_err = 0;
4249 for_each_rx_queue(bp, i) {
4250 struct bnx2x_eth_q_stats *qstats = &bp->fp[i].eth_q_stats;
4252 estats->driver_xoff += qstats->driver_xoff;
4253 estats->rx_err_discard_pkt += qstats->rx_err_discard_pkt;
4254 estats->rx_skb_alloc_failed += qstats->rx_skb_alloc_failed;
4255 estats->hw_csum_err += qstats->hw_csum_err;
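/*
 * Top-level UPDATE handler: bail out if the previously posted DMAE batch
 * has not completed, fold in the MAC/NIG hardware counters (PMF only)
 * and the per-storm firmware counters, refresh the netdev and driver
 * views, then re-post both requests for the next timer tick.
 */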
4259 static void bnx2x_stats_update(struct bnx2x *bp)
4261 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
4263 if (*stats_comp != DMAE_COMP_VAL)
4264 return;
4266 if (bp->port.pmf)
4267 bnx2x_hw_stats_update(bp);
4269 if (bnx2x_storm_stats_update(bp) && (bp->stats_pending++ == 3)) {
4270 BNX2X_ERR("storm stats not updated for 3 consecutive ticks\n");
4271 bnx2x_panic();
4272 return;
4275 bnx2x_net_stats_update(bp);
4276 bnx2x_drv_stats_update(bp);
4278 if (bp->msglevel & NETIF_MSG_TIMER) {
4279 struct bnx2x_fastpath *fp0_rx = bp->fp;
4280 struct bnx2x_fastpath *fp0_tx = &(bp->fp[bp->num_rx_queues]);
4281 struct tstorm_per_client_stats *old_tclient =
4282 &bp->fp->old_tclient;
4283 struct bnx2x_eth_q_stats *qstats = &bp->fp->eth_q_stats;
4284 struct bnx2x_eth_stats *estats = &bp->eth_stats;
4285 struct net_device_stats *nstats = &bp->dev->stats;
4288 printk(KERN_DEBUG "%s:\n", bp->dev->name);
4289 printk(KERN_DEBUG " tx avail (%4x) tx hc idx (%x)"
4290 " tx pkt (%lx)\n",
4291 bnx2x_tx_avail(fp0_tx),
4292 le16_to_cpu(*fp0_tx->tx_cons_sb), nstats->tx_packets);
4293 printk(KERN_DEBUG " rx usage (%4x) rx hc idx (%x)"
4294 " rx pkt (%lx)\n",
4295 (u16)(le16_to_cpu(*fp0_rx->rx_cons_sb) -
4296 fp0_rx->rx_comp_cons),
4297 le16_to_cpu(*fp0_rx->rx_cons_sb), nstats->rx_packets);
4298 printk(KERN_DEBUG " %s (Xoff events %u) brb drops %u "
4299 "brb truncate %u\n",
4300 (netif_queue_stopped(bp->dev) ? "Xoff" : "Xon"),
4301 qstats->driver_xoff,
4302 estats->brb_drop_lo, estats->brb_truncate_lo);
4303 printk(KERN_DEBUG "tstats: checksum_discard %u "
4304 "packets_too_big_discard %lu no_buff_discard %lu "
4305 "mac_discard %u mac_filter_discard %u "
4306 "xxoverflow_discard %u brb_truncate_discard %u "
4307 "ttl0_discard %u\n",
4308 le32_to_cpu(old_tclient->checksum_discard),
4309 bnx2x_hilo(&qstats->etherstatsoverrsizepkts_hi),
4310 bnx2x_hilo(&qstats->no_buff_discard_hi),
4311 estats->mac_discard, estats->mac_filter_discard,
4312 estats->xxoverflow_discard, estats->brb_truncate_discard,
4313 le32_to_cpu(old_tclient->ttl0_discard));
4315 for_each_queue(bp, i) {
4316 printk(KERN_DEBUG "[%d]: %lu\t%lu\t%lu\n", i,
4317 bnx2x_fp(bp, i, tx_pkt),
4318 bnx2x_fp(bp, i, rx_pkt),
4319 bnx2x_fp(bp, i, rx_calls));
4323 bnx2x_hw_stats_post(bp);
4324 bnx2x_storm_stats_post(bp);
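/*
 * The routines below drive the DMAE block with hand-built descriptors:
 * an opcode (direction, endianness swap, port/vn bits), a 64-bit PCI
 * address on one side and a GRC offset on the other, a length, and a
 * completion address/value the engine writes when the copy is done.
 * GRC offsets and lengths are in 32-bit dwords, hence the ">> 2" shifts.
 */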
4327 static void bnx2x_port_stats_stop(struct bnx2x *bp)
4329 struct dmae_command *dmae;
4330 u32 opcode;
4331 int loader_idx = PMF_DMAE_C(bp);
4332 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
4334 bp->executer_idx = 0;
4336 opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
4338 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
4340 DMAE_CMD_ENDIANITY_B_DW_SWAP |
4342 DMAE_CMD_ENDIANITY_DW_SWAP |
4344 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
4345 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
4347 if (bp->port.port_stx) {
4349 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4350 if (bp->func_stx)
4351 dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
4352 else
4353 dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
4354 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
4355 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
4356 dmae->dst_addr_lo = bp->port.port_stx >> 2;
4357 dmae->dst_addr_hi = 0;
4358 dmae->len = sizeof(struct host_port_stats) >> 2;
4359 if (bp->func_stx) {
4360 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
4361 dmae->comp_addr_hi = 0;
4362 dmae->comp_val = 1;
4363 } else {
4364 dmae->comp_addr_lo =
4365 U64_LO(bnx2x_sp_mapping(bp, stats_comp));
4366 dmae->comp_addr_hi =
4367 U64_HI(bnx2x_sp_mapping(bp, stats_comp));
4368 dmae->comp_val = DMAE_COMP_VAL;
4374 if (bp->func_stx) {
4376 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4377 dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
4378 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
4379 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
4380 dmae->dst_addr_lo = bp->func_stx >> 2;
4381 dmae->dst_addr_hi = 0;
4382 dmae->len = sizeof(struct host_func_stats) >> 2;
4383 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
4384 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
4385 dmae->comp_val = DMAE_COMP_VAL;
4391 static void bnx2x_stats_stop(struct bnx2x *bp)
4393 int update = 0;
4395 bnx2x_stats_comp(bp);
4397 if (bp->port.pmf)
4398 update = (bnx2x_hw_stats_update(bp) == 0);
4400 update |= (bnx2x_storm_stats_update(bp) == 0);
4402 if (update) {
4403 bnx2x_net_stats_update(bp);
4405 if (bp->port.port_stx)
4406 bnx2x_port_stats_stop(bp);
4408 bnx2x_hw_stats_post(bp);
4409 bnx2x_stats_comp(bp);
4413 static void bnx2x_stats_do_nothing(struct bnx2x *bp)
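/*
 * Statistics state machine: indexed by [current state][event], each cell
 * gives the action to run and the next state.  Callers only ever feed
 * events through bnx2x_stats_handle(), e.g. (illustrative):
 *
 *	bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);	on link up
 *	bnx2x_stats_handle(bp, STATS_EVENT_UPDATE);	from the timer
 *	bnx2x_stats_handle(bp, STATS_EVENT_STOP);	on ifdown
 */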
4417 static const struct {
4418 void (*action)(struct bnx2x *bp);
4419 enum bnx2x_stats_state next_state;
4420 } bnx2x_stats_stm[STATS_STATE_MAX][STATS_EVENT_MAX] = {
4423 /* DISABLED PMF */ {bnx2x_stats_pmf_update, STATS_STATE_DISABLED},
4424 /* LINK_UP */ {bnx2x_stats_start, STATS_STATE_ENABLED},
4425 /* UPDATE */ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED},
4426 /* STOP */ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED}
4429 /* ENABLED PMF */ {bnx2x_stats_pmf_start, STATS_STATE_ENABLED},
4430 /* LINK_UP */ {bnx2x_stats_restart, STATS_STATE_ENABLED},
4431 /* UPDATE */ {bnx2x_stats_update, STATS_STATE_ENABLED},
4432 /* STOP */ {bnx2x_stats_stop, STATS_STATE_DISABLED}
4436 static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event)
4438 enum bnx2x_stats_state state = bp->stats_state;
4440 bnx2x_stats_stm[state][event].action(bp);
4441 bp->stats_state = bnx2x_stats_stm[state][event].next_state;
4443 /* Make sure the state has been "changed" */
4444 smp_wmb();
4446 if ((event != STATS_EVENT_UPDATE) || (bp->msglevel & NETIF_MSG_TIMER))
4447 DP(BNX2X_MSG_STATS, "state %d -> event %d -> state %d\n",
4448 state, event, bp->stats_state);
4451 static void bnx2x_port_stats_base_init(struct bnx2x *bp)
4453 struct dmae_command *dmae;
4454 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
4457 if (!bp->port.pmf || !bp->port.port_stx) {
4458 BNX2X_ERR("BUG!\n");
4459 return;
4462 bp->executer_idx = 0;
4464 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4465 dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
4466 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
4467 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
4469 DMAE_CMD_ENDIANITY_B_DW_SWAP |
4471 DMAE_CMD_ENDIANITY_DW_SWAP |
4473 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
4474 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
4475 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
4476 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
4477 dmae->dst_addr_lo = bp->port.port_stx >> 2;
4478 dmae->dst_addr_hi = 0;
4479 dmae->len = sizeof(struct host_port_stats) >> 2;
4480 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
4481 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
4482 dmae->comp_val = DMAE_COMP_VAL;
4484 *stats_comp = 0;
4485 bnx2x_hw_stats_post(bp);
4486 bnx2x_stats_comp(bp);
4489 static void bnx2x_func_stats_base_init(struct bnx2x *bp)
4491 int vn, vn_max = IS_E1HMF(bp) ? E1HVN_MAX : E1VN_MAX;
4492 int port = BP_PORT(bp);
4493 int func;
4494 u32 func_stx;
4497 if (!bp->port.pmf || !bp->func_stx) {
4498 BNX2X_ERR("BUG!\n");
4499 return;
4502 /* save our func_stx */
4503 func_stx = bp->func_stx;
4505 for (vn = VN_0; vn < vn_max; vn++) {
4506 func = 2*vn + port;
4508 bp->func_stx = SHMEM_RD(bp, func_mb[func].fw_mb_param);
4509 bnx2x_func_stats_init(bp);
4510 bnx2x_hw_stats_post(bp);
4511 bnx2x_stats_comp(bp);
4514 /* restore our func_stx */
4515 bp->func_stx = func_stx;
4518 static void bnx2x_func_stats_base_update(struct bnx2x *bp)
4520 struct dmae_command *dmae = &bp->stats_dmae;
4521 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
4524 if (!bp->func_stx) {
4525 BNX2X_ERR("BUG!\n");
4526 return;
4529 bp->executer_idx = 0;
4530 memset(dmae, 0, sizeof(struct dmae_command));
4532 dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
4533 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
4534 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
4536 DMAE_CMD_ENDIANITY_B_DW_SWAP |
4538 DMAE_CMD_ENDIANITY_DW_SWAP |
4540 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
4541 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
4542 dmae->src_addr_lo = bp->func_stx >> 2;
4543 dmae->src_addr_hi = 0;
4544 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats_base));
4545 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats_base));
4546 dmae->len = sizeof(struct host_func_stats) >> 2;
4547 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
4548 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
4549 dmae->comp_val = DMAE_COMP_VAL;
4551 *stats_comp = 0;
4552 bnx2x_hw_stats_post(bp);
4553 bnx2x_stats_comp(bp);
4556 static void bnx2x_stats_init(struct bnx2x *bp)
4558 int port = BP_PORT(bp);
4559 int func = BP_FUNC(bp);
4560 int i;
4562 bp->stats_pending = 0;
4563 bp->executer_idx = 0;
4564 bp->stats_counter = 0;
4566 /* port and func stats for management */
4567 if (!BP_NOMCP(bp)) {
4568 bp->port.port_stx = SHMEM_RD(bp, port_mb[port].port_stx);
4569 bp->func_stx = SHMEM_RD(bp, func_mb[func].fw_mb_param);
4570 } else {
4572 bp->port.port_stx = 0;
4573 bp->func_stx = 0;
4575 DP(BNX2X_MSG_STATS, "port_stx 0x%x func_stx 0x%x\n",
4576 bp->port.port_stx, bp->func_stx);
4579 memset(&(bp->port.old_nig_stats), 0, sizeof(struct nig_stats));
4580 bp->port.old_nig_stats.brb_discard =
4581 REG_RD(bp, NIG_REG_STAT0_BRB_DISCARD + port*0x38);
4582 bp->port.old_nig_stats.brb_truncate =
4583 REG_RD(bp, NIG_REG_STAT0_BRB_TRUNCATE + port*0x38);
4584 REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT0 + port*0x50,
4585 &(bp->port.old_nig_stats.egress_mac_pkt0_lo), 2);
4586 REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT1 + port*0x50,
4587 &(bp->port.old_nig_stats.egress_mac_pkt1_lo), 2);
4589 /* function stats */
4590 for_each_queue(bp, i) {
4591 struct bnx2x_fastpath *fp = &bp->fp[i];
4593 memset(&fp->old_tclient, 0,
4594 sizeof(struct tstorm_per_client_stats));
4595 memset(&fp->old_uclient, 0,
4596 sizeof(struct ustorm_per_client_stats));
4597 memset(&fp->old_xclient, 0,
4598 sizeof(struct xstorm_per_client_stats));
4599 memset(&fp->eth_q_stats, 0, sizeof(struct bnx2x_eth_q_stats));
4602 memset(&bp->dev->stats, 0, sizeof(struct net_device_stats));
4603 memset(&bp->eth_stats, 0, sizeof(struct bnx2x_eth_stats));
4605 bp->stats_state = STATS_STATE_DISABLED;
4607 if (bp->port.pmf) {
4608 if (bp->port.port_stx)
4609 bnx2x_port_stats_base_init(bp);
4611 if (bp->func_stx)
4612 bnx2x_func_stats_base_init(bp);
4614 } else if (bp->func_stx)
4615 bnx2x_func_stats_base_update(bp);
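/*
 * Driver/MCP heartbeat: each tick the driver advances its pulse sequence
 * and writes it to shmem; management firmware echoes it back.  The two
 * may legally differ by at most 1 modulo the sequence mask; e.g. with a
 * 0x7fff-style mask, drv_pulse = 0x0000 and mcp_pulse = 0x7fff are still
 * in step because (0x7fff + 1) & 0x7fff == 0.
 */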
4618 static void bnx2x_timer(unsigned long data)
4620 struct bnx2x *bp = (struct bnx2x *) data;
4622 if (!netif_running(bp->dev))
4623 return;
4625 if (atomic_read(&bp->intr_sem) != 0)
4626 goto timer_restart;
4628 if (poll) {
4629 struct bnx2x_fastpath *fp = &bp->fp[0];
4630 int rc;
4632 bnx2x_tx_int(fp);
4633 rc = bnx2x_rx_int(fp, 1000);
4636 if (!BP_NOMCP(bp)) {
4637 int func = BP_FUNC(bp);
4641 ++bp->fw_drv_pulse_wr_seq;
4642 bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK;
4643 /* TBD - add SYSTEM_TIME */
4644 drv_pulse = bp->fw_drv_pulse_wr_seq;
4645 SHMEM_WR(bp, func_mb[func].drv_pulse_mb, drv_pulse);
4647 mcp_pulse = (SHMEM_RD(bp, func_mb[func].mcp_pulse_mb) &
4648 MCP_PULSE_SEQ_MASK);
4649 /* The delta between driver pulse and mcp response
4650 * should be 1 (before mcp response) or 0 (after mcp response)
4651 */
4652 if ((drv_pulse != mcp_pulse) &&
4653 (drv_pulse != ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK))) {
4654 /* someone lost a heartbeat... */
4655 BNX2X_ERR("drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
4656 drv_pulse, mcp_pulse);
4660 if ((bp->state == BNX2X_STATE_OPEN) ||
4661 (bp->state == BNX2X_STATE_DISABLED))
4662 bnx2x_stats_handle(bp, STATS_EVENT_UPDATE);
4664 timer_restart:
4665 mod_timer(&bp->timer, jiffies + bp->current_interval);
4668 /* end of Statistics */
4673 * nic init service functions
4676 static void bnx2x_zero_sb(struct bnx2x *bp, int sb_id)
4678 int port = BP_PORT(bp);
4681 bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
4682 CSTORM_SB_HOST_STATUS_BLOCK_U_OFFSET(port, sb_id), 0,
4683 CSTORM_SB_STATUS_BLOCK_U_SIZE / 4);
4684 bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
4685 CSTORM_SB_HOST_STATUS_BLOCK_C_OFFSET(port, sb_id), 0,
4686 CSTORM_SB_STATUS_BLOCK_C_SIZE / 4);
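/*
 * A per-queue host status block has two halves, both served by the
 * CSTORM: a USTORM-flavoured section carrying the Rx indices and a
 * CSTORM-flavoured section carrying the Tx indices.  For each half the
 * host DMA address is written into internal memory, the owning function
 * is recorded, and every index starts out "HC disabled" (value 1) until
 * bnx2x_update_coalesce() programs real timeouts.
 */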
4689 static void bnx2x_init_sb(struct bnx2x *bp, struct host_status_block *sb,
4690 dma_addr_t mapping, int sb_id)
4692 int port = BP_PORT(bp);
4693 int func = BP_FUNC(bp);
4698 section = ((u64)mapping) + offsetof(struct host_status_block,
4700 sb->u_status_block.status_block_id = sb_id;
4702 REG_WR(bp, BAR_CSTRORM_INTMEM +
4703 CSTORM_SB_HOST_SB_ADDR_U_OFFSET(port, sb_id), U64_LO(section));
4704 REG_WR(bp, BAR_CSTRORM_INTMEM +
4705 ((CSTORM_SB_HOST_SB_ADDR_U_OFFSET(port, sb_id)) + 4),
4707 REG_WR8(bp, BAR_CSTRORM_INTMEM + FP_USB_FUNC_OFF +
4708 CSTORM_SB_HOST_STATUS_BLOCK_U_OFFSET(port, sb_id), func);
4710 for (index = 0; index < HC_USTORM_SB_NUM_INDICES; index++)
4711 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4712 CSTORM_SB_HC_DISABLE_U_OFFSET(port, sb_id, index), 1);
4715 section = ((u64)mapping) + offsetof(struct host_status_block,
4717 sb->c_status_block.status_block_id = sb_id;
4719 REG_WR(bp, BAR_CSTRORM_INTMEM +
4720 CSTORM_SB_HOST_SB_ADDR_C_OFFSET(port, sb_id), U64_LO(section));
4721 REG_WR(bp, BAR_CSTRORM_INTMEM +
4722 ((CSTORM_SB_HOST_SB_ADDR_C_OFFSET(port, sb_id)) + 4),
4724 REG_WR8(bp, BAR_CSTRORM_INTMEM + FP_CSB_FUNC_OFF +
4725 CSTORM_SB_HOST_STATUS_BLOCK_C_OFFSET(port, sb_id), func);
4727 for (index = 0; index < HC_CSTORM_SB_NUM_INDICES; index++)
4728 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4729 CSTORM_SB_HC_DISABLE_C_OFFSET(port, sb_id, index), 1);
4731 bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
4734 static void bnx2x_zero_def_sb(struct bnx2x *bp)
4736 int func = BP_FUNC(bp);
4738 bnx2x_init_fill(bp, TSEM_REG_FAST_MEMORY +
4739 TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4740 sizeof(struct tstorm_def_status_block)/4);
4741 bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
4742 CSTORM_DEF_SB_HOST_STATUS_BLOCK_U_OFFSET(func), 0,
4743 sizeof(struct cstorm_def_status_block_u)/4);
4744 bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
4745 CSTORM_DEF_SB_HOST_STATUS_BLOCK_C_OFFSET(func), 0,
4746 sizeof(struct cstorm_def_status_block_c)/4);
4747 bnx2x_init_fill(bp, XSEM_REG_FAST_MEMORY +
4748 XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4749 sizeof(struct xstorm_def_status_block)/4);
4752 static void bnx2x_init_def_sb(struct bnx2x *bp,
4753 struct host_def_status_block *def_sb,
4754 dma_addr_t mapping, int sb_id)
4756 int port = BP_PORT(bp);
4757 int func = BP_FUNC(bp);
4758 int index, val, reg_offset;
4762 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4763 atten_status_block);
4764 def_sb->atten_status_block.status_block_id = sb_id;
4768 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
4769 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
4771 for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
4772 bp->attn_group[index].sig[0] = REG_RD(bp,
4773 reg_offset + 0x10*index);
4774 bp->attn_group[index].sig[1] = REG_RD(bp,
4775 reg_offset + 0x4 + 0x10*index);
4776 bp->attn_group[index].sig[2] = REG_RD(bp,
4777 reg_offset + 0x8 + 0x10*index);
4778 bp->attn_group[index].sig[3] = REG_RD(bp,
4779 reg_offset + 0xc + 0x10*index);
4782 reg_offset = (port ? HC_REG_ATTN_MSG1_ADDR_L :
4783 HC_REG_ATTN_MSG0_ADDR_L);
4785 REG_WR(bp, reg_offset, U64_LO(section));
4786 REG_WR(bp, reg_offset + 4, U64_HI(section));
4788 reg_offset = (port ? HC_REG_ATTN_NUM_P1 : HC_REG_ATTN_NUM_P0);
4790 val = REG_RD(bp, reg_offset);
4792 REG_WR(bp, reg_offset, val);
4795 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4796 u_def_status_block);
4797 def_sb->u_def_status_block.status_block_id = sb_id;
4799 REG_WR(bp, BAR_CSTRORM_INTMEM +
4800 CSTORM_DEF_SB_HOST_SB_ADDR_U_OFFSET(func), U64_LO(section));
4801 REG_WR(bp, BAR_CSTRORM_INTMEM +
4802 ((CSTORM_DEF_SB_HOST_SB_ADDR_U_OFFSET(func)) + 4),
4804 REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_USB_FUNC_OFF +
4805 CSTORM_DEF_SB_HOST_STATUS_BLOCK_U_OFFSET(func), func);
4807 for (index = 0; index < HC_USTORM_DEF_SB_NUM_INDICES; index++)
4808 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4809 CSTORM_DEF_SB_HC_DISABLE_U_OFFSET(func, index), 1);
4812 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4813 c_def_status_block);
4814 def_sb->c_def_status_block.status_block_id = sb_id;
4816 REG_WR(bp, BAR_CSTRORM_INTMEM +
4817 CSTORM_DEF_SB_HOST_SB_ADDR_C_OFFSET(func), U64_LO(section));
4818 REG_WR(bp, BAR_CSTRORM_INTMEM +
4819 ((CSTORM_DEF_SB_HOST_SB_ADDR_C_OFFSET(func)) + 4),
4821 REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_CSB_FUNC_OFF +
4822 CSTORM_DEF_SB_HOST_STATUS_BLOCK_C_OFFSET(func), func);
4824 for (index = 0; index < HC_CSTORM_DEF_SB_NUM_INDICES; index++)
4825 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4826 CSTORM_DEF_SB_HC_DISABLE_C_OFFSET(func, index), 1);
4829 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4830 t_def_status_block);
4831 def_sb->t_def_status_block.status_block_id = sb_id;
4833 REG_WR(bp, BAR_TSTRORM_INTMEM +
4834 TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
4835 REG_WR(bp, BAR_TSTRORM_INTMEM +
4836 ((TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
4838 REG_WR8(bp, BAR_TSTRORM_INTMEM + DEF_TSB_FUNC_OFF +
4839 TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4841 for (index = 0; index < HC_TSTORM_DEF_SB_NUM_INDICES; index++)
4842 REG_WR16(bp, BAR_TSTRORM_INTMEM +
4843 TSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
4846 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4847 x_def_status_block);
4848 def_sb->x_def_status_block.status_block_id = sb_id;
4850 REG_WR(bp, BAR_XSTRORM_INTMEM +
4851 XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
4852 REG_WR(bp, BAR_XSTRORM_INTMEM +
4853 ((XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
4855 REG_WR8(bp, BAR_XSTRORM_INTMEM + DEF_XSB_FUNC_OFF +
4856 XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4858 for (index = 0; index < HC_XSTORM_DEF_SB_NUM_INDICES; index++)
4859 REG_WR16(bp, BAR_XSTRORM_INTMEM +
4860 XSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
4862 bp->stats_pending = 0;
4863 bp->set_mac_pending = 0;
4865 bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
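/*
 * Program the host-coalescing timeouts.  The user-visible rx_ticks and
 * tx_ticks are in microseconds while the HC apparently counts in ~12us
 * units, hence the "/12"; a quotient of 0 leaves that index disabled.
 * E.g. rx_ticks = 48 -> 4 HC units, while rx_ticks < 12 turns interrupt
 * coalescing off for the Rx CQ index.
 */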
4868 static void bnx2x_update_coalesce(struct bnx2x *bp)
4870 int port = BP_PORT(bp);
4873 for_each_queue(bp, i) {
4874 int sb_id = bp->fp[i].sb_id;
4876 /* HC_INDEX_U_ETH_RX_CQ_CONS */
4877 REG_WR8(bp, BAR_CSTRORM_INTMEM +
4878 CSTORM_SB_HC_TIMEOUT_U_OFFSET(port, sb_id,
4879 U_SB_ETH_RX_CQ_INDEX),
4880 bp->rx_ticks/12);
4881 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4882 CSTORM_SB_HC_DISABLE_U_OFFSET(port, sb_id,
4883 U_SB_ETH_RX_CQ_INDEX),
4884 (bp->rx_ticks/12) ? 0 : 1);
4886 /* HC_INDEX_C_ETH_TX_CQ_CONS */
4887 REG_WR8(bp, BAR_CSTRORM_INTMEM +
4888 CSTORM_SB_HC_TIMEOUT_C_OFFSET(port, sb_id,
4889 C_SB_ETH_TX_CQ_INDEX),
4890 bp->tx_ticks/12);
4891 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4892 CSTORM_SB_HC_DISABLE_C_OFFSET(port, sb_id,
4893 C_SB_ETH_TX_CQ_INDEX),
4894 (bp->tx_ticks/12) ? 0 : 1);
4898 static inline void bnx2x_free_tpa_pool(struct bnx2x *bp,
4899 struct bnx2x_fastpath *fp, int last)
4903 for (i = 0; i < last; i++) {
4904 struct sw_rx_bd *rx_buf = &(fp->tpa_pool[i]);
4905 struct sk_buff *skb = rx_buf->skb;
4907 if (skb == NULL) {
4908 DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i);
4909 continue;
4912 if (fp->tpa_state[i] == BNX2X_TPA_START)
4913 pci_unmap_single(bp->pdev,
4914 pci_unmap_addr(rx_buf, mapping),
4915 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
4917 dev_kfree_skb(skb);
4918 rx_buf->skb = NULL;
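/*
 * Ring layout note for the init code below: every BD/SGE/CQE page
 * donates its trailing slot(s) as a "next page" pointer (index
 * COUNT * i - 2 on the BD and SGE rings, COUNT * i - 1 on the CQE ring),
 * which is also why producer arithmetic goes through NEXT_*_IDX()
 * instead of a plain increment.
 */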
4922 static void bnx2x_init_rx_rings(struct bnx2x *bp)
4924 int func = BP_FUNC(bp);
4925 int max_agg_queues = CHIP_IS_E1(bp) ? ETH_MAX_AGGREGATION_QUEUES_E1 :
4926 ETH_MAX_AGGREGATION_QUEUES_E1H;
4927 u16 ring_prod, cqe_ring_prod;
4930 bp->rx_buf_size = bp->dev->mtu + ETH_OVREHEAD + BNX2X_RX_ALIGN;
4931 DP(NETIF_MSG_IFUP,
4932 "mtu %d rx_buf_size %d\n", bp->dev->mtu, bp->rx_buf_size);
4934 if (bp->flags & TPA_ENABLE_FLAG) {
4936 for_each_rx_queue(bp, j) {
4937 struct bnx2x_fastpath *fp = &bp->fp[j];
4939 for (i = 0; i < max_agg_queues; i++) {
4940 fp->tpa_pool[i].skb =
4941 netdev_alloc_skb(bp->dev, bp->rx_buf_size);
4942 if (!fp->tpa_pool[i].skb) {
4943 BNX2X_ERR("Failed to allocate TPA "
4944 "skb pool for queue[%d] - "
4945 "disabling TPA on this "
4946 "queue!\n", j);
4947 bnx2x_free_tpa_pool(bp, fp, i);
4948 fp->disable_tpa = 1;
4949 break;
4951 pci_unmap_addr_set((struct sw_rx_bd *)
4952 &fp->tpa_pool[i],
4953 mapping, 0);
4954 fp->tpa_state[i] = BNX2X_TPA_STOP;
4959 for_each_rx_queue(bp, j) {
4960 struct bnx2x_fastpath *fp = &bp->fp[j];
4963 fp->rx_cons_sb = BNX2X_RX_SB_INDEX;
4964 fp->rx_bd_cons_sb = BNX2X_RX_SB_BD_INDEX;
4966 /* Mark queue as Rx */
4967 fp->is_rx_queue = 1;
4969 /* "next page" elements initialization */
4971 for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
4972 struct eth_rx_sge *sge;
4974 sge = &fp->rx_sge_ring[RX_SGE_CNT * i - 2];
4975 sge->addr_hi =
4976 cpu_to_le32(U64_HI(fp->rx_sge_mapping +
4977 BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
4978 sge->addr_lo =
4979 cpu_to_le32(U64_LO(fp->rx_sge_mapping +
4980 BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
4983 bnx2x_init_sge_ring_bit_mask(fp);
4986 for (i = 1; i <= NUM_RX_RINGS; i++) {
4987 struct eth_rx_bd *rx_bd;
4989 rx_bd = &fp->rx_desc_ring[RX_DESC_CNT * i - 2];
4990 rx_bd->addr_hi =
4991 cpu_to_le32(U64_HI(fp->rx_desc_mapping +
4992 BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
4993 rx_bd->addr_lo =
4994 cpu_to_le32(U64_LO(fp->rx_desc_mapping +
4995 BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
4999 for (i = 1; i <= NUM_RCQ_RINGS; i++) {
5000 struct eth_rx_cqe_next_page *nextpg;
5002 nextpg = (struct eth_rx_cqe_next_page *)
5003 &fp->rx_comp_ring[RCQ_DESC_CNT * i - 1];
5004 nextpg->addr_hi =
5005 cpu_to_le32(U64_HI(fp->rx_comp_mapping +
5006 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
5007 nextpg->addr_lo =
5008 cpu_to_le32(U64_LO(fp->rx_comp_mapping +
5009 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
5012 /* Allocate SGEs and initialize the ring elements */
5013 for (i = 0, ring_prod = 0;
5014 i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {
5016 if (bnx2x_alloc_rx_sge(bp, fp, ring_prod) < 0) {
5017 BNX2X_ERR("was only able to allocate "
5018 "%d rx sges\n", i);
5019 BNX2X_ERR("disabling TPA for queue[%d]\n", j);
5020 /* Cleanup already allocated elements */
5021 bnx2x_free_rx_sge_range(bp, fp, ring_prod);
5022 bnx2x_free_tpa_pool(bp, fp, max_agg_queues);
5023 fp->disable_tpa = 1;
5024 ring_prod = 0;
5025 break;
5027 ring_prod = NEXT_SGE_IDX(ring_prod);
5029 fp->rx_sge_prod = ring_prod;
5031 /* Allocate BDs and initialize BD ring */
5032 fp->rx_comp_cons = 0;
5033 cqe_ring_prod = ring_prod = 0;
5034 for (i = 0; i < bp->rx_ring_size; i++) {
5035 if (bnx2x_alloc_rx_skb(bp, fp, ring_prod) < 0) {
5036 BNX2X_ERR("was only able to allocate "
5037 "%d rx skbs on queue[%d]\n", i, j);
5038 fp->eth_q_stats.rx_skb_alloc_failed++;
5039 break;
5041 ring_prod = NEXT_RX_IDX(ring_prod);
5042 cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
5043 WARN_ON(ring_prod <= i);
5046 fp->rx_bd_prod = ring_prod;
5047 /* must not have more available CQEs than BDs */
5048 fp->rx_comp_prod = min((u16)(NUM_RCQ_RINGS*RCQ_DESC_CNT),
5049 cqe_ring_prod);
5050 fp->rx_pkt = fp->rx_calls = 0;
5053 * this will generate an interrupt (to the TSTORM)
5054 * must only be done after chip is initialized
5056 bnx2x_update_rx_prod(bp, fp, ring_prod, fp->rx_comp_prod,
5057 fp->rx_sge_prod);
5059 if (j != 0)
5060 continue;
5061 REG_WR(bp, BAR_USTRORM_INTMEM +
5062 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
5063 U64_LO(fp->rx_comp_mapping));
5064 REG_WR(bp, BAR_USTRORM_INTMEM +
5065 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
5066 U64_HI(fp->rx_comp_mapping));
5070 static void bnx2x_init_tx_ring(struct bnx2x *bp)
5074 for_each_tx_queue(bp, j) {
5075 struct bnx2x_fastpath *fp = &bp->fp[j];
5077 for (i = 1; i <= NUM_TX_RINGS; i++) {
5078 struct eth_tx_next_bd *tx_next_bd =
5079 &fp->tx_desc_ring[TX_DESC_CNT * i - 1].next_bd;
5081 tx_next_bd->addr_hi =
5082 cpu_to_le32(U64_HI(fp->tx_desc_mapping +
5083 BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
5084 tx_next_bd->addr_lo =
5085 cpu_to_le32(U64_LO(fp->tx_desc_mapping +
5086 BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
5089 fp->tx_db.data.header.header = DOORBELL_HDR_DB_TYPE;
5090 fp->tx_db.data.zero_fill1 = 0;
5091 fp->tx_db.data.prod = 0;
5093 fp->tx_pkt_prod = 0;
5094 fp->tx_pkt_cons = 0;
5095 fp->tx_bd_prod = 0;
5096 fp->tx_bd_cons = 0;
5097 fp->tx_cons_sb = BNX2X_TX_SB_INDEX;
5101 /* clean tx statistics */
5102 for_each_rx_queue(bp, i)
5103 bnx2x_fp(bp, i, tx_pkt) = 0;
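/*
 * Slow-path queue: a single page of descriptors through which ramrods
 * (control-plane commands) are posted.  spq_left acts as a credit
 * counter seeded with MAX_SPQ_PENDING, and both the page base and the
 * initial producer index are mirrored into XSTORM internal memory so
 * the firmware can follow the host producer.
 */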
5106 static void bnx2x_init_sp_ring(struct bnx2x *bp)
5108 int func = BP_FUNC(bp);
5110 spin_lock_init(&bp->spq_lock);
5112 bp->spq_left = MAX_SPQ_PENDING;
5113 bp->spq_prod_idx = 0;
5114 bp->dsb_sp_prod = BNX2X_SP_DSB_INDEX;
5115 bp->spq_prod_bd = bp->spq;
5116 bp->spq_last_bd = bp->spq_prod_bd + MAX_SP_DESC_CNT;
5118 REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func),
5119 U64_LO(bp->spq_mapping));
5121 XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func) + 4,
5122 U64_HI(bp->spq_mapping));
5124 REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PROD_OFFSET(func),
5128 static void bnx2x_init_context(struct bnx2x *bp)
5132 for_each_rx_queue(bp, i) {
5133 struct eth_context *context = bnx2x_sp(bp, context[i].eth);
5134 struct bnx2x_fastpath *fp = &bp->fp[i];
5135 u8 cl_id = fp->cl_id;
5137 context->ustorm_st_context.common.sb_index_numbers =
5138 BNX2X_RX_SB_INDEX_NUM;
5139 context->ustorm_st_context.common.clientId = cl_id;
5140 context->ustorm_st_context.common.status_block_id = fp->sb_id;
5141 context->ustorm_st_context.common.flags =
5142 (USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_MC_ALIGNMENT |
5143 USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_STATISTICS);
5144 context->ustorm_st_context.common.statistics_counter_id =
5145 cl_id;
5146 context->ustorm_st_context.common.mc_alignment_log_size =
5147 BNX2X_RX_ALIGN_SHIFT;
5148 context->ustorm_st_context.common.bd_buff_size =
5149 bp->rx_buf_size;
5150 context->ustorm_st_context.common.bd_page_base_hi =
5151 U64_HI(fp->rx_desc_mapping);
5152 context->ustorm_st_context.common.bd_page_base_lo =
5153 U64_LO(fp->rx_desc_mapping);
5154 if (!fp->disable_tpa) {
5155 context->ustorm_st_context.common.flags |=
5156 USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_TPA;
5157 context->ustorm_st_context.common.sge_buff_size =
5158 (u16)min((u32)SGE_PAGE_SIZE*PAGES_PER_SGE,
5159 (u32)0xffff);
5160 context->ustorm_st_context.common.sge_page_base_hi =
5161 U64_HI(fp->rx_sge_mapping);
5162 context->ustorm_st_context.common.sge_page_base_lo =
5163 U64_LO(fp->rx_sge_mapping);
5165 context->ustorm_st_context.common.max_sges_for_packet =
5166 SGE_PAGE_ALIGN(bp->dev->mtu) >> SGE_PAGE_SHIFT;
5167 context->ustorm_st_context.common.max_sges_for_packet =
5168 ((context->ustorm_st_context.common.
5169 max_sges_for_packet + PAGES_PER_SGE - 1) &
5170 (~(PAGES_PER_SGE - 1))) >> PAGES_PER_SGE_SHIFT;
5173 context->ustorm_ag_context.cdu_usage =
5174 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
5175 CDU_REGION_NUMBER_UCM_AG,
5176 ETH_CONNECTION_TYPE);
5178 context->xstorm_ag_context.cdu_reserved =
5179 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
5180 CDU_REGION_NUMBER_XCM_AG,
5181 ETH_CONNECTION_TYPE);
5184 for_each_tx_queue(bp, i) {
5185 struct bnx2x_fastpath *fp = &bp->fp[i];
5186 struct eth_context *context =
5187 bnx2x_sp(bp, context[i - bp->num_rx_queues].eth);
5189 context->cstorm_st_context.sb_index_number =
5190 C_SB_ETH_TX_CQ_INDEX;
5191 context->cstorm_st_context.status_block_id = fp->sb_id;
5193 context->xstorm_st_context.tx_bd_page_base_hi =
5194 U64_HI(fp->tx_desc_mapping);
5195 context->xstorm_st_context.tx_bd_page_base_lo =
5196 U64_LO(fp->tx_desc_mapping);
5197 context->xstorm_st_context.statistics_data = (fp->cl_id |
5198 XSTORM_ETH_ST_CONTEXT_STATISTICS_ENABLE);
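/*
 * RSS indirection table: TSTORM_INDIRECTION_TABLE_SIZE one-byte slots,
 * each mapping an Rx hash bucket to a client id.  Queues are striped
 * round-robin; e.g. with 4 Rx queues and base cl_id 0 the table reads
 * 0, 1, 2, 3, 0, 1, 2, 3, ...
 */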
5202 static void bnx2x_init_ind_table(struct bnx2x *bp)
5204 int func = BP_FUNC(bp);
5207 if (bp->multi_mode == ETH_RSS_MODE_DISABLED)
5208 return;
5210 DP(NETIF_MSG_IFUP,
5211 "Initializing indirection table multi_mode %d\n", bp->multi_mode);
5212 for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++)
5213 REG_WR8(bp, BAR_TSTRORM_INTMEM +
5214 TSTORM_INDIRECTION_TABLE_OFFSET(func) + i,
5215 bp->fp->cl_id + (i % bp->num_rx_queues));
5218 static void bnx2x_set_client_config(struct bnx2x *bp)
5220 struct tstorm_eth_client_config tstorm_client = {0};
5221 int port = BP_PORT(bp);
5224 tstorm_client.mtu = bp->dev->mtu;
5225 tstorm_client.config_flags =
5226 (TSTORM_ETH_CLIENT_CONFIG_STATSITICS_ENABLE |
5227 TSTORM_ETH_CLIENT_CONFIG_E1HOV_REM_ENABLE);
5229 if (bp->rx_mode && bp->vlgrp && (bp->flags & HW_VLAN_RX_FLAG)) {
5230 tstorm_client.config_flags |=
5231 TSTORM_ETH_CLIENT_CONFIG_VLAN_REM_ENABLE;
5232 DP(NETIF_MSG_IFUP, "vlan removal enabled\n");
5236 for_each_queue(bp, i) {
5237 tstorm_client.statistics_counter_id = bp->fp[i].cl_id;
5239 REG_WR(bp, BAR_TSTRORM_INTMEM +
5240 TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id),
5241 ((u32 *)&tstorm_client)[0]);
5242 REG_WR(bp, BAR_TSTRORM_INTMEM +
5243 TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id) + 4,
5244 ((u32 *)&tstorm_client)[1]);
5247 DP(BNX2X_MSG_OFF, "tstorm_client: 0x%08x 0x%08x\n",
5248 ((u32 *)&tstorm_client)[0], ((u32 *)&tstorm_client)[1]);
5251 static void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
5253 struct tstorm_eth_mac_filter_config tstorm_mac_filter = {0};
5254 int mode = bp->rx_mode;
5255 int mask = (1 << BP_L_ID(bp));
5256 int func = BP_FUNC(bp);
5257 int port = BP_PORT(bp);
5259 /* All but management unicast packets should pass to the host as well */
5260 u32 llh_mask =
5261 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_BRCST |
5262 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_MLCST |
5263 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_VLAN |
5264 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_NO_VLAN;
5266 DP(NETIF_MSG_IFUP, "rx mode %d mask 0x%x\n", mode, mask);
5268 switch (mode) {
5269 case BNX2X_RX_MODE_NONE: /* no Rx */
5270 tstorm_mac_filter.ucast_drop_all = mask;
5271 tstorm_mac_filter.mcast_drop_all = mask;
5272 tstorm_mac_filter.bcast_drop_all = mask;
5273 break;
5275 case BNX2X_RX_MODE_NORMAL:
5276 tstorm_mac_filter.bcast_accept_all = mask;
5277 break;
5279 case BNX2X_RX_MODE_ALLMULTI:
5280 tstorm_mac_filter.mcast_accept_all = mask;
5281 tstorm_mac_filter.bcast_accept_all = mask;
5282 break;
5284 case BNX2X_RX_MODE_PROMISC:
5285 tstorm_mac_filter.ucast_accept_all = mask;
5286 tstorm_mac_filter.mcast_accept_all = mask;
5287 tstorm_mac_filter.bcast_accept_all = mask;
5288 /* pass management unicast packets as well */
5289 llh_mask |= NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_UNCST;
5290 break;
5292 default:
5293 BNX2X_ERR("BAD rx mode (%d)\n", mode);
5294 break;
5297 REG_WR(bp,
5298 (port ? NIG_REG_LLH1_BRB1_DRV_MASK : NIG_REG_LLH0_BRB1_DRV_MASK),
5299 llh_mask);
5301 for (i = 0; i < sizeof(struct tstorm_eth_mac_filter_config)/4; i++) {
5302 REG_WR(bp, BAR_TSTRORM_INTMEM +
5303 TSTORM_MAC_FILTER_CONFIG_OFFSET(func) + i * 4,
5304 ((u32 *)&tstorm_mac_filter)[i]);
5306 /* DP(NETIF_MSG_IFUP, "tstorm_mac_filter[%d]: 0x%08x\n", i,
5307 ((u32 *)&tstorm_mac_filter)[i]); */
5310 if (mode != BNX2X_RX_MODE_NONE)
5311 bnx2x_set_client_config(bp);
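/*
 * The bnx2x_init_internal_*() helpers below mirror the firmware's load
 * hierarchy: common (chip-wide, done once by the first function to
 * load), per-port and per-function.  bnx2x_init_internal() dispatches on
 * the load code returned by the MCP, and its switch deliberately falls
 * through so a COMMON load performs all three levels.
 */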
5314 static void bnx2x_init_internal_common(struct bnx2x *bp)
5316 int i;
5318 /* Zero this manually as its initialization is
5319 currently missing in the initTool */
5320 for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++)
5321 REG_WR(bp, BAR_USTRORM_INTMEM +
5322 USTORM_AGG_DATA_OFFSET + i * 4, 0);
5325 static void bnx2x_init_internal_port(struct bnx2x *bp)
5327 int port = BP_PORT(bp);
5330 BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_U_OFFSET(port), BNX2X_BTR);
5332 BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_C_OFFSET(port), BNX2X_BTR);
5333 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
5334 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
5337 static void bnx2x_init_internal_func(struct bnx2x *bp)
5339 struct tstorm_eth_function_common_config tstorm_config = {0};
5340 struct stats_indication_flags stats_flags = {0};
5341 int port = BP_PORT(bp);
5342 int func = BP_FUNC(bp);
5343 int i, j;
5344 u32 offset;
5345 u16 max_agg_size;
5347 if (is_multi(bp)) {
5348 tstorm_config.config_flags = MULTI_FLAGS(bp);
5349 tstorm_config.rss_result_mask = MULTI_MASK;
5352 /* Enable TPA if needed */
5353 if (bp->flags & TPA_ENABLE_FLAG)
5354 tstorm_config.config_flags |=
5355 TSTORM_ETH_FUNCTION_COMMON_CONFIG_ENABLE_TPA;
5357 if (IS_E1HMF(bp))
5358 tstorm_config.config_flags |=
5359 TSTORM_ETH_FUNCTION_COMMON_CONFIG_E1HOV_IN_CAM;
5361 tstorm_config.leading_client_id = BP_L_ID(bp);
5363 REG_WR(bp, BAR_TSTRORM_INTMEM +
5364 TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(func),
5365 (*(u32 *)&tstorm_config));
5367 bp->rx_mode = BNX2X_RX_MODE_NONE; /* no rx until link is up */
5368 bnx2x_set_storm_rx_mode(bp);
5370 for_each_queue(bp, i) {
5371 u8 cl_id = bp->fp[i].cl_id;
5373 /* reset xstorm per client statistics */
5374 offset = BAR_XSTRORM_INTMEM +
5375 XSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
5376 for (j = 0;
5377 j < sizeof(struct xstorm_per_client_stats) / 4; j++)
5378 REG_WR(bp, offset + j*4, 0);
5380 /* reset tstorm per client statistics */
5381 offset = BAR_TSTRORM_INTMEM +
5382 TSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
5383 for (j = 0;
5384 j < sizeof(struct tstorm_per_client_stats) / 4; j++)
5385 REG_WR(bp, offset + j*4, 0);
5387 /* reset ustorm per client statistics */
5388 offset = BAR_USTRORM_INTMEM +
5389 USTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
5390 for (j = 0;
5391 j < sizeof(struct ustorm_per_client_stats) / 4; j++)
5392 REG_WR(bp, offset + j*4, 0);
5395 /* Init statistics related context */
5396 stats_flags.collect_eth = 1;
5398 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func),
5399 ((u32 *)&stats_flags)[0]);
5400 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func) + 4,
5401 ((u32 *)&stats_flags)[1]);
5403 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func),
5404 ((u32 *)&stats_flags)[0]);
5405 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func) + 4,
5406 ((u32 *)&stats_flags)[1]);
5408 REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(func),
5409 ((u32 *)&stats_flags)[0]);
5410 REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(func) + 4,
5411 ((u32 *)&stats_flags)[1]);
5413 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func),
5414 ((u32 *)&stats_flags)[0]);
5415 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func) + 4,
5416 ((u32 *)&stats_flags)[1]);
5418 REG_WR(bp, BAR_XSTRORM_INTMEM +
5419 XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
5420 U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
5421 REG_WR(bp, BAR_XSTRORM_INTMEM +
5422 XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
5423 U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
5425 REG_WR(bp, BAR_TSTRORM_INTMEM +
5426 TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
5427 U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
5428 REG_WR(bp, BAR_TSTRORM_INTMEM +
5429 TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
5430 U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
5432 REG_WR(bp, BAR_USTRORM_INTMEM +
5433 USTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
5434 U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
5435 REG_WR(bp, BAR_USTRORM_INTMEM +
5436 USTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
5437 U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
5439 if (CHIP_IS_E1H(bp)) {
5440 REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNCTION_MODE_OFFSET,
5441 IS_E1HMF(bp));
5442 REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNCTION_MODE_OFFSET,
5443 IS_E1HMF(bp));
5444 REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNCTION_MODE_OFFSET,
5445 IS_E1HMF(bp));
5446 REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNCTION_MODE_OFFSET,
5447 IS_E1HMF(bp));
5449 REG_WR16(bp, BAR_XSTRORM_INTMEM + XSTORM_E1HOV_OFFSET(func),
5450 bp->e1hov);
5453 /* Init CQ ring mapping and aggregation size, the FW limit is 8 frags */
5454 max_agg_size =
5455 min((u32)(min((u32)8, (u32)MAX_SKB_FRAGS) *
5456 SGE_PAGE_SIZE * PAGES_PER_SGE),
5457 (u32)0xffff);
5458 for_each_rx_queue(bp, i) {
5459 struct bnx2x_fastpath *fp = &bp->fp[i];
5461 REG_WR(bp, BAR_USTRORM_INTMEM +
5462 USTORM_CQE_PAGE_BASE_OFFSET(port, fp->cl_id),
5463 U64_LO(fp->rx_comp_mapping));
5464 REG_WR(bp, BAR_USTRORM_INTMEM +
5465 USTORM_CQE_PAGE_BASE_OFFSET(port, fp->cl_id) + 4,
5466 U64_HI(fp->rx_comp_mapping));
5469 REG_WR(bp, BAR_USTRORM_INTMEM +
5470 USTORM_CQE_PAGE_NEXT_OFFSET(port, fp->cl_id),
5471 U64_LO(fp->rx_comp_mapping + BCM_PAGE_SIZE));
5472 REG_WR(bp, BAR_USTRORM_INTMEM +
5473 USTORM_CQE_PAGE_NEXT_OFFSET(port, fp->cl_id) + 4,
5474 U64_HI(fp->rx_comp_mapping + BCM_PAGE_SIZE));
5476 REG_WR16(bp, BAR_USTRORM_INTMEM +
5477 USTORM_MAX_AGG_SIZE_OFFSET(port, fp->cl_id),
5478 max_agg_size);
5481 /* dropless flow control */
5482 if (CHIP_IS_E1H(bp)) {
5483 struct ustorm_eth_rx_pause_data_e1h rx_pause = {0};
5485 rx_pause.bd_thr_low = 250;
5486 rx_pause.cqe_thr_low = 250;
5488 rx_pause.sge_thr_low = 0;
5489 rx_pause.bd_thr_high = 350;
5490 rx_pause.cqe_thr_high = 350;
5491 rx_pause.sge_thr_high = 0;
5493 for_each_rx_queue(bp, i) {
5494 struct bnx2x_fastpath *fp = &bp->fp[i];
5496 if (!fp->disable_tpa) {
5497 rx_pause.sge_thr_low = 150;
5498 rx_pause.sge_thr_high = 250;
5502 offset = BAR_USTRORM_INTMEM +
5503 USTORM_ETH_RING_PAUSE_DATA_OFFSET(port,
5504 fp->cl_id);
5505 for (j = 0;
5506 j < sizeof(struct ustorm_eth_rx_pause_data_e1h)/4;
5507 j++)
5508 REG_WR(bp, offset + j*4,
5509 ((u32 *)&rx_pause)[j]);
5513 memset(&(bp->cmng), 0, sizeof(struct cmng_struct_per_port));
5515 /* Init rate shaping and fairness contexts */
5516 if (IS_E1HMF(bp)) {
5517 int vn;
5519 /* During init there is no active link.
5520 Until link is up, set the link rate to 10Gbps */
5521 bp->link_vars.line_speed = SPEED_10000;
5522 bnx2x_init_port_minmax(bp);
5524 bnx2x_calc_vn_weight_sum(bp);
5526 for (vn = VN_0; vn < E1HVN_MAX; vn++)
5527 bnx2x_init_vn_minmax(bp, 2*vn + port);
5529 /* Enable rate shaping and fairness */
5530 bp->cmng.flags.cmng_enables =
5531 CMNG_FLAGS_PER_PORT_RATE_SHAPING_VN;
5532 if (bp->vn_weight_sum)
5533 bp->cmng.flags.cmng_enables |=
5534 CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
5535 else
5536 DP(NETIF_MSG_IFUP, "All MIN values are zeroes,"
5537 " fairness will be disabled\n");
5538 } else {
5539 /* rate shaping and fairness are disabled */
5540 DP(NETIF_MSG_IFUP,
5541 "single function mode, minmax will be disabled\n");
5545 /* Store it to internal memory */
5546 if (bp->port.pmf)
5547 for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++)
5548 REG_WR(bp, BAR_XSTRORM_INTMEM +
5549 XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i * 4,
5550 ((u32 *)(&bp->cmng))[i]);
5553 static void bnx2x_init_internal(struct bnx2x *bp, u32 load_code)
5555 switch (load_code) {
5556 case FW_MSG_CODE_DRV_LOAD_COMMON:
5557 bnx2x_init_internal_common(bp);
5558 /* no break */
5560 case FW_MSG_CODE_DRV_LOAD_PORT:
5561 bnx2x_init_internal_port(bp);
5562 /* no break */
5564 case FW_MSG_CODE_DRV_LOAD_FUNCTION:
5565 bnx2x_init_internal_func(bp);
5566 break;
5568 default:
5569 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
5574 static void bnx2x_nic_init(struct bnx2x *bp, u32 load_code)
5578 for_each_queue(bp, i) {
5579 struct bnx2x_fastpath *fp = &bp->fp[i];
5582 fp->state = BNX2X_FP_STATE_CLOSED;
5584 fp->cl_id = BP_L_ID(bp) + i;
5585 fp->sb_id = fp->cl_id;
5586 /* Suitable Rx and Tx SBs are served by the same client */
5587 if (i >= bp->num_rx_queues)
5588 fp->cl_id -= bp->num_rx_queues;
5589 DP(NETIF_MSG_IFUP,
5590 "queue[%d]: bnx2x_init_sb(%p,%p) cl_id %d sb %d\n",
5591 i, bp, fp->status_blk, fp->cl_id, fp->sb_id);
5592 bnx2x_init_sb(bp, fp->status_blk, fp->status_blk_mapping,
5593 fp->sb_id);
5594 bnx2x_update_fpsb_idx(fp);
5597 /* ensure status block indices were read */
5598 rmb();
5601 bnx2x_init_def_sb(bp, bp->def_status_blk, bp->def_status_blk_mapping,
5602 DEF_SB_ID);
5603 bnx2x_update_dsb_idx(bp);
5604 bnx2x_update_coalesce(bp);
5605 bnx2x_init_rx_rings(bp);
5606 bnx2x_init_tx_ring(bp);
5607 bnx2x_init_sp_ring(bp);
5608 bnx2x_init_context(bp);
5609 bnx2x_init_internal(bp, load_code);
5610 bnx2x_init_ind_table(bp);
5611 bnx2x_stats_init(bp);
5613 /* At this point, we are ready for interrupts */
5614 atomic_set(&bp->intr_sem, 0);
5616 /* flush all before enabling interrupts */
5617 mb();
5618 mmiowb();
5620 bnx2x_int_enable(bp);
5622 /* Check for SPIO5 */
5623 bnx2x_attn_int_deasserted0(bp,
5624 REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + BP_PORT(bp)*4) &
5625 AEU_INPUTS_ATTN_BITS_SPIO5);
5628 /* end of nic init */
5631 * gzip service functions
5634 static int bnx2x_gunzip_init(struct bnx2x *bp)
5636 bp->gunzip_buf = pci_alloc_consistent(bp->pdev, FW_BUF_SIZE,
5637 &bp->gunzip_mapping);
5638 if (bp->gunzip_buf == NULL)
5639 goto gunzip_nomem1;
5641 bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL);
5642 if (bp->strm == NULL)
5643 goto gunzip_nomem2;
5645 bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(),
5646 GFP_KERNEL);
5647 if (bp->strm->workspace == NULL)
5648 goto gunzip_nomem3;
5650 return 0;
5652 gunzip_nomem3:
5653 kfree(bp->strm);
5654 bp->strm = NULL;
5656 gunzip_nomem2:
5657 pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
5658 bp->gunzip_mapping);
5659 bp->gunzip_buf = NULL;
5661 gunzip_nomem1:
5662 printk(KERN_ERR PFX "%s: Cannot allocate firmware buffer for"
5663 " decompression\n", bp->dev->name);
5664 return -ENOMEM;
5667 static void bnx2x_gunzip_end(struct bnx2x *bp)
5669 kfree(bp->strm->workspace);
5670 kfree(bp->strm);
5671 bp->strm = NULL;
5674 if (bp->gunzip_buf) {
5675 pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
5676 bp->gunzip_mapping);
5677 bp->gunzip_buf = NULL;
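/*
 * Minimal gzip parsing, just enough for the firmware blobs: the fixed
 * 10-byte member header must start 0x1f 0x8b 0x08 (magic bytes plus the
 * "deflate" method), an optional NUL-terminated file name follows when
 * the FNAME flag (bit 3 of the flags byte) is set, and the remainder is
 * handed to zlib as a raw deflate stream - that is what the negative
 * -MAX_WBITS window argument to zlib_inflateInit2() requests.
 */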
5681 static int bnx2x_gunzip(struct bnx2x *bp, const u8 *zbuf, int len)
5683 int n, rc;
5685 /* check gzip header */
5686 if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED)) {
5687 BNX2X_ERR("Bad gzip header\n");
5688 return -EINVAL;
5691 n = 10;
5693 #define FNAME 0x8
5695 if (zbuf[3] & FNAME)
5696 while ((zbuf[n++] != 0) && (n < len));
5698 bp->strm->next_in = (typeof(bp->strm->next_in))zbuf + n;
5699 bp->strm->avail_in = len - n;
5700 bp->strm->next_out = bp->gunzip_buf;
5701 bp->strm->avail_out = FW_BUF_SIZE;
5703 rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
5704 if (rc != Z_OK)
5705 return rc;
5707 rc = zlib_inflate(bp->strm, Z_FINISH);
5708 if ((rc != Z_OK) && (rc != Z_STREAM_END))
5709 printk(KERN_ERR PFX "%s: Firmware decompression error: %s\n",
5710 bp->dev->name, bp->strm->msg);
5712 bp->gunzip_outlen = (FW_BUF_SIZE - bp->strm->avail_out);
5713 if (bp->gunzip_outlen & 0x3)
5714 printk(KERN_ERR PFX "%s: Firmware decompression error:"
5715 " gunzip_outlen (%d) not aligned\n",
5716 bp->dev->name, bp->gunzip_outlen);
5717 bp->gunzip_outlen >>= 2;
5719 zlib_inflateEnd(bp->strm);
5721 if (rc == Z_STREAM_END)
5722 return 0;
5723 else
5724 return rc;
5727 /* nic load/unload */
5730 * General service functions
5733 /* send a NIG loopback debug packet */
5734 static void bnx2x_lb_pckt(struct bnx2x *bp)
5736 u32 wb_write[3];
5738 /* Ethernet source and destination addresses */
5739 wb_write[0] = 0x55555555;
5740 wb_write[1] = 0x55555555;
5741 wb_write[2] = 0x20; /* SOP */
5742 REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
5744 /* NON-IP protocol */
5745 wb_write[0] = 0x09000000;
5746 wb_write[1] = 0x55555555;
5747 wb_write[2] = 0x10; /* EOP, eop_bvalid = 0 */
5748 REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
5751 /* some of the internal memories
5752 * are not directly readable from the driver;
5753 * to test them we send debug packets
5754 */
5755 static int bnx2x_int_mem_test(struct bnx2x *bp)
5757 int factor;
5758 int count, i;
5759 u32 val = 0;
5761 if (CHIP_REV_IS_FPGA(bp))
5762 factor = 120;
5763 else if (CHIP_REV_IS_EMUL(bp))
5764 factor = 200;
5765 else
5766 factor = 1;
5768 DP(NETIF_MSG_HW, "start part1\n");
5770 /* Disable inputs of parser neighbor blocks */
5771 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
5772 REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
5773 REG_WR(bp, CFC_REG_DEBUG0, 0x1);
5774 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
5776 /* Write 0 to parser credits for CFC search request */
5777 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
5779 /* send Ethernet packet */
5780 bnx2x_lb_pckt(bp);
5782 /* TODO: should the NIG statistics be reset here? */
5783 /* Wait until NIG register shows 1 packet of size 0x10 */
5784 count = 1000 * factor;
5785 while (count) {
5787 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5788 val = *bnx2x_sp(bp, wb_data[0]);
5789 if (val == 0x10)
5790 break;
5792 msleep(10);
5793 count--;
5795 if (count == 0) {
5796 BNX2X_ERR("NIG timeout val = 0x%x\n", val);
5800 /* Wait until PRS register shows 1 packet */
5801 count = 1000 * factor;
5802 while (count) {
5803 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5804 if (val == 1)
5805 break;
5807 msleep(10);
5808 count--;
5810 if (count == 0) {
5811 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
5815 /* Reset and init BRB, PRS */
5816 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
5817 msleep(50);
5818 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
5819 msleep(50);
5820 bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
5821 bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
5823 DP(NETIF_MSG_HW, "part2\n");
5825 /* Disable inputs of parser neighbor blocks */
5826 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
5827 REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
5828 REG_WR(bp, CFC_REG_DEBUG0, 0x1);
5829 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
5831 /* Write 0 to parser credits for CFC search request */
5832 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
5834 /* send 10 Ethernet packets */
5835 for (i = 0; i < 10; i++)
5836 bnx2x_lb_pckt(bp);
5838 /* Wait until NIG register shows 10 + 1
5839 packets of size 11*0x10 = 0xb0 */
5840 count = 1000 * factor;
5841 while (count) {
5843 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5844 val = *bnx2x_sp(bp, wb_data[0]);
5845 if (val == 0xb0)
5846 break;
5848 msleep(10);
5849 count--;
5851 if (count == 0) {
5852 BNX2X_ERR("NIG timeout val = 0x%x\n", val);
5856 /* Wait until PRS register shows 2 packets */
5857 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5858 if (val != 2)
5859 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
5861 /* Write 1 to parser credits for CFC search request */
5862 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x1);
5864 /* Wait until PRS register shows 3 packets */
5865 msleep(10 * factor);
5866 /* Wait until NIG register shows 1 packet of size 0x10 */
5867 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5868 if (val != 3)
5869 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
5871 /* clear NIG EOP FIFO */
5872 for (i = 0; i < 11; i++)
5873 REG_RD(bp, NIG_REG_INGRESS_EOP_LB_FIFO);
5874 val = REG_RD(bp, NIG_REG_INGRESS_EOP_LB_EMPTY);
5875 if (val != 1) {
5876 BNX2X_ERR("clear of NIG failed\n");
5880 /* Reset and init BRB, PRS, NIG */
5881 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
5882 msleep(50);
5883 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
5884 msleep(50);
5885 bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
5886 bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
5889 REG_WR(bp, PRS_REG_NIC_MODE, 1);
5892 /* Enable inputs of parser neighbor blocks */
5893 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x7fffffff);
5894 REG_WR(bp, TCM_REG_PRS_IFEN, 0x1);
5895 REG_WR(bp, CFC_REG_DEBUG0, 0x0);
5896 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x1);
5898 DP(NETIF_MSG_HW, "done\n");
5903 static void enable_blocks_attention(struct bnx2x *bp)
5905 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
5906 REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0);
5907 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
5908 REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
5909 REG_WR(bp, QM_REG_QM_INT_MASK, 0);
5910 REG_WR(bp, TM_REG_TM_INT_MASK, 0);
5911 REG_WR(bp, XSDM_REG_XSDM_INT_MASK_0, 0);
5912 REG_WR(bp, XSDM_REG_XSDM_INT_MASK_1, 0);
5913 REG_WR(bp, XCM_REG_XCM_INT_MASK, 0);
5914 /* REG_WR(bp, XSEM_REG_XSEM_INT_MASK_0, 0); */
5915 /* REG_WR(bp, XSEM_REG_XSEM_INT_MASK_1, 0); */
5916 REG_WR(bp, USDM_REG_USDM_INT_MASK_0, 0);
5917 REG_WR(bp, USDM_REG_USDM_INT_MASK_1, 0);
5918 REG_WR(bp, UCM_REG_UCM_INT_MASK, 0);
5919 /* REG_WR(bp, USEM_REG_USEM_INT_MASK_0, 0); */
5920 /* REG_WR(bp, USEM_REG_USEM_INT_MASK_1, 0); */
5921 REG_WR(bp, GRCBASE_UPB + PB_REG_PB_INT_MASK, 0);
5922 REG_WR(bp, CSDM_REG_CSDM_INT_MASK_0, 0);
5923 REG_WR(bp, CSDM_REG_CSDM_INT_MASK_1, 0);
5924 REG_WR(bp, CCM_REG_CCM_INT_MASK, 0);
5925 /* REG_WR(bp, CSEM_REG_CSEM_INT_MASK_0, 0); */
5926 /* REG_WR(bp, CSEM_REG_CSEM_INT_MASK_1, 0); */
5927 if (CHIP_REV_IS_FPGA(bp))
5928 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x580000);
5930 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x480000);
5931 REG_WR(bp, TSDM_REG_TSDM_INT_MASK_0, 0);
5932 REG_WR(bp, TSDM_REG_TSDM_INT_MASK_1, 0);
5933 REG_WR(bp, TCM_REG_TCM_INT_MASK, 0);
5934 /* REG_WR(bp, TSEM_REG_TSEM_INT_MASK_0, 0); */
5935 /* REG_WR(bp, TSEM_REG_TSEM_INT_MASK_1, 0); */
5936 REG_WR(bp, CDU_REG_CDU_INT_MASK, 0);
5937 REG_WR(bp, DMAE_REG_DMAE_INT_MASK, 0);
5938 /* REG_WR(bp, MISC_REG_MISC_INT_MASK, 0); */
5939 REG_WR(bp, PBF_REG_PBF_INT_MASK, 0x18); /* bits 3,4 masked */
5943 static void bnx2x_reset_common(struct bnx2x *bp)
5946 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
5947 0xd3ffff7f);
5948 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, 0x1403);
5951 static void bnx2x_init_pxp(struct bnx2x *bp)
5953 u16 devctl;
5954 int r_order, w_order;
5956 pci_read_config_word(bp->pdev,
5957 bp->pcie_cap + PCI_EXP_DEVCTL, &devctl);
5958 DP(NETIF_MSG_HW, "read 0x%x from devctl\n", devctl);
5959 w_order = ((devctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5);
5960 if (bp->mrrs == -1)
5961 r_order = ((devctl & PCI_EXP_DEVCTL_READRQ) >> 12);
5962 else {
5963 DP(NETIF_MSG_HW, "force read order to %d\n", bp->mrrs);
5964 r_order = bp->mrrs;
5967 bnx2x_init_pxp_arb(bp, r_order, w_order);
5970 static void bnx2x_setup_fan_failure_detection(struct bnx2x *bp)
5972 int is_required = 0;
5973 int port;
5974 u32 val;
5976 val = SHMEM_RD(bp, dev_info.shared_hw_config.config2) &
5977 SHARED_HW_CFG_FAN_FAILURE_MASK;
5979 if (val == SHARED_HW_CFG_FAN_FAILURE_ENABLED)
5980 is_required = 1;
5982 /*
5983 * The fan failure mechanism is usually related to the PHY type since
5984 * the power consumption of the board is affected by the PHY. Currently,
5985 * fan is required for most designs with SFX7101, BCM8727 and BCM8481.
5987 else if (val == SHARED_HW_CFG_FAN_FAILURE_PHY_TYPE)
5988 for (port = PORT_0; port < PORT_MAX; port++) {
5989 u32 phy_type =
5990 SHMEM_RD(bp, dev_info.port_hw_config[port].
5991 external_phy_config) &
5992 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
5993 is_required |=
5994 ((phy_type ==
5995 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101) ||
5996 (phy_type ==
5997 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727) ||
5998 (phy_type ==
5999 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481));
6002 DP(NETIF_MSG_HW, "fan detection setting: %d\n", is_required);
6004 if (is_required == 0)
6005 return;
6007 /* Fan failure is indicated by SPIO 5 */
6008 bnx2x_set_spio(bp, MISC_REGISTERS_SPIO_5,
6009 MISC_REGISTERS_SPIO_INPUT_HI_Z);
6011 /* set to active low mode */
6012 val = REG_RD(bp, MISC_REG_SPIO_INT);
6013 val |= ((1 << MISC_REGISTERS_SPIO_5) <<
6014 MISC_REGISTERS_SPIO_INT_OLD_SET_POS);
6015 REG_WR(bp, MISC_REG_SPIO_INT, val);
6017 /* enable interrupt to signal the IGU */
6018 val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN);
6019 val |= (1 << MISC_REGISTERS_SPIO_5);
6020 REG_WR(bp, MISC_REG_SPIO_EVENT_EN, val);
6023 static int bnx2x_init_common(struct bnx2x *bp)
6025 u32 val, i;
6027 DP(BNX2X_MSG_MCP, "starting common init func %d\n", BP_FUNC(bp));
6029 bnx2x_reset_common(bp);
6030 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0xffffffff);
6031 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, 0xfffc);
6033 bnx2x_init_block(bp, MISC_BLOCK, COMMON_STAGE);
6034 if (CHIP_IS_E1H(bp))
6035 REG_WR(bp, MISC_REG_E1HMF_MODE, IS_E1HMF(bp));
6037 REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x100);
6039 REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x0);
6041 bnx2x_init_block(bp, PXP_BLOCK, COMMON_STAGE);
6042 if (CHIP_IS_E1(bp)) {
6043 /* enable HW interrupt from PXP on USDM overflow
6044 bit 16 on INT_MASK_0 */
6045 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
6048 bnx2x_init_block(bp, PXP2_BLOCK, COMMON_STAGE);
6052 REG_WR(bp, PXP2_REG_RQ_QM_ENDIAN_M, 1);
6053 REG_WR(bp, PXP2_REG_RQ_TM_ENDIAN_M, 1);
6054 REG_WR(bp, PXP2_REG_RQ_SRC_ENDIAN_M, 1);
6055 REG_WR(bp, PXP2_REG_RQ_CDU_ENDIAN_M, 1);
6056 REG_WR(bp, PXP2_REG_RQ_DBG_ENDIAN_M, 1);
6057 /* make sure this value is 0 */
6058 REG_WR(bp, PXP2_REG_RQ_HC_ENDIAN_M, 0);
6060 /* REG_WR(bp, PXP2_REG_RD_PBF_SWAP_MODE, 1); */
6061 REG_WR(bp, PXP2_REG_RD_QM_SWAP_MODE, 1);
6062 REG_WR(bp, PXP2_REG_RD_TM_SWAP_MODE, 1);
6063 REG_WR(bp, PXP2_REG_RD_SRC_SWAP_MODE, 1);
6064 REG_WR(bp, PXP2_REG_RD_CDURD_SWAP_MODE, 1);
6067 REG_WR(bp, PXP2_REG_RQ_CDU_P_SIZE, 2);
6069 REG_WR(bp, PXP2_REG_RQ_TM_P_SIZE, 5);
6070 REG_WR(bp, PXP2_REG_RQ_QM_P_SIZE, 5);
6071 REG_WR(bp, PXP2_REG_RQ_SRC_P_SIZE, 5);
6074 if (CHIP_REV_IS_FPGA(bp) && CHIP_IS_E1H(bp))
6075 REG_WR(bp, PXP2_REG_PGL_TAGS_LIMIT, 0x1);
6077 /* let the HW do its magic ... */
6078 msleep(100);
6079 /* finish PXP init */
6080 val = REG_RD(bp, PXP2_REG_RQ_CFG_DONE);
6081 if (val != 1) {
6082 BNX2X_ERR("PXP2 CFG failed\n");
6083 return -EBUSY;
6085 val = REG_RD(bp, PXP2_REG_RD_INIT_DONE);
6086 if (val != 1) {
6087 BNX2X_ERR("PXP2 RD_INIT failed\n");
6088 return -EBUSY;
6091 REG_WR(bp, PXP2_REG_RQ_DISABLE_INPUTS, 0);
6092 REG_WR(bp, PXP2_REG_RD_DISABLE_INPUTS, 0);
6094 bnx2x_init_block(bp, DMAE_BLOCK, COMMON_STAGE);
6096 /* clean the DMAE memory */
6097 bp->dmae_ready = 1;
6098 bnx2x_init_fill(bp, TSEM_REG_PRAM, 0, 8);
6100 bnx2x_init_block(bp, TCM_BLOCK, COMMON_STAGE);
6101 bnx2x_init_block(bp, UCM_BLOCK, COMMON_STAGE);
6102 bnx2x_init_block(bp, CCM_BLOCK, COMMON_STAGE);
6103 bnx2x_init_block(bp, XCM_BLOCK, COMMON_STAGE);
6105 bnx2x_read_dmae(bp, XSEM_REG_PASSIVE_BUFFER, 3);
6106 bnx2x_read_dmae(bp, CSEM_REG_PASSIVE_BUFFER, 3);
6107 bnx2x_read_dmae(bp, TSEM_REG_PASSIVE_BUFFER, 3);
6108 bnx2x_read_dmae(bp, USEM_REG_PASSIVE_BUFFER, 3);
6110 bnx2x_init_block(bp, QM_BLOCK, COMMON_STAGE);
6111 /* soft reset pulse */
6112 REG_WR(bp, QM_REG_SOFT_RESET, 1);
6113 REG_WR(bp, QM_REG_SOFT_RESET, 0);
6116 bnx2x_init_block(bp, TIMERS_BLOCK, COMMON_STAGE);
6119 bnx2x_init_block(bp, DQ_BLOCK, COMMON_STAGE);
6120 REG_WR(bp, DORQ_REG_DPM_CID_OFST, BCM_PAGE_SHIFT);
6121 if (!CHIP_REV_IS_SLOW(bp)) {
6122 /* enable hw interrupt from doorbell Q */
6123 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
6126 bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
6127 bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
6128 REG_WR(bp, PRS_REG_A_PRSU_20, 0xf);
6130 REG_WR(bp, PRS_REG_NIC_MODE, 1);
6131 if (CHIP_IS_E1H(bp))
6132 REG_WR(bp, PRS_REG_E1HOV_MODE, IS_E1HMF(bp));
6134 bnx2x_init_block(bp, TSDM_BLOCK, COMMON_STAGE);
6135 bnx2x_init_block(bp, CSDM_BLOCK, COMMON_STAGE);
6136 bnx2x_init_block(bp, USDM_BLOCK, COMMON_STAGE);
6137 bnx2x_init_block(bp, XSDM_BLOCK, COMMON_STAGE);
6139 bnx2x_init_fill(bp, TSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
6140 bnx2x_init_fill(bp, USEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
6141 bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
6142 bnx2x_init_fill(bp, XSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
6144 bnx2x_init_block(bp, TSEM_BLOCK, COMMON_STAGE);
6145 bnx2x_init_block(bp, USEM_BLOCK, COMMON_STAGE);
6146 bnx2x_init_block(bp, CSEM_BLOCK, COMMON_STAGE);
6147 bnx2x_init_block(bp, XSEM_BLOCK, COMMON_STAGE);
6150 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
6152 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
6155 bnx2x_init_block(bp, UPB_BLOCK, COMMON_STAGE);
6156 bnx2x_init_block(bp, XPB_BLOCK, COMMON_STAGE);
6157 bnx2x_init_block(bp, PBF_BLOCK, COMMON_STAGE);
6159 REG_WR(bp, SRC_REG_SOFT_RST, 1);
6160 for (i = SRC_REG_KEYRSS0_0; i <= SRC_REG_KEYRSS1_9; i += 4) {
6161 REG_WR(bp, i, 0xc0cac01a);
6162 /* TODO: replace with something meaningful */
6164 bnx2x_init_block(bp, SRCH_BLOCK, COMMON_STAGE);
6165 REG_WR(bp, SRC_REG_SOFT_RST, 0);
6167 if (sizeof(union cdu_context) != 1024)
6168 /* we currently assume that a context is 1024 bytes */
6169 printk(KERN_ALERT PFX "please adjust the size of"
6170 " cdu_context(%ld)\n", (long)sizeof(union cdu_context));
6172 bnx2x_init_block(bp, CDU_BLOCK, COMMON_STAGE);
6173 val = (4 << 24) + (0 << 12) + 1024;
6174 REG_WR(bp, CDU_REG_CDU_GLOBAL_PARAMS, val);
6176 bnx2x_init_block(bp, CFC_BLOCK, COMMON_STAGE);
6177 REG_WR(bp, CFC_REG_INIT_REG, 0x7FF);
6178 /* enable context validation interrupt from CFC */
6179 REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
6181 /* set the thresholds to prevent CFC/CDU race */
6182 REG_WR(bp, CFC_REG_DEBUG0, 0x20020000);
6184 bnx2x_init_block(bp, HC_BLOCK, COMMON_STAGE);
6185 bnx2x_init_block(bp, MISC_AEU_BLOCK, COMMON_STAGE);
6187 bnx2x_init_block(bp, PXPCS_BLOCK, COMMON_STAGE);
6188 /* Reset PCIE errors for debug */
6189 REG_WR(bp, 0x2814, 0xffffffff);
6190 REG_WR(bp, 0x3820, 0xffffffff);
6192 bnx2x_init_block(bp, EMAC0_BLOCK, COMMON_STAGE);
6193 bnx2x_init_block(bp, EMAC1_BLOCK, COMMON_STAGE);
6194 bnx2x_init_block(bp, DBU_BLOCK, COMMON_STAGE);
6195 bnx2x_init_block(bp, DBG_BLOCK, COMMON_STAGE);
6197 bnx2x_init_block(bp, NIG_BLOCK, COMMON_STAGE);
6198 if (CHIP_IS_E1H(bp)) {
6199 REG_WR(bp, NIG_REG_LLH_MF_MODE, IS_E1HMF(bp));
6200 REG_WR(bp, NIG_REG_LLH_E1HOV_MODE, IS_E1HMF(bp));
6203 if (CHIP_REV_IS_SLOW(bp))
6206 /* finish CFC init */
6207 val = reg_poll(bp, CFC_REG_LL_INIT_DONE, 1, 100, 10);
6208 if (val != 1) {
6209 BNX2X_ERR("CFC LL_INIT failed\n");
6210 return -EBUSY;
6212 val = reg_poll(bp, CFC_REG_AC_INIT_DONE, 1, 100, 10);
6213 if (val != 1) {
6214 BNX2X_ERR("CFC AC_INIT failed\n");
6215 return -EBUSY;
6217 val = reg_poll(bp, CFC_REG_CAM_INIT_DONE, 1, 100, 10);
6218 if (val != 1) {
6219 BNX2X_ERR("CFC CAM_INIT failed\n");
6220 return -EBUSY;
6222 REG_WR(bp, CFC_REG_DEBUG0, 0);
6224 /* read NIG statistic
6225 to see if this is our first up since powerup */
6226 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
6227 val = *bnx2x_sp(bp, wb_data[0]);
6229 /* do internal memory self test */
6230 if ((CHIP_IS_E1(bp)) && (val == 0) && bnx2x_int_mem_test(bp)) {
6231 BNX2X_ERR("internal mem self test failed\n");
6232 return -EBUSY;
6235 switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
6236 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
6237 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
6238 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
6239 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
6240 bp->port.need_hw_lock = 1;
6241 break;
6243 default:
6244 break;
6247 bnx2x_setup_fan_failure_detection(bp);
6249 /* clear PXP2 attentions */
6250 REG_RD(bp, PXP2_REG_PXP2_INT_STS_CLR_0);
6252 enable_blocks_attention(bp);
6254 if (!BP_NOMCP(bp)) {
6255 bnx2x_acquire_phy_lock(bp);
6256 bnx2x_common_init_phy(bp, bp->common.shmem_base);
6257 bnx2x_release_phy_lock(bp);
6259 BNX2X_ERR("Bootcode is missing - cannot initialize link\n");
6264 static int bnx2x_init_port(struct bnx2x *bp)
6266 int port = BP_PORT(bp);
6267 int init_stage = port ? PORT1_STAGE : PORT0_STAGE;
6268 u32 low, high;
6269 u32 val;
6271 DP(BNX2X_MSG_MCP, "starting port init port %x\n", port);
6273 REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
6275 bnx2x_init_block(bp, PXP_BLOCK, init_stage);
6276 bnx2x_init_block(bp, PXP2_BLOCK, init_stage);
6278 bnx2x_init_block(bp, TCM_BLOCK, init_stage);
6279 bnx2x_init_block(bp, UCM_BLOCK, init_stage);
6280 bnx2x_init_block(bp, CCM_BLOCK, init_stage);
6285 wb_write[0] = ONCHIP_ADDR1(bp->timers_mapping);
6286 wb_write[1] = ONCHIP_ADDR2(bp->timers_mapping);
6287 REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
6288 REG_WR(bp, PXP2_REG_PSWRQ_TM0_L2P + func*4, PXP_ONE_ILT(i));
6293 wb_write[0] = ONCHIP_ADDR1(bp->qm_mapping);
6294 wb_write[1] = ONCHIP_ADDR2(bp->qm_mapping);
6295 REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
6296 REG_WR(bp, PXP2_REG_PSWRQ_QM0_L2P + func*4, PXP_ONE_ILT(i));
6301 wb_write[0] = ONCHIP_ADDR1(bp->t1_mapping);
6302 wb_write[1] = ONCHIP_ADDR2(bp->t1_mapping);
6303 REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
6304 REG_WR(bp, PXP2_REG_PSWRQ_SRC0_L2P + func*4, PXP_ONE_ILT(i));
6306 bnx2x_init_block(bp, XCM_BLOCK, init_stage);
6309 REG_WR(bp, TM_REG_LIN0_SCAN_TIME + func*4, 1024/64*20);
6310 REG_WR(bp, TM_REG_LIN0_MAX_ACTIVE_CID + func*4, 31);
6312 bnx2x_init_block(bp, TIMERS_BLOCK, init_stage);
6314 bnx2x_init_block(bp, DQ_BLOCK, init_stage);
6316 bnx2x_init_block(bp, BRB1_BLOCK, init_stage);
6317 if (CHIP_REV_IS_SLOW(bp) && !CHIP_IS_E1H(bp)) {
6318 /* no pause for emulation and FPGA */
6323 low = ((bp->flags & ONE_PORT_FLAG) ? 160 : 246);
6324 else if (bp->dev->mtu > 4096) {
6325 if (bp->flags & ONE_PORT_FLAG)
6329 /* (24*1024 + val*4)/256 */
6330 low = 96 + (val/64) + ((val % 64) ? 1 : 0);
6333 low = ((bp->flags & ONE_PORT_FLAG) ? 80 : 160);
6334 high = low + 56; /* 14*1024/256 */
6336 REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_0 + port*4, low);
6337 REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_0 + port*4, high);
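/* Worked form of the jumbo-MTU branch above (a sketch; the helper name
 * is hypothetical).  low models (24*1024 + val*4)/256 BRB blocks:
 * 96 == 24*1024/256, and val/64 + (val % 64 ? 1 : 0) == ceil(val*4/256).
 */
#if 0
static inline u32 brb_low_threshold_example(u32 val)
{
	return 96 + (val / 64) + ((val % 64) ? 1 : 0);
}
/* e.g. brb_low_threshold_example(9000) == 96 + 140 + 1 == 237 blocks */
#endif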
6340 bnx2x_init_block(bp, PRS_BLOCK, init_stage);
6342 bnx2x_init_block(bp, TSDM_BLOCK, init_stage);
6343 bnx2x_init_block(bp, CSDM_BLOCK, init_stage);
6344 bnx2x_init_block(bp, USDM_BLOCK, init_stage);
6345 bnx2x_init_block(bp, XSDM_BLOCK, init_stage);
6347 bnx2x_init_block(bp, TSEM_BLOCK, init_stage);
6348 bnx2x_init_block(bp, USEM_BLOCK, init_stage);
6349 bnx2x_init_block(bp, CSEM_BLOCK, init_stage);
6350 bnx2x_init_block(bp, XSEM_BLOCK, init_stage);
6352 bnx2x_init_block(bp, UPB_BLOCK, init_stage);
6353 bnx2x_init_block(bp, XPB_BLOCK, init_stage);
6355 bnx2x_init_block(bp, PBF_BLOCK, init_stage);
6357 /* configure PBF to work without PAUSE for MTU 9000 */
6358 REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 0);
6360 /* update threshold */
6361 REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, (9040/16));
6362 /* update init credit */
6363 REG_WR(bp, PBF_REG_P0_INIT_CRD + port*4, (9040/16) + 553 - 22);
6366 REG_WR(bp, PBF_REG_INIT_P0 + port*4, 1);
6368 REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0);
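/* Worked numbers for the PBF setup above: the arbiter threshold is
 * 9040/16 == 565 (units of 16 bytes, sized for a 9000-byte frame plus
 * header room), and the initial credit is 565 + 553 - 22 == 1096.  The
 * meaning of the 553/-22 correction is not spelled out here, so treat
 * that reading as an assumption. */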
6371 /* tell the searcher where the T2 table is */
6372 REG_WR(bp, SRC_REG_COUNTFREE0 + func*4, 16*1024/64);
6374 wb_write[0] = U64_LO(bp->t2_mapping);
6375 wb_write[1] = U64_HI(bp->t2_mapping);
6376 REG_WR_DMAE(bp, SRC_REG_FIRSTFREE0 + func*4, wb_write, 2);
6377 wb_write[0] = U64_LO((u64)bp->t2_mapping + 16*1024 - 64);
6378 wb_write[1] = U64_HI((u64)bp->t2_mapping + 16*1024 - 64);
6379 REG_WR_DMAE(bp, SRC_REG_LASTFREE0 + func*4, wb_write, 2);
6381 REG_WR(bp, SRC_REG_NUMBER_HASH_BITS0 + func*4, 10);
6383 bnx2x_init_block(bp, CDU_BLOCK, init_stage);
6384 bnx2x_init_block(bp, CFC_BLOCK, init_stage);
6386 if (CHIP_IS_E1(bp)) {
6387 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
6388 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
6390 bnx2x_init_block(bp, HC_BLOCK, init_stage);
6392 bnx2x_init_block(bp, MISC_AEU_BLOCK, init_stage);
6393 /* init aeu_mask_attn_func_0/1:
6394 * - SF mode: bits 3-7 are masked. only bits 0-2 are in use
6395 * - MF mode: bit 3 is masked. bits 0-2 are in use as in SF
6396 * bits 4-7 are used for "per vn group attention" */
6397 REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4,
6398 (IS_E1HMF(bp) ? 0xF7 : 0x7));
6400 bnx2x_init_block(bp, PXPCS_BLOCK, init_stage);
6401 bnx2x_init_block(bp, EMAC0_BLOCK, init_stage);
6402 bnx2x_init_block(bp, EMAC1_BLOCK, init_stage);
6403 bnx2x_init_block(bp, DBU_BLOCK, init_stage);
6404 bnx2x_init_block(bp, DBG_BLOCK, init_stage);
6406 bnx2x_init_block(bp, NIG_BLOCK, init_stage);
6408 REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1);
6410 if (CHIP_IS_E1H(bp)) {
6411 /* 0x2 disable e1hov, 0x1 enable */
6412 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK_MF + port*4,
6413 (IS_E1HMF(bp) ? 0x1 : 0x2));
6416 REG_WR(bp, NIG_REG_LLFC_ENABLE_0 + port*4, 0);
6417 REG_WR(bp, NIG_REG_LLFC_OUT_EN_0 + port*4, 0);
6418 REG_WR(bp, NIG_REG_PAUSE_ENABLE_0 + port*4, 1);
6422 bnx2x_init_block(bp, MCP_BLOCK, init_stage);
6423 bnx2x_init_block(bp, DMAE_BLOCK, init_stage);
6425 switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
6426 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
6428 u32 swap_val, swap_override, aeu_gpio_mask, offset;
6430 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_3,
6431 MISC_REGISTERS_GPIO_INPUT_HI_Z, port);
6433 /* The GPIO should be swapped if the swap register is set and active */
6435 swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
6436 swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
6438 /* Select function upon port-swap configuration */
6440 offset = MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0;
6441 aeu_gpio_mask = (swap_val && swap_override) ?
6442 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1 :
6443 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0;
6445 offset = MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0;
6446 aeu_gpio_mask = (swap_val && swap_override) ?
6447 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 :
6448 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1;
6450 val = REG_RD(bp, offset);
6451 /* add GPIO3 to group */
6452 val |= aeu_gpio_mask;
6453 REG_WR(bp, offset, val);
6457 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
6458 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
6459 /* add SPIO 5 to group 0 */
6461 u32 reg_addr = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
6462 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
6463 val = REG_RD(bp, reg_addr);
6464 val |= AEU_INPUTS_ATTN_BITS_SPIO5;
6465 REG_WR(bp, reg_addr, val);
6473 bnx2x__link_reset(bp);
6478 #define ILT_PER_FUNC (768/2)
6479 #define FUNC_ILT_BASE(func) (func * ILT_PER_FUNC)
6480 /* the phys address is shifted right 12 bits and a 1=valid bit is
6481 added at the 53rd bit
6482 then since this is a wide register(TM)
6483 we split it into two 32 bit writes
6485 #define ONCHIP_ADDR1(x) ((u32)(((u64)x >> 12) & 0xFFFFFFFF))
6486 #define ONCHIP_ADDR2(x) ((u32)((1 << 20) | ((u64)x >> 44)))
6487 #define PXP_ONE_ILT(x) (((x) << 10) | x)
6488 #define PXP_ILT_RANGE(f, l) (((l) << 10) | f)
6490 #define CNIC_ILT_LINES 0
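/* Sketch of how the two ONCHIP_ADDR halves recombine (hypothetical
 * helper, not used by the driver): ADDR1 carries the low 32 bits of
 * the 4K-page number and ADDR2 the remaining bits plus the valid
 * flag, which lands at bit 52 of the 64-bit value (the "53rd bit"
 * of the comment above).
 */
#if 0
static inline u64 onchip_addr_recombine_example(u64 phys)
{
	u64 lo = ONCHIP_ADDR1(phys);	/* page number, low 32 bits */
	u64 hi = ONCHIP_ADDR2(phys);	/* valid | page number >> 32 */

	return (hi << 32) | lo;		/* == (1ULL << 52) | (phys >> 12) */
}
#endif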
6492 static void bnx2x_ilt_wr(struct bnx2x *bp, u32 index, dma_addr_t addr)
6496 if (CHIP_IS_E1H(bp))
6497 reg = PXP2_REG_RQ_ONCHIP_AT_B0 + index*8;
6499 reg = PXP2_REG_RQ_ONCHIP_AT + index*8;
6501 bnx2x_wb_wr(bp, reg, ONCHIP_ADDR1(addr), ONCHIP_ADDR2(addr));
6504 static int bnx2x_init_func(struct bnx2x *bp)
6506 int port = BP_PORT(bp);
6507 int func = BP_FUNC(bp);
6511 DP(BNX2X_MSG_MCP, "starting func init func %x\n", func);
6513 /* set MSI reconfigure capability */
6514 addr = (port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0);
6515 val = REG_RD(bp, addr);
6516 val |= HC_CONFIG_0_REG_MSI_ATTN_EN_0;
6517 REG_WR(bp, addr, val);
6519 i = FUNC_ILT_BASE(func);
6521 bnx2x_ilt_wr(bp, i, bnx2x_sp_mapping(bp, context));
6522 if (CHIP_IS_E1H(bp)) {
6523 REG_WR(bp, PXP2_REG_RQ_CDU_FIRST_ILT, i);
6524 REG_WR(bp, PXP2_REG_RQ_CDU_LAST_ILT, i + CNIC_ILT_LINES);
6526 REG_WR(bp, PXP2_REG_PSWRQ_CDU0_L2P + func*4,
6527 PXP_ILT_RANGE(i, i + CNIC_ILT_LINES));
6530 if (CHIP_IS_E1H(bp)) {
6531 bnx2x_init_block(bp, MISC_BLOCK, FUNC0_STAGE + func);
6532 bnx2x_init_block(bp, TCM_BLOCK, FUNC0_STAGE + func);
6533 bnx2x_init_block(bp, UCM_BLOCK, FUNC0_STAGE + func);
6534 bnx2x_init_block(bp, CCM_BLOCK, FUNC0_STAGE + func);
6535 bnx2x_init_block(bp, XCM_BLOCK, FUNC0_STAGE + func);
6536 bnx2x_init_block(bp, TSEM_BLOCK, FUNC0_STAGE + func);
6537 bnx2x_init_block(bp, USEM_BLOCK, FUNC0_STAGE + func);
6538 bnx2x_init_block(bp, CSEM_BLOCK, FUNC0_STAGE + func);
6539 bnx2x_init_block(bp, XSEM_BLOCK, FUNC0_STAGE + func);
6541 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
6542 REG_WR(bp, NIG_REG_LLH0_FUNC_VLAN_ID + port*8, bp->e1hov);
6545 /* HC init per function */
6546 if (CHIP_IS_E1H(bp)) {
6547 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
6549 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
6550 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
6552 bnx2x_init_block(bp, HC_BLOCK, FUNC0_STAGE + func);
6554 /* Reset PCIE errors for debug */
6555 REG_WR(bp, 0x2114, 0xffffffff);
6556 REG_WR(bp, 0x2120, 0xffffffff);
6561 static int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
6565 DP(BNX2X_MSG_MCP, "function %d load_code %x\n",
6566 BP_FUNC(bp), load_code);
6569 mutex_init(&bp->dmae_mutex);
6570 rc = bnx2x_gunzip_init(bp);
6574 switch (load_code) {
6575 case FW_MSG_CODE_DRV_LOAD_COMMON:
6576 rc = bnx2x_init_common(bp);
6581 case FW_MSG_CODE_DRV_LOAD_PORT:
6583 rc = bnx2x_init_port(bp);
6588 case FW_MSG_CODE_DRV_LOAD_FUNCTION:
6590 rc = bnx2x_init_func(bp);
6596 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
6600 if (!BP_NOMCP(bp)) {
6601 int func = BP_FUNC(bp);
6603 bp->fw_drv_pulse_wr_seq =
6604 (SHMEM_RD(bp, func_mb[func].drv_pulse_mb) &
6605 DRV_PULSE_SEQ_MASK);
6606 DP(BNX2X_MSG_MCP, "drv_pulse 0x%x\n", bp->fw_drv_pulse_wr_seq);
6609 /* this needs to be done before gunzip end */
6610 bnx2x_zero_def_sb(bp);
6611 for_each_queue(bp, i)
6612 bnx2x_zero_sb(bp, BP_L_ID(bp) + i);
6615 bnx2x_gunzip_end(bp);
6620 static void bnx2x_free_mem(struct bnx2x *bp)
6623 #define BNX2X_PCI_FREE(x, y, size) \
6626 pci_free_consistent(bp->pdev, size, x, y); \
6632 #define BNX2X_FREE(x) \
6644 for_each_queue(bp, i) {
6647 BNX2X_PCI_FREE(bnx2x_fp(bp, i, status_blk),
6648 bnx2x_fp(bp, i, status_blk_mapping),
6649 sizeof(struct host_status_block));
6652 for_each_rx_queue(bp, i) {
6654 /* fastpath rx rings: rx_buf rx_desc rx_comp */
6655 BNX2X_FREE(bnx2x_fp(bp, i, rx_buf_ring));
6656 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_desc_ring),
6657 bnx2x_fp(bp, i, rx_desc_mapping),
6658 sizeof(struct eth_rx_bd) * NUM_RX_BD);
6660 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_comp_ring),
6661 bnx2x_fp(bp, i, rx_comp_mapping),
6662 sizeof(struct eth_fast_path_rx_cqe) *
6666 BNX2X_FREE(bnx2x_fp(bp, i, rx_page_ring));
6667 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_sge_ring),
6668 bnx2x_fp(bp, i, rx_sge_mapping),
6669 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
6672 for_each_tx_queue(bp, i) {
6674 /* fastpath tx rings: tx_buf tx_desc */
6675 BNX2X_FREE(bnx2x_fp(bp, i, tx_buf_ring));
6676 BNX2X_PCI_FREE(bnx2x_fp(bp, i, tx_desc_ring),
6677 bnx2x_fp(bp, i, tx_desc_mapping),
6678 sizeof(union eth_tx_bd_types) * NUM_TX_BD);
6680 /* end of fastpath */
6682 BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping,
6683 sizeof(struct host_def_status_block));
6685 BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping,
6686 sizeof(struct bnx2x_slowpath));
6689 BNX2X_PCI_FREE(bp->t1, bp->t1_mapping, 64*1024);
6690 BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, 16*1024);
6691 BNX2X_PCI_FREE(bp->timers, bp->timers_mapping, 8*1024);
6692 BNX2X_PCI_FREE(bp->qm, bp->qm_mapping, 128*1024);
6694 BNX2X_PCI_FREE(bp->spq, bp->spq_mapping, BCM_PAGE_SIZE);
6696 #undef BNX2X_PCI_FREE
6700 static int bnx2x_alloc_mem(struct bnx2x *bp)
6703 #define BNX2X_PCI_ALLOC(x, y, size) \
6705 x = pci_alloc_consistent(bp->pdev, size, y); \
6707 goto alloc_mem_err; \
6708 memset(x, 0, size); \
6711 #define BNX2X_ALLOC(x, size) \
6713 x = vmalloc(size); \
6715 goto alloc_mem_err; \
6716 memset(x, 0, size); \
6723 for_each_queue(bp, i) {
6724 bnx2x_fp(bp, i, bp) = bp;
6727 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, status_blk),
6728 &bnx2x_fp(bp, i, status_blk_mapping),
6729 sizeof(struct host_status_block));
6732 for_each_rx_queue(bp, i) {
6734 /* fastpath rx rings: rx_buf rx_desc rx_comp */
6735 BNX2X_ALLOC(bnx2x_fp(bp, i, rx_buf_ring),
6736 sizeof(struct sw_rx_bd) * NUM_RX_BD);
6737 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_desc_ring),
6738 &bnx2x_fp(bp, i, rx_desc_mapping),
6739 sizeof(struct eth_rx_bd) * NUM_RX_BD);
6741 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_comp_ring),
6742 &bnx2x_fp(bp, i, rx_comp_mapping),
6743 sizeof(struct eth_fast_path_rx_cqe) *
6747 BNX2X_ALLOC(bnx2x_fp(bp, i, rx_page_ring),
6748 sizeof(struct sw_rx_page) * NUM_RX_SGE);
6749 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_sge_ring),
6750 &bnx2x_fp(bp, i, rx_sge_mapping),
6751 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
6754 for_each_tx_queue(bp, i) {
6756 /* fastpath tx rings: tx_buf tx_desc */
6757 BNX2X_ALLOC(bnx2x_fp(bp, i, tx_buf_ring),
6758 sizeof(struct sw_tx_bd) * NUM_TX_BD);
6759 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, tx_desc_ring),
6760 &bnx2x_fp(bp, i, tx_desc_mapping),
6761 sizeof(union eth_tx_bd_types) * NUM_TX_BD);
6763 /* end of fastpath */
6765 BNX2X_PCI_ALLOC(bp->def_status_blk, &bp->def_status_blk_mapping,
6766 sizeof(struct host_def_status_block));
6768 BNX2X_PCI_ALLOC(bp->slowpath, &bp->slowpath_mapping,
6769 sizeof(struct bnx2x_slowpath));
6772 BNX2X_PCI_ALLOC(bp->t1, &bp->t1_mapping, 64*1024);
6775 for (i = 0; i < 64*1024; i += 64) {
6776 *(u64 *)((char *)bp->t1 + i + 56) = 0x0UL;
6777 *(u64 *)((char *)bp->t1 + i + 48) = 0x0UL; /* keep the store 8-byte aligned */
6780 /* allocate searcher T2 table
6781 we allocate 1/4 of the T1 allocation for T2
6782 (which is not entered into the ILT) */
6783 BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, 16*1024);
6786 for (i = 0; i < 16*1024; i += 64)
6787 *(u64 *)((char *)bp->t2 + i + 56) = bp->t2_mapping + i + 64;
6789 /* now fixup the last line in the block to point to the next block */
6790 *(u64 *)((char *)bp->t2 + 1024*16-8) = bp->t2_mapping;
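/* The loop and fixup above link the 16K T2 block into a circular free
 * list: the last 8 bytes of each 64-byte line hold the DMA address of
 * the next line, and the final line wraps back to the start.  An
 * illustrative host-side walk (hypothetical helper, never called):
 */
#if 0
static void bnx2x_t2_links_dump_example(struct bnx2x *bp)
{
	int i;

	for (i = 0; i < 16*1024; i += 64) {
		u64 next = *(u64 *)((char *)bp->t2 + i + 56);

		/* expect bp->t2_mapping + i + 64, or bp->t2_mapping
		 * for the last line */
		printk(KERN_DEBUG PFX "T2 line %d -> 0x%llx\n",
		       i/64, (unsigned long long)next);
	}
}
#endif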
6792 /* Timer block array (MAX_CONN*8) phys uncached for now 1024 conns */
6793 BNX2X_PCI_ALLOC(bp->timers, &bp->timers_mapping, 8*1024);
6795 /* QM queues (128*MAX_CONN) */
6796 BNX2X_PCI_ALLOC(bp->qm, &bp->qm_mapping, 128*1024);
6799 /* Slow path ring */
6800 BNX2X_PCI_ALLOC(bp->spq, &bp->spq_mapping, BCM_PAGE_SIZE);
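/* The BNX2X_PCI_ALLOC()/BNX2X_ALLOC() macros used throughout this
 * function implement a single-exit error pattern: every failed
 * allocation jumps to alloc_mem_err, where bnx2x_free_mem() releases
 * whatever was already set up.  A minimal standalone sketch of the
 * same idea, with hypothetical names (vfree(NULL) is a no-op):
 */
#if 0
static int two_buffers_alloc_example(void **a, void **b, unsigned long size)
{
	*a = vmalloc(size);
	if (*a == NULL)
		goto err;
	*b = vmalloc(size);
	if (*b == NULL)
		goto err;
	memset(*a, 0, size);
	memset(*b, 0, size);
	return 0;

err:
	vfree(*a);
	*a = NULL;
	return -ENOMEM;
}
#endif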
6808 #undef BNX2X_PCI_ALLOC
6812 static void bnx2x_free_tx_skbs(struct bnx2x *bp)
6816 for_each_tx_queue(bp, i) {
6817 struct bnx2x_fastpath *fp = &bp->fp[i];
6819 u16 bd_cons = fp->tx_bd_cons;
6820 u16 sw_prod = fp->tx_pkt_prod;
6821 u16 sw_cons = fp->tx_pkt_cons;
6823 while (sw_cons != sw_prod) {
6824 bd_cons = bnx2x_free_tx_pkt(bp, fp, TX_BD(sw_cons));
6830 static void bnx2x_free_rx_skbs(struct bnx2x *bp)
6834 for_each_rx_queue(bp, j) {
6835 struct bnx2x_fastpath *fp = &bp->fp[j];
6837 for (i = 0; i < NUM_RX_BD; i++) {
6838 struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
6839 struct sk_buff *skb = rx_buf->skb;
6844 pci_unmap_single(bp->pdev,
6845 pci_unmap_addr(rx_buf, mapping),
6846 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
6851 if (!fp->disable_tpa)
6852 bnx2x_free_tpa_pool(bp, fp, CHIP_IS_E1(bp) ?
6853 ETH_MAX_AGGREGATION_QUEUES_E1 :
6854 ETH_MAX_AGGREGATION_QUEUES_E1H);
6858 static void bnx2x_free_skbs(struct bnx2x *bp)
6860 bnx2x_free_tx_skbs(bp);
6861 bnx2x_free_rx_skbs(bp);
6864 static void bnx2x_free_msix_irqs(struct bnx2x *bp)
6868 free_irq(bp->msix_table[0].vector, bp->dev);
6869 DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
6870 bp->msix_table[0].vector);
6872 for_each_queue(bp, i) {
6873 DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq "
6874 "state %x\n", i, bp->msix_table[i + offset].vector,
6875 bnx2x_fp(bp, i, state));
6877 free_irq(bp->msix_table[i + offset].vector, &bp->fp[i]);
6881 static void bnx2x_free_irq(struct bnx2x *bp)
6883 if (bp->flags & USING_MSIX_FLAG) {
6884 bnx2x_free_msix_irqs(bp);
6885 pci_disable_msix(bp->pdev);
6886 bp->flags &= ~USING_MSIX_FLAG;
6888 } else if (bp->flags & USING_MSI_FLAG) {
6889 free_irq(bp->pdev->irq, bp->dev);
6890 pci_disable_msi(bp->pdev);
6891 bp->flags &= ~USING_MSI_FLAG;
6894 free_irq(bp->pdev->irq, bp->dev);
6897 static int bnx2x_enable_msix(struct bnx2x *bp)
6899 int i, rc, offset = 1;
6902 bp->msix_table[0].entry = igu_vec;
6903 DP(NETIF_MSG_IFUP, "msix_table[0].entry = %d (slowpath)\n", igu_vec);
6905 for_each_queue(bp, i) {
6906 igu_vec = BP_L_ID(bp) + offset + i;
6907 bp->msix_table[i + offset].entry = igu_vec;
6908 DP(NETIF_MSG_IFUP, "msix_table[%d].entry = %d "
6909 "(fastpath #%u)\n", i + offset, igu_vec, i);
6912 rc = pci_enable_msix(bp->pdev, &bp->msix_table[0],
6913 BNX2X_NUM_QUEUES(bp) + offset);
6915 DP(NETIF_MSG_IFUP, "MSI-X is not attainable rc %d\n", rc);
6919 bp->flags |= USING_MSIX_FLAG;
6924 static int bnx2x_req_msix_irqs(struct bnx2x *bp)
6926 int i, rc, offset = 1;
6928 rc = request_irq(bp->msix_table[0].vector, bnx2x_msix_sp_int, 0,
6929 bp->dev->name, bp->dev);
6931 BNX2X_ERR("request sp irq failed\n");
6935 for_each_queue(bp, i) {
6936 struct bnx2x_fastpath *fp = &bp->fp[i];
6938 if (i < bp->num_rx_queues)
6939 sprintf(fp->name, "%s-rx-%d", bp->dev->name, i);
6941 sprintf(fp->name, "%s-tx-%d",
6942 bp->dev->name, i - bp->num_rx_queues);
6944 rc = request_irq(bp->msix_table[i + offset].vector,
6945 bnx2x_msix_fp_int, 0, fp->name, fp);
6947 BNX2X_ERR("request fp #%d irq failed rc %d\n", i, rc);
6948 bnx2x_free_msix_irqs(bp);
6952 fp->state = BNX2X_FP_STATE_IRQ;
6955 i = BNX2X_NUM_QUEUES(bp);
6956 printk(KERN_INFO PFX "%s: using MSI-X IRQs: sp %d fp[%d] %d"
6958 bp->dev->name, bp->msix_table[0].vector,
6959 0, bp->msix_table[offset].vector,
6960 i - 1, bp->msix_table[offset + i - 1].vector);
6965 static int bnx2x_enable_msi(struct bnx2x *bp)
6969 rc = pci_enable_msi(bp->pdev);
6971 DP(NETIF_MSG_IFUP, "MSI is not attainable\n");
6974 bp->flags |= USING_MSI_FLAG;
6979 static int bnx2x_req_irq(struct bnx2x *bp)
6981 unsigned long flags;
6984 if (bp->flags & USING_MSI_FLAG)
6987 flags = IRQF_SHARED;
6989 rc = request_irq(bp->pdev->irq, bnx2x_interrupt, flags,
6990 bp->dev->name, bp->dev);
6992 bnx2x_fp(bp, 0, state) = BNX2X_FP_STATE_IRQ;
6997 static void bnx2x_napi_enable(struct bnx2x *bp)
7001 for_each_rx_queue(bp, i)
7002 napi_enable(&bnx2x_fp(bp, i, napi));
7005 static void bnx2x_napi_disable(struct bnx2x *bp)
7009 for_each_rx_queue(bp, i)
7010 napi_disable(&bnx2x_fp(bp, i, napi));
7013 static void bnx2x_netif_start(struct bnx2x *bp)
7017 intr_sem = atomic_dec_and_test(&bp->intr_sem);
7018 smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */
7021 if (netif_running(bp->dev)) {
7022 bnx2x_napi_enable(bp);
7023 bnx2x_int_enable(bp);
7024 if (bp->state == BNX2X_STATE_OPEN)
7025 netif_tx_wake_all_queues(bp->dev);
7030 static void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
7032 bnx2x_int_disable_sync(bp, disable_hw);
7033 bnx2x_napi_disable(bp);
7034 netif_tx_disable(bp->dev);
7035 bp->dev->trans_start = jiffies; /* prevent tx timeout */
7039 * Init service functions
7043 * Sets a MAC in a CAM for a few L2 Clients for E1 chip
7045 * @param bp driver descriptor
7046 * @param set set or clear an entry (1 or 0)
7047 * @param mac pointer to a buffer containing a MAC
7048 * @param cl_bit_vec bit vector of clients to register a MAC for
7049 * @param cam_offset offset in a CAM to use
7050 * @param with_bcast set broadcast MAC as well
7052 static void bnx2x_set_mac_addr_e1_gen(struct bnx2x *bp, int set, u8 *mac,
7053 u32 cl_bit_vec, u8 cam_offset,
7056 struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
7057 int port = BP_PORT(bp);
7060 * unicasts 0-31:port0 32-63:port1
7061 * multicast 64-127:port0 128-191:port1
7063 config->hdr.length = 1 + (with_bcast ? 1 : 0);
7064 config->hdr.offset = cam_offset;
7065 config->hdr.client_id = 0xff;
7066 config->hdr.reserved1 = 0;
7069 config->config_table[0].cam_entry.msb_mac_addr =
7070 swab16(*(u16 *)&mac[0]);
7071 config->config_table[0].cam_entry.middle_mac_addr =
7072 swab16(*(u16 *)&mac[2]);
7073 config->config_table[0].cam_entry.lsb_mac_addr =
7074 swab16(*(u16 *)&mac[4]);
7075 config->config_table[0].cam_entry.flags = cpu_to_le16(port);
7077 config->config_table[0].target_table_entry.flags = 0;
7079 CAM_INVALIDATE(config->config_table[0]);
7080 config->config_table[0].target_table_entry.clients_bit_vector =
7081 cpu_to_le32(cl_bit_vec);
7082 config->config_table[0].target_table_entry.vlan_id = 0;
7084 DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x)\n",
7085 (set ? "setting" : "clearing"),
7086 config->config_table[0].cam_entry.msb_mac_addr,
7087 config->config_table[0].cam_entry.middle_mac_addr,
7088 config->config_table[0].cam_entry.lsb_mac_addr);
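/* The CAM keeps the MAC as three byte-swapped 16-bit words, which is
 * what the swab16(*(u16 *)&mac[k]) loads above produce on a
 * little-endian host.  Worked example (a sketch) for 00:10:18:ab:cd:ef:
 * msb = swab16(0x1000) = 0x0010, middle = swab16(0xab18) = 0x18ab,
 * lsb = swab16(0xefcd) = 0xcdef. */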
7092 config->config_table[1].cam_entry.msb_mac_addr =
7093 cpu_to_le16(0xffff);
7094 config->config_table[1].cam_entry.middle_mac_addr =
7095 cpu_to_le16(0xffff);
7096 config->config_table[1].cam_entry.lsb_mac_addr =
7097 cpu_to_le16(0xffff);
7098 config->config_table[1].cam_entry.flags = cpu_to_le16(port);
7100 config->config_table[1].target_table_entry.flags =
7101 TSTORM_CAM_TARGET_TABLE_ENTRY_BROADCAST;
7103 CAM_INVALIDATE(config->config_table[1]);
7104 config->config_table[1].target_table_entry.clients_bit_vector =
7105 cpu_to_le32(cl_bit_vec);
7106 config->config_table[1].target_table_entry.vlan_id = 0;
7109 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
7110 U64_HI(bnx2x_sp_mapping(bp, mac_config)),
7111 U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
7115 * Sets a MAC in a CAM for a few L2 Clients for E1H chip
7117 * @param bp driver descriptor
7118 * @param set set or clear an entry (1 or 0)
7119 * @param mac pointer to a buffer containing a MAC
7120 * @param cl_bit_vec bit vector of clients to register a MAC for
7121 * @param cam_offset offset in a CAM to use
7123 static void bnx2x_set_mac_addr_e1h_gen(struct bnx2x *bp, int set, u8 *mac,
7124 u32 cl_bit_vec, u8 cam_offset)
7126 struct mac_configuration_cmd_e1h *config =
7127 (struct mac_configuration_cmd_e1h *)bnx2x_sp(bp, mac_config);
7129 config->hdr.length = 1;
7130 config->hdr.offset = cam_offset;
7131 config->hdr.client_id = 0xff;
7132 config->hdr.reserved1 = 0;
7135 config->config_table[0].msb_mac_addr =
7136 swab16(*(u16 *)&mac[0]);
7137 config->config_table[0].middle_mac_addr =
7138 swab16(*(u16 *)&mac[2]);
7139 config->config_table[0].lsb_mac_addr =
7140 swab16(*(u16 *)&mac[4]);
7141 config->config_table[0].clients_bit_vector =
7142 cpu_to_le32(cl_bit_vec);
7143 config->config_table[0].vlan_id = 0;
7144 config->config_table[0].e1hov_id = cpu_to_le16(bp->e1hov);
7146 config->config_table[0].flags = BP_PORT(bp);
7148 config->config_table[0].flags =
7149 MAC_CONFIGURATION_ENTRY_E1H_ACTION_TYPE;
7151 DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x) E1HOV %d CLID mask %d\n",
7152 (set ? "setting" : "clearing"),
7153 config->config_table[0].msb_mac_addr,
7154 config->config_table[0].middle_mac_addr,
7155 config->config_table[0].lsb_mac_addr, bp->e1hov, cl_bit_vec);
7157 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
7158 U64_HI(bnx2x_sp_mapping(bp, mac_config)),
7159 U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
7162 static int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
7163 int *state_p, int poll)
7165 /* can take a while if any port is running */
7168 DP(NETIF_MSG_IFUP, "%s for state to become %x on IDX [%d]\n",
7169 poll ? "polling" : "waiting", state, idx);
7174 bnx2x_rx_int(bp->fp, 10);
7175 /* if the index is different from 0
7176 * the reply for some commands will
7177 * arrive on the non-default queue
7180 bnx2x_rx_int(&bp->fp[idx], 10);
7183 mb(); /* state is changed by bnx2x_sp_event() */
7184 if (*state_p == state) {
7185 #ifdef BNX2X_STOP_ON_ERROR
7186 DP(NETIF_MSG_IFUP, "exit (cnt %d)\n", 5000 - cnt);
7198 BNX2X_ERR("timeout %s for state %x on IDX [%d]\n",
7199 poll ? "polling" : "waiting", state, idx);
7200 #ifdef BNX2X_STOP_ON_ERROR
7207 static void bnx2x_set_eth_mac_addr_e1h(struct bnx2x *bp, int set)
7209 bp->set_mac_pending++;
7212 bnx2x_set_mac_addr_e1h_gen(bp, set, bp->dev->dev_addr,
7213 (1 << bp->fp->cl_id), BP_FUNC(bp));
7215 /* Wait for a completion */
7216 bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending, set ? 0 : 1);
7219 static void bnx2x_set_eth_mac_addr_e1(struct bnx2x *bp, int set)
7221 bp->set_mac_pending++;
7224 bnx2x_set_mac_addr_e1_gen(bp, set, bp->dev->dev_addr,
7225 (1 << bp->fp->cl_id), (BP_PORT(bp) ? 32 : 0),
7228 /* Wait for a completion */
7229 bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending, set ? 0 : 1);
7232 static int bnx2x_setup_leading(struct bnx2x *bp)
7236 /* reset IGU state */
7237 bnx2x_ack_sb(bp, bp->fp[0].sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
7240 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_SETUP, 0, 0, 0, 0);
7242 /* Wait for completion */
7243 rc = bnx2x_wait_ramrod(bp, BNX2X_STATE_OPEN, 0, &(bp->state), 0);
7248 static int bnx2x_setup_multi(struct bnx2x *bp, int index)
7250 struct bnx2x_fastpath *fp = &bp->fp[index];
7252 /* reset IGU state */
7253 bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
7256 fp->state = BNX2X_FP_STATE_OPENING;
7257 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CLIENT_SETUP, index, 0,
7260 /* Wait for completion */
7261 return bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_OPEN, index,
7265 static int bnx2x_poll(struct napi_struct *napi, int budget);
7267 static void bnx2x_set_int_mode_msix(struct bnx2x *bp, int *num_rx_queues_out,
7268 int *num_tx_queues_out)
7270 int _num_rx_queues = 0, _num_tx_queues = 0;
7272 switch (bp->multi_mode) {
7273 case ETH_RSS_MODE_DISABLED:
7278 case ETH_RSS_MODE_REGULAR:
7280 _num_rx_queues = min_t(u32, num_rx_queues,
7281 BNX2X_MAX_QUEUES(bp));
7283 _num_rx_queues = min_t(u32, num_online_cpus(),
7284 BNX2X_MAX_QUEUES(bp));
7287 _num_tx_queues = min_t(u32, num_tx_queues,
7288 BNX2X_MAX_QUEUES(bp));
7290 _num_tx_queues = min_t(u32, num_online_cpus(),
7291 BNX2X_MAX_QUEUES(bp));
7293 /* There must not be more Tx queues than Rx queues */
7294 if (_num_tx_queues > _num_rx_queues) {
7295 BNX2X_ERR("number of tx queues (%d) > "
7296 "number of rx queues (%d)"
7297 " defaulting to %d\n",
7298 _num_tx_queues, _num_rx_queues,
7300 _num_tx_queues = _num_rx_queues;
7311 *num_rx_queues_out = _num_rx_queues;
7312 *num_tx_queues_out = _num_tx_queues;
7315 static int bnx2x_set_int_mode(struct bnx2x *bp)
7322 bp->num_rx_queues = 1;
7323 bp->num_tx_queues = 1;
7324 DP(NETIF_MSG_IFUP, "set number of queues to 1\n");
7329 /* Set interrupt mode according to bp->multi_mode value */
7330 bnx2x_set_int_mode_msix(bp, &bp->num_rx_queues,
7331 &bp->num_tx_queues);
7333 DP(NETIF_MSG_IFUP, "set number of queues to: rx %d tx %d\n",
7334 bp->num_rx_queues, bp->num_tx_queues);
7336 /* if we can't use MSI-X we only need one fp,
7337 * so try to enable MSI-X with the requested number of fp's
7338 * and fall back to MSI or legacy INTx with one fp
7340 rc = bnx2x_enable_msix(bp);
7342 /* failed to enable MSI-X */
7344 BNX2X_ERR("Multi requested but failed to "
7345 "enable MSI-X (rx %d tx %d), "
7346 "set number of queues to 1\n",
7347 bp->num_rx_queues, bp->num_tx_queues);
7348 bp->num_rx_queues = 1;
7349 bp->num_tx_queues = 1;
7353 bp->dev->real_num_tx_queues = bp->num_tx_queues;
7358 /* must be called with rtnl_lock */
7359 static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
7364 #ifdef BNX2X_STOP_ON_ERROR
7365 if (unlikely(bp->panic))
7369 bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;
7371 rc = bnx2x_set_int_mode(bp);
7373 if (bnx2x_alloc_mem(bp))
7376 for_each_rx_queue(bp, i)
7377 bnx2x_fp(bp, i, disable_tpa) =
7378 ((bp->flags & TPA_ENABLE_FLAG) == 0);
7380 for_each_rx_queue(bp, i)
7381 netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
7384 bnx2x_napi_enable(bp);
7386 if (bp->flags & USING_MSIX_FLAG) {
7387 rc = bnx2x_req_msix_irqs(bp);
7389 pci_disable_msix(bp->pdev);
7393 /* Fall back to INTx if we failed to enable MSI-X due to lack of
7394 memory (in bnx2x_set_int_mode()) */
7395 if ((rc != -ENOMEM) && (int_mode != INT_MODE_INTx))
7396 bnx2x_enable_msi(bp);
7398 rc = bnx2x_req_irq(bp);
7400 BNX2X_ERR("IRQ request failed rc %d, aborting\n", rc);
7401 if (bp->flags & USING_MSI_FLAG)
7402 pci_disable_msi(bp->pdev);
7405 if (bp->flags & USING_MSI_FLAG) {
7406 bp->dev->irq = bp->pdev->irq;
7407 printk(KERN_INFO PFX "%s: using MSI IRQ %d\n",
7408 bp->dev->name, bp->pdev->irq);
7412 /* Send LOAD_REQUEST command to MCP.
7413 The reply indicates the type of LOAD command:
7414 if this is the first port to be initialized,
7415 common blocks should be initialized, otherwise - not
7417 if (!BP_NOMCP(bp)) {
7418 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ);
7420 BNX2X_ERR("MCP response failure, aborting\n");
7424 if (load_code == FW_MSG_CODE_DRV_LOAD_REFUSED) {
7425 rc = -EBUSY; /* other port in diagnostic mode */
7430 int port = BP_PORT(bp);
7432 DP(NETIF_MSG_IFUP, "NO MCP - load counts %d, %d, %d\n",
7433 load_count[0], load_count[1], load_count[2]);
7435 load_count[1 + port]++;
7436 DP(NETIF_MSG_IFUP, "NO MCP - new load counts %d, %d, %d\n",
7437 load_count[0], load_count[1], load_count[2]);
7438 if (load_count[0] == 1)
7439 load_code = FW_MSG_CODE_DRV_LOAD_COMMON;
7440 else if (load_count[1 + port] == 1)
7441 load_code = FW_MSG_CODE_DRV_LOAD_PORT;
7443 load_code = FW_MSG_CODE_DRV_LOAD_FUNCTION;
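/* load_count[] bookkeeping in the no-MCP case: index 0 counts all
 * functions, indices 1 and 2 count per port.  Worked sequence (a
 * sketch): first load on port 1 -> {1, 0, 1} -> LOAD_COMMON; next load
 * on port 0 -> {2, 1, 1} -> LOAD_PORT; another load on port 0 ->
 * {3, 2, 1} -> LOAD_FUNCTION. */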
7446 if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
7447 (load_code == FW_MSG_CODE_DRV_LOAD_PORT))
7451 DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
7454 rc = bnx2x_init_hw(bp, load_code);
7456 BNX2X_ERR("HW init failed, aborting\n");
7460 /* Setup NIC internals and enable interrupts */
7461 bnx2x_nic_init(bp, load_code);
7463 if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) &&
7464 (bp->common.shmem2_base))
7465 SHMEM2_WR(bp, dcc_support,
7466 (SHMEM_DCC_SUPPORT_DISABLE_ENABLE_PF_TLV |
7467 SHMEM_DCC_SUPPORT_BANDWIDTH_ALLOCATION_TLV));
7469 /* Send LOAD_DONE command to MCP */
7470 if (!BP_NOMCP(bp)) {
7471 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE);
7473 BNX2X_ERR("MCP response failure, aborting\n");
7479 bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;
7481 rc = bnx2x_setup_leading(bp);
7483 BNX2X_ERR("Setup leading failed!\n");
7484 #ifndef BNX2X_STOP_ON_ERROR
7492 if (CHIP_IS_E1H(bp))
7493 if (bp->mf_config & FUNC_MF_CFG_FUNC_DISABLED) {
7494 DP(NETIF_MSG_IFUP, "mf_cfg function disabled\n");
7495 bp->state = BNX2X_STATE_DISABLED;
7498 if (bp->state == BNX2X_STATE_OPEN) {
7499 for_each_nondefault_queue(bp, i) {
7500 rc = bnx2x_setup_multi(bp, i);
7506 bnx2x_set_eth_mac_addr_e1(bp, 1);
7508 bnx2x_set_eth_mac_addr_e1h(bp, 1);
7512 bnx2x_initial_phy_init(bp, load_mode);
7514 /* Start fast path */
7515 switch (load_mode) {
7517 if (bp->state == BNX2X_STATE_OPEN) {
7518 /* Tx queues should only be re-enabled */
7519 netif_tx_wake_all_queues(bp->dev);
7521 /* Initialize the receive filter. */
7522 bnx2x_set_rx_mode(bp->dev);
7526 netif_tx_start_all_queues(bp->dev);
7527 if (bp->state != BNX2X_STATE_OPEN)
7528 netif_tx_disable(bp->dev);
7529 /* Initialize the receive filter. */
7530 bnx2x_set_rx_mode(bp->dev);
7534 /* Initialize the receive filter. */
7535 bnx2x_set_rx_mode(bp->dev);
7536 bp->state = BNX2X_STATE_DIAG;
7544 bnx2x__link_status_update(bp);
7546 /* start the timer */
7547 mod_timer(&bp->timer, jiffies + bp->current_interval);
7553 bnx2x_int_disable_sync(bp, 1);
7554 if (!BP_NOMCP(bp)) {
7555 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP);
7556 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
7559 /* Free SKBs, SGEs, TPA pool and driver internals */
7560 bnx2x_free_skbs(bp);
7561 for_each_rx_queue(bp, i)
7562 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
7567 bnx2x_napi_disable(bp);
7568 for_each_rx_queue(bp, i)
7569 netif_napi_del(&bnx2x_fp(bp, i, napi));
7575 static int bnx2x_stop_multi(struct bnx2x *bp, int index)
7577 struct bnx2x_fastpath *fp = &bp->fp[index];
7580 /* halt the connection */
7581 fp->state = BNX2X_FP_STATE_HALTING;
7582 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, index, 0, fp->cl_id, 0);
7584 /* Wait for completion */
7585 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, index,
7587 if (rc) /* timeout */
7590 /* delete cfc entry */
7591 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CFC_DEL, index, 0, 0, 1);
7593 /* Wait for completion */
7594 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_CLOSED, index,
7599 static int bnx2x_stop_leading(struct bnx2x *bp)
7601 __le16 dsb_sp_prod_idx;
7602 /* if the other port is handling traffic,
7603 this can take a lot of time */
7609 /* Send HALT ramrod */
7610 bp->fp[0].state = BNX2X_FP_STATE_HALTING;
7611 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, 0, 0, bp->fp->cl_id, 0);
7613 /* Wait for completion */
7614 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, 0,
7615 &(bp->fp[0].state), 1);
7616 if (rc) /* timeout */
7619 dsb_sp_prod_idx = *bp->dsb_sp_prod;
7621 /* Send PORT_DELETE ramrod */
7622 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_DEL, 0, 0, 0, 1);
7624 /* Wait for completion to arrive on default status block
7625 we are going to reset the chip anyway
7626 so there is not much to do if this times out
7628 while (dsb_sp_prod_idx == *bp->dsb_sp_prod) {
7630 DP(NETIF_MSG_IFDOWN, "timeout waiting for port del "
7631 "dsb_sp_prod 0x%x != dsb_sp_prod_idx 0x%x\n",
7632 *bp->dsb_sp_prod, dsb_sp_prod_idx);
7633 #ifdef BNX2X_STOP_ON_ERROR
7641 rmb(); /* Refresh the dsb_sp_prod */
7643 bp->state = BNX2X_STATE_CLOSING_WAIT4_UNLOAD;
7644 bp->fp[0].state = BNX2X_FP_STATE_CLOSED;
7649 static void bnx2x_reset_func(struct bnx2x *bp)
7651 int port = BP_PORT(bp);
7652 int func = BP_FUNC(bp);
7656 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
7657 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
7660 base = FUNC_ILT_BASE(func);
7661 for (i = base; i < base + ILT_PER_FUNC; i++)
7662 bnx2x_ilt_wr(bp, i, 0);
7665 static void bnx2x_reset_port(struct bnx2x *bp)
7667 int port = BP_PORT(bp);
7670 REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
7672 /* Do not rcv packets to BRB */
7673 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK + port*4, 0x0);
7674 /* Do not direct rcv packets that are not for MCP to the BRB */
7675 REG_WR(bp, (port ? NIG_REG_LLH1_BRB1_NOT_MCP :
7676 NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
7679 REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, 0);
7682 /* Check for BRB port occupancy */
7683 val = REG_RD(bp, BRB1_REG_PORT_NUM_OCC_BLOCKS_0 + port*4);
7685 DP(NETIF_MSG_IFDOWN,
7686 "BRB1 is not empty %d blocks are occupied\n", val);
7688 /* TODO: Close Doorbell port? */
7691 static void bnx2x_reset_chip(struct bnx2x *bp, u32 reset_code)
7693 DP(BNX2X_MSG_MCP, "function %d reset_code %x\n",
7694 BP_FUNC(bp), reset_code);
7696 switch (reset_code) {
7697 case FW_MSG_CODE_DRV_UNLOAD_COMMON:
7698 bnx2x_reset_port(bp);
7699 bnx2x_reset_func(bp);
7700 bnx2x_reset_common(bp);
7703 case FW_MSG_CODE_DRV_UNLOAD_PORT:
7704 bnx2x_reset_port(bp);
7705 bnx2x_reset_func(bp);
7708 case FW_MSG_CODE_DRV_UNLOAD_FUNCTION:
7709 bnx2x_reset_func(bp);
7713 BNX2X_ERR("Unknown reset_code (0x%x) from MCP\n", reset_code);
7718 /* must be called with rtnl_lock */
7719 static int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
7721 int port = BP_PORT(bp);
7725 bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
7727 /* Set "drop all" */
7728 bp->rx_mode = BNX2X_RX_MODE_NONE;
7729 bnx2x_set_storm_rx_mode(bp);
7731 /* Disable HW interrupts, NAPI and Tx */
7732 bnx2x_netif_stop(bp, 1);
7734 del_timer_sync(&bp->timer);
7735 SHMEM_WR(bp, func_mb[BP_FUNC(bp)].drv_pulse_mb,
7736 (DRV_PULSE_ALWAYS_ALIVE | bp->fw_drv_pulse_wr_seq));
7737 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
7742 /* Wait until tx fastpath tasks complete */
7743 for_each_tx_queue(bp, i) {
7744 struct bnx2x_fastpath *fp = &bp->fp[i];
7747 while (bnx2x_has_tx_work_unload(fp)) {
7751 BNX2X_ERR("timeout waiting for queue[%d]\n",
7753 #ifdef BNX2X_STOP_ON_ERROR
7764 /* Give HW time to discard old tx messages */
7767 if (CHIP_IS_E1(bp)) {
7768 struct mac_configuration_cmd *config =
7769 bnx2x_sp(bp, mcast_config);
7771 bnx2x_set_eth_mac_addr_e1(bp, 0);
7773 for (i = 0; i < config->hdr.length; i++)
7774 CAM_INVALIDATE(config->config_table[i]);
7776 config->hdr.length = i;
7777 if (CHIP_REV_IS_SLOW(bp))
7778 config->hdr.offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
7780 config->hdr.offset = BNX2X_MAX_MULTICAST*(1 + port);
7781 config->hdr.client_id = bp->fp->cl_id;
7782 config->hdr.reserved1 = 0;
7784 bp->set_mac_pending++;
7787 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
7788 U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
7789 U64_LO(bnx2x_sp_mapping(bp, mcast_config)), 0);
7792 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
7794 bnx2x_set_eth_mac_addr_e1h(bp, 0);
7796 for (i = 0; i < MC_HASH_SIZE; i++)
7797 REG_WR(bp, MC_HASH_OFFSET(bp, i), 0);
7799 REG_WR(bp, MISC_REG_E1HMF_MODE, 0);
7802 if (unload_mode == UNLOAD_NORMAL)
7803 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
7805 else if (bp->flags & NO_WOL_FLAG)
7806 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP;
7809 u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
7810 u8 *mac_addr = bp->dev->dev_addr;
7812 /* The mac address is written to entries 1-4 to
7813 preserve entry 0 which is used by the PMF */
7814 u8 entry = (BP_E1HVN(bp) + 1)*8;
7816 val = (mac_addr[0] << 8) | mac_addr[1];
7817 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry, val);
7819 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
7820 (mac_addr[4] << 8) | mac_addr[5];
7821 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry + 4, val);
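/* Worked packing for the two MAC-match registers above, assuming the
 * MAC 00:10:18:ab:cd:ef: the first write is (0x00 << 8) | 0x10 ==
 * 0x00000010 and the second (0x18 << 24) | (0xab << 16) |
 * (0xcd << 8) | 0xef == 0x18abcdef. */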
7823 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN;
7826 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
7828 /* Close multi and leading connections
7829 Completions for ramrods are collected in a synchronous way */
7830 for_each_nondefault_queue(bp, i)
7831 if (bnx2x_stop_multi(bp, i))
7834 rc = bnx2x_stop_leading(bp);
7836 BNX2X_ERR("Stop leading failed!\n");
7837 #ifdef BNX2X_STOP_ON_ERROR
7846 reset_code = bnx2x_fw_command(bp, reset_code);
7848 DP(NETIF_MSG_IFDOWN, "NO MCP - load counts %d, %d, %d\n",
7849 load_count[0], load_count[1], load_count[2]);
7851 load_count[1 + port]--;
7852 DP(NETIF_MSG_IFDOWN, "NO MCP - new load counts %d, %d, %d\n",
7853 load_count[0], load_count[1], load_count[2]);
7854 if (load_count[0] == 0)
7855 reset_code = FW_MSG_CODE_DRV_UNLOAD_COMMON;
7856 else if (load_count[1 + port] == 0)
7857 reset_code = FW_MSG_CODE_DRV_UNLOAD_PORT;
7859 reset_code = FW_MSG_CODE_DRV_UNLOAD_FUNCTION;
7862 if ((reset_code == FW_MSG_CODE_DRV_UNLOAD_COMMON) ||
7863 (reset_code == FW_MSG_CODE_DRV_UNLOAD_PORT))
7864 bnx2x__link_reset(bp);
7866 /* Reset the chip */
7867 bnx2x_reset_chip(bp, reset_code);
7869 /* Report UNLOAD_DONE to MCP */
7871 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
7875 /* Free SKBs, SGEs, TPA pool and driver internals */
7876 bnx2x_free_skbs(bp);
7877 for_each_rx_queue(bp, i)
7878 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
7879 for_each_rx_queue(bp, i)
7880 netif_napi_del(&bnx2x_fp(bp, i, napi));
7883 bp->state = BNX2X_STATE_CLOSED;
7885 netif_carrier_off(bp->dev);
7890 static void bnx2x_reset_task(struct work_struct *work)
7892 struct bnx2x *bp = container_of(work, struct bnx2x, reset_task);
7894 #ifdef BNX2X_STOP_ON_ERROR
7895 BNX2X_ERR("reset task called but STOP_ON_ERROR defined"
7896 " so reset not done to allow debug dump,\n"
7897 " you will need to reboot when done\n");
7903 if (!netif_running(bp->dev))
7904 goto reset_task_exit;
7906 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
7907 bnx2x_nic_load(bp, LOAD_NORMAL);
7913 /* end of nic load/unload */
7918 * Init service functions
7921 static inline u32 bnx2x_get_pretend_reg(struct bnx2x *bp, int func)
7924 case 0: return PXP2_REG_PGL_PRETEND_FUNC_F0;
7925 case 1: return PXP2_REG_PGL_PRETEND_FUNC_F1;
7926 case 2: return PXP2_REG_PGL_PRETEND_FUNC_F2;
7927 case 3: return PXP2_REG_PGL_PRETEND_FUNC_F3;
7928 case 4: return PXP2_REG_PGL_PRETEND_FUNC_F4;
7929 case 5: return PXP2_REG_PGL_PRETEND_FUNC_F5;
7930 case 6: return PXP2_REG_PGL_PRETEND_FUNC_F6;
7931 case 7: return PXP2_REG_PGL_PRETEND_FUNC_F7;
7933 BNX2X_ERR("Unsupported function index: %d\n", func);
7938 static void bnx2x_undi_int_disable_e1h(struct bnx2x *bp, int orig_func)
7940 u32 reg = bnx2x_get_pretend_reg(bp, orig_func), new_val;
7942 /* Flush all outstanding writes */
7945 /* Pretend to be function 0 */
7947 /* Flush the GRC transaction (in the chip) */
7948 new_val = REG_RD(bp, reg);
7950 BNX2X_ERR("Hmmm... Pretend register wasn't updated: (0,%d)!\n",
7955 /* From now we are in the "like-E1" mode */
7956 bnx2x_int_disable(bp);
7958 /* Flush all outstanding writes */
7961 /* Restore the original function settings */
7962 REG_WR(bp, reg, orig_func);
7963 new_val = REG_RD(bp, reg);
7964 if (new_val != orig_func) {
7965 BNX2X_ERR("Hmmm... Pretend register wasn't updated: (%d,%d)!\n",
7966 orig_func, new_val);
7971 static inline void bnx2x_undi_int_disable(struct bnx2x *bp, int func)
7973 if (CHIP_IS_E1H(bp))
7974 bnx2x_undi_int_disable_e1h(bp, func);
7976 bnx2x_int_disable(bp);
7979 static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
7983 /* Check if there is any driver already loaded */
7984 val = REG_RD(bp, MISC_REG_UNPREPARED);
7986 /* Check if it is the UNDI driver
7987 * UNDI driver initializes CID offset for normal bell to 0x7
7989 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
7990 val = REG_RD(bp, DORQ_REG_NORM_CID_OFST);
7992 u32 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
7994 int func = BP_FUNC(bp);
7998 /* clear the UNDI indication */
7999 REG_WR(bp, DORQ_REG_NORM_CID_OFST, 0);
8001 BNX2X_DEV_INFO("UNDI is active! reset device\n");
8003 /* try unload UNDI on port 0 */
8006 (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
8007 DRV_MSG_SEQ_NUMBER_MASK);
8008 reset_code = bnx2x_fw_command(bp, reset_code);
8010 /* if UNDI is loaded on the other port */
8011 if (reset_code != FW_MSG_CODE_DRV_UNLOAD_COMMON) {
8013 /* send "DONE" for previous unload */
8014 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
8016 /* unload UNDI on port 1 */
8019 (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
8020 DRV_MSG_SEQ_NUMBER_MASK);
8021 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
8023 bnx2x_fw_command(bp, reset_code);
8026 /* now it's safe to release the lock */
8027 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
8029 bnx2x_undi_int_disable(bp, func);
8031 /* close input traffic and wait for it */
8032 /* Do not rcv packets to BRB */
8034 (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_DRV_MASK :
8035 NIG_REG_LLH0_BRB1_DRV_MASK), 0x0);
8036 /* Do not direct rcv packets that are not for MCP to
8039 (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_NOT_MCP :
8040 NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
8043 (BP_PORT(bp) ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
8044 MISC_REG_AEU_MASK_ATTN_FUNC_0), 0);
8047 /* save NIG port swap info */
8048 swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
8049 swap_en = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
8052 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
8055 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
8057 /* take the NIG out of reset and restore swap values */
8059 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
8060 MISC_REGISTERS_RESET_REG_1_RST_NIG);
8061 REG_WR(bp, NIG_REG_PORT_SWAP, swap_val);
8062 REG_WR(bp, NIG_REG_STRAP_OVERRIDE, swap_en);
8064 /* send unload done to the MCP */
8065 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
8067 /* restore our func and fw_seq */
8070 (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
8071 DRV_MSG_SEQ_NUMBER_MASK);
8074 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
8078 static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
8080 u32 val, val2, val3, val4, id;
8083 /* Get the chip revision id and number. */
8084 /* chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3 */
8085 val = REG_RD(bp, MISC_REG_CHIP_NUM);
8086 id = ((val & 0xffff) << 16);
8087 val = REG_RD(bp, MISC_REG_CHIP_REV);
8088 id |= ((val & 0xf) << 12);
8089 val = REG_RD(bp, MISC_REG_CHIP_METAL);
8090 id |= ((val & 0xff) << 4);
8091 val = REG_RD(bp, MISC_REG_BOND_ID);
8093 bp->common.chip_id = id;
8094 bp->link_params.chip_id = bp->common.chip_id;
8095 BNX2X_DEV_INFO("chip ID is 0x%x\n", id);
8097 val = (REG_RD(bp, 0x2874) & 0x55);
8098 if ((bp->common.chip_id & 0x1) ||
8099 (CHIP_IS_E1(bp) && val) || (CHIP_IS_E1H(bp) && (val == 0x55))) {
8100 bp->flags |= ONE_PORT_FLAG;
8101 BNX2X_DEV_INFO("single port device\n");
8104 val = REG_RD(bp, MCP_REG_MCPR_NVM_CFG4);
8105 bp->common.flash_size = (NVRAM_1MB_SIZE <<
8106 (val & MCPR_NVM_CFG4_FLASH_SIZE));
8107 BNX2X_DEV_INFO("flash_size 0x%x (%d)\n",
8108 bp->common.flash_size, bp->common.flash_size);
8110 bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
8111 bp->common.shmem2_base = REG_RD(bp, MISC_REG_GENERIC_CR_0);
8112 bp->link_params.shmem_base = bp->common.shmem_base;
8113 BNX2X_DEV_INFO("shmem offset 0x%x shmem2 offset 0x%x\n",
8114 bp->common.shmem_base, bp->common.shmem2_base);
8116 if (!bp->common.shmem_base ||
8117 (bp->common.shmem_base < 0xA0000) ||
8118 (bp->common.shmem_base >= 0xC0000)) {
8119 BNX2X_DEV_INFO("MCP not active\n");
8120 bp->flags |= NO_MCP_FLAG;
8124 val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
8125 if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
8126 != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
8127 BNX2X_ERR("BAD MCP validity signature\n");
8129 bp->common.hw_config = SHMEM_RD(bp, dev_info.shared_hw_config.config);
8130 BNX2X_DEV_INFO("hw_config 0x%08x\n", bp->common.hw_config);
8132 bp->link_params.hw_led_mode = ((bp->common.hw_config &
8133 SHARED_HW_CFG_LED_MODE_MASK) >>
8134 SHARED_HW_CFG_LED_MODE_SHIFT);
8136 bp->link_params.feature_config_flags = 0;
8137 val = SHMEM_RD(bp, dev_info.shared_feature_config.config);
8138 if (val & SHARED_FEAT_CFG_OVERRIDE_PREEMPHASIS_CFG_ENABLED)
8139 bp->link_params.feature_config_flags |=
8140 FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
8142 bp->link_params.feature_config_flags &=
8143 ~FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
8145 val = SHMEM_RD(bp, dev_info.bc_rev) >> 8;
8146 bp->common.bc_ver = val;
8147 BNX2X_DEV_INFO("bc_ver %X\n", val);
8148 if (val < BNX2X_BC_VER) {
8149 /* for now only warn
8150 * later we might need to enforce this */
8151 BNX2X_ERR("This driver needs bc_ver %X but found %X,"
8152 " please upgrade BC\n", BNX2X_BC_VER, val);
8154 bp->link_params.feature_config_flags |=
8155 (val >= REQ_BC_VER_4_VRFY_OPT_MDL) ?
8156 FEATURE_CONFIG_BC_SUPPORTS_OPT_MDL_VRFY : 0;
8158 if (BP_E1HVN(bp) == 0) {
8159 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_PMC, &pmc);
8160 bp->flags |= (pmc & PCI_PM_CAP_PME_D3cold) ? 0 : NO_WOL_FLAG;
8162 /* no WOL capability for E1HVN != 0 */
8163 bp->flags |= NO_WOL_FLAG;
8165 BNX2X_DEV_INFO("%sWoL capable\n",
8166 (bp->flags & NO_WOL_FLAG) ? "not " : "");
8168 val = SHMEM_RD(bp, dev_info.shared_hw_config.part_num);
8169 val2 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[4]);
8170 val3 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[8]);
8171 val4 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[12]);
8173 printk(KERN_INFO PFX "part number %X-%X-%X-%X\n",
8174 val, val2, val3, val4);
8177 static void __devinit bnx2x_link_settings_supported(struct bnx2x *bp,
8180 int port = BP_PORT(bp);
8183 switch (switch_cfg) {
8185 BNX2X_DEV_INFO("switch_cfg 0x%x (1G)\n", switch_cfg);
8188 SERDES_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
8189 switch (ext_phy_type) {
8190 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT:
8191 BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
8194 bp->port.supported |= (SUPPORTED_10baseT_Half |
8195 SUPPORTED_10baseT_Full |
8196 SUPPORTED_100baseT_Half |
8197 SUPPORTED_100baseT_Full |
8198 SUPPORTED_1000baseT_Full |
8199 SUPPORTED_2500baseX_Full |
8204 SUPPORTED_Asym_Pause);
8207 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_BCM5482:
8208 BNX2X_DEV_INFO("ext_phy_type 0x%x (5482)\n",
8211 bp->port.supported |= (SUPPORTED_10baseT_Half |
8212 SUPPORTED_10baseT_Full |
8213 SUPPORTED_100baseT_Half |
8214 SUPPORTED_100baseT_Full |
8215 SUPPORTED_1000baseT_Full |
8220 SUPPORTED_Asym_Pause);
8224 BNX2X_ERR("NVRAM config error. "
8225 "BAD SerDes ext_phy_config 0x%x\n",
8226 bp->link_params.ext_phy_config);
8230 bp->port.phy_addr = REG_RD(bp, NIG_REG_SERDES0_CTRL_PHY_ADDR +
8232 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
8235 case SWITCH_CFG_10G:
8236 BNX2X_DEV_INFO("switch_cfg 0x%x (10G)\n", switch_cfg);
8239 XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
8240 switch (ext_phy_type) {
8241 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
8242 BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
8245 bp->port.supported |= (SUPPORTED_10baseT_Half |
8246 SUPPORTED_10baseT_Full |
8247 SUPPORTED_100baseT_Half |
8248 SUPPORTED_100baseT_Full |
8249 SUPPORTED_1000baseT_Full |
8250 SUPPORTED_2500baseX_Full |
8251 SUPPORTED_10000baseT_Full |
8256 SUPPORTED_Asym_Pause);
8259 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
8260 BNX2X_DEV_INFO("ext_phy_type 0x%x (8072)\n",
8263 bp->port.supported |= (SUPPORTED_10000baseT_Full |
8264 SUPPORTED_1000baseT_Full |
8268 SUPPORTED_Asym_Pause);
8271 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
8272 BNX2X_DEV_INFO("ext_phy_type 0x%x (8073)\n",
8275 bp->port.supported |= (SUPPORTED_10000baseT_Full |
8276 SUPPORTED_2500baseX_Full |
8277 SUPPORTED_1000baseT_Full |
8281 SUPPORTED_Asym_Pause);
8284 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
8285 BNX2X_DEV_INFO("ext_phy_type 0x%x (8705)\n",
8288 bp->port.supported |= (SUPPORTED_10000baseT_Full |
8291 SUPPORTED_Asym_Pause);
8294 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
8295 BNX2X_DEV_INFO("ext_phy_type 0x%x (8706)\n",
8298 bp->port.supported |= (SUPPORTED_10000baseT_Full |
8299 SUPPORTED_1000baseT_Full |
8302 SUPPORTED_Asym_Pause);
8305 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
8306 BNX2X_DEV_INFO("ext_phy_type 0x%x (8726)\n",
8309 bp->port.supported |= (SUPPORTED_10000baseT_Full |
8310 SUPPORTED_1000baseT_Full |
8314 SUPPORTED_Asym_Pause);
8317 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
8318 BNX2X_DEV_INFO("ext_phy_type 0x%x (8727)\n",
8321 bp->port.supported |= (SUPPORTED_10000baseT_Full |
8322 SUPPORTED_1000baseT_Full |
8326 SUPPORTED_Asym_Pause);
8329 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
8330 BNX2X_DEV_INFO("ext_phy_type 0x%x (SFX7101)\n",
8333 bp->port.supported |= (SUPPORTED_10000baseT_Full |
8337 SUPPORTED_Asym_Pause);
8340 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481:
8341 BNX2X_DEV_INFO("ext_phy_type 0x%x (BCM8481)\n",
8344 bp->port.supported |= (SUPPORTED_10baseT_Half |
8345 SUPPORTED_10baseT_Full |
8346 SUPPORTED_100baseT_Half |
8347 SUPPORTED_100baseT_Full |
8348 SUPPORTED_1000baseT_Full |
8349 SUPPORTED_10000baseT_Full |
8353 SUPPORTED_Asym_Pause);
8356 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
8357 BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
8358 bp->link_params.ext_phy_config);
8362 BNX2X_ERR("NVRAM config error. "
8363 "BAD XGXS ext_phy_config 0x%x\n",
8364 bp->link_params.ext_phy_config);
8368 bp->port.phy_addr = REG_RD(bp, NIG_REG_XGXS0_CTRL_PHY_ADDR +
8370 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
8375 BNX2X_ERR("BAD switch_cfg link_config 0x%x\n",
8376 bp->port.link_config);
8379 bp->link_params.phy_addr = bp->port.phy_addr;
8381 /* mask what we support according to speed_cap_mask */
8382 if (!(bp->link_params.speed_cap_mask &
8383 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF))
8384 bp->port.supported &= ~SUPPORTED_10baseT_Half;
8386 if (!(bp->link_params.speed_cap_mask &
8387 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL))
8388 bp->port.supported &= ~SUPPORTED_10baseT_Full;
8390 if (!(bp->link_params.speed_cap_mask &
8391 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF))
8392 bp->port.supported &= ~SUPPORTED_100baseT_Half;
8394 if (!(bp->link_params.speed_cap_mask &
8395 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL))
8396 bp->port.supported &= ~SUPPORTED_100baseT_Full;
8398 if (!(bp->link_params.speed_cap_mask &
8399 PORT_HW_CFG_SPEED_CAPABILITY_D0_1G))
8400 bp->port.supported &= ~(SUPPORTED_1000baseT_Half |
8401 SUPPORTED_1000baseT_Full);
8403 if (!(bp->link_params.speed_cap_mask &
8404 PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G))
8405 bp->port.supported &= ~SUPPORTED_2500baseX_Full;
8407 if (!(bp->link_params.speed_cap_mask &
8408 PORT_HW_CFG_SPEED_CAPABILITY_D0_10G))
8409 bp->port.supported &= ~SUPPORTED_10000baseT_Full;
8411 BNX2X_DEV_INFO("supported 0x%x\n", bp->port.supported);
8414 static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
8416 bp->link_params.req_duplex = DUPLEX_FULL;
8418 switch (bp->port.link_config & PORT_FEATURE_LINK_SPEED_MASK) {
8419 case PORT_FEATURE_LINK_SPEED_AUTO:
8420 if (bp->port.supported & SUPPORTED_Autoneg) {
8421 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
8422 bp->port.advertising = bp->port.supported;
8425 XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
8427 if ((ext_phy_type ==
8428 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705) ||
8430 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706)) {
8431 /* force 10G, no AN */
8432 bp->link_params.req_line_speed = SPEED_10000;
8433 bp->port.advertising =
8434 (ADVERTISED_10000baseT_Full |
8438 BNX2X_ERR("NVRAM config error. "
8439 "Invalid link_config 0x%x"
8440 " Autoneg not supported\n",
8441 bp->port.link_config);
8446 case PORT_FEATURE_LINK_SPEED_10M_FULL:
8447 if (bp->port.supported & SUPPORTED_10baseT_Full) {
8448 bp->link_params.req_line_speed = SPEED_10;
8449 bp->port.advertising = (ADVERTISED_10baseT_Full |
8452 BNX2X_ERR("NVRAM config error. "
8453 "Invalid link_config 0x%x"
8454 " speed_cap_mask 0x%x\n",
8455 bp->port.link_config,
8456 bp->link_params.speed_cap_mask);
8461 case PORT_FEATURE_LINK_SPEED_10M_HALF:
8462 if (bp->port.supported & SUPPORTED_10baseT_Half) {
8463 bp->link_params.req_line_speed = SPEED_10;
8464 bp->link_params.req_duplex = DUPLEX_HALF;
8465 bp->port.advertising = (ADVERTISED_10baseT_Half |
8468 BNX2X_ERR("NVRAM config error. "
8469 "Invalid link_config 0x%x"
8470 " speed_cap_mask 0x%x\n",
8471 bp->port.link_config,
8472 bp->link_params.speed_cap_mask);
8477 case PORT_FEATURE_LINK_SPEED_100M_FULL:
8478 if (bp->port.supported & SUPPORTED_100baseT_Full) {
8479 bp->link_params.req_line_speed = SPEED_100;
8480 bp->port.advertising = (ADVERTISED_100baseT_Full |
8483 BNX2X_ERR("NVRAM config error. "
8484 "Invalid link_config 0x%x"
8485 " speed_cap_mask 0x%x\n",
8486 bp->port.link_config,
8487 bp->link_params.speed_cap_mask);
8492 case PORT_FEATURE_LINK_SPEED_100M_HALF:
8493 if (bp->port.supported & SUPPORTED_100baseT_Half) {
8494 bp->link_params.req_line_speed = SPEED_100;
8495 bp->link_params.req_duplex = DUPLEX_HALF;
8496 bp->port.advertising = (ADVERTISED_100baseT_Half |
8499 BNX2X_ERR("NVRAM config error. "
8500 "Invalid link_config 0x%x"
8501 " speed_cap_mask 0x%x\n",
8502 bp->port.link_config,
8503 bp->link_params.speed_cap_mask);
8508 case PORT_FEATURE_LINK_SPEED_1G:
8509 if (bp->port.supported & SUPPORTED_1000baseT_Full) {
8510 bp->link_params.req_line_speed = SPEED_1000;
8511 bp->port.advertising = (ADVERTISED_1000baseT_Full |
8514 BNX2X_ERR("NVRAM config error. "
8515 "Invalid link_config 0x%x"
8516 " speed_cap_mask 0x%x\n",
8517 bp->port.link_config,
8518 bp->link_params.speed_cap_mask);
8523 case PORT_FEATURE_LINK_SPEED_2_5G:
8524 if (bp->port.supported & SUPPORTED_2500baseX_Full) {
8525 bp->link_params.req_line_speed = SPEED_2500;
8526 bp->port.advertising = (ADVERTISED_2500baseX_Full |
8529 BNX2X_ERR("NVRAM config error. "
8530 "Invalid link_config 0x%x"
8531 " speed_cap_mask 0x%x\n",
8532 bp->port.link_config,
8533 bp->link_params.speed_cap_mask);
8538 case PORT_FEATURE_LINK_SPEED_10G_CX4:
8539 case PORT_FEATURE_LINK_SPEED_10G_KX4:
8540 case PORT_FEATURE_LINK_SPEED_10G_KR:
8541 if (bp->port.supported & SUPPORTED_10000baseT_Full) {
8542 bp->link_params.req_line_speed = SPEED_10000;
8543 bp->port.advertising = (ADVERTISED_10000baseT_Full |
8546 BNX2X_ERR("NVRAM config error. "
8547 "Invalid link_config 0x%x"
8548 " speed_cap_mask 0x%x\n",
8549 bp->port.link_config,
8550 bp->link_params.speed_cap_mask);
8556 BNX2X_ERR("NVRAM config error. "
8557 "BAD link speed link_config 0x%x\n",
8558 bp->port.link_config);
8559 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
8560 bp->port.advertising = bp->port.supported;
8564 bp->link_params.req_flow_ctrl = (bp->port.link_config &
8565 PORT_FEATURE_FLOW_CONTROL_MASK);
8566 if ((bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO) &&
8567 !(bp->port.supported & SUPPORTED_Autoneg))
8568 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_NONE;
8570 BNX2X_DEV_INFO("req_line_speed %d req_duplex %d req_flow_ctrl 0x%x"
8571 " advertising 0x%x\n",
8572 bp->link_params.req_line_speed,
8573 bp->link_params.req_duplex,
8574 bp->link_params.req_flow_ctrl, bp->port.advertising);
8577 static void __devinit bnx2x_set_mac_buf(u8 *mac_buf, u32 mac_lo, u16 mac_hi)
8579 mac_hi = cpu_to_be16(mac_hi);
8580 mac_lo = cpu_to_be32(mac_lo);
8581 memcpy(mac_buf, &mac_hi, sizeof(mac_hi));
8582 memcpy(mac_buf + sizeof(mac_hi), &mac_lo, sizeof(mac_lo));
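/* Worked example for the helper above (a sketch): mac_hi == 0x0010 and
 * mac_lo == 0x18abcdef yield 00:10:18:ab:cd:ef in mac_buf -- each half
 * is converted to big-endian first so the bytes land in network order. */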
8585 static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp)
8587 int port = BP_PORT(bp);
8593 bp->link_params.bp = bp;
8594 bp->link_params.port = port;
8596 bp->link_params.lane_config =
8597 SHMEM_RD(bp, dev_info.port_hw_config[port].lane_config);
8598 bp->link_params.ext_phy_config =
8600 dev_info.port_hw_config[port].external_phy_config);
8601 /* BCM8727_NOC => BCM8727 with no over-current protection */
8602 if (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config) ==
8603 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727_NOC) {
8604 bp->link_params.ext_phy_config &=
8605 ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
8606 bp->link_params.ext_phy_config |=
8607 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727;
8608 bp->link_params.feature_config_flags |=
8609 FEATURE_CONFIG_BCM8727_NOC;
8612 bp->link_params.speed_cap_mask =
8614 dev_info.port_hw_config[port].speed_capability_mask);
8616 bp->port.link_config =
8617 SHMEM_RD(bp, dev_info.port_feature_config[port].link_config);
8619 /* Get the Rx and Tx XGXS config for all 4 lanes */
8620 for (i = 0; i < 2; i++) {
8622 dev_info.port_hw_config[port].xgxs_config_rx[i<<1]);
8623 bp->link_params.xgxs_config_rx[i << 1] = ((val>>16) & 0xffff);
8624 bp->link_params.xgxs_config_rx[(i << 1) + 1] = (val & 0xffff);
8627 dev_info.port_hw_config[port].xgxs_config_tx[i<<1]);
8628 bp->link_params.xgxs_config_tx[i << 1] = ((val>>16) & 0xffff);
8629 bp->link_params.xgxs_config_tx[(i << 1) + 1] = (val & 0xffff);
8632 /* If the device is capable of WoL, set the default state according to the HW */
8635 config = SHMEM_RD(bp, dev_info.port_feature_config[port].config);
8636 bp->wol = (!(bp->flags & NO_WOL_FLAG) &&
8637 (config & PORT_FEATURE_WOL_ENABLED));
8639 BNX2X_DEV_INFO("lane_config 0x%08x ext_phy_config 0x%08x"
8640 " speed_cap_mask 0x%08x link_config 0x%08x\n",
8641 bp->link_params.lane_config,
8642 bp->link_params.ext_phy_config,
8643 bp->link_params.speed_cap_mask, bp->port.link_config);
8645 bp->link_params.switch_cfg |= (bp->port.link_config &
8646 PORT_FEATURE_CONNECTED_SWITCH_MASK);
8647 bnx2x_link_settings_supported(bp, bp->link_params.switch_cfg);
8649 bnx2x_link_settings_requested(bp);
8652 * If connected directly, work with the internal PHY; otherwise, work
8653 * with the external PHY
8655 ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
8656 if (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT)
8657 bp->mdio.prtad = bp->link_params.phy_addr;
8659 else if ((ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE) &&
8660 (ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN))
8662 XGXS_EXT_PHY_ADDR(bp->link_params.ext_phy_config);
8664 val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper);
8665 val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower);
8666 bnx2x_set_mac_buf(bp->dev->dev_addr, val, val2);
8667 memcpy(bp->link_params.mac_addr, bp->dev->dev_addr, ETH_ALEN);
8668 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
8671 static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
8673 int func = BP_FUNC(bp);
8677 bnx2x_get_common_hwinfo(bp);
8681 if (CHIP_IS_E1H(bp)) {
8683 SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
8685 val = (SHMEM_RD(bp, mf_cfg.func_mf_config[FUNC_0].e1hov_tag) &
8686 FUNC_MF_CFG_E1HOV_TAG_MASK);
8687 if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT)
8689 BNX2X_DEV_INFO("%s function mode\n",
8690 IS_E1HMF(bp) ? "multi" : "single");
8693 val = (SHMEM_RD(bp, mf_cfg.func_mf_config[func].
8695 FUNC_MF_CFG_E1HOV_TAG_MASK);
8696 if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
8698 BNX2X_DEV_INFO("E1HOV for func %d is %d "
8700 func, bp->e1hov, bp->e1hov);
8702 BNX2X_ERR("!!! No valid E1HOV for func %d,"
8703 " aborting\n", func);
8708 BNX2X_ERR("!!! VN %d in single function mode,"
8709 " aborting\n", BP_E1HVN(bp));
8715 if (!BP_NOMCP(bp)) {
8716 bnx2x_get_port_hwinfo(bp);
8718 bp->fw_seq = (SHMEM_RD(bp, func_mb[func].drv_mb_header) &
8719 DRV_MSG_SEQ_NUMBER_MASK);
8720 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
8724 val2 = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_upper);
8725 val = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_lower);
8726 if ((val2 != FUNC_MF_CFG_UPPERMAC_DEFAULT) &&
8727 (val != FUNC_MF_CFG_LOWERMAC_DEFAULT)) {
8728 bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
8729 bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
8730 bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
8731 bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
8732 bp->dev->dev_addr[4] = (u8)(val >> 8 & 0xff);
8733 bp->dev->dev_addr[5] = (u8)(val & 0xff);
8734 memcpy(bp->link_params.mac_addr, bp->dev->dev_addr,
8736 memcpy(bp->dev->perm_addr, bp->dev->dev_addr,
8744 /* only supposed to happen on emulation/FPGA */
8745 BNX2X_ERR("warning random MAC workaround active\n");
8746 random_ether_addr(bp->dev->dev_addr);
8747 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
8753 static int __devinit bnx2x_init_bp(struct bnx2x *bp)
8755 int func = BP_FUNC(bp);
8759 /* Disable interrupt handling until HW is initialized */
8760 atomic_set(&bp->intr_sem, 1);
8761 smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */
8763 mutex_init(&bp->port.phy_mutex);
8765 INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task);
8766 INIT_WORK(&bp->reset_task, bnx2x_reset_task);
8768 rc = bnx2x_get_hwinfo(bp);
8770 /* need to reset the chip if UNDI was active */
8772 bnx2x_undi_unload(bp);
8774 if (CHIP_REV_IS_FPGA(bp))
8775 printk(KERN_ERR PFX "FPGA detected\n");
8777 if (BP_NOMCP(bp) && (func == 0))
8779 "MCP disabled, must load devices in order!\n");
8781 /* Set multi queue mode */
8782 if ((multi_mode != ETH_RSS_MODE_DISABLED) &&
8783 ((int_mode == INT_MODE_INTx) || (int_mode == INT_MODE_MSI))) {
8785 "Multi disabled since int_mode requested is not MSI-X\n");
8786 multi_mode = ETH_RSS_MODE_DISABLED;
8788 bp->multi_mode = multi_mode;
8793 bp->flags &= ~TPA_ENABLE_FLAG;
8794 bp->dev->features &= ~NETIF_F_LRO;
8796 bp->flags |= TPA_ENABLE_FLAG;
8797 bp->dev->features |= NETIF_F_LRO;
8801 bp->dropless_fc = 0;
8803 bp->dropless_fc = dropless_fc;
8807 bp->tx_ring_size = MAX_TX_AVAIL;
8808 bp->rx_ring_size = MAX_RX_AVAIL;
8815 timer_interval = (CHIP_REV_IS_SLOW(bp) ? 5*HZ : HZ);
8816 bp->current_interval = (poll ? poll : timer_interval);
8818 init_timer(&bp->timer);
8819 bp->timer.expires = jiffies + bp->current_interval;
8820 bp->timer.data = (unsigned long) bp;
8821 bp->timer.function = bnx2x_timer;
8827 * ethtool service functions
8830 /* All ethtool functions called with rtnl_lock */
8832 static int bnx2x_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
8834 struct bnx2x *bp = netdev_priv(dev);
8836 cmd->supported = bp->port.supported;
8837 cmd->advertising = bp->port.advertising;
8839 if (netif_carrier_ok(dev)) {
8840 cmd->speed = bp->link_vars.line_speed;
8841 cmd->duplex = bp->link_vars.duplex;
8843 cmd->speed = bp->link_params.req_line_speed;
8844 cmd->duplex = bp->link_params.req_duplex;
8849 vn_max_rate = ((bp->mf_config & FUNC_MF_CFG_MAX_BW_MASK) >>
8850 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
8851 if (vn_max_rate < cmd->speed)
8852 cmd->speed = vn_max_rate;
8855 if (bp->link_params.switch_cfg == SWITCH_CFG_10G) {
8857 XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
8859 switch (ext_phy_type) {
8860 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
8861 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
8862 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
8863 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
8864 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
8865 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
8866 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
8867 cmd->port = PORT_FIBRE;
8870 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
8871 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481:
8872 cmd->port = PORT_TP;
8875 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
8876 BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
8877 bp->link_params.ext_phy_config);
8881 DP(NETIF_MSG_LINK, "BAD XGXS ext_phy_config 0x%x\n",
8882 bp->link_params.ext_phy_config);
8886 cmd->port = PORT_TP;
8888 cmd->phy_address = bp->mdio.prtad;
8889 cmd->transceiver = XCVR_INTERNAL;
8891 if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
8892 cmd->autoneg = AUTONEG_ENABLE;
8894 cmd->autoneg = AUTONEG_DISABLE;
8899 DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
8900 DP_LEVEL " supported 0x%x advertising 0x%x speed %d\n"
8901 DP_LEVEL " duplex %d port %d phy_address %d transceiver %d\n"
8902 DP_LEVEL " autoneg %d maxtxpkt %d maxrxpkt %d\n",
8903 cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
8904 cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
8905 cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);
8910 static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
8912 struct bnx2x *bp = netdev_priv(dev);
8918 DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
8919 DP_LEVEL " supported 0x%x advertising 0x%x speed %d\n"
8920 DP_LEVEL " duplex %d port %d phy_address %d transceiver %d\n"
8921 DP_LEVEL " autoneg %d maxtxpkt %d maxrxpkt %d\n",
8922 cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
8923 cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
8924 cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);
8926 if (cmd->autoneg == AUTONEG_ENABLE) {
8927 if (!(bp->port.supported & SUPPORTED_Autoneg)) {
8928 DP(NETIF_MSG_LINK, "Autoneg not supported\n");
8932 /* advertise the requested speed and duplex if supported */
8933 cmd->advertising &= bp->port.supported;
8935 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
8936 bp->link_params.req_duplex = DUPLEX_FULL;
8937 bp->port.advertising |= (ADVERTISED_Autoneg |
8940 } else { /* forced speed */
8941 /* advertise the requested speed and duplex if supported */
8942 switch (cmd->speed) {
8944 if (cmd->duplex == DUPLEX_FULL) {
8945 if (!(bp->port.supported &
8946 SUPPORTED_10baseT_Full)) {
8948 "10M full not supported\n");
8952 advertising = (ADVERTISED_10baseT_Full |
8955 if (!(bp->port.supported &
8956 SUPPORTED_10baseT_Half)) {
8958 "10M half not supported\n");
8962 advertising = (ADVERTISED_10baseT_Half |
8968 if (cmd->duplex == DUPLEX_FULL) {
8969 if (!(bp->port.supported &
8970 SUPPORTED_100baseT_Full)) {
8972 "100M full not supported\n");
8976 advertising = (ADVERTISED_100baseT_Full |
8979 if (!(bp->port.supported &
8980 SUPPORTED_100baseT_Half)) {
8982 "100M half not supported\n");
8986 advertising = (ADVERTISED_100baseT_Half |
8992 if (cmd->duplex != DUPLEX_FULL) {
8993 DP(NETIF_MSG_LINK, "1G half not supported\n");
8997 if (!(bp->port.supported & SUPPORTED_1000baseT_Full)) {
8998 DP(NETIF_MSG_LINK, "1G full not supported\n");
9002 advertising = (ADVERTISED_1000baseT_Full |
9007 if (cmd->duplex != DUPLEX_FULL) {
9009 "2.5G half not supported\n");
9013 if (!(bp->port.supported & SUPPORTED_2500baseX_Full)) {
9015 "2.5G full not supported\n");
9019 advertising = (ADVERTISED_2500baseX_Full |
9024 if (cmd->duplex != DUPLEX_FULL) {
9025 DP(NETIF_MSG_LINK, "10G half not supported\n");
9029 if (!(bp->port.supported & SUPPORTED_10000baseT_Full)) {
9030 DP(NETIF_MSG_LINK, "10G full not supported\n");
9034 advertising = (ADVERTISED_10000baseT_Full |
9039 DP(NETIF_MSG_LINK, "Unsupported speed\n");
9043 bp->link_params.req_line_speed = cmd->speed;
9044 bp->link_params.req_duplex = cmd->duplex;
9045 bp->port.advertising = advertising;
9048 DP(NETIF_MSG_LINK, "req_line_speed %d\n"
9049 DP_LEVEL " req_duplex %d advertising 0x%x\n",
9050 bp->link_params.req_line_speed, bp->link_params.req_duplex,
9051 bp->port.advertising);
9053 if (netif_running(dev)) {
9054 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
9061 #define IS_E1_ONLINE(info) (((info) & RI_E1_ONLINE) == RI_E1_ONLINE)
9062 #define IS_E1H_ONLINE(info) (((info) & RI_E1H_ONLINE) == RI_E1H_ONLINE)
9064 static int bnx2x_get_regs_len(struct net_device *dev)
9066 struct bnx2x *bp = netdev_priv(dev);
9067 int regdump_len = 0;
9070 if (CHIP_IS_E1(bp)) {
9071 for (i = 0; i < REGS_COUNT; i++)
9072 if (IS_E1_ONLINE(reg_addrs[i].info))
9073 regdump_len += reg_addrs[i].size;
9075 for (i = 0; i < WREGS_COUNT_E1; i++)
9076 if (IS_E1_ONLINE(wreg_addrs_e1[i].info))
9077 regdump_len += wreg_addrs_e1[i].size *
9078 (1 + wreg_addrs_e1[i].read_regs_count);
9081 for (i = 0; i < REGS_COUNT; i++)
9082 if (IS_E1H_ONLINE(reg_addrs[i].info))
9083 regdump_len += reg_addrs[i].size;
9085 for (i = 0; i < WREGS_COUNT_E1H; i++)
9086 if (IS_E1H_ONLINE(wreg_addrs_e1h[i].info))
9087 regdump_len += wreg_addrs_e1h[i].size *
9088 (1 + wreg_addrs_e1h[i].read_regs_count);
9091 regdump_len += sizeof(struct dump_hdr);
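/* The register dump produced below starts with a struct dump_hdr (whose
 * hdr_size field is its own length in dwords, minus one), followed by the
 * raw 32-bit values of every register table entry that is marked online
 * for the running chip (E1 vs E1H), read back-to-back with REG_RD().
 */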
9096 static void bnx2x_get_regs(struct net_device *dev,
9097 struct ethtool_regs *regs, void *_p)
9100 struct bnx2x *bp = netdev_priv(dev);
9101 struct dump_hdr dump_hdr = {0};
9104 memset(p, 0, regs->len);
9106 if (!netif_running(bp->dev))
9109 dump_hdr.hdr_size = (sizeof(struct dump_hdr) / 4) - 1;
9110 dump_hdr.dump_sign = dump_sign_all;
9111 dump_hdr.xstorm_waitp = REG_RD(bp, XSTORM_WAITP_ADDR);
9112 dump_hdr.tstorm_waitp = REG_RD(bp, TSTORM_WAITP_ADDR);
9113 dump_hdr.ustorm_waitp = REG_RD(bp, USTORM_WAITP_ADDR);
9114 dump_hdr.cstorm_waitp = REG_RD(bp, CSTORM_WAITP_ADDR);
9115 dump_hdr.info = CHIP_IS_E1(bp) ? RI_E1_ONLINE : RI_E1H_ONLINE;
9117 memcpy(p, &dump_hdr, sizeof(struct dump_hdr));
9118 p += dump_hdr.hdr_size + 1;
9120 if (CHIP_IS_E1(bp)) {
9121 for (i = 0; i < REGS_COUNT; i++)
9122 if (IS_E1_ONLINE(reg_addrs[i].info))
9123 for (j = 0; j < reg_addrs[i].size; j++)
9125 reg_addrs[i].addr + j*4);
9128 for (i = 0; i < REGS_COUNT; i++)
9129 if (IS_E1H_ONLINE(reg_addrs[i].info))
9130 for (j = 0; j < reg_addrs[i].size; j++)
9132 reg_addrs[i].addr + j*4);
9136 #define PHY_FW_VER_LEN 10
9138 static void bnx2x_get_drvinfo(struct net_device *dev,
9139 struct ethtool_drvinfo *info)
9141 struct bnx2x *bp = netdev_priv(dev);
9142 u8 phy_fw_ver[PHY_FW_VER_LEN];
9144 strcpy(info->driver, DRV_MODULE_NAME);
9145 strcpy(info->version, DRV_MODULE_VERSION);
9147 phy_fw_ver[0] = '\0';
9149 bnx2x_acquire_phy_lock(bp);
9150 bnx2x_get_ext_phy_fw_version(&bp->link_params,
9151 (bp->state != BNX2X_STATE_CLOSED),
9152 phy_fw_ver, PHY_FW_VER_LEN);
9153 bnx2x_release_phy_lock(bp);
9156 snprintf(info->fw_version, 32, "BC:%d.%d.%d%s%s",
9157 (bp->common.bc_ver & 0xff0000) >> 16,
9158 (bp->common.bc_ver & 0xff00) >> 8,
9159 (bp->common.bc_ver & 0xff),
9160 ((phy_fw_ver[0] != '\0') ? " PHY:" : ""), phy_fw_ver);
9161 strcpy(info->bus_info, pci_name(bp->pdev));
9162 info->n_stats = BNX2X_NUM_STATS;
9163 info->testinfo_len = BNX2X_NUM_TESTS;
9164 info->eedump_len = bp->common.flash_size;
9165 info->regdump_len = bnx2x_get_regs_len(dev);
9168 static void bnx2x_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
9170 struct bnx2x *bp = netdev_priv(dev);
9172 if (bp->flags & NO_WOL_FLAG) {
9176 wol->supported = WAKE_MAGIC;
9178 wol->wolopts = WAKE_MAGIC;
9182 memset(&wol->sopass, 0, sizeof(wol->sopass));
9185 static int bnx2x_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
9187 struct bnx2x *bp = netdev_priv(dev);
9189 if (wol->wolopts & ~WAKE_MAGIC)
9192 if (wol->wolopts & WAKE_MAGIC) {
9193 if (bp->flags & NO_WOL_FLAG)
9203 static u32 bnx2x_get_msglevel(struct net_device *dev)
9205 struct bnx2x *bp = netdev_priv(dev);
9207 return bp->msglevel;
9210 static void bnx2x_set_msglevel(struct net_device *dev, u32 level)
9212 struct bnx2x *bp = netdev_priv(dev);
9214 if (capable(CAP_NET_ADMIN))
9215 bp->msglevel = level;
9218 static int bnx2x_nway_reset(struct net_device *dev)
9220 struct bnx2x *bp = netdev_priv(dev);
9225 if (netif_running(dev)) {
9226 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
9233 static u32 bnx2x_get_link(struct net_device *dev)
9235 struct bnx2x *bp = netdev_priv(dev);
9237 return bp->link_vars.link_up;
9240 static int bnx2x_get_eeprom_len(struct net_device *dev)
9242 struct bnx2x *bp = netdev_priv(dev);
9244 return bp->common.flash_size;
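/* NVRAM is shared between the two ports; access is serialized by a
 * software arbitration register in the MCP. The acquire/release pair
 * below sets (or clears) this port's request bit and then polls for the
 * matching grant bit to appear (or drop), with the timeout stretched
 * for emulation/FPGA where everything runs much slower.
 */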
9247 static int bnx2x_acquire_nvram_lock(struct bnx2x *bp)
9249 int port = BP_PORT(bp);
9253 /* adjust timeout for emulation/FPGA */
9254 count = NVRAM_TIMEOUT_COUNT;
9255 if (CHIP_REV_IS_SLOW(bp))
9258 /* request access to nvram interface */
9259 REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
9260 (MCPR_NVM_SW_ARB_ARB_REQ_SET1 << port));
9262 for (i = 0; i < count*10; i++) {
9263 val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
9264 if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))
9270 if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))) {
9271 DP(BNX2X_MSG_NVM, "cannot get access to nvram interface\n");
9278 static int bnx2x_release_nvram_lock(struct bnx2x *bp)
9280 int port = BP_PORT(bp);
9284 /* adjust timeout for emulation/FPGA */
9285 count = NVRAM_TIMEOUT_COUNT;
9286 if (CHIP_REV_IS_SLOW(bp))
9289 /* relinquish nvram interface */
9290 REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
9291 (MCPR_NVM_SW_ARB_ARB_REQ_CLR1 << port));
9293 for (i = 0; i < count*10; i++) {
9294 val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
9295 if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)))
9301 if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)) {
9302 DP(BNX2X_MSG_NVM, "cannot free access to nvram interface\n");
9309 static void bnx2x_enable_nvram_access(struct bnx2x *bp)
9313 val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
9315 /* enable both bits, even on read */
9316 REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
9317 (val | MCPR_NVM_ACCESS_ENABLE_EN |
9318 MCPR_NVM_ACCESS_ENABLE_WR_EN));
9321 static void bnx2x_disable_nvram_access(struct bnx2x *bp)
9325 val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
9327 /* disable both bits, even after read */
9328 REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
9329 (val & ~(MCPR_NVM_ACCESS_ENABLE_EN |
9330 MCPR_NVM_ACCESS_ENABLE_WR_EN)));
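/* A single NVRAM dword access follows the same sequence each time:
 * clear the DONE bit, program the byte address, issue the command with
 * DOIT (plus WR and the data register for writes), then poll the command
 * register until DONE is set or the timeout expires.
 */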
9333 static int bnx2x_nvram_read_dword(struct bnx2x *bp, u32 offset, __be32 *ret_val,
9339 /* build the command word */
9340 cmd_flags |= MCPR_NVM_COMMAND_DOIT;
9342 /* need to clear DONE bit separately */
9343 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);
9345 /* address of the NVRAM to read from */
9346 REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
9347 (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));
9349 /* issue a read command */
9350 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);
9352 /* adjust timeout for emulation/FPGA */
9353 count = NVRAM_TIMEOUT_COUNT;
9354 if (CHIP_REV_IS_SLOW(bp))
9357 /* wait for completion */
9360 for (i = 0; i < count; i++) {
9362 val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
9364 if (val & MCPR_NVM_COMMAND_DONE) {
9365 val = REG_RD(bp, MCP_REG_MCPR_NVM_READ);
9366 /* we read nvram data in cpu order,
9367 * but ethtool sees it as an array of bytes;
9368 * converting to big-endian will do the work */
9369 *ret_val = cpu_to_be32(val);
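/* Multi-dword transfers are framed for the NVRAM controller with
 * MCPR_NVM_COMMAND_FIRST on the first dword and MCPR_NVM_COMMAND_LAST on
 * the final one. For example (hypothetical offsets), reading 12 bytes at
 * offset 0x40 issues three dword reads at 0x40, 0x44 and 0x48 carrying
 * the flags FIRST, none and LAST respectively.
 */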
9378 static int bnx2x_nvram_read(struct bnx2x *bp, u32 offset, u8 *ret_buf,
9385 if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
9387 "Invalid parameter: offset 0x%x buf_size 0x%x\n",
9392 if (offset + buf_size > bp->common.flash_size) {
9393 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
9394 " buf_size (0x%x) > flash_size (0x%x)\n",
9395 offset, buf_size, bp->common.flash_size);
9399 /* request access to nvram interface */
9400 rc = bnx2x_acquire_nvram_lock(bp);
9404 /* enable access to nvram interface */
9405 bnx2x_enable_nvram_access(bp);
9407 /* read the first word(s) */
9408 cmd_flags = MCPR_NVM_COMMAND_FIRST;
9409 while ((buf_size > sizeof(u32)) && (rc == 0)) {
9410 rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
9411 memcpy(ret_buf, &val, 4);
9413 /* advance to the next dword */
9414 offset += sizeof(u32);
9415 ret_buf += sizeof(u32);
9416 buf_size -= sizeof(u32);
9421 cmd_flags |= MCPR_NVM_COMMAND_LAST;
9422 rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
9423 memcpy(ret_buf, &val, 4);
9426 /* disable access to nvram interface */
9427 bnx2x_disable_nvram_access(bp);
9428 bnx2x_release_nvram_lock(bp);
9433 static int bnx2x_get_eeprom(struct net_device *dev,
9434 struct ethtool_eeprom *eeprom, u8 *eebuf)
9436 struct bnx2x *bp = netdev_priv(dev);
9439 if (!netif_running(dev))
9442 DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
9443 DP_LEVEL " magic 0x%x offset 0x%x (%d) len 0x%x (%d)\n",
9444 eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
9445 eeprom->len, eeprom->len);
9447 /* parameters already validated in ethtool_get_eeprom */
9449 rc = bnx2x_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
9454 static int bnx2x_nvram_write_dword(struct bnx2x *bp, u32 offset, u32 val,
9459 /* build the command word */
9460 cmd_flags |= MCPR_NVM_COMMAND_DOIT | MCPR_NVM_COMMAND_WR;
9462 /* need to clear DONE bit separately */
9463 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);
9465 /* write the data */
9466 REG_WR(bp, MCP_REG_MCPR_NVM_WRITE, val);
9468 /* address of the NVRAM to write to */
9469 REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
9470 (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));
9472 /* issue the write command */
9473 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);
9475 /* adjust timeout for emulation/FPGA */
9476 count = NVRAM_TIMEOUT_COUNT;
9477 if (CHIP_REV_IS_SLOW(bp))
9480 /* wait for completion */
9482 for (i = 0; i < count; i++) {
9484 val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
9485 if (val & MCPR_NVM_COMMAND_DONE) {
9494 #define BYTE_OFFSET(offset) (8 * (offset & 0x03))
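/* BYTE_OFFSET() maps a byte address to the bit position of that byte
 * within its aligned dword as held in host order on a little-endian CPU.
 * E.g. (hypothetical offset) 0x102 gives (offset & 0x03) == 2, hence a
 * shift of 16; the read-modify-write below then replaces only that byte
 * of the aligned dword at 0x100 before writing it back.
 */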
9496 static int bnx2x_nvram_write1(struct bnx2x *bp, u32 offset, u8 *data_buf,
9504 if (offset + buf_size > bp->common.flash_size) {
9505 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
9506 " buf_size (0x%x) > flash_size (0x%x)\n",
9507 offset, buf_size, bp->common.flash_size);
9511 /* request access to nvram interface */
9512 rc = bnx2x_acquire_nvram_lock(bp);
9516 /* enable access to nvram interface */
9517 bnx2x_enable_nvram_access(bp);
9519 cmd_flags = (MCPR_NVM_COMMAND_FIRST | MCPR_NVM_COMMAND_LAST);
9520 align_offset = (offset & ~0x03);
9521 rc = bnx2x_nvram_read_dword(bp, align_offset, &val, cmd_flags);
9524 val &= ~(0xff << BYTE_OFFSET(offset));
9525 val |= (*data_buf << BYTE_OFFSET(offset));
9527 /* nvram data is returned as an array of bytes;
9528 * convert it back to cpu order */
9529 val = be32_to_cpu(val);
9531 rc = bnx2x_nvram_write_dword(bp, align_offset, val,
9535 /* disable access to nvram interface */
9536 bnx2x_disable_nvram_access(bp);
9537 bnx2x_release_nvram_lock(bp);
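/* Buffered writes must also respect the flash page size: the loop below
 * raises MCPR_NVM_COMMAND_LAST both at the end of the buffer and at each
 * NVRAM_PAGE_SIZE boundary, and re-raises MCPR_NVM_COMMAND_FIRST when a
 * new page begins, so every page is programmed as its own first..last
 * sequence.
 */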
9542 static int bnx2x_nvram_write(struct bnx2x *bp, u32 offset, u8 *data_buf,
9550 if (buf_size == 1) /* ethtool */
9551 return bnx2x_nvram_write1(bp, offset, data_buf, buf_size);
9553 if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
9555 "Invalid parameter: offset 0x%x buf_size 0x%x\n",
9560 if (offset + buf_size > bp->common.flash_size) {
9561 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
9562 " buf_size (0x%x) > flash_size (0x%x)\n",
9563 offset, buf_size, bp->common.flash_size);
9567 /* request access to nvram interface */
9568 rc = bnx2x_acquire_nvram_lock(bp);
9572 /* enable access to nvram interface */
9573 bnx2x_enable_nvram_access(bp);
9576 cmd_flags = MCPR_NVM_COMMAND_FIRST;
9577 while ((written_so_far < buf_size) && (rc == 0)) {
9578 if (written_so_far == (buf_size - sizeof(u32)))
9579 cmd_flags |= MCPR_NVM_COMMAND_LAST;
9580 else if (((offset + 4) % NVRAM_PAGE_SIZE) == 0)
9581 cmd_flags |= MCPR_NVM_COMMAND_LAST;
9582 else if ((offset % NVRAM_PAGE_SIZE) == 0)
9583 cmd_flags |= MCPR_NVM_COMMAND_FIRST;
9585 memcpy(&val, data_buf, 4);
9587 rc = bnx2x_nvram_write_dword(bp, offset, val, cmd_flags);
9589 /* advance to the next dword */
9590 offset += sizeof(u32);
9591 data_buf += sizeof(u32);
9592 written_so_far += sizeof(u32);
9596 /* disable access to nvram interface */
9597 bnx2x_disable_nvram_access(bp);
9598 bnx2x_release_nvram_lock(bp);
9603 static int bnx2x_set_eeprom(struct net_device *dev,
9604 struct ethtool_eeprom *eeprom, u8 *eebuf)
9606 struct bnx2x *bp = netdev_priv(dev);
9607 int port = BP_PORT(bp);
9610 if (!netif_running(dev))
9613 DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
9614 DP_LEVEL " magic 0x%x offset 0x%x (%d) len 0x%x (%d)\n",
9615 eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
9616 eeprom->len, eeprom->len);
9618 /* parameters already validated in ethtool_set_eeprom */
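/* The PHY firmware upgrade flow is driven by eeprom->magic:
 * 'PHYP' (0x50485950) prepares the PHY (link reset, and GPIO0 high for
 * the SFX7101), 'PHYR' (0x50485952) re-initializes the link after the
 * upgrade, and the completion magic (0x53985943 in this code, which
 * notably is not the ASCII encoding 0x50485943 of 'PHYC') drops GPIO0,
 * soft-resets the SFX7101 and hw-resets the external PHY. Anything else
 * falls through to a plain NVRAM write.
 */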
9620 /* PHY eeprom can be accessed only by the PMF */
9621 if ((eeprom->magic >= 0x50485900) && (eeprom->magic <= 0x504859FF) &&
9625 if (eeprom->magic == 0x50485950) {
9626 /* 'PHYP' (0x50485950): prepare phy for FW upgrade */
9627 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
9629 bnx2x_acquire_phy_lock(bp);
9630 rc |= bnx2x_link_reset(&bp->link_params,
9632 if (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config) ==
9633 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101)
9634 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_0,
9635 MISC_REGISTERS_GPIO_HIGH, port);
9636 bnx2x_release_phy_lock(bp);
9637 bnx2x_link_report(bp);
9639 } else if (eeprom->magic == 0x50485952) {
9640 /* 'PHYR' (0x50485952): re-init link after FW upgrade */
9641 if ((bp->state == BNX2X_STATE_OPEN) ||
9642 (bp->state == BNX2X_STATE_DISABLED)) {
9643 bnx2x_acquire_phy_lock(bp);
9644 rc |= bnx2x_link_reset(&bp->link_params,
9647 rc |= bnx2x_phy_init(&bp->link_params,
9649 bnx2x_release_phy_lock(bp);
9650 bnx2x_calc_fc_adv(bp);
9652 } else if (eeprom->magic == 0x53985943) {
9653 /* 'PHYC' (0x53985943): PHY FW upgrade completed */
9654 if (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config) ==
9655 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101) {
9657 XGXS_EXT_PHY_ADDR(bp->link_params.ext_phy_config);
9659 /* take the DSP out of download mode */
9660 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_0,
9661 MISC_REGISTERS_GPIO_LOW, port);
9663 bnx2x_acquire_phy_lock(bp);
9665 bnx2x_sfx7101_sp_sw_reset(bp, port, ext_phy_addr);
9667 /* wait 0.5 sec to allow it to run */
9669 bnx2x_ext_phy_hw_reset(bp, port);
9671 bnx2x_release_phy_lock(bp);
9674 rc = bnx2x_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
9679 static int bnx2x_get_coalesce(struct net_device *dev,
9680 struct ethtool_coalesce *coal)
9682 struct bnx2x *bp = netdev_priv(dev);
9684 memset(coal, 0, sizeof(struct ethtool_coalesce));
9686 coal->rx_coalesce_usecs = bp->rx_ticks;
9687 coal->tx_coalesce_usecs = bp->tx_ticks;
9692 #define BNX2X_MAX_COALES_TOUT (0xf0*12) /* Maximal coalescing timeout in us */
9693 static int bnx2x_set_coalesce(struct net_device *dev,
9694 struct ethtool_coalesce *coal)
9696 struct bnx2x *bp = netdev_priv(dev);
9698 bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
9699 if (bp->rx_ticks > BNX2X_MAX_COALES_TOUT)
9700 bp->rx_ticks = BNX2X_MAX_COALES_TOUT;
9702 bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
9703 if (bp->tx_ticks > BNX2X_MAX_COALES_TOUT)
9704 bp->tx_ticks = BNX2X_MAX_COALES_TOUT;
9706 if (netif_running(dev))
9707 bnx2x_update_coalesce(bp);
9712 static void bnx2x_get_ringparam(struct net_device *dev,
9713 struct ethtool_ringparam *ering)
9715 struct bnx2x *bp = netdev_priv(dev);
9717 ering->rx_max_pending = MAX_RX_AVAIL;
9718 ering->rx_mini_max_pending = 0;
9719 ering->rx_jumbo_max_pending = 0;
9721 ering->rx_pending = bp->rx_ring_size;
9722 ering->rx_mini_pending = 0;
9723 ering->rx_jumbo_pending = 0;
9725 ering->tx_max_pending = MAX_TX_AVAIL;
9726 ering->tx_pending = bp->tx_ring_size;
9729 static int bnx2x_set_ringparam(struct net_device *dev,
9730 struct ethtool_ringparam *ering)
9732 struct bnx2x *bp = netdev_priv(dev);
9735 if ((ering->rx_pending > MAX_RX_AVAIL) ||
9736 (ering->tx_pending > MAX_TX_AVAIL) ||
9737 (ering->tx_pending <= MAX_SKB_FRAGS + 4))
9740 bp->rx_ring_size = ering->rx_pending;
9741 bp->tx_ring_size = ering->tx_pending;
9743 if (netif_running(dev)) {
9744 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
9745 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
9751 static void bnx2x_get_pauseparam(struct net_device *dev,
9752 struct ethtool_pauseparam *epause)
9754 struct bnx2x *bp = netdev_priv(dev);
9756 epause->autoneg = (bp->link_params.req_flow_ctrl ==
9757 BNX2X_FLOW_CTRL_AUTO) &&
9758 (bp->link_params.req_line_speed == SPEED_AUTO_NEG);
9760 epause->rx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) ==
9761 BNX2X_FLOW_CTRL_RX);
9762 epause->tx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX) ==
9763 BNX2X_FLOW_CTRL_TX);
9765 DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
9766 DP_LEVEL " autoneg %d rx_pause %d tx_pause %d\n",
9767 epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
9770 static int bnx2x_set_pauseparam(struct net_device *dev,
9771 struct ethtool_pauseparam *epause)
9773 struct bnx2x *bp = netdev_priv(dev);
9778 DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
9779 DP_LEVEL " autoneg %d rx_pause %d tx_pause %d\n",
9780 epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
9782 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO;
9784 if (epause->rx_pause)
9785 bp->link_params.req_flow_ctrl |= BNX2X_FLOW_CTRL_RX;
9787 if (epause->tx_pause)
9788 bp->link_params.req_flow_ctrl |= BNX2X_FLOW_CTRL_TX;
9790 if (bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO)
9791 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_NONE;
9793 if (epause->autoneg) {
9794 if (!(bp->port.supported & SUPPORTED_Autoneg)) {
9795 DP(NETIF_MSG_LINK, "autoneg not supported\n");
9799 if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
9800 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO;
9804 "req_flow_ctrl 0x%x\n", bp->link_params.req_flow_ctrl);
9806 if (netif_running(dev)) {
9807 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
9814 static int bnx2x_set_flags(struct net_device *dev, u32 data)
9816 struct bnx2x *bp = netdev_priv(dev);
9820 /* TPA requires Rx CSUM offloading */
9821 if ((data & ETH_FLAG_LRO) && bp->rx_csum) {
9822 if (!(dev->features & NETIF_F_LRO)) {
9823 dev->features |= NETIF_F_LRO;
9824 bp->flags |= TPA_ENABLE_FLAG;
9828 } else if (dev->features & NETIF_F_LRO) {
9829 dev->features &= ~NETIF_F_LRO;
9830 bp->flags &= ~TPA_ENABLE_FLAG;
9834 if (changed && netif_running(dev)) {
9835 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
9836 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
9842 static u32 bnx2x_get_rx_csum(struct net_device *dev)
9844 struct bnx2x *bp = netdev_priv(dev);
9849 static int bnx2x_set_rx_csum(struct net_device *dev, u32 data)
9851 struct bnx2x *bp = netdev_priv(dev);
9856 /* Disable TPA when Rx CSUM is disabled; otherwise all
9857 TPA'ed packets will be discarded due to a wrong TCP CSUM */
9859 u32 flags = ethtool_op_get_flags(dev);
9861 rc = bnx2x_set_flags(dev, (flags & ~ETH_FLAG_LRO));
9867 static int bnx2x_set_tso(struct net_device *dev, u32 data)
9870 dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
9871 dev->features |= NETIF_F_TSO6;
9873 dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO_ECN);
9874 dev->features &= ~NETIF_F_TSO6;
9880 static const struct {
9881 char string[ETH_GSTRING_LEN];
9882 } bnx2x_tests_str_arr[BNX2X_NUM_TESTS] = {
9883 { "register_test (offline)" },
9884 { "memory_test (offline)" },
9885 { "loopback_test (offline)" },
9886 { "nvram_test (online)" },
9887 { "interrupt_test (online)" },
9888 { "link_test (online)" },
9889 { "idle check (online)" }
9892 static int bnx2x_test_registers(struct bnx2x *bp)
9894 int idx, i, rc = -ENODEV;
9896 int port = BP_PORT(bp);
9897 static const struct {
9902 /* 0 */ { BRB1_REG_PAUSE_LOW_THRESHOLD_0, 4, 0x000003ff },
9903 { DORQ_REG_DB_ADDR0, 4, 0xffffffff },
9904 { HC_REG_AGG_INT_0, 4, 0x000003ff },
9905 { PBF_REG_MAC_IF0_ENABLE, 4, 0x00000001 },
9906 { PBF_REG_P0_INIT_CRD, 4, 0x000007ff },
9907 { PRS_REG_CID_PORT_0, 4, 0x00ffffff },
9908 { PXP2_REG_PSWRQ_CDU0_L2P, 4, 0x000fffff },
9909 { PXP2_REG_RQ_CDU0_EFIRST_MEM_ADDR, 8, 0x0003ffff },
9910 { PXP2_REG_PSWRQ_TM0_L2P, 4, 0x000fffff },
9911 { PXP2_REG_RQ_USDM0_EFIRST_MEM_ADDR, 8, 0x0003ffff },
9912 /* 10 */ { PXP2_REG_PSWRQ_TSDM0_L2P, 4, 0x000fffff },
9913 { QM_REG_CONNNUM_0, 4, 0x000fffff },
9914 { TM_REG_LIN0_MAX_ACTIVE_CID, 4, 0x0003ffff },
9915 { SRC_REG_KEYRSS0_0, 40, 0xffffffff },
9916 { SRC_REG_KEYRSS0_7, 40, 0xffffffff },
9917 { XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD00, 4, 0x00000001 },
9918 { XCM_REG_WU_DA_CNT_CMD00, 4, 0x00000003 },
9919 { XCM_REG_GLB_DEL_ACK_MAX_CNT_0, 4, 0x000000ff },
9920 { NIG_REG_LLH0_T_BIT, 4, 0x00000001 },
9921 { NIG_REG_EMAC0_IN_EN, 4, 0x00000001 },
9922 /* 20 */ { NIG_REG_BMAC0_IN_EN, 4, 0x00000001 },
9923 { NIG_REG_XCM0_OUT_EN, 4, 0x00000001 },
9924 { NIG_REG_BRB0_OUT_EN, 4, 0x00000001 },
9925 { NIG_REG_LLH0_XCM_MASK, 4, 0x00000007 },
9926 { NIG_REG_LLH0_ACPI_PAT_6_LEN, 68, 0x000000ff },
9927 { NIG_REG_LLH0_ACPI_PAT_0_CRC, 68, 0xffffffff },
9928 { NIG_REG_LLH0_DEST_MAC_0_0, 160, 0xffffffff },
9929 { NIG_REG_LLH0_DEST_IP_0_1, 160, 0xffffffff },
9930 { NIG_REG_LLH0_IPV4_IPV6_0, 160, 0x00000001 },
9931 { NIG_REG_LLH0_DEST_UDP_0, 160, 0x0000ffff },
9932 /* 30 */ { NIG_REG_LLH0_DEST_TCP_0, 160, 0x0000ffff },
9933 { NIG_REG_LLH0_VLAN_ID_0, 160, 0x00000fff },
9934 { NIG_REG_XGXS_SERDES0_MODE_SEL, 4, 0x00000001 },
9935 { NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0, 4, 0x00000001 },
9936 { NIG_REG_STATUS_INTERRUPT_PORT0, 4, 0x07ffffff },
9937 { NIG_REG_XGXS0_CTRL_EXTREMOTEMDIOST, 24, 0x00000001 },
9938 { NIG_REG_SERDES0_CTRL_PHY_ADDR, 16, 0x0000001f },
9940 { 0xffffffff, 0, 0x00000000 }
9943 if (!netif_running(bp->dev))
9946 /* Repeat the test twice:
9947 first by writing 0x00000000, then by writing 0xffffffff */
9948 for (idx = 0; idx < 2; idx++) {
9955 wr_val = 0xffffffff;
9959 for (i = 0; reg_tbl[i].offset0 != 0xffffffff; i++) {
9960 u32 offset, mask, save_val, val;
9962 offset = reg_tbl[i].offset0 + port*reg_tbl[i].offset1;
9963 mask = reg_tbl[i].mask;
9965 save_val = REG_RD(bp, offset);
9967 REG_WR(bp, offset, wr_val);
9968 val = REG_RD(bp, offset);
9970 /* Restore the original register's value */
9971 REG_WR(bp, offset, save_val);
9973 /* verify that the value is as expected */
9974 if ((val & mask) != (wr_val & mask))
9985 static int bnx2x_test_memory(struct bnx2x *bp)
9987 int i, j, rc = -ENODEV;
9989 static const struct {
9993 { CCM_REG_XX_DESCR_TABLE, CCM_REG_XX_DESCR_TABLE_SIZE },
9994 { CFC_REG_ACTIVITY_COUNTER, CFC_REG_ACTIVITY_COUNTER_SIZE },
9995 { CFC_REG_LINK_LIST, CFC_REG_LINK_LIST_SIZE },
9996 { DMAE_REG_CMD_MEM, DMAE_REG_CMD_MEM_SIZE },
9997 { TCM_REG_XX_DESCR_TABLE, TCM_REG_XX_DESCR_TABLE_SIZE },
9998 { UCM_REG_XX_DESCR_TABLE, UCM_REG_XX_DESCR_TABLE_SIZE },
9999 { XCM_REG_XX_DESCR_TABLE, XCM_REG_XX_DESCR_TABLE_SIZE },
10003 static const struct {
10009 { "CCM_PRTY_STS", CCM_REG_CCM_PRTY_STS, 0x3ffc0, 0 },
10010 { "CFC_PRTY_STS", CFC_REG_CFC_PRTY_STS, 0x2, 0x2 },
10011 { "DMAE_PRTY_STS", DMAE_REG_DMAE_PRTY_STS, 0, 0 },
10012 { "TCM_PRTY_STS", TCM_REG_TCM_PRTY_STS, 0x3ffc0, 0 },
10013 { "UCM_PRTY_STS", UCM_REG_UCM_PRTY_STS, 0x3ffc0, 0 },
10014 { "XCM_PRTY_STS", XCM_REG_XCM_PRTY_STS, 0x3ffc1, 0 },
10016 { NULL, 0xffffffff, 0, 0 }
10019 if (!netif_running(bp->dev))
10022 /* Go through all the memories */
10023 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++)
10024 for (j = 0; j < mem_tbl[i].size; j++)
10025 REG_RD(bp, mem_tbl[i].offset + j*4);
10027 /* Check the parity status */
10028 for (i = 0; prty_tbl[i].offset != 0xffffffff; i++) {
10029 val = REG_RD(bp, prty_tbl[i].offset);
10030 if ((CHIP_IS_E1(bp) && (val & ~(prty_tbl[i].e1_mask))) ||
10031 (CHIP_IS_E1H(bp) && (val & ~(prty_tbl[i].e1h_mask)))) {
10033 "%s is 0x%x\n", prty_tbl[i].name, val);
10034 goto test_mem_exit;
10044 static void bnx2x_wait_for_link(struct bnx2x *bp, u8 link_up)
10049 while (bnx2x_link_test(bp) && cnt--)
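/* The loopback test below builds a minimal self-addressed frame (our own
 * MAC as destination, a zeroed source, 0x77 filler for the rest of the
 * Ethernet header and an incrementing payload), posts it as a start BD
 * plus parsing BD on the Tx queue, rings the doorbell, and then checks
 * that exactly one packet advanced both the Tx and Rx consumer indices
 * and that the received CQE flags, length and payload match what was sent.
 */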
10053 static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode, u8 link_up)
10055 unsigned int pkt_size, num_pkts, i;
10056 struct sk_buff *skb;
10057 unsigned char *packet;
10058 struct bnx2x_fastpath *fp_rx = &bp->fp[0];
10059 struct bnx2x_fastpath *fp_tx = &bp->fp[bp->num_rx_queues];
10060 u16 tx_start_idx, tx_idx;
10061 u16 rx_start_idx, rx_idx;
10062 u16 pkt_prod, bd_prod;
10063 struct sw_tx_bd *tx_buf;
10064 struct eth_tx_start_bd *tx_start_bd;
10065 struct eth_tx_parse_bd *pbd = NULL;
10066 dma_addr_t mapping;
10067 union eth_rx_cqe *cqe;
10069 struct sw_rx_bd *rx_buf;
10073 /* check the loopback mode */
10074 switch (loopback_mode) {
10075 case BNX2X_PHY_LOOPBACK:
10076 if (bp->link_params.loopback_mode != LOOPBACK_XGXS_10)
10079 case BNX2X_MAC_LOOPBACK:
10080 bp->link_params.loopback_mode = LOOPBACK_BMAC;
10081 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
10087 /* prepare the loopback packet */
10088 pkt_size = (((bp->dev->mtu < ETH_MAX_PACKET_SIZE) ?
10089 bp->dev->mtu : ETH_MAX_PACKET_SIZE) + ETH_HLEN);
10090 skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
10093 goto test_loopback_exit;
10095 packet = skb_put(skb, pkt_size);
10096 memcpy(packet, bp->dev->dev_addr, ETH_ALEN);
10097 memset(packet + ETH_ALEN, 0, ETH_ALEN);
10098 memset(packet + 2*ETH_ALEN, 0x77, (ETH_HLEN - 2*ETH_ALEN));
10099 for (i = ETH_HLEN; i < pkt_size; i++)
10100 packet[i] = (unsigned char) (i & 0xff);
10102 /* send the loopback packet */
10104 tx_start_idx = le16_to_cpu(*fp_tx->tx_cons_sb);
10105 rx_start_idx = le16_to_cpu(*fp_rx->rx_cons_sb);
10107 pkt_prod = fp_tx->tx_pkt_prod++;
10108 tx_buf = &fp_tx->tx_buf_ring[TX_BD(pkt_prod)];
10109 tx_buf->first_bd = fp_tx->tx_bd_prod;
10113 bd_prod = TX_BD(fp_tx->tx_bd_prod);
10114 tx_start_bd = &fp_tx->tx_desc_ring[bd_prod].start_bd;
10115 mapping = pci_map_single(bp->pdev, skb->data,
10116 skb_headlen(skb), PCI_DMA_TODEVICE);
10117 tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
10118 tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
10119 tx_start_bd->nbd = cpu_to_le16(2); /* start + pbd */
10120 tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb));
10121 tx_start_bd->vlan = cpu_to_le16(pkt_prod);
10122 tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
10123 tx_start_bd->general_data = ((UNICAST_ADDRESS <<
10124 ETH_TX_START_BD_ETH_ADDR_TYPE_SHIFT) | 1);
10126 /* turn on parsing and get a BD */
10127 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
10128 pbd = &fp_tx->tx_desc_ring[bd_prod].parse_bd;
10130 memset(pbd, 0, sizeof(struct eth_tx_parse_bd));
10134 fp_tx->tx_db.data.prod += 2;
10136 DOORBELL(bp, fp_tx->index - bp->num_rx_queues, fp_tx->tx_db.raw);
10141 fp_tx->tx_bd_prod += 2; /* start + pbd */
10142 bp->dev->trans_start = jiffies;
10146 tx_idx = le16_to_cpu(*fp_tx->tx_cons_sb);
10147 if (tx_idx != tx_start_idx + num_pkts)
10148 goto test_loopback_exit;
10150 rx_idx = le16_to_cpu(*fp_rx->rx_cons_sb);
10151 if (rx_idx != rx_start_idx + num_pkts)
10152 goto test_loopback_exit;
10154 cqe = &fp_rx->rx_comp_ring[RCQ_BD(fp_rx->rx_comp_cons)];
10155 cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
10156 if (CQE_TYPE(cqe_fp_flags) || (cqe_fp_flags & ETH_RX_ERROR_FALGS))
10157 goto test_loopback_rx_exit;
10159 len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
10160 if (len != pkt_size)
10161 goto test_loopback_rx_exit;
10163 rx_buf = &fp_rx->rx_buf_ring[RX_BD(fp_rx->rx_bd_cons)];
10165 skb_reserve(skb, cqe->fast_path_cqe.placement_offset);
10166 for (i = ETH_HLEN; i < pkt_size; i++)
10167 if (*(skb->data + i) != (unsigned char) (i & 0xff))
10168 goto test_loopback_rx_exit;
10172 test_loopback_rx_exit:
10174 fp_rx->rx_bd_cons = NEXT_RX_IDX(fp_rx->rx_bd_cons);
10175 fp_rx->rx_bd_prod = NEXT_RX_IDX(fp_rx->rx_bd_prod);
10176 fp_rx->rx_comp_cons = NEXT_RCQ_IDX(fp_rx->rx_comp_cons);
10177 fp_rx->rx_comp_prod = NEXT_RCQ_IDX(fp_rx->rx_comp_prod);
10179 /* Update producers */
10180 bnx2x_update_rx_prod(bp, fp_rx, fp_rx->rx_bd_prod, fp_rx->rx_comp_prod,
10181 fp_rx->rx_sge_prod);
10183 test_loopback_exit:
10184 bp->link_params.loopback_mode = LOOPBACK_NONE;
10189 static int bnx2x_test_loopback(struct bnx2x *bp, u8 link_up)
10193 if (!netif_running(bp->dev))
10194 return BNX2X_LOOPBACK_FAILED;
10196 bnx2x_netif_stop(bp, 1);
10197 bnx2x_acquire_phy_lock(bp);
10199 res = bnx2x_run_loopback(bp, BNX2X_PHY_LOOPBACK, link_up);
10201 DP(NETIF_MSG_PROBE, " PHY loopback failed (res %d)\n", res);
10202 rc |= BNX2X_PHY_LOOPBACK_FAILED;
10205 res = bnx2x_run_loopback(bp, BNX2X_MAC_LOOPBACK, link_up);
10207 DP(NETIF_MSG_PROBE, " MAC loopback failed (res %d)\n", res);
10208 rc |= BNX2X_MAC_LOOPBACK_FAILED;
10211 bnx2x_release_phy_lock(bp);
10212 bnx2x_netif_start(bp);
10217 #define CRC32_RESIDUAL 0xdebb20e3
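/* 0xdebb20e3 is the standard CRC-32 residual: running the little-endian
 * Ethernet CRC over a block that already carries its own correct CRC
 * always yields this constant, so each NVRAM region can be verified
 * without knowing where the CRC field sits inside the region.
 */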
10219 static int bnx2x_test_nvram(struct bnx2x *bp)
10221 static const struct {
10225 { 0, 0x14 }, /* bootstrap */
10226 { 0x14, 0xec }, /* dir */
10227 { 0x100, 0x350 }, /* manuf_info */
10228 { 0x450, 0xf0 }, /* feature_info */
10229 { 0x640, 0x64 }, /* upgrade_key_info */
10231 { 0x708, 0x70 }, /* manuf_key_info */
10235 __be32 buf[0x350 / 4];
10236 u8 *data = (u8 *)buf;
10240 rc = bnx2x_nvram_read(bp, 0, data, 4);
10242 DP(NETIF_MSG_PROBE, "magic value read (rc %d)\n", rc);
10243 goto test_nvram_exit;
10246 magic = be32_to_cpu(buf[0]);
10247 if (magic != 0x669955aa) {
10248 DP(NETIF_MSG_PROBE, "magic value (0x%08x)\n", magic);
10250 goto test_nvram_exit;
10253 for (i = 0; nvram_tbl[i].size; i++) {
10255 rc = bnx2x_nvram_read(bp, nvram_tbl[i].offset, data,
10256 nvram_tbl[i].size);
10258 DP(NETIF_MSG_PROBE,
10259 "nvram_tbl[%d] read data (rc %d)\n", i, rc);
10260 goto test_nvram_exit;
10263 crc = ether_crc_le(nvram_tbl[i].size, data);
10264 if (crc != CRC32_RESIDUAL) {
10265 DP(NETIF_MSG_PROBE,
10266 "nvram_tbl[%d] crc value (0x%08x)\n", i, crc);
10268 goto test_nvram_exit;
10276 static int bnx2x_test_intr(struct bnx2x *bp)
10278 struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
10281 if (!netif_running(bp->dev))
10284 config->hdr.length = 0;
10285 if (CHIP_IS_E1(bp))
10286 config->hdr.offset = (BP_PORT(bp) ? 32 : 0);
10288 config->hdr.offset = BP_FUNC(bp);
10289 config->hdr.client_id = bp->fp->cl_id;
10290 config->hdr.reserved1 = 0;
10292 bp->set_mac_pending++;
10294 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
10295 U64_HI(bnx2x_sp_mapping(bp, mac_config)),
10296 U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
10298 for (i = 0; i < 10; i++) {
10299 if (!bp->set_mac_pending)
10302 msleep_interruptible(10);
10311 static void bnx2x_self_test(struct net_device *dev,
10312 struct ethtool_test *etest, u64 *buf)
10314 struct bnx2x *bp = netdev_priv(dev);
10316 memset(buf, 0, sizeof(u64) * BNX2X_NUM_TESTS);
10318 if (!netif_running(dev))
10321 /* offline tests are not supported in MF mode */
10323 etest->flags &= ~ETH_TEST_FL_OFFLINE;
10325 if (etest->flags & ETH_TEST_FL_OFFLINE) {
10326 int port = BP_PORT(bp);
10330 /* save current value of input enable for TX port IF */
10331 val = REG_RD(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4);
10332 /* disable input for TX port IF */
10333 REG_WR(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4, 0);
10335 link_up = bp->link_vars.link_up;
10336 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
10337 bnx2x_nic_load(bp, LOAD_DIAG);
10338 /* wait until link state is restored */
10339 bnx2x_wait_for_link(bp, link_up);
10341 if (bnx2x_test_registers(bp) != 0) {
10343 etest->flags |= ETH_TEST_FL_FAILED;
10345 if (bnx2x_test_memory(bp) != 0) {
10347 etest->flags |= ETH_TEST_FL_FAILED;
10349 buf[2] = bnx2x_test_loopback(bp, link_up);
10351 etest->flags |= ETH_TEST_FL_FAILED;
10353 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
10355 /* restore input for TX port IF */
10356 REG_WR(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4, val);
10358 bnx2x_nic_load(bp, LOAD_NORMAL);
10359 /* wait until link state is restored */
10360 bnx2x_wait_for_link(bp, link_up);
10362 if (bnx2x_test_nvram(bp) != 0) {
10364 etest->flags |= ETH_TEST_FL_FAILED;
10366 if (bnx2x_test_intr(bp) != 0) {
10368 etest->flags |= ETH_TEST_FL_FAILED;
10371 if (bnx2x_link_test(bp) != 0) {
10373 etest->flags |= ETH_TEST_FL_FAILED;
10376 #ifdef BNX2X_EXTRA_DEBUG
10377 bnx2x_panic_dump(bp);
10381 static const struct {
10384 u8 string[ETH_GSTRING_LEN];
10385 } bnx2x_q_stats_arr[BNX2X_NUM_Q_STATS] = {
10386 /* 1 */ { Q_STATS_OFFSET32(total_bytes_received_hi), 8, "[%d]: rx_bytes" },
10387 { Q_STATS_OFFSET32(error_bytes_received_hi),
10388 8, "[%d]: rx_error_bytes" },
10389 { Q_STATS_OFFSET32(total_unicast_packets_received_hi),
10390 8, "[%d]: rx_ucast_packets" },
10391 { Q_STATS_OFFSET32(total_multicast_packets_received_hi),
10392 8, "[%d]: rx_mcast_packets" },
10393 { Q_STATS_OFFSET32(total_broadcast_packets_received_hi),
10394 8, "[%d]: rx_bcast_packets" },
10395 { Q_STATS_OFFSET32(no_buff_discard_hi), 8, "[%d]: rx_discards" },
10396 { Q_STATS_OFFSET32(rx_err_discard_pkt),
10397 4, "[%d]: rx_phy_ip_err_discards"},
10398 { Q_STATS_OFFSET32(rx_skb_alloc_failed),
10399 4, "[%d]: rx_skb_alloc_discard" },
10400 { Q_STATS_OFFSET32(hw_csum_err), 4, "[%d]: rx_csum_offload_errors" },
10402 /* 10 */{ Q_STATS_OFFSET32(total_bytes_transmitted_hi), 8, "[%d]: tx_bytes" },
10403 { Q_STATS_OFFSET32(total_unicast_packets_transmitted_hi),
10404 8, "[%d]: tx_packets" }
10407 static const struct {
10411 #define STATS_FLAGS_PORT 1
10412 #define STATS_FLAGS_FUNC 2
10413 #define STATS_FLAGS_BOTH (STATS_FLAGS_FUNC | STATS_FLAGS_PORT)
10414 u8 string[ETH_GSTRING_LEN];
10415 } bnx2x_stats_arr[BNX2X_NUM_STATS] = {
10416 /* 1 */ { STATS_OFFSET32(total_bytes_received_hi),
10417 8, STATS_FLAGS_BOTH, "rx_bytes" },
10418 { STATS_OFFSET32(error_bytes_received_hi),
10419 8, STATS_FLAGS_BOTH, "rx_error_bytes" },
10420 { STATS_OFFSET32(total_unicast_packets_received_hi),
10421 8, STATS_FLAGS_BOTH, "rx_ucast_packets" },
10422 { STATS_OFFSET32(total_multicast_packets_received_hi),
10423 8, STATS_FLAGS_BOTH, "rx_mcast_packets" },
10424 { STATS_OFFSET32(total_broadcast_packets_received_hi),
10425 8, STATS_FLAGS_BOTH, "rx_bcast_packets" },
10426 { STATS_OFFSET32(rx_stat_dot3statsfcserrors_hi),
10427 8, STATS_FLAGS_PORT, "rx_crc_errors" },
10428 { STATS_OFFSET32(rx_stat_dot3statsalignmenterrors_hi),
10429 8, STATS_FLAGS_PORT, "rx_align_errors" },
10430 { STATS_OFFSET32(rx_stat_etherstatsundersizepkts_hi),
10431 8, STATS_FLAGS_PORT, "rx_undersize_packets" },
10432 { STATS_OFFSET32(etherstatsoverrsizepkts_hi),
10433 8, STATS_FLAGS_PORT, "rx_oversize_packets" },
10434 /* 10 */{ STATS_OFFSET32(rx_stat_etherstatsfragments_hi),
10435 8, STATS_FLAGS_PORT, "rx_fragments" },
10436 { STATS_OFFSET32(rx_stat_etherstatsjabbers_hi),
10437 8, STATS_FLAGS_PORT, "rx_jabbers" },
10438 { STATS_OFFSET32(no_buff_discard_hi),
10439 8, STATS_FLAGS_BOTH, "rx_discards" },
10440 { STATS_OFFSET32(mac_filter_discard),
10441 4, STATS_FLAGS_PORT, "rx_filtered_packets" },
10442 { STATS_OFFSET32(xxoverflow_discard),
10443 4, STATS_FLAGS_PORT, "rx_fw_discards" },
10444 { STATS_OFFSET32(brb_drop_hi),
10445 8, STATS_FLAGS_PORT, "rx_brb_discard" },
10446 { STATS_OFFSET32(brb_truncate_hi),
10447 8, STATS_FLAGS_PORT, "rx_brb_truncate" },
10448 { STATS_OFFSET32(pause_frames_received_hi),
10449 8, STATS_FLAGS_PORT, "rx_pause_frames" },
10450 { STATS_OFFSET32(rx_stat_maccontrolframesreceived_hi),
10451 8, STATS_FLAGS_PORT, "rx_mac_ctrl_frames" },
10452 { STATS_OFFSET32(nig_timer_max),
10453 4, STATS_FLAGS_PORT, "rx_constant_pause_events" },
10454 /* 20 */{ STATS_OFFSET32(rx_err_discard_pkt),
10455 4, STATS_FLAGS_BOTH, "rx_phy_ip_err_discards"},
10456 { STATS_OFFSET32(rx_skb_alloc_failed),
10457 4, STATS_FLAGS_BOTH, "rx_skb_alloc_discard" },
10458 { STATS_OFFSET32(hw_csum_err),
10459 4, STATS_FLAGS_BOTH, "rx_csum_offload_errors" },
10461 { STATS_OFFSET32(total_bytes_transmitted_hi),
10462 8, STATS_FLAGS_BOTH, "tx_bytes" },
10463 { STATS_OFFSET32(tx_stat_ifhcoutbadoctets_hi),
10464 8, STATS_FLAGS_PORT, "tx_error_bytes" },
10465 { STATS_OFFSET32(total_unicast_packets_transmitted_hi),
10466 8, STATS_FLAGS_BOTH, "tx_packets" },
10467 { STATS_OFFSET32(tx_stat_dot3statsinternalmactransmiterrors_hi),
10468 8, STATS_FLAGS_PORT, "tx_mac_errors" },
10469 { STATS_OFFSET32(rx_stat_dot3statscarriersenseerrors_hi),
10470 8, STATS_FLAGS_PORT, "tx_carrier_errors" },
10471 { STATS_OFFSET32(tx_stat_dot3statssinglecollisionframes_hi),
10472 8, STATS_FLAGS_PORT, "tx_single_collisions" },
10473 { STATS_OFFSET32(tx_stat_dot3statsmultiplecollisionframes_hi),
10474 8, STATS_FLAGS_PORT, "tx_multi_collisions" },
10475 /* 30 */{ STATS_OFFSET32(tx_stat_dot3statsdeferredtransmissions_hi),
10476 8, STATS_FLAGS_PORT, "tx_deferred" },
10477 { STATS_OFFSET32(tx_stat_dot3statsexcessivecollisions_hi),
10478 8, STATS_FLAGS_PORT, "tx_excess_collisions" },
10479 { STATS_OFFSET32(tx_stat_dot3statslatecollisions_hi),
10480 8, STATS_FLAGS_PORT, "tx_late_collisions" },
10481 { STATS_OFFSET32(tx_stat_etherstatscollisions_hi),
10482 8, STATS_FLAGS_PORT, "tx_total_collisions" },
10483 { STATS_OFFSET32(tx_stat_etherstatspkts64octets_hi),
10484 8, STATS_FLAGS_PORT, "tx_64_byte_packets" },
10485 { STATS_OFFSET32(tx_stat_etherstatspkts65octetsto127octets_hi),
10486 8, STATS_FLAGS_PORT, "tx_65_to_127_byte_packets" },
10487 { STATS_OFFSET32(tx_stat_etherstatspkts128octetsto255octets_hi),
10488 8, STATS_FLAGS_PORT, "tx_128_to_255_byte_packets" },
10489 { STATS_OFFSET32(tx_stat_etherstatspkts256octetsto511octets_hi),
10490 8, STATS_FLAGS_PORT, "tx_256_to_511_byte_packets" },
10491 { STATS_OFFSET32(tx_stat_etherstatspkts512octetsto1023octets_hi),
10492 8, STATS_FLAGS_PORT, "tx_512_to_1023_byte_packets" },
10493 { STATS_OFFSET32(etherstatspkts1024octetsto1522octets_hi),
10494 8, STATS_FLAGS_PORT, "tx_1024_to_1522_byte_packets" },
10495 /* 40 */{ STATS_OFFSET32(etherstatspktsover1522octets_hi),
10496 8, STATS_FLAGS_PORT, "tx_1523_to_9022_byte_packets" },
10497 { STATS_OFFSET32(pause_frames_sent_hi),
10498 8, STATS_FLAGS_PORT, "tx_pause_frames" }
10501 #define IS_PORT_STAT(i) \
10502 ((bnx2x_stats_arr[i].flags & STATS_FLAGS_BOTH) == STATS_FLAGS_PORT)
10503 #define IS_FUNC_STAT(i) (bnx2x_stats_arr[i].flags & STATS_FLAGS_FUNC)
10504 #define IS_E1HMF_MODE_STAT(bp) \
10505 (IS_E1HMF(bp) && !(bp->msglevel & BNX2X_MSG_STATS))
10507 static int bnx2x_get_sset_count(struct net_device *dev, int stringset)
10509 struct bnx2x *bp = netdev_priv(dev);
10512 switch(stringset) {
10514 if (is_multi(bp)) {
10515 num_stats = BNX2X_NUM_Q_STATS * bp->num_rx_queues;
10516 if (!IS_E1HMF_MODE_STAT(bp))
10517 num_stats += BNX2X_NUM_STATS;
10519 if (IS_E1HMF_MODE_STAT(bp)) {
10521 for (i = 0; i < BNX2X_NUM_STATS; i++)
10522 if (IS_FUNC_STAT(i))
10525 num_stats = BNX2X_NUM_STATS;
10530 return BNX2X_NUM_TESTS;
10537 static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
10539 struct bnx2x *bp = netdev_priv(dev);
10542 switch (stringset) {
10544 if (is_multi(bp)) {
10546 for_each_rx_queue(bp, i) {
10547 for (j = 0; j < BNX2X_NUM_Q_STATS; j++)
10548 sprintf(buf + (k + j)*ETH_GSTRING_LEN,
10549 bnx2x_q_stats_arr[j].string, i);
10550 k += BNX2X_NUM_Q_STATS;
10552 if (IS_E1HMF_MODE_STAT(bp))
10554 for (j = 0; j < BNX2X_NUM_STATS; j++)
10555 strcpy(buf + (k + j)*ETH_GSTRING_LEN,
10556 bnx2x_stats_arr[j].string);
10558 for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
10559 if (IS_E1HMF_MODE_STAT(bp) && IS_PORT_STAT(i))
10561 strcpy(buf + j*ETH_GSTRING_LEN,
10562 bnx2x_stats_arr[i].string);
10569 memcpy(buf, bnx2x_tests_str_arr, sizeof(bnx2x_tests_str_arr));
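/* Statistics are kept as arrays of 32-bit words. Entries with size 8 are
 * 64-bit counters stored as a hi word followed by a lo word and are
 * re-assembled with HILO_U64(); entries with size 4 are plain 32-bit
 * counters, and size 0 marks a placeholder that is skipped entirely.
 */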
10574 static void bnx2x_get_ethtool_stats(struct net_device *dev,
10575 struct ethtool_stats *stats, u64 *buf)
10577 struct bnx2x *bp = netdev_priv(dev);
10578 u32 *hw_stats, *offset;
10581 if (is_multi(bp)) {
10583 for_each_rx_queue(bp, i) {
10584 hw_stats = (u32 *)&bp->fp[i].eth_q_stats;
10585 for (j = 0; j < BNX2X_NUM_Q_STATS; j++) {
10586 if (bnx2x_q_stats_arr[j].size == 0) {
10587 /* skip this counter */
10591 offset = (hw_stats +
10592 bnx2x_q_stats_arr[j].offset);
10593 if (bnx2x_q_stats_arr[j].size == 4) {
10594 /* 4-byte counter */
10595 buf[k + j] = (u64) *offset;
10598 /* 8-byte counter */
10599 buf[k + j] = HILO_U64(*offset, *(offset + 1));
10601 k += BNX2X_NUM_Q_STATS;
10603 if (IS_E1HMF_MODE_STAT(bp))
10605 hw_stats = (u32 *)&bp->eth_stats;
10606 for (j = 0; j < BNX2X_NUM_STATS; j++) {
10607 if (bnx2x_stats_arr[j].size == 0) {
10608 /* skip this counter */
10612 offset = (hw_stats + bnx2x_stats_arr[j].offset);
10613 if (bnx2x_stats_arr[j].size == 4) {
10614 /* 4-byte counter */
10615 buf[k + j] = (u64) *offset;
10618 /* 8-byte counter */
10619 buf[k + j] = HILO_U64(*offset, *(offset + 1));
10622 hw_stats = (u32 *)&bp->eth_stats;
10623 for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
10624 if (IS_E1HMF_MODE_STAT(bp) && IS_PORT_STAT(i))
10626 if (bnx2x_stats_arr[i].size == 0) {
10627 /* skip this counter */
10632 offset = (hw_stats + bnx2x_stats_arr[i].offset);
10633 if (bnx2x_stats_arr[i].size == 4) {
10634 /* 4-byte counter */
10635 buf[j] = (u64) *offset;
10639 /* 8-byte counter */
10640 buf[j] = HILO_U64(*offset, *(offset + 1));
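/* phys_id blinks the port LED so the operator can locate the adapter:
 * data is the requested duration in seconds, and the loop below toggles
 * between LED_MODE_OPER and LED_MODE_OFF every 500ms (hence data * 2
 * iterations), then restores the LED according to the current link state.
 */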
10646 static int bnx2x_phys_id(struct net_device *dev, u32 data)
10648 struct bnx2x *bp = netdev_priv(dev);
10649 int port = BP_PORT(bp);
10652 if (!netif_running(dev))
10661 for (i = 0; i < (data * 2); i++) {
10663 bnx2x_set_led(bp, port, LED_MODE_OPER, SPEED_1000,
10664 bp->link_params.hw_led_mode,
10665 bp->link_params.chip_id);
10667 bnx2x_set_led(bp, port, LED_MODE_OFF, 0,
10668 bp->link_params.hw_led_mode,
10669 bp->link_params.chip_id);
10671 msleep_interruptible(500);
10672 if (signal_pending(current))
10676 if (bp->link_vars.link_up)
10677 bnx2x_set_led(bp, port, LED_MODE_OPER,
10678 bp->link_vars.line_speed,
10679 bp->link_params.hw_led_mode,
10680 bp->link_params.chip_id);
10685 static const struct ethtool_ops bnx2x_ethtool_ops = {
10686 .get_settings = bnx2x_get_settings,
10687 .set_settings = bnx2x_set_settings,
10688 .get_drvinfo = bnx2x_get_drvinfo,
10689 .get_regs_len = bnx2x_get_regs_len,
10690 .get_regs = bnx2x_get_regs,
10691 .get_wol = bnx2x_get_wol,
10692 .set_wol = bnx2x_set_wol,
10693 .get_msglevel = bnx2x_get_msglevel,
10694 .set_msglevel = bnx2x_set_msglevel,
10695 .nway_reset = bnx2x_nway_reset,
10696 .get_link = bnx2x_get_link,
10697 .get_eeprom_len = bnx2x_get_eeprom_len,
10698 .get_eeprom = bnx2x_get_eeprom,
10699 .set_eeprom = bnx2x_set_eeprom,
10700 .get_coalesce = bnx2x_get_coalesce,
10701 .set_coalesce = bnx2x_set_coalesce,
10702 .get_ringparam = bnx2x_get_ringparam,
10703 .set_ringparam = bnx2x_set_ringparam,
10704 .get_pauseparam = bnx2x_get_pauseparam,
10705 .set_pauseparam = bnx2x_set_pauseparam,
10706 .get_rx_csum = bnx2x_get_rx_csum,
10707 .set_rx_csum = bnx2x_set_rx_csum,
10708 .get_tx_csum = ethtool_op_get_tx_csum,
10709 .set_tx_csum = ethtool_op_set_tx_hw_csum,
10710 .set_flags = bnx2x_set_flags,
10711 .get_flags = ethtool_op_get_flags,
10712 .get_sg = ethtool_op_get_sg,
10713 .set_sg = ethtool_op_set_sg,
10714 .get_tso = ethtool_op_get_tso,
10715 .set_tso = bnx2x_set_tso,
10716 .self_test = bnx2x_self_test,
10717 .get_sset_count = bnx2x_get_sset_count,
10718 .get_strings = bnx2x_get_strings,
10719 .phys_id = bnx2x_phys_id,
10720 .get_ethtool_stats = bnx2x_get_ethtool_stats,
10723 /* end of ethtool_ops */
10725 /****************************************************************************
10726 * General service functions
10727 ****************************************************************************/
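/* Power state changes are performed directly on the PMCSR word in PCI
 * config space: D0 entry clears the state bits (with the mandatory delay
 * when coming out of D3hot), while D3hot entry sets the state bits and,
 * when WoL is armed, the PME_ENABLE bit so a magic packet can wake the
 * chip.
 */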
10729 static int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
10733 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);
10737 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
10738 ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
10739 PCI_PM_CTRL_PME_STATUS));
10741 if (pmcsr & PCI_PM_CTRL_STATE_MASK)
10742 /* delay required during transition out of D3hot */
10747 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
10751 pmcsr |= PCI_PM_CTRL_PME_ENABLE;
10753 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
10756 /* No more memory access after this point until
10757 * device is brought back to D0.
10767 static inline int bnx2x_has_rx_work(struct bnx2x_fastpath *fp)
10771 /* Tell compiler that status block fields can change */
10773 rx_cons_sb = le16_to_cpu(*fp->rx_cons_sb);
10774 if ((rx_cons_sb & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
10776 return (fp->rx_comp_cons != rx_cons_sb);
10780 * net_device service functions
10783 static int bnx2x_poll(struct napi_struct *napi, int budget)
10785 struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
10787 struct bnx2x *bp = fp->bp;
10790 #ifdef BNX2X_STOP_ON_ERROR
10791 if (unlikely(bp->panic))
10795 prefetch(fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)].skb);
10796 prefetch((char *)(fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)].skb) + 256);
10798 bnx2x_update_fpsb_idx(fp);
10800 if (bnx2x_has_rx_work(fp)) {
10801 work_done = bnx2x_rx_int(fp, budget);
10803 /* must not complete if we consumed the full budget */
10804 if (work_done >= budget)
10808 /* bnx2x_has_rx_work() reads the status block, thus we need to
10809 * ensure that the status block indices have actually been read
10810 * (bnx2x_update_fpsb_idx) prior to this check (bnx2x_has_rx_work)
10811 * so that we won't write the "newer" value of the status block to IGU
10812 * (if there was a DMA right after bnx2x_has_rx_work and
10813 * if there is no rmb, the memory reading (bnx2x_update_fpsb_idx)
10814 * may be postponed to right before bnx2x_ack_sb). In this case
10815 * there will never be another interrupt until there is another update
10816 * of the status block, while there is still unhandled work.
10820 if (!bnx2x_has_rx_work(fp)) {
10821 #ifdef BNX2X_STOP_ON_ERROR
10824 napi_complete(napi);
10826 bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID,
10827 le16_to_cpu(fp->fp_u_idx), IGU_INT_NOP, 1);
10828 bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID,
10829 le16_to_cpu(fp->fp_c_idx), IGU_INT_ENABLE, 1);
10837 /* We split the first BD into headers and data BDs
10838 * to ease the pain of our fellow microcode engineers;
10839 * we use one mapping for both BDs.
10840 * So far this has only been observed to happen
10841 * in Other Operating Systems(TM).
10843 static noinline u16 bnx2x_tx_split(struct bnx2x *bp,
10844 struct bnx2x_fastpath *fp,
10845 struct sw_tx_bd *tx_buf,
10846 struct eth_tx_start_bd **tx_bd, u16 hlen,
10847 u16 bd_prod, int nbd)
10849 struct eth_tx_start_bd *h_tx_bd = *tx_bd;
10850 struct eth_tx_bd *d_tx_bd;
10851 dma_addr_t mapping;
10852 int old_len = le16_to_cpu(h_tx_bd->nbytes);
10854 /* first fix first BD */
10855 h_tx_bd->nbd = cpu_to_le16(nbd);
10856 h_tx_bd->nbytes = cpu_to_le16(hlen);
10858 DP(NETIF_MSG_TX_QUEUED, "TSO split header size is %d "
10859 "(%x:%x) nbd %d\n", h_tx_bd->nbytes, h_tx_bd->addr_hi,
10860 h_tx_bd->addr_lo, h_tx_bd->nbd);
10862 /* now get a new data BD
10863 * (after the pbd) and fill it */
10864 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
10865 d_tx_bd = &fp->tx_desc_ring[bd_prod].reg_bd;
10867 mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi),
10868 le32_to_cpu(h_tx_bd->addr_lo)) + hlen;
10870 d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
10871 d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
10872 d_tx_bd->nbytes = cpu_to_le16(old_len - hlen);
10874 /* this marks the BD as one that has no individual mapping */
10875 tx_buf->flags |= BNX2X_TSO_SPLIT_BD;
10877 DP(NETIF_MSG_TX_QUEUED,
10878 "TSO split data size is %d (%x:%x)\n",
10879 d_tx_bd->nbytes, d_tx_bd->addr_hi, d_tx_bd->addr_lo);
	*tx_bd = (struct eth_tx_start_bd *)d_tx_bd;

	return bd_prod;
}
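/* Resulting layout (illustrative, for a first BD of old_len bytes split
 * at hlen, both BDs backed by the same DMA mapping):
 *
 *	before:	[ start BD: addr,        nbytes = old_len        ]
 *	after:	[ start BD: addr,        nbytes = hlen           ]
 *		[ data  BD: addr + hlen, nbytes = old_len - hlen ]
 *
 * Only one mapping exists for the pair, hence BNX2X_TSO_SPLIT_BD above,
 * so that the completion path unmaps it exactly once.
 */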
static inline u16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
{
	if (fix > 0)
		csum = (u16) ~csum_fold(csum_sub(csum,
				csum_partial(t_header - fix, fix, 0)));

	else if (fix < 0)
		csum = (u16) ~csum_fold(csum_add(csum,
				csum_partial(t_header, -fix, 0)));

	return swab16(csum);
}
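/* Worked reading of the fix-up above (illustrative): with fix > 0 the
 * checksum of the fix bytes preceding the transport header,
 * csum_partial(t_header - fix, fix, 0), is subtracted from the sum; with
 * fix < 0 the checksum of the -fix bytes at the transport header is added
 * instead. The adjusted sum is then folded to 16 bits, complemented and
 * byte-swapped into the form the parsing BD expects.
 */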
static inline u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
{
	u32 rc;

	if (skb->ip_summed != CHECKSUM_PARTIAL)
		rc = XMIT_PLAIN;

	else {
		if (skb->protocol == htons(ETH_P_IPV6)) {
			rc = XMIT_CSUM_V6;
			if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
				rc |= XMIT_CSUM_TCP;

		} else {
			rc = XMIT_CSUM_V4;
			if (ip_hdr(skb)->protocol == IPPROTO_TCP)
				rc |= XMIT_CSUM_TCP;
		}
	}

	if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
		rc |= XMIT_GSO_V4;

	else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
		rc |= XMIT_GSO_V6;

	return rc;
}
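/* Example (follows directly from the checks above): a CHECKSUM_PARTIAL
 * IPv4 TCP skb with SKB_GSO_TCPV4 set yields
 *
 *	xmit_type == (XMIT_CSUM_V4 | XMIT_CSUM_TCP | XMIT_GSO_V4);
 *
 * while an skb without checksum offload yields XMIT_PLAIN.
 */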
#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
/* Check if the packet requires linearization (i.e. it is too fragmented).
   There is no need to check fragmentation if the page size is above 8K
   (there can be no violation of the FW restrictions then). */
static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
			     u32 xmit_type)
{
	int to_copy = 0;
	int hlen = 0;
	int first_bd_sz = 0;
10940 /* 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */
10941 if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - 3)) {
10943 if (xmit_type & XMIT_GSO) {
10944 unsigned short lso_mss = skb_shinfo(skb)->gso_size;
10945 /* Check if LSO packet needs to be copied:
10946 3 = 1 (for headers BD) + 2 (for PBD and last BD) */
10947 int wnd_size = MAX_FETCH_BD - 3;
10948 /* Number of windows to check */
			int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size;
			int wnd_idx = 0;
			int frag_idx = 0;
			u32 wnd_sum = 0;
			/* Headers length */
			hlen = (int)(skb_transport_header(skb) - skb->data) +
				tcp_hdrlen(skb);

			/* Amount of data (w/o headers) on linear part of SKB */
			first_bd_sz = skb_headlen(skb) - hlen;
10961 wnd_sum = first_bd_sz;
10963 /* Calculate the first sum - it's special */
			for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++)
				wnd_sum +=
					skb_shinfo(skb)->frags[frag_idx].size;
10968 /* If there was data on linear skb data - check it */
			if (first_bd_sz > 0) {
				if (unlikely(wnd_sum < lso_mss)) {
					to_copy = 1;
					goto exit_lbl;
				}

				wnd_sum -= first_bd_sz;
			}
10978 /* Others are easier: run through the frag list and
10979 check all windows */
			for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) {
				wnd_sum +=
			  skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1].size;

				if (unlikely(wnd_sum < lso_mss)) {
					to_copy = 1;
					break;
				}
				wnd_sum -=
					skb_shinfo(skb)->frags[wnd_idx].size;
			}
		} else {
			/* a non-LSO packet that is too fragmented must
			   always be linearized */
			to_copy = 1;
		}
	}

exit_lbl:
	if (unlikely(to_copy))
11000 DP(NETIF_MSG_TX_QUEUED,
11001 "Linearization IS REQUIRED for %s packet. "
11002 "num_frags %d hlen %d first_bd_sz %d\n",
11003 (xmit_type & XMIT_GSO) ? "LSO" : "non-LSO",
		   skb_shinfo(skb)->nr_frags, hlen, first_bd_sz);

	return to_copy;
}
#endif
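/* Worked example (hypothetical sizes; assuming for illustration
 * MAX_FETCH_BD == 13, i.e. wnd_size == 10): an LSO skb with 12 frags has
 * num_wnds == 2, so three windows of 10 consecutive frags are checked:
 * frags[0..9] (together with any linear data), frags[1..10] and
 * frags[2..11]. If every window carries at least lso_mss bytes, no single
 * MSS can spread over more BDs than the FW allows; one short window is
 * enough to force skb_linearize().
 */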
11010 /* called with netif_tx_lock
11011 * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
11012 * netif_wake_queue()
11014 static netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);
11017 struct bnx2x_fastpath *fp, *fp_stat;
11018 struct netdev_queue *txq;
11019 struct sw_tx_bd *tx_buf;
11020 struct eth_tx_start_bd *tx_start_bd;
11021 struct eth_tx_bd *tx_data_bd, *total_pkt_bd = NULL;
11022 struct eth_tx_parse_bd *pbd = NULL;
	u16 pkt_prod, bd_prod;
	int nbd, fp_index;
	dma_addr_t mapping;
	u32 xmit_type = bnx2x_xmit_type(bp, skb);
	int i;
	u8 hlen = 0;
	__le16 pkt_size = 0;
11031 #ifdef BNX2X_STOP_ON_ERROR
11032 if (unlikely(bp->panic))
		return NETDEV_TX_BUSY;
#endif
11036 fp_index = skb_get_queue_mapping(skb);
11037 txq = netdev_get_tx_queue(dev, fp_index);
11039 fp = &bp->fp[fp_index + bp->num_rx_queues];
11040 fp_stat = &bp->fp[fp_index];
11042 if (unlikely(bnx2x_tx_avail(fp) < (skb_shinfo(skb)->nr_frags + 3))) {
11043 fp_stat->eth_q_stats.driver_xoff++;
11044 netif_tx_stop_queue(txq);
11045 BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
		return NETDEV_TX_BUSY;
	}
11049 DP(NETIF_MSG_TX_QUEUED, "SKB: summed %x protocol %x protocol(%x,%x)"
11050 " gso type %x xmit_type %x\n",
11051 skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
11052 ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type);
11054 #if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
	/* First, check if we need to linearize the skb (due to FW
	   restrictions). No need to check fragmentation if the page size
	   is above 8K (there can be no violation of FW restrictions then) */
	if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) {
		/* Statistics of linearization */
		bp->lin_cnt++;
		if (skb_linearize(skb) != 0) {
			DP(NETIF_MSG_TX_QUEUED, "SKB linearization failed - "
			   "silently dropping this SKB\n");
			dev_kfree_skb_any(skb);
			return NETDEV_TX_OK;
		}
	}
#endif
	/*
	Please read carefully. First we use one BD which we mark as start,
	then we have a parsing info BD (used for TSO or checksum),
	and only then we have the rest of the TSO BDs.
	(Don't forget to mark the last one as last,
	and to unmap only AFTER you write to the BD ...)
	And above all, all PBD sizes are in words - NOT DWORDS!
	*/
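	/* So the BD chain for one packet looks like (illustrative):
	 *
	 *	[start BD] -> [parse BD (PBD)] -> [data BD] ... [last data BD]
	 *
	 * with ETH_TX_BD_FLAGS_START_BD set on the first BD and nbd
	 * counting every BD in the chain, parsing BD included.
	 */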
11079 pkt_prod = fp->tx_pkt_prod++;
11080 bd_prod = TX_BD(fp->tx_bd_prod);
11082 /* get a tx_buf and first BD */
11083 tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)];
11084 tx_start_bd = &fp->tx_desc_ring[bd_prod].start_bd;
11086 tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
11087 tx_start_bd->general_data = (UNICAST_ADDRESS <<
11088 ETH_TX_START_BD_ETH_ADDR_TYPE_SHIFT);
11090 tx_start_bd->general_data |= (1 << ETH_TX_START_BD_HDR_NBDS_SHIFT);
11092 /* remember the first BD of the packet */
	tx_buf->first_bd = fp->tx_bd_prod;
	tx_buf->skb = skb;
	tx_buf->flags = 0;
11097 DP(NETIF_MSG_TX_QUEUED,
11098 "sending pkt %u @%p next_idx %u bd %u @%p\n",
11099 pkt_prod, tx_buf, fp->tx_pkt_prod, bd_prod, tx_start_bd);
#ifdef BCM_VLAN
	if ((bp->vlgrp != NULL) && vlan_tx_tag_present(skb) &&
	    (bp->flags & HW_VLAN_TX_FLAG)) {
		tx_start_bd->vlan = cpu_to_le16(vlan_tx_tag_get(skb));
		tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_VLAN_TAG;
	} else
#endif
		tx_start_bd->vlan = cpu_to_le16(pkt_prod);
11110 /* turn on parsing and get a BD */
11111 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
11112 pbd = &fp->tx_desc_ring[bd_prod].parse_bd;
11114 memset(pbd, 0, sizeof(struct eth_tx_parse_bd));
11116 if (xmit_type & XMIT_CSUM) {
11117 hlen = (skb_network_header(skb) - skb->data) / 2;
		/* for now NS flag is not used in Linux */
		pbd->global_data =
			(hlen | ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
				 ETH_TX_PARSE_BD_LLC_SNAP_EN_SHIFT));
11124 pbd->ip_hlen = (skb_transport_header(skb) -
11125 skb_network_header(skb)) / 2;
11127 hlen += pbd->ip_hlen + tcp_hdrlen(skb) / 2;
		pbd->total_hlen = cpu_to_le16(hlen);
		hlen = hlen*2;
11132 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_L4_CSUM;
11134 if (xmit_type & XMIT_CSUM_V4)
11135 tx_start_bd->bd_flags.as_bitfield |=
11136 ETH_TX_BD_FLAGS_IP_CSUM;
		else
			tx_start_bd->bd_flags.as_bitfield |=
						ETH_TX_BD_FLAGS_IPV6;
11141 if (xmit_type & XMIT_CSUM_TCP) {
			pbd->tcp_pseudo_csum = swab16(tcp_hdr(skb)->check);

		} else {
			s8 fix = SKB_CS_OFF(skb); /* signed! */
11147 pbd->global_data |= ETH_TX_PARSE_BD_UDP_CS_FLG;
11149 DP(NETIF_MSG_TX_QUEUED,
11150 "hlen %d fix %d csum before fix %x\n",
11151 le16_to_cpu(pbd->total_hlen), fix, SKB_CS(skb));
11153 /* HW bug: fixup the CSUM */
			pbd->tcp_pseudo_csum =
				bnx2x_csum_fix(skb_transport_header(skb),
					       SKB_CS(skb), fix);
			DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n",
			   pbd->tcp_pseudo_csum);
		}
	}
11163 mapping = pci_map_single(bp->pdev, skb->data,
11164 skb_headlen(skb), PCI_DMA_TODEVICE);
11166 tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
11167 tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
11168 nbd = skb_shinfo(skb)->nr_frags + 2; /* start_bd + pbd + frags */
11169 tx_start_bd->nbd = cpu_to_le16(nbd);
11170 tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb));
11171 pkt_size = tx_start_bd->nbytes;
11173 DP(NETIF_MSG_TX_QUEUED, "first bd @%p addr (%x:%x) nbd %d"
11174 " nbytes %d flags %x vlan %x\n",
11175 tx_start_bd, tx_start_bd->addr_hi, tx_start_bd->addr_lo,
11176 le16_to_cpu(tx_start_bd->nbd), le16_to_cpu(tx_start_bd->nbytes),
11177 tx_start_bd->bd_flags.as_bitfield, le16_to_cpu(tx_start_bd->vlan));
11179 if (xmit_type & XMIT_GSO) {
11181 DP(NETIF_MSG_TX_QUEUED,
11182 "TSO packet len %d hlen %d total len %d tso size %d\n",
11183 skb->len, hlen, skb_headlen(skb),
11184 skb_shinfo(skb)->gso_size);
11186 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;
11188 if (unlikely(skb_headlen(skb) > hlen))
11189 bd_prod = bnx2x_tx_split(bp, fp, tx_buf, &tx_start_bd,
11190 hlen, bd_prod, ++nbd);
11192 pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
11193 pbd->tcp_send_seq = swab32(tcp_hdr(skb)->seq);
11194 pbd->tcp_flags = pbd_tcp_flags(skb);
11196 if (xmit_type & XMIT_GSO_V4) {
11197 pbd->ip_id = swab16(ip_hdr(skb)->id);
11198 pbd->tcp_pseudo_csum =
11199 swab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
11200 ip_hdr(skb)->daddr,
11201 0, IPPROTO_TCP, 0));
		} else
			pbd->tcp_pseudo_csum =
11205 swab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
11206 &ipv6_hdr(skb)->daddr,
11207 0, IPPROTO_TCP, 0));
		pbd->global_data |= ETH_TX_PARSE_BD_PSEUDO_CS_WITHOUT_LEN;
	}
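	/* Note the zero length passed to the pseudo-header checksum helpers
	 * above: the per-segment length is left for the FW to add, which is
	 * what ETH_TX_PARSE_BD_PSEUDO_CS_WITHOUT_LEN announces; the driver
	 * seeds only the addresses and IPPROTO_TCP.
	 */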
11211 tx_data_bd = (struct eth_tx_bd *)tx_start_bd;
11213 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
11214 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
11216 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
11217 tx_data_bd = &fp->tx_desc_ring[bd_prod].reg_bd;
11218 if (total_pkt_bd == NULL)
11219 total_pkt_bd = &fp->tx_desc_ring[bd_prod].reg_bd;
11221 mapping = pci_map_page(bp->pdev, frag->page, frag->page_offset,
11222 frag->size, PCI_DMA_TODEVICE);
11224 tx_data_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
11225 tx_data_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
11226 tx_data_bd->nbytes = cpu_to_le16(frag->size);
11227 le16_add_cpu(&pkt_size, frag->size);
11229 DP(NETIF_MSG_TX_QUEUED,
11230 "frag %d bd @%p addr (%x:%x) nbytes %d\n",
11231 i, tx_data_bd, tx_data_bd->addr_hi, tx_data_bd->addr_lo,
		   le16_to_cpu(tx_data_bd->nbytes));
	}
11235 DP(NETIF_MSG_TX_QUEUED, "last bd @%p\n", tx_data_bd);
11237 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
11239 /* now send a tx doorbell, counting the next BD
11240 * if the packet contains or ends with it
	if (TX_BD_POFF(bd_prod) < nbd)
		nbd++;
11245 if (total_pkt_bd != NULL)
11246 total_pkt_bd->total_pkt_bytes = pkt_size;
	if (pbd)
		DP(NETIF_MSG_TX_QUEUED,
11250 "PBD @%p ip_data %x ip_hlen %u ip_id %u lso_mss %u"
11251 " tcp_flags %x xsum %x seq %u hlen %u\n",
11252 pbd, pbd->global_data, pbd->ip_hlen, pbd->ip_id,
11253 pbd->lso_mss, pbd->tcp_flags, pbd->tcp_pseudo_csum,
11254 pbd->tcp_send_seq, le16_to_cpu(pbd->total_hlen));
11256 DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d bd %u\n", nbd, bd_prod);
	/*
	 * Make sure that the BD data is updated before updating the producer
	 * since FW might read the BD right after the producer is updated.
	 * This is only applicable for weak-ordered memory model archs such
	 * as IA-64. The following barrier is also mandatory since the FW
	 * assumes packets must have BDs.
	 */
	wmb();
	fp->tx_db.data.prod += nbd;
	barrier();
	DOORBELL(bp, fp->index - bp->num_rx_queues, fp->tx_db.raw);

	mmiowb();

	fp->tx_bd_prod += nbd;
11275 if (unlikely(bnx2x_tx_avail(fp) < MAX_SKB_FRAGS + 3)) {
11276 netif_tx_stop_queue(txq);
		/* We want bnx2x_tx_int to "see" the updated tx_bd_prod
		   if we put Tx into XOFF state. */
		smp_mb();
		fp_stat->eth_q_stats.driver_xoff++;
		if (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3)
			netif_tx_wake_queue(txq);
	}
	fp_stat->tx_pkt++;

	return NETDEV_TX_OK;
}
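/* Sketch of the stop/wake protocol above (illustrative; the consumer side
 * lives in bnx2x_tx_int()):
 *
 *	producer (here)			consumer (tx completion)
 *	----------------------		-------------------------
 *	tx_bd_prod += nbd		frees BDs, advances consumer
 *	netif_tx_stop_queue()		sees the queue stopped
 *	smp_mb()			smp_mb()
 *	re-check avail, maybe wake	re-checks avail, wakes queue
 *
 * The paired barriers plus the producer-side re-check close the window in
 * which the consumer empties the ring between our availability test and
 * the queue stop, which would otherwise leave the queue off forever.
 */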
11289 /* called with rtnl_lock */
11290 static int bnx2x_open(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);
11294 netif_carrier_off(dev);
11296 bnx2x_set_power_state(bp, PCI_D0);
	return bnx2x_nic_load(bp, LOAD_OPEN);
}
11301 /* called with rtnl_lock */
11302 static int bnx2x_close(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);
11306 /* Unload the driver, release IRQs */
11307 bnx2x_nic_unload(bp, UNLOAD_CLOSE);
11308 if (atomic_read(&bp->pdev->enable_cnt) == 1)
11309 if (!CHIP_REV_IS_SLOW(bp))
			bnx2x_set_power_state(bp, PCI_D3hot);

	return 0;
}
11315 /* called with netif_tx_lock from dev_mcast.c */
11316 static void bnx2x_set_rx_mode(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);
11319 u32 rx_mode = BNX2X_RX_MODE_NORMAL;
11320 int port = BP_PORT(bp);
11322 if (bp->state != BNX2X_STATE_OPEN) {
11323 DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
11327 DP(NETIF_MSG_IFUP, "dev->flags = %x\n", dev->flags);
11329 if (dev->flags & IFF_PROMISC)
11330 rx_mode = BNX2X_RX_MODE_PROMISC;
11332 else if ((dev->flags & IFF_ALLMULTI) ||
11333 ((dev->mc_count > BNX2X_MAX_MULTICAST) && CHIP_IS_E1(bp)))
11334 rx_mode = BNX2X_RX_MODE_ALLMULTI;
11336 else { /* some multicasts */
11337 if (CHIP_IS_E1(bp)) {
11338 int i, old, offset;
11339 struct dev_mc_list *mclist;
11340 struct mac_configuration_cmd *config =
11341 bnx2x_sp(bp, mcast_config);
11343 for (i = 0, mclist = dev->mc_list;
11344 mclist && (i < dev->mc_count);
11345 i++, mclist = mclist->next) {
11347 config->config_table[i].
11348 cam_entry.msb_mac_addr =
11349 swab16(*(u16 *)&mclist->dmi_addr[0]);
11350 config->config_table[i].
11351 cam_entry.middle_mac_addr =
11352 swab16(*(u16 *)&mclist->dmi_addr[2]);
11353 config->config_table[i].
11354 cam_entry.lsb_mac_addr =
11355 swab16(*(u16 *)&mclist->dmi_addr[4]);
				config->config_table[i].cam_entry.flags =
							cpu_to_le16(port);
11358 config->config_table[i].
11359 target_table_entry.flags = 0;
11360 config->config_table[i].target_table_entry.
11361 clients_bit_vector =
11362 cpu_to_le32(1 << BP_L_ID(bp));
11363 config->config_table[i].
11364 target_table_entry.vlan_id = 0;
11367 "setting MCAST[%d] (%04x:%04x:%04x)\n", i,
11368 config->config_table[i].
11369 cam_entry.msb_mac_addr,
11370 config->config_table[i].
11371 cam_entry.middle_mac_addr,
11372 config->config_table[i].
11373 cam_entry.lsb_mac_addr);
			old = config->hdr.length;
			if (old > i) {
				for (; i < old; i++) {
					if (CAM_IS_INVALID(config->
							   config_table[i])) {
						/* already invalidated */
						break;
					}
					/* invalidate */
					CAM_INVALIDATE(config->
						       config_table[i]);
				}
			}
11389 if (CHIP_REV_IS_SLOW(bp))
11390 offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
			else
				offset = BNX2X_MAX_MULTICAST*(1 + port);
11394 config->hdr.length = i;
11395 config->hdr.offset = offset;
11396 config->hdr.client_id = bp->fp->cl_id;
11397 config->hdr.reserved1 = 0;
11399 bp->set_mac_pending++;
11402 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
11403 U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
				      U64_LO(bnx2x_sp_mapping(bp, mcast_config)),
				      0);

	} else { /* E1H */
11407 /* Accept one or more multicasts */
11408 struct dev_mc_list *mclist;
11409 u32 mc_filter[MC_HASH_SIZE];
		u32 crc, bit, regidx;
		int i;
11413 memset(mc_filter, 0, 4 * MC_HASH_SIZE);
11415 for (i = 0, mclist = dev->mc_list;
11416 mclist && (i < dev->mc_count);
11417 i++, mclist = mclist->next) {
11419 DP(NETIF_MSG_IFUP, "Adding mcast MAC: %pM\n",
11422 crc = crc32c_le(0, mclist->dmi_addr, ETH_ALEN);
			bit = (crc >> 24) & 0xff;
			regidx = bit >> 5;
			bit &= 0x1f;
			mc_filter[regidx] |= (1 << bit);
		}
		for (i = 0; i < MC_HASH_SIZE; i++)
			REG_WR(bp, MC_HASH_OFFSET(bp, i), mc_filter[i]);
	}
11435 bp->rx_mode = rx_mode;
	bnx2x_set_storm_rx_mode(bp);
}
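/* Worked example for the E1H hash above (hypothetical MAC): if
 * crc32c_le() of the address is 0x9axxxxxx, then bit = 0x9a = 154,
 * regidx = 154 >> 5 = 4 and bit &= 0x1f = 26, so bit 26 of MC_HASH
 * register 4 is set. The chip hashes the destination MAC of incoming
 * frames the same way and only accepts those whose filter bit is set;
 * hash collisions (false positives) are left for the stack to drop.
 */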
11439 /* called with rtnl_lock */
11440 static int bnx2x_change_mac_addr(struct net_device *dev, void *p)
{
	struct sockaddr *addr = p;
11443 struct bnx2x *bp = netdev_priv(dev);
	if (!is_valid_ether_addr((u8 *)(addr->sa_data)))
		return -EINVAL;
11448 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
11449 if (netif_running(dev)) {
11450 if (CHIP_IS_E1(bp))
			bnx2x_set_eth_mac_addr_e1(bp, 1);
		else
			bnx2x_set_eth_mac_addr_e1h(bp, 1);
	}

	return 0;
}
11459 /* called with rtnl_lock */
11460 static int bnx2x_mdio_read(struct net_device *netdev, int prtad,
			   int devad, u16 addr)
{
	struct bnx2x *bp = netdev_priv(netdev);
	u16 value;
	int rc;
	u32 phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
11468 DP(NETIF_MSG_LINK, "mdio_read: prtad 0x%x, devad 0x%x, addr 0x%x\n",
11469 prtad, devad, addr);
	if (prtad != bp->mdio.prtad) {
		DP(NETIF_MSG_LINK, "prtad mismatch (cmd:0x%x != bp:0x%x)\n",
		   prtad, bp->mdio.prtad);
		return -EINVAL;
	}
11477 /* The HW expects different devad if CL22 is used */
11478 devad = (devad == MDIO_DEVAD_NONE) ? DEFAULT_PHY_DEV_ADDR : devad;
11480 bnx2x_acquire_phy_lock(bp);
11481 rc = bnx2x_cl45_read(bp, BP_PORT(bp), phy_type, prtad,
11482 devad, addr, &value);
11483 bnx2x_release_phy_lock(bp);
11484 DP(NETIF_MSG_LINK, "mdio_read_val 0x%x rc = 0x%x\n", value, rc);
11491 /* called with rtnl_lock */
11492 static int bnx2x_mdio_write(struct net_device *netdev, int prtad, int devad,
			    u16 addr, u16 value)
{
	struct bnx2x *bp = netdev_priv(netdev);
	u32 ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
	int rc;
11499 DP(NETIF_MSG_LINK, "mdio_write: prtad 0x%x, devad 0x%x, addr 0x%x,"
11500 " value 0x%x\n", prtad, devad, addr, value);
	if (prtad != bp->mdio.prtad) {
		DP(NETIF_MSG_LINK, "prtad mismatch (cmd:0x%x != bp:0x%x)\n",
		   prtad, bp->mdio.prtad);
		return -EINVAL;
	}
11508 /* The HW expects different devad if CL22 is used */
11509 devad = (devad == MDIO_DEVAD_NONE) ? DEFAULT_PHY_DEV_ADDR : devad;
11511 bnx2x_acquire_phy_lock(bp);
11512 rc = bnx2x_cl45_write(bp, BP_PORT(bp), ext_phy_type, prtad,
11513 devad, addr, value);
	bnx2x_release_phy_lock(bp);

	return rc;
}
11518 /* called with rtnl_lock */
11519 static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct bnx2x *bp = netdev_priv(dev);
11522 struct mii_ioctl_data *mdio = if_mii(ifr);
11524 DP(NETIF_MSG_LINK, "ioctl: phy id 0x%x, reg 0x%x, val_in 0x%x\n",
11525 mdio->phy_id, mdio->reg_num, mdio->val_in);
	if (!netif_running(dev))
		return -EAGAIN;

	return mdio_mii_ioctl(&bp->mdio, mdio, cmd);
}
11533 /* called with rtnl_lock */
11534 static int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
{
	struct bnx2x *bp = netdev_priv(dev);
	int rc = 0;

	if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
	    ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE))
		return -EINVAL;
11543 /* This does not race with packet allocation
11544 * because the actual alloc size is
11545 * only updated as part of load
11547 dev->mtu = new_mtu;
11549 if (netif_running(dev)) {
11550 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
		rc = bnx2x_nic_load(bp, LOAD_NORMAL);
	}

	return rc;
}
11557 static void bnx2x_tx_timeout(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);
#ifdef BNX2X_STOP_ON_ERROR
	if (!bp->panic)
		bnx2x_panic();
#endif

	/* This allows the netif to be shut down gracefully before resetting */
	schedule_work(&bp->reset_task);
}
#ifdef BCM_VLAN
/* called with rtnl_lock */
static void bnx2x_vlan_rx_register(struct net_device *dev,
11572 struct vlan_group *vlgrp)
{
	struct bnx2x *bp = netdev_priv(dev);

	bp->vlgrp = vlgrp;

	/* Set flags according to the required capabilities */
11579 bp->flags &= ~(HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG);
11581 if (dev->features & NETIF_F_HW_VLAN_TX)
11582 bp->flags |= HW_VLAN_TX_FLAG;
11584 if (dev->features & NETIF_F_HW_VLAN_RX)
11585 bp->flags |= HW_VLAN_RX_FLAG;
11587 if (netif_running(dev))
		bnx2x_set_client_config(bp);
}
#endif
11593 #if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
11594 static void poll_bnx2x(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);
11598 disable_irq(bp->pdev->irq);
11599 bnx2x_interrupt(bp->pdev->irq, dev);
	enable_irq(bp->pdev->irq);
}
#endif
11604 static const struct net_device_ops bnx2x_netdev_ops = {
11605 .ndo_open = bnx2x_open,
11606 .ndo_stop = bnx2x_close,
11607 .ndo_start_xmit = bnx2x_start_xmit,
11608 .ndo_set_multicast_list = bnx2x_set_rx_mode,
11609 .ndo_set_mac_address = bnx2x_change_mac_addr,
11610 .ndo_validate_addr = eth_validate_addr,
11611 .ndo_do_ioctl = bnx2x_ioctl,
11612 .ndo_change_mtu = bnx2x_change_mtu,
11613 .ndo_tx_timeout = bnx2x_tx_timeout,
#ifdef BCM_VLAN
	.ndo_vlan_rx_register	= bnx2x_vlan_rx_register,
#endif
11617 #if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
	.ndo_poll_controller	= poll_bnx2x,
#endif
};
11622 static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
				    struct net_device *dev)
{
	struct bnx2x *bp;
	int rc;

	SET_NETDEV_DEV(dev, &pdev->dev);
	bp = netdev_priv(dev);

	bp->dev = dev;
	bp->pdev = pdev;
	bp->flags = 0;
	bp->func = PCI_FUNC(pdev->devfn);
	rc = pci_enable_device(pdev);
	if (rc) {
		printk(KERN_ERR PFX "Cannot enable PCI device, aborting\n");
		goto err_out;
	}
	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
		printk(KERN_ERR PFX "Cannot find PCI device base address,"
		       " aborting\n");
		rc = -ENODEV;
		goto err_out_disable;
	}
	if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
		printk(KERN_ERR PFX "Cannot find second PCI device"
		       " base address, aborting\n");
		rc = -ENODEV;
		goto err_out_disable;
	}
	if (atomic_read(&pdev->enable_cnt) == 1) {
		rc = pci_request_regions(pdev, DRV_MODULE_NAME);
		if (rc) {
			printk(KERN_ERR PFX "Cannot obtain PCI resources,"
			       " aborting\n");
			goto err_out_disable;
		}
	}
11664 pci_set_master(pdev);
11665 pci_save_state(pdev);
11668 bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
11669 if (bp->pm_cap == 0) {
11670 printk(KERN_ERR PFX "Cannot find power management"
11671 " capability, aborting\n");
11673 goto err_out_release;
11676 bp->pcie_cap = pci_find_capability(pdev, PCI_CAP_ID_EXP);
11677 if (bp->pcie_cap == 0) {
11678 printk(KERN_ERR PFX "Cannot find PCI Express capability,"
11681 goto err_out_release;
11684 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) == 0) {
11685 bp->flags |= USING_DAC_FLAG;
11686 if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)) != 0) {
11687 printk(KERN_ERR PFX "pci_set_consistent_dma_mask"
11688 " failed, aborting\n");
11690 goto err_out_release;
11693 } else if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) {
11694 printk(KERN_ERR PFX "System does not support DMA,"
11697 goto err_out_release;
11700 dev->mem_start = pci_resource_start(pdev, 0);
11701 dev->base_addr = dev->mem_start;
11702 dev->mem_end = pci_resource_end(pdev, 0);
11704 dev->irq = pdev->irq;
11706 bp->regview = pci_ioremap_bar(pdev, 0);
11707 if (!bp->regview) {
11708 printk(KERN_ERR PFX "Cannot map register space, aborting\n");
11710 goto err_out_release;
11713 bp->doorbells = ioremap_nocache(pci_resource_start(pdev, 2),
11714 min_t(u64, BNX2X_DB_SIZE,
11715 pci_resource_len(pdev, 2)));
11716 if (!bp->doorbells) {
11717 printk(KERN_ERR PFX "Cannot map doorbell space, aborting\n");
11719 goto err_out_unmap;
11722 bnx2x_set_power_state(bp, PCI_D0);
11724 /* clean indirect addresses */
11725 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
11726 PCICFG_VENDOR_ID_OFFSET);
11727 REG_WR(bp, PXP2_REG_PGL_ADDR_88_F0 + BP_PORT(bp)*16, 0);
11728 REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F0 + BP_PORT(bp)*16, 0);
11729 REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0 + BP_PORT(bp)*16, 0);
11730 REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0 + BP_PORT(bp)*16, 0);
11732 dev->watchdog_timeo = TX_TIMEOUT;
11734 dev->netdev_ops = &bnx2x_netdev_ops;
11735 dev->ethtool_ops = &bnx2x_ethtool_ops;
11736 dev->features |= NETIF_F_SG;
11737 dev->features |= NETIF_F_HW_CSUM;
11738 if (bp->flags & USING_DAC_FLAG)
11739 dev->features |= NETIF_F_HIGHDMA;
11740 dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
11741 dev->features |= NETIF_F_TSO6;
11743 dev->features |= (NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX);
11744 bp->flags |= (HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG);
11746 dev->vlan_features |= NETIF_F_SG;
11747 dev->vlan_features |= NETIF_F_HW_CSUM;
11748 if (bp->flags & USING_DAC_FLAG)
11749 dev->vlan_features |= NETIF_F_HIGHDMA;
11750 dev->vlan_features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
11751 dev->vlan_features |= NETIF_F_TSO6;
11754 /* get_port_hwinfo() will set prtad and mmds properly */
11755 bp->mdio.prtad = MDIO_PRTAD_NONE;
11757 bp->mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22;
11758 bp->mdio.dev = dev;
11759 bp->mdio.mdio_read = bnx2x_mdio_read;
	bp->mdio.mdio_write = bnx2x_mdio_write;

	return 0;

err_out_unmap:
	if (bp->regview) {
		iounmap(bp->regview);
		bp->regview = NULL;
	}
	if (bp->doorbells) {
		iounmap(bp->doorbells);
		bp->doorbells = NULL;
	}

err_out_release:
	if (atomic_read(&pdev->enable_cnt) == 1)
		pci_release_regions(pdev);

err_out_disable:
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);

err_out:
	return rc;
}
11786 static void __devinit bnx2x_get_pcie_width_speed(struct bnx2x *bp,
11787 int *width, int *speed)
{
	u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);
11791 *width = (val & PCICFG_LINK_WIDTH) >> PCICFG_LINK_WIDTH_SHIFT;
11793 /* return value of 1=2.5GHz 2=5GHz */
	*speed = (val & PCICFG_LINK_SPEED) >> PCICFG_LINK_SPEED_SHIFT;
}
11797 static int __devinit bnx2x_check_firmware(struct bnx2x *bp)
{
	const struct firmware *firmware = bp->firmware;
11800 struct bnx2x_fw_file_hdr *fw_hdr;
11801 struct bnx2x_fw_file_section *sections;
	u32 offset, len, num_ops;
	u16 *ops_offsets;
	int i;
	const u8 *fw_ver;

	if (firmware->size < sizeof(struct bnx2x_fw_file_hdr))
		return -EINVAL;
11810 fw_hdr = (struct bnx2x_fw_file_hdr *)firmware->data;
11811 sections = (struct bnx2x_fw_file_section *)fw_hdr;
11813 /* Make sure none of the offsets and sizes make us read beyond
11814 * the end of the firmware data */
11815 for (i = 0; i < sizeof(*fw_hdr) / sizeof(*sections); i++) {
11816 offset = be32_to_cpu(sections[i].offset);
11817 len = be32_to_cpu(sections[i].len);
		if (offset + len > firmware->size) {
			printk(KERN_ERR PFX "Section %d length is out of "
			       "bounds\n", i);
			return -EINVAL;
		}
	}
11825 /* Likewise for the init_ops offsets */
11826 offset = be32_to_cpu(fw_hdr->init_ops_offsets.offset);
11827 ops_offsets = (u16 *)(firmware->data + offset);
11828 num_ops = be32_to_cpu(fw_hdr->init_ops.len) / sizeof(struct raw_op);
11830 for (i = 0; i < be32_to_cpu(fw_hdr->init_ops_offsets.len) / 2; i++) {
		if (be16_to_cpu(ops_offsets[i]) > num_ops) {
			printk(KERN_ERR PFX "Section offset %d is out of "
			       "bounds\n", i);
			return -EINVAL;
		}
	}
11838 /* Check FW version */
11839 offset = be32_to_cpu(fw_hdr->fw_version.offset);
11840 fw_ver = firmware->data + offset;
11841 if ((fw_ver[0] != BCM_5710_FW_MAJOR_VERSION) ||
11842 (fw_ver[1] != BCM_5710_FW_MINOR_VERSION) ||
11843 (fw_ver[2] != BCM_5710_FW_REVISION_VERSION) ||
11844 (fw_ver[3] != BCM_5710_FW_ENGINEERING_VERSION)) {
11845 printk(KERN_ERR PFX "Bad FW version:%d.%d.%d.%d."
11846 " Should be %d.%d.%d.%d\n",
11847 fw_ver[0], fw_ver[1], fw_ver[2],
11848 fw_ver[3], BCM_5710_FW_MAJOR_VERSION,
11849 BCM_5710_FW_MINOR_VERSION,
11850 BCM_5710_FW_REVISION_VERSION,
		       BCM_5710_FW_ENGINEERING_VERSION);
		return -EINVAL;
	}

	return 0;
}
11858 static inline void be32_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
{
	const __be32 *source = (const __be32 *)_source;
	u32 *target = (u32 *)_target;
	u32 i;

	for (i = 0; i < n/4; i++)
		target[i] = be32_to_cpu(source[i]);
}
/*
   Ops array is stored in the following format:
   {op(8bit), offset(24bit, big endian), data(32bit, big endian)}
 */
11872 static inline void bnx2x_prep_ops(const u8 *_source, u8 *_target, u32 n)
{
	const __be32 *source = (const __be32 *)_source;
	struct raw_op *target = (struct raw_op *)_target;
	u32 i, j, tmp;

	for (i = 0, j = 0; i < n/8; i++, j += 2) {
		tmp = be32_to_cpu(source[j]);
		target[i].op = (tmp >> 24) & 0xff;
		target[i].offset = tmp & 0xffffff;
		target[i].raw_data = be32_to_cpu(source[j+1]);
	}
}
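/* Worked example (hypothetical entry): if the two 32-bit words of an op
 * decode via be32_to_cpu() to 0x02abcdef and 0x00001000, the loop above
 * produces
 *
 *	target[i].op       = 0x02;	 top 8 bits of the first word
 *	target[i].offset   = 0xabcdef;	 low 24 bits of the first word
 *	target[i].raw_data = 0x1000;	 second word in CPU order
 */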
11886 static inline void be16_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
{
	const __be16 *source = (const __be16 *)_source;
	u16 *target = (u16 *)_target;
	u32 i;

	for (i = 0; i < n/2; i++)
		target[i] = be16_to_cpu(source[i]);
}
#define BNX2X_ALLOC_AND_SET(arr, lbl, func)				\
	do {								\
		u32 len = be32_to_cpu(fw_hdr->arr.len);			\
		bp->arr = kmalloc(len, GFP_KERNEL);			\
		if (!bp->arr) {						\
			printk(KERN_ERR PFX "Failed to allocate %d bytes " \
			       "for "#arr"\n", len);			\
			goto lbl;					\
		}							\
		func(bp->firmware->data + be32_to_cpu(fw_hdr->arr.offset), \
		     (u8 *)bp->arr, len);				\
	} while (0)
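/* Usage sketch: BNX2X_ALLOC_AND_SET(init_data, request_firmware_exit,
 * be32_to_cpu_n) allocates bp->init_data sized by fw_hdr->init_data.len,
 * fills it from the firmware image at fw_hdr->init_data.offset converting
 * endianness with be32_to_cpu_n, and jumps to request_firmware_exit if
 * the allocation fails; this is exactly how it is invoked below.
 */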
11909 static int __devinit bnx2x_init_firmware(struct bnx2x *bp, struct device *dev)
{
	char fw_file_name[40] = {0};
	struct bnx2x_fw_file_hdr *fw_hdr;
	int rc, offset;
11915 /* Create a FW file name */
11916 if (CHIP_IS_E1(bp))
11917 offset = sprintf(fw_file_name, FW_FILE_PREFIX_E1);
11919 offset = sprintf(fw_file_name, FW_FILE_PREFIX_E1H);
11921 sprintf(fw_file_name + offset, "%d.%d.%d.%d.fw",
11922 BCM_5710_FW_MAJOR_VERSION,
11923 BCM_5710_FW_MINOR_VERSION,
11924 BCM_5710_FW_REVISION_VERSION,
11925 BCM_5710_FW_ENGINEERING_VERSION);
11927 printk(KERN_INFO PFX "Loading %s\n", fw_file_name);
	rc = request_firmware(&bp->firmware, fw_file_name, dev);
	if (rc) {
		printk(KERN_ERR PFX "Can't load firmware file %s\n",
		       fw_file_name);
		goto request_firmware_exit;
	}
	rc = bnx2x_check_firmware(bp);
	if (rc) {
		printk(KERN_ERR PFX "Corrupt firmware file %s\n", fw_file_name);
		goto request_firmware_exit;
	}
11942 fw_hdr = (struct bnx2x_fw_file_hdr *)bp->firmware->data;
11944 /* Initialize the pointers to the init arrays */
11946 BNX2X_ALLOC_AND_SET(init_data, request_firmware_exit, be32_to_cpu_n);
11949 BNX2X_ALLOC_AND_SET(init_ops, init_ops_alloc_err, bnx2x_prep_ops);
	BNX2X_ALLOC_AND_SET(init_ops_offsets, init_offsets_alloc_err,
			    be16_to_cpu_n);
11955 /* STORMs firmware */
11956 INIT_TSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
11957 be32_to_cpu(fw_hdr->tsem_int_table_data.offset);
11958 INIT_TSEM_PRAM_DATA(bp) = bp->firmware->data +
11959 be32_to_cpu(fw_hdr->tsem_pram_data.offset);
11960 INIT_USEM_INT_TABLE_DATA(bp) = bp->firmware->data +
11961 be32_to_cpu(fw_hdr->usem_int_table_data.offset);
11962 INIT_USEM_PRAM_DATA(bp) = bp->firmware->data +
11963 be32_to_cpu(fw_hdr->usem_pram_data.offset);
11964 INIT_XSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
11965 be32_to_cpu(fw_hdr->xsem_int_table_data.offset);
11966 INIT_XSEM_PRAM_DATA(bp) = bp->firmware->data +
11967 be32_to_cpu(fw_hdr->xsem_pram_data.offset);
11968 INIT_CSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
11969 be32_to_cpu(fw_hdr->csem_int_table_data.offset);
11970 INIT_CSEM_PRAM_DATA(bp) = bp->firmware->data +
			be32_to_cpu(fw_hdr->csem_pram_data.offset);

	return 0;

init_offsets_alloc_err:
11976 kfree(bp->init_ops);
11977 init_ops_alloc_err:
11978 kfree(bp->init_data);
11979 request_firmware_exit:
	release_firmware(bp->firmware);

	return rc;
}
11986 static int __devinit bnx2x_init_one(struct pci_dev *pdev,
11987 const struct pci_device_id *ent)
{
	struct net_device *dev = NULL;
	struct bnx2x *bp;
	int pcie_width, pcie_speed;
	int rc;

	/* dev zeroed in init_etherdev */
	dev = alloc_etherdev_mq(sizeof(*bp), MAX_CONTEXT);
	if (!dev) {
		printk(KERN_ERR PFX "Cannot allocate net device\n");
		return -ENOMEM;
	}
12001 bp = netdev_priv(dev);
12002 bp->msglevel = debug;
12004 pci_set_drvdata(pdev, dev);
	rc = bnx2x_init_dev(pdev, dev);
	if (rc < 0) {
		free_netdev(dev);
		return rc;
	}

	rc = bnx2x_init_bp(bp);
	if (rc)
		goto init_one_exit;
12016 /* Set init arrays */
	rc = bnx2x_init_firmware(bp, &pdev->dev);
	if (rc) {
		printk(KERN_ERR PFX "Error loading firmware\n");
		goto init_one_exit;
	}
	rc = register_netdev(dev);
	if (rc) {
		dev_err(&pdev->dev, "Cannot register net device\n");
		goto init_one_exit;
	}
12029 bnx2x_get_pcie_width_speed(bp, &pcie_width, &pcie_speed);
12030 printk(KERN_INFO "%s: %s (%c%d) PCI-E x%d %s found at mem %lx,"
12031 " IRQ %d, ", dev->name, board_info[ent->driver_data].name,
12032 (CHIP_REV(bp) >> 12) + 'A', (CHIP_METAL(bp) >> 4),
12033 pcie_width, (pcie_speed == 2) ? "5GHz (Gen2)" : "2.5GHz",
12034 dev->base_addr, bp->pdev->irq);
12035 printk(KERN_CONT "node addr %pM\n", dev->dev_addr);
12041 iounmap(bp->regview);
12044 iounmap(bp->doorbells);
12048 if (atomic_read(&pdev->enable_cnt) == 1)
12049 pci_release_regions(pdev);
12051 pci_disable_device(pdev);
12052 pci_set_drvdata(pdev, NULL);
12057 static void __devexit bnx2x_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp;

	if (!dev) {
		printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
		return;
	}
	bp = netdev_priv(dev);
12068 unregister_netdev(dev);
12070 kfree(bp->init_ops_offsets);
12071 kfree(bp->init_ops);
12072 kfree(bp->init_data);
12073 release_firmware(bp->firmware);
	if (bp->regview)
		iounmap(bp->regview);

	if (bp->doorbells)
		iounmap(bp->doorbells);

	free_netdev(dev);
12083 if (atomic_read(&pdev->enable_cnt) == 1)
12084 pci_release_regions(pdev);
12086 pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
}
12090 static int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp;

	if (!dev) {
		printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
		return -ENODEV;
	}
	bp = netdev_priv(dev);

	rtnl_lock();
12103 pci_save_state(pdev);
	if (!netif_running(dev)) {
		rtnl_unlock();
		return 0;
	}

	netif_device_detach(dev);
12112 bnx2x_nic_unload(bp, UNLOAD_CLOSE);
	bnx2x_set_power_state(bp, pci_choose_state(pdev, state));

	rtnl_unlock();

	return 0;
}
12121 static int bnx2x_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp;
	int rc;

	if (!dev) {
		printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
		return -ENODEV;
	}
	bp = netdev_priv(dev);

	rtnl_lock();
12135 pci_restore_state(pdev);
	if (!netif_running(dev)) {
		rtnl_unlock();
		return 0;
	}
12142 bnx2x_set_power_state(bp, PCI_D0);
12143 netif_device_attach(dev);
	rc = bnx2x_nic_load(bp, LOAD_OPEN);

	rtnl_unlock();

	return rc;
}
static int bnx2x_eeh_nic_unload(struct bnx2x *bp)
{
	int i;

	bp->state = BNX2X_STATE_ERROR;
12158 bp->rx_mode = BNX2X_RX_MODE_NONE;
12160 bnx2x_netif_stop(bp, 0);
12162 del_timer_sync(&bp->timer);
12163 bp->stats_state = STATS_STATE_DISABLED;
12164 DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");
12167 bnx2x_free_irq(bp);
12169 if (CHIP_IS_E1(bp)) {
12170 struct mac_configuration_cmd *config =
12171 bnx2x_sp(bp, mcast_config);
12173 for (i = 0; i < config->hdr.length; i++)
12174 CAM_INVALIDATE(config->config_table[i]);
12177 /* Free SKBs, SGEs, TPA pool and driver internals */
12178 bnx2x_free_skbs(bp);
12179 for_each_rx_queue(bp, i)
12180 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
12181 for_each_rx_queue(bp, i)
12182 netif_napi_del(&bnx2x_fp(bp, i, napi));
12183 bnx2x_free_mem(bp);
12185 bp->state = BNX2X_STATE_CLOSED;
	netif_carrier_off(bp->dev);

	return 0;
}
static void bnx2x_eeh_recover(struct bnx2x *bp)
{
	u32 val;

	mutex_init(&bp->port.phy_mutex);
12198 bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
12199 bp->link_params.shmem_base = bp->common.shmem_base;
12200 BNX2X_DEV_INFO("shmem offset is 0x%x\n", bp->common.shmem_base);
12202 if (!bp->common.shmem_base ||
12203 (bp->common.shmem_base < 0xA0000) ||
12204 (bp->common.shmem_base >= 0xC0000)) {
12205 BNX2X_DEV_INFO("MCP not active\n");
		bp->flags |= NO_MCP_FLAG;
		return;
	}
12210 val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
12211 if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
12212 != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
12213 BNX2X_ERR("BAD MCP validity signature\n");
12215 if (!BP_NOMCP(bp)) {
12216 bp->fw_seq = (SHMEM_RD(bp, func_mb[BP_FUNC(bp)].drv_mb_header)
12217 & DRV_MSG_SEQ_NUMBER_MASK);
12218 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
/**
 * bnx2x_io_error_detected - called when PCI error is detected
12224 * @pdev: Pointer to PCI device
12225 * @state: The current pci connection state
12227 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
12230 static pci_ers_result_t bnx2x_io_error_detected(struct pci_dev *pdev,
12231 pci_channel_state_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp = netdev_priv(dev);

	rtnl_lock();

	netif_device_detach(dev);

	if (state == pci_channel_io_perm_failure) {
		rtnl_unlock();
		return PCI_ERS_RESULT_DISCONNECT;
	}
12245 if (netif_running(dev))
12246 bnx2x_eeh_nic_unload(bp);
	pci_disable_device(pdev);

	rtnl_unlock();
	/* Request a slot reset */
	return PCI_ERS_RESULT_NEED_RESET;
}
/**
 * bnx2x_io_slot_reset - called after the PCI bus has been reset
12258 * @pdev: Pointer to PCI device
 * Restart the card from scratch, as if from a cold-boot.
 */
12262 static pci_ers_result_t bnx2x_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp = netdev_priv(dev);

	rtnl_lock();

	if (pci_enable_device(pdev)) {
		dev_err(&pdev->dev,
			"Cannot re-enable PCI device after reset\n");
		rtnl_unlock();
		return PCI_ERS_RESULT_DISCONNECT;
	}
12276 pci_set_master(pdev);
12277 pci_restore_state(pdev);
12279 if (netif_running(dev))
		bnx2x_set_power_state(bp, PCI_D0);

	rtnl_unlock();

	return PCI_ERS_RESULT_RECOVERED;
}
/**
 * bnx2x_io_resume - called when traffic can start flowing again
12289 * @pdev: Pointer to PCI device
 * This callback is called when the error recovery driver tells us that
 * it's OK to resume normal operation.
 */
12294 static void bnx2x_io_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp = netdev_priv(dev);

	rtnl_lock();

	bnx2x_eeh_recover(bp);
12303 if (netif_running(dev))
12304 bnx2x_nic_load(bp, LOAD_NORMAL);
	netif_device_attach(dev);

	rtnl_unlock();
}
12311 static struct pci_error_handlers bnx2x_err_handler = {
12312 .error_detected = bnx2x_io_error_detected,
12313 .slot_reset = bnx2x_io_slot_reset,
	.resume			= bnx2x_io_resume,
};
12317 static struct pci_driver bnx2x_pci_driver = {
12318 .name = DRV_MODULE_NAME,
12319 .id_table = bnx2x_pci_tbl,
12320 .probe = bnx2x_init_one,
12321 .remove = __devexit_p(bnx2x_remove_one),
12322 .suspend = bnx2x_suspend,
12323 .resume = bnx2x_resume,
	.err_handler	= &bnx2x_err_handler,
};
static int __init bnx2x_init(void)
{
	int ret;

	printk(KERN_INFO "%s", version);
12333 bnx2x_wq = create_singlethread_workqueue("bnx2x");
	if (bnx2x_wq == NULL) {
		printk(KERN_ERR PFX "Cannot create workqueue\n");
		return -ENOMEM;
	}

	ret = pci_register_driver(&bnx2x_pci_driver);
	if (ret) {
		printk(KERN_ERR PFX "Cannot register driver\n");
		destroy_workqueue(bnx2x_wq);
	}

	return ret;
}
static void __exit bnx2x_cleanup(void)
{
	pci_unregister_driver(&bnx2x_pci_driver);

	destroy_workqueue(bnx2x_wq);
}
12354 module_init(bnx2x_init);
12355 module_exit(bnx2x_cleanup);