/* bnx2x_main.c: Broadcom Everest network driver.
 *
 * Copyright (c) 2007-2009 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
 * Written by: Eliezer Tamir
 * Based on code from Michael Chan's bnx2 driver
 * UDP CSUM errata workaround by Arik Gendelman
 * Slowpath and fastpath rework by Vladislav Zolotarov
 * Statistics and Link management by Yitchak Gertner
 */

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/device.h>  /* for dev_info() */
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/dma-mapping.h>
#include <linux/bitops.h>
#include <linux/irq.h>
#include <linux/delay.h>
#include <asm/byteorder.h>
#include <linux/time.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/if_vlan.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <linux/workqueue.h>
#include <linux/crc32.h>
#include <linux/crc32c.h>
#include <linux/prefetch.h>
#include <linux/zlib.h>

#include "bnx2x_init.h"
#include "bnx2x_init_ops.h"
#include "bnx2x_dump.h"

#define DRV_MODULE_VERSION	"1.52.1"
#define DRV_MODULE_RELDATE	"2009/08/12"
#define BNX2X_BC_VER		0x040200

#include <linux/firmware.h>
#include "bnx2x_fw_file_hdr.h"
#define FW_FILE_PREFIX_E1	"bnx2x-e1-"
#define FW_FILE_PREFIX_E1H	"bnx2x-e1h-"

/* Time in jiffies before concluding the transmitter is hung */
#define TX_TIMEOUT		(5*HZ)

static char version[] __devinitdata =
	"Broadcom NetXtreme II 5771x 10Gigabit Ethernet Driver "
	DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("Eliezer Tamir");
MODULE_DESCRIPTION("Broadcom NetXtreme II BCM57710/57711/57711E Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);

static int multi_mode = 1;
module_param(multi_mode, int, 0);
MODULE_PARM_DESC(multi_mode, " Multi queue mode "
			     "(0 Disable; 1 Enable (default))");

static int num_rx_queues;
module_param(num_rx_queues, int, 0);
MODULE_PARM_DESC(num_rx_queues, " Number of Rx queues for multi_mode=1"
				" (default is half number of CPUs)");

static int num_tx_queues;
module_param(num_tx_queues, int, 0);
MODULE_PARM_DESC(num_tx_queues, " Number of Tx queues for multi_mode=1"
				" (default is half number of CPUs)");

static int disable_tpa;
module_param(disable_tpa, int, 0);
MODULE_PARM_DESC(disable_tpa, " Disable the TPA (LRO) feature");

module_param(int_mode, int, 0);
MODULE_PARM_DESC(int_mode, " Force interrupt mode (1 INT#x; 2 MSI)");

static int dropless_fc;
module_param(dropless_fc, int, 0);
MODULE_PARM_DESC(dropless_fc, " Pause on exhausted host ring");

module_param(poll, int, 0);
MODULE_PARM_DESC(poll, " Use polling (for debug)");

static int mrrs = -1;
module_param(mrrs, int, 0);
MODULE_PARM_DESC(mrrs, " Force Max Read Req Size (0..3) (for debug)");

module_param(debug, int, 0);
MODULE_PARM_DESC(debug, " Default debug msglevel");

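/*
 * Editor's note: an illustrative (not authoritative) example of loading the
 * driver with these parameters from a shell, e.g. forcing four Rx/Tx queue
 * pairs and disabling TPA; values of 0 leave a parameter at its built-in
 * default:
 *
 *	modprobe bnx2x multi_mode=1 num_rx_queues=4 num_tx_queues=4 disable_tpa=1
 */
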
static int load_count[3]; /* 0-common, 1-port0, 2-port1 */

static struct workqueue_struct *bnx2x_wq;

enum bnx2x_board_type {

/* indexed by board_type, above */
} board_info[] __devinitdata = {
	{ "Broadcom NetXtreme II BCM57710 XGb" },
	{ "Broadcom NetXtreme II BCM57711 XGb" },
	{ "Broadcom NetXtreme II BCM57711E XGb" }

static const struct pci_device_id bnx2x_pci_tbl[] = {
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57710), BCM57710 },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711), BCM57711 },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711E), BCM57711E },

MODULE_DEVICE_TABLE(pci, bnx2x_pci_tbl);

/****************************************************************************
* General service functions
****************************************************************************/

/* used only at init
 * locking is done by mcp
 */
void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val)
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_DATA, val);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);

static u32 bnx2x_reg_rd_ind(struct bnx2x *bp, u32 addr)
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
	pci_read_config_dword(bp->pdev, PCICFG_GRC_DATA, &val);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);

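/*
 * Editor's note: illustrative usage (hypothetical GRC offset) of the indirect
 * accessors above; they are meant for early init, before the memory-mapped
 * fast path (REG_RD/REG_WR) and DMAE are usable.  After each access the
 * config window is parked at PCICFG_VENDOR_ID_OFFSET so a stray config-space
 * read cannot touch device internals.
 */
#if 0
	u32 old = bnx2x_reg_rd_ind(bp, 0x2000 /* hypothetical GRC offset */);
	bnx2x_reg_wr_ind(bp, 0x2000, old | 0x1);
#endif
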
static const u32 dmae_reg_go_c[] = {
	DMAE_REG_GO_C0, DMAE_REG_GO_C1, DMAE_REG_GO_C2, DMAE_REG_GO_C3,
	DMAE_REG_GO_C4, DMAE_REG_GO_C5, DMAE_REG_GO_C6, DMAE_REG_GO_C7,
	DMAE_REG_GO_C8, DMAE_REG_GO_C9, DMAE_REG_GO_C10, DMAE_REG_GO_C11,
	DMAE_REG_GO_C12, DMAE_REG_GO_C13, DMAE_REG_GO_C14, DMAE_REG_GO_C15

/* copy command into DMAE command memory and set DMAE command go */
static void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae,
	cmd_offset = (DMAE_REG_CMD_MEM + sizeof(struct dmae_command) * idx);
	for (i = 0; i < (sizeof(struct dmae_command)/4); i++) {
		REG_WR(bp, cmd_offset + i*4, *(((u32 *)dmae) + i));

		DP(BNX2X_MSG_OFF, "DMAE cmd[%d].%d (0x%08x) : 0x%08x\n",
		   idx, i, cmd_offset + i*4, *(((u32 *)dmae) + i));

	REG_WR(bp, dmae_reg_go_c[idx], 1);

void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
	struct dmae_command dmae;
	u32 *wb_comp = bnx2x_sp(bp, wb_comp);

	if (!bp->dmae_ready) {
		u32 *data = bnx2x_sp(bp, wb_data[0]);

		DP(BNX2X_MSG_OFF, "DMAE is not ready (dst_addr %08x len32 %d)"
		   " using indirect\n", dst_addr, len32);
		bnx2x_init_ind_wr(bp, dst_addr, data, len32);

	memset(&dmae, 0, sizeof(struct dmae_command));

	dmae.opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
		       DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
		       DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
		       DMAE_CMD_ENDIANITY_B_DW_SWAP |
		       DMAE_CMD_ENDIANITY_DW_SWAP |
		       (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
		       (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
	dmae.src_addr_lo = U64_LO(dma_addr);
	dmae.src_addr_hi = U64_HI(dma_addr);
	dmae.dst_addr_lo = dst_addr >> 2;
	dmae.dst_addr_hi = 0;
	dmae.comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
	dmae.comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
	dmae.comp_val = DMAE_COMP_VAL;

	DP(BNX2X_MSG_OFF, "DMAE: opcode 0x%08x\n"
	   DP_LEVEL "src_addr [%x:%08x] len [%d *4] "
		    "dst_addr [%x:%08x (%08x)]\n"
	   DP_LEVEL "comp_addr [%x:%08x] comp_val 0x%08x\n",
	   dmae.opcode, dmae.src_addr_hi, dmae.src_addr_lo,
	   dmae.len, dmae.dst_addr_hi, dmae.dst_addr_lo, dst_addr,
	   dmae.comp_addr_hi, dmae.comp_addr_lo, dmae.comp_val);
	DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
	   bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
	   bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);

	mutex_lock(&bp->dmae_mutex);

	bnx2x_post_dmae(bp, &dmae, INIT_DMAE_C(bp));

	while (*wb_comp != DMAE_COMP_VAL) {
		DP(BNX2X_MSG_OFF, "wb_comp 0x%08x\n", *wb_comp);

			BNX2X_ERR("DMAE timeout!\n");

		/* adjust delay for emulation/FPGA */
		if (CHIP_REV_IS_SLOW(bp))

	mutex_unlock(&bp->dmae_mutex);

void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
	struct dmae_command dmae;
	u32 *wb_comp = bnx2x_sp(bp, wb_comp);

	if (!bp->dmae_ready) {
		u32 *data = bnx2x_sp(bp, wb_data[0]);

		DP(BNX2X_MSG_OFF, "DMAE is not ready (src_addr %08x len32 %d)"
		   " using indirect\n", src_addr, len32);
		for (i = 0; i < len32; i++)
			data[i] = bnx2x_reg_rd_ind(bp, src_addr + i*4);

	memset(&dmae, 0, sizeof(struct dmae_command));

	dmae.opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
		       DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
		       DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
		       DMAE_CMD_ENDIANITY_B_DW_SWAP |
		       DMAE_CMD_ENDIANITY_DW_SWAP |
		       (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
		       (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
	dmae.src_addr_lo = src_addr >> 2;
	dmae.src_addr_hi = 0;
	dmae.dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_data));
	dmae.dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_data));
	dmae.comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
	dmae.comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
	dmae.comp_val = DMAE_COMP_VAL;

	DP(BNX2X_MSG_OFF, "DMAE: opcode 0x%08x\n"
	   DP_LEVEL "src_addr [%x:%08x] len [%d *4] "
		    "dst_addr [%x:%08x (%08x)]\n"
	   DP_LEVEL "comp_addr [%x:%08x] comp_val 0x%08x\n",
	   dmae.opcode, dmae.src_addr_hi, dmae.src_addr_lo,
	   dmae.len, dmae.dst_addr_hi, dmae.dst_addr_lo, src_addr,
	   dmae.comp_addr_hi, dmae.comp_addr_lo, dmae.comp_val);

	mutex_lock(&bp->dmae_mutex);

	memset(bnx2x_sp(bp, wb_data[0]), 0, sizeof(u32) * 4);

	bnx2x_post_dmae(bp, &dmae, INIT_DMAE_C(bp));

	while (*wb_comp != DMAE_COMP_VAL) {

			BNX2X_ERR("DMAE timeout!\n");

		/* adjust delay for emulation/FPGA */
		if (CHIP_REV_IS_SLOW(bp))

	DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
	   bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
	   bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);

	mutex_unlock(&bp->dmae_mutex);

void bnx2x_write_dmae_phys_len(struct bnx2x *bp, dma_addr_t phys_addr,
	while (len > DMAE_LEN32_WR_MAX) {
		bnx2x_write_dmae(bp, phys_addr + offset,
				 addr + offset, DMAE_LEN32_WR_MAX);
		offset += DMAE_LEN32_WR_MAX * 4;
		len -= DMAE_LEN32_WR_MAX;

	bnx2x_write_dmae(bp, phys_addr + offset, addr + offset, len);

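/*
 * Editor's note: worked example of the chunking above, assuming a
 * DMAE_LEN32_WR_MAX of 0x400 (1024 dwords): a 2500-dword buffer goes out as
 * 1024 + 1024 + 452 dwords.  The byte offset advances by
 * DMAE_LEN32_WR_MAX * 4 per chunk because 'phys_addr' and 'addr' are byte
 * addresses while 'len' counts 32-bit words.  A hedged usage sketch:
 */
#if 0
	/* push 2500 dwords of context to GRC address 'dst' */
	bnx2x_write_dmae_phys_len(bp, mapping, dst, 2500);
#endif
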
/* used only for slowpath so not inlined */
static void bnx2x_wb_wr(struct bnx2x *bp, int reg, u32 val_hi, u32 val_lo)
	wb_write[0] = val_hi;
	wb_write[1] = val_lo;
	REG_WR_DMAE(bp, reg, wb_write, 2);

static u64 bnx2x_wb_rd(struct bnx2x *bp, int reg)
	REG_RD_DMAE(bp, reg, wb_data, 2);

	return HILO_U64(wb_data[0], wb_data[1]);

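/*
 * Editor's note: HILO_U64 merges the two 32-bit halves read via DMAE into one
 * 64-bit value, i.e. ((u64)hi << 32) | lo.  A hedged sketch of the round trip
 * for a wide (two-dword) register:
 */
#if 0
	bnx2x_wb_wr(bp, reg, 0x00000001, 0x00000002);	/* writes 0x1_00000002 */
	WARN_ON(bnx2x_wb_rd(bp, reg) != 0x100000002ULL);
#endif
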
static int bnx2x_mc_assert(struct bnx2x *bp)
	u32 row0, row1, row2, row3;

	last_idx = REG_RD8(bp, BAR_XSTRORM_INTMEM +
			   XSTORM_ASSERT_LIST_INDEX_OFFSET);
		BNX2X_ERR("XSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("XSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);

	last_idx = REG_RD8(bp, BAR_TSTRORM_INTMEM +
			   TSTORM_ASSERT_LIST_INDEX_OFFSET);
		BNX2X_ERR("TSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("TSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);

	last_idx = REG_RD8(bp, BAR_CSTRORM_INTMEM +
			   CSTORM_ASSERT_LIST_INDEX_OFFSET);
		BNX2X_ERR("CSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("CSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);

	last_idx = REG_RD8(bp, BAR_USTRORM_INTMEM +
			   USTORM_ASSERT_LIST_INDEX_OFFSET);
		BNX2X_ERR("USTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("USTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);

static void bnx2x_fw_dump(struct bnx2x *bp)

	mark = REG_RD(bp, MCP_REG_MCPR_SCRATCH + 0xf104);
	mark = ((mark + 0x3) & ~0x3);
	printk(KERN_ERR PFX "begin fw dump (mark 0x%x)\n", mark);

	printk(KERN_ERR PFX);
	for (offset = mark - 0x08000000; offset <= 0xF900; offset += 0x8*4) {
		for (word = 0; word < 8; word++)
			data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +

		printk(KERN_CONT "%s", (char *)data);
	for (offset = 0xF108; offset <= mark - 0x08000000; offset += 0x8*4) {
		for (word = 0; word < 8; word++)
			data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +

		printk(KERN_CONT "%s", (char *)data);

	printk(KERN_ERR PFX "end of fw dump\n");

static void bnx2x_panic_dump(struct bnx2x *bp)

	bp->stats_state = STATS_STATE_DISABLED;
	DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");

	BNX2X_ERR("begin crash dump -----------------\n");

	BNX2X_ERR("def_c_idx(%u) def_u_idx(%u) def_x_idx(%u)"
		  " def_t_idx(%u) def_att_idx(%u) attn_state(%u)"
		  " spq_prod_idx(%u)\n",
		  bp->def_c_idx, bp->def_u_idx, bp->def_x_idx, bp->def_t_idx,
		  bp->def_att_idx, bp->attn_state, bp->spq_prod_idx);

	for_each_rx_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		BNX2X_ERR("fp%d: rx_bd_prod(%x) rx_bd_cons(%x)"
			  " *rx_bd_cons_sb(%x) rx_comp_prod(%x)"
			  " rx_comp_cons(%x) *rx_cons_sb(%x)\n",
			  i, fp->rx_bd_prod, fp->rx_bd_cons,
			  le16_to_cpu(*fp->rx_bd_cons_sb), fp->rx_comp_prod,
			  fp->rx_comp_cons, le16_to_cpu(*fp->rx_cons_sb));
		BNX2X_ERR(" rx_sge_prod(%x) last_max_sge(%x)"
			  " fp_u_idx(%x) *sb_u_idx(%x)\n",
			  fp->rx_sge_prod, fp->last_max_sge,
			  le16_to_cpu(fp->fp_u_idx),
			  fp->status_blk->u_status_block.status_block_index);

	for_each_tx_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		BNX2X_ERR("fp%d: tx_pkt_prod(%x) tx_pkt_cons(%x)"
			  " tx_bd_prod(%x) tx_bd_cons(%x) *tx_cons_sb(%x)\n",
			  i, fp->tx_pkt_prod, fp->tx_pkt_cons, fp->tx_bd_prod,
			  fp->tx_bd_cons, le16_to_cpu(*fp->tx_cons_sb));
		BNX2X_ERR(" fp_c_idx(%x) *sb_c_idx(%x)"
			  " tx_db_prod(%x)\n", le16_to_cpu(fp->fp_c_idx),
			  fp->status_blk->c_status_block.status_block_index,
			  fp->tx_db.data.prod);

	for_each_rx_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10);
		end = RX_BD(le16_to_cpu(*fp->rx_cons_sb) + 503);
		for (j = start; j != end; j = RX_BD(j + 1)) {
			u32 *rx_bd = (u32 *)&fp->rx_desc_ring[j];
			struct sw_rx_bd *sw_bd = &fp->rx_buf_ring[j];

			BNX2X_ERR("fp%d: rx_bd[%x]=[%x:%x] sw_bd=[%p]\n",
				  i, j, rx_bd[1], rx_bd[0], sw_bd->skb);

		start = RX_SGE(fp->rx_sge_prod);
		end = RX_SGE(fp->last_max_sge);
		for (j = start; j != end; j = RX_SGE(j + 1)) {
			u32 *rx_sge = (u32 *)&fp->rx_sge_ring[j];
			struct sw_rx_page *sw_page = &fp->rx_page_ring[j];

			BNX2X_ERR("fp%d: rx_sge[%x]=[%x:%x] sw_page=[%p]\n",
				  i, j, rx_sge[1], rx_sge[0], sw_page->page);

		start = RCQ_BD(fp->rx_comp_cons - 10);
		end = RCQ_BD(fp->rx_comp_cons + 503);
		for (j = start; j != end; j = RCQ_BD(j + 1)) {
			u32 *cqe = (u32 *)&fp->rx_comp_ring[j];

			BNX2X_ERR("fp%d: cqe[%x]=[%x:%x:%x:%x]\n",
				  i, j, cqe[0], cqe[1], cqe[2], cqe[3]);

	for_each_tx_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		start = TX_BD(le16_to_cpu(*fp->tx_cons_sb) - 10);
		end = TX_BD(le16_to_cpu(*fp->tx_cons_sb) + 245);
		for (j = start; j != end; j = TX_BD(j + 1)) {
			struct sw_tx_bd *sw_bd = &fp->tx_buf_ring[j];

			BNX2X_ERR("fp%d: packet[%x]=[%p,%x]\n",
				  i, j, sw_bd->skb, sw_bd->first_bd);

		start = TX_BD(fp->tx_bd_cons - 10);
		end = TX_BD(fp->tx_bd_cons + 254);
		for (j = start; j != end; j = TX_BD(j + 1)) {
			u32 *tx_bd = (u32 *)&fp->tx_desc_ring[j];

			BNX2X_ERR("fp%d: tx_bd[%x]=[%x:%x:%x:%x]\n",
				  i, j, tx_bd[0], tx_bd[1], tx_bd[2], tx_bd[3]);

	BNX2X_ERR("end crash dump -----------------\n");

static void bnx2x_int_enable(struct bnx2x *bp)
	int port = BP_PORT(bp);
	u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
	u32 val = REG_RD(bp, addr);
	int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
	int msi = (bp->flags & USING_MSI_FLAG) ? 1 : 0;

		val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			 HC_CONFIG_0_REG_INT_LINE_EN_0);
		val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);

		val &= ~HC_CONFIG_0_REG_INT_LINE_EN_0;
		val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);

		val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_INT_LINE_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);

	DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",

	REG_WR(bp, addr, val);

	val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0;

	DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x) mode %s\n",
	   val, port, addr, (msix ? "MSI-X" : (msi ? "MSI" : "INTx")));

	REG_WR(bp, addr, val);
	/*
	 * Ensure that HC_CONFIG is written before leading/trailing edge config
	 */

	if (CHIP_IS_E1H(bp)) {
		/* init leading/trailing edge */

			val = (0xee0f | (1 << (BP_E1HVN(bp) + 4)));

			/* enable nig and gpio3 attention */

		REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
		REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);

	/* Make sure that interrupts are indeed enabled from here on */

static void bnx2x_int_disable(struct bnx2x *bp)
	int port = BP_PORT(bp);
	u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
	u32 val = REG_RD(bp, addr);

	val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
		 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
		 HC_CONFIG_0_REG_INT_LINE_EN_0 |
		 HC_CONFIG_0_REG_ATTN_BIT_EN_0);

	DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",

	/* flush all outstanding writes */

	REG_WR(bp, addr, val);
	if (REG_RD(bp, addr) != val)
		BNX2X_ERR("BUG! proper val not read from IGU!\n");

static void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw)
	int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;

	/* disable interrupt handling */
	atomic_inc(&bp->intr_sem);
	smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */

		/* prevent the HW from sending interrupts */
		bnx2x_int_disable(bp);

	/* make sure all ISRs are done */
		synchronize_irq(bp->msix_table[0].vector);

		for_each_queue(bp, i)
			synchronize_irq(bp->msix_table[i + offset].vector);

		synchronize_irq(bp->pdev->irq);

	/* make sure sp_task is not running */
	cancel_delayed_work(&bp->sp_task);
	flush_workqueue(bnx2x_wq);

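/*
 * Editor's note: a hedged summary of the quiesce contract above — the steps
 * are deliberately ordered:
 *
 *   1. atomic_inc(&bp->intr_sem)	ISRs become no-ops (they re-check it)
 *   2. bnx2x_int_disable()		HW stops generating interrupts
 *   3. synchronize_irq()		in-flight handlers drain
 *   4. cancel/flush sp_task		slowpath work is parked
 *
 * Re-enabling is the mirror image: reset intr_sem to 0, then call
 * bnx2x_int_enable().
 */
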
/*
 * General service functions
 */

static inline void bnx2x_ack_sb(struct bnx2x *bp, u8 sb_id,
				u8 storm, u16 index, u8 op, u8 update)
	u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
		       COMMAND_REG_INT_ACK);
	struct igu_ack_register igu_ack;

	igu_ack.status_block_index = index;
	igu_ack.sb_id_and_flags =
			((sb_id << IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT) |
			 (storm << IGU_ACK_REGISTER_STORM_ID_SHIFT) |
			 (update << IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT) |
			 (op << IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT));

	DP(BNX2X_MSG_OFF, "write 0x%08x to HC addr 0x%x\n",
	   (*(u32 *)&igu_ack), hc_addr);
	REG_WR(bp, hc_addr, (*(u32 *)&igu_ack));

	/* Make sure that ACK is written */

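/*
 * Editor's note: sb_id_and_flags packs four fields into one dword so the ACK
 * is a single MMIO write.  An illustrative encoding (the shift macros are
 * from this driver; the concrete values are hypothetical):
 */
#if 0
	struct igu_ack_register ack;

	ack.status_block_index = 42;	/* last index the driver consumed */
	ack.sb_id_and_flags =
		((5 << IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT) |  /* SB 5 */
		 (USTORM_ID << IGU_ACK_REGISTER_STORM_ID_SHIFT) |
		 (1 << IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT) |	  /* update */
		 (IGU_INT_ENABLE << IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT));
#endif
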
static inline u16 bnx2x_update_fpsb_idx(struct bnx2x_fastpath *fp)
	struct host_status_block *fpsb = fp->status_blk;

	barrier(); /* status block is written to by the chip */
	if (fp->fp_c_idx != fpsb->c_status_block.status_block_index) {
		fp->fp_c_idx = fpsb->c_status_block.status_block_index;

	if (fp->fp_u_idx != fpsb->u_status_block.status_block_index) {
		fp->fp_u_idx = fpsb->u_status_block.status_block_index;

static u16 bnx2x_ack_int(struct bnx2x *bp)
	u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
		       COMMAND_REG_SIMD_MASK);
	u32 result = REG_RD(bp, hc_addr);

	DP(BNX2X_MSG_OFF, "read 0x%08x from HC addr 0x%x\n",

/*
 * fast path service functions
 */

static inline int bnx2x_has_tx_work_unload(struct bnx2x_fastpath *fp)
	/* Tell compiler that consumer and producer can change */
	return (fp->tx_pkt_prod != fp->tx_pkt_cons);

/* free skb in the packet ring at pos idx
 * return idx of last bd freed
 */
static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fastpath *fp,
	struct sw_tx_bd *tx_buf = &fp->tx_buf_ring[idx];
	struct eth_tx_start_bd *tx_start_bd;
	struct eth_tx_bd *tx_data_bd;
	struct sk_buff *skb = tx_buf->skb;
	u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;

	DP(BNX2X_MSG_OFF, "pkt_idx %d buff @(%p)->skb %p\n",

	DP(BNX2X_MSG_OFF, "free bd_idx %d\n", bd_idx);
	tx_start_bd = &fp->tx_desc_ring[bd_idx].start_bd;
	pci_unmap_single(bp->pdev, BD_UNMAP_ADDR(tx_start_bd),
			 BD_UNMAP_LEN(tx_start_bd), PCI_DMA_TODEVICE);

	nbd = le16_to_cpu(tx_start_bd->nbd) - 1;
#ifdef BNX2X_STOP_ON_ERROR
	if ((nbd - 1) > (MAX_SKB_FRAGS + 2)) {
		BNX2X_ERR("BAD nbd!\n");

	new_cons = nbd + tx_buf->first_bd;

	/* Get the next bd */
	bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));

	/* Skip a parse bd... */
		bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));

	/* ...and the TSO split header bd since they have no mapping */
	if (tx_buf->flags & BNX2X_TSO_SPLIT_BD) {
		bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));

		DP(BNX2X_MSG_OFF, "free frag bd_idx %d\n", bd_idx);
		tx_data_bd = &fp->tx_desc_ring[bd_idx].reg_bd;
		pci_unmap_page(bp->pdev, BD_UNMAP_ADDR(tx_data_bd),
			       BD_UNMAP_LEN(tx_data_bd), PCI_DMA_TODEVICE);

		bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));

	dev_kfree_skb_any(skb);
	tx_buf->first_bd = 0;

static inline u16 bnx2x_tx_avail(struct bnx2x_fastpath *fp)

	barrier(); /* Tell compiler that prod and cons can change */
	prod = fp->tx_bd_prod;
	cons = fp->tx_bd_cons;

	/* NUM_TX_RINGS = number of "next-page" entries
	   It will be used as a threshold */
	used = SUB_S16(prod, cons) + (s16)NUM_TX_RINGS;

#ifdef BNX2X_STOP_ON_ERROR
	WARN_ON(used > fp->bp->tx_ring_size);
	WARN_ON((fp->bp->tx_ring_size - used) > MAX_TX_AVAIL);

	return (s16)(fp->bp->tx_ring_size) - used;

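/*
 * Editor's note: worked example of the accounting above.  With 16-bit ring
 * indices, prod = 10, cons = 65530 (prod has wrapped) and NUM_TX_RINGS = 16:
 * SUB_S16(10, 65530) evaluates to 16 as a signed 16-bit difference, so
 * used = 16 + 16 = 32 BDs and tx_ring_size - 32 BDs remain.  Counting the
 * NUM_TX_RINGS "next page" BDs as used keeps the estimate conservative,
 * since those entries can never hold packet data.
 */
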
static void bnx2x_tx_int(struct bnx2x_fastpath *fp)
	struct bnx2x *bp = fp->bp;
	struct netdev_queue *txq;
	u16 hw_cons, sw_cons, bd_cons = fp->tx_bd_cons;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))

	txq = netdev_get_tx_queue(bp->dev, fp->index - bp->num_rx_queues);
	hw_cons = le16_to_cpu(*fp->tx_cons_sb);
	sw_cons = fp->tx_pkt_cons;

	while (sw_cons != hw_cons) {

		pkt_cons = TX_BD(sw_cons);

		/* prefetch(bp->tx_buf_ring[pkt_cons].skb); */

		DP(NETIF_MSG_TX_DONE, "hw_cons %u sw_cons %u pkt_cons %u\n",
		   hw_cons, sw_cons, pkt_cons);

/*		if (NEXT_TX_IDX(sw_cons) != hw_cons) {
			prefetch(fp->tx_buf_ring[NEXT_TX_IDX(sw_cons)].skb);
*/
		bd_cons = bnx2x_free_tx_pkt(bp, fp, pkt_cons);

	fp->tx_pkt_cons = sw_cons;
	fp->tx_bd_cons = bd_cons;

	/* TBD need a thresh? */
	if (unlikely(netif_tx_queue_stopped(txq))) {

		/* Need to make the tx_bd_cons update visible to start_xmit()
		 * before checking for netif_tx_queue_stopped().  Without the
		 * memory barrier, there is a small possibility that
		 * start_xmit() will miss it and cause the queue to be stopped
		 * forever.
		 */

		if ((netif_tx_queue_stopped(txq)) &&
		    (bp->state == BNX2X_STATE_OPEN) &&
		    (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3))
			netif_tx_wake_queue(txq);

static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid);

static void bnx2x_sp_event(struct bnx2x_fastpath *fp,
			   union eth_rx_cqe *rr_cqe)
	struct bnx2x *bp = fp->bp;
	int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data);
	int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data);

	   "fp %d cid %d got ramrod #%d state is %x type is %d\n",
	   fp->index, cid, command, bp->state,
	   rr_cqe->ramrod_cqe.ramrod_type);

		switch (command | fp->state) {
		case (RAMROD_CMD_ID_ETH_CLIENT_SETUP |
		      BNX2X_FP_STATE_OPENING):
			DP(NETIF_MSG_IFUP, "got MULTI[%d] setup ramrod\n",
			fp->state = BNX2X_FP_STATE_OPEN;

		case (RAMROD_CMD_ID_ETH_HALT | BNX2X_FP_STATE_HALTING):
			DP(NETIF_MSG_IFDOWN, "got MULTI[%d] halt ramrod\n",
			fp->state = BNX2X_FP_STATE_HALTED;

			BNX2X_ERR("unexpected MC reply (%d) "
				  "fp->state is %x\n", command, fp->state);

		mb(); /* force bnx2x_wait_ramrod() to see the change */

	switch (command | bp->state) {
	case (RAMROD_CMD_ID_ETH_PORT_SETUP | BNX2X_STATE_OPENING_WAIT4_PORT):
		DP(NETIF_MSG_IFUP, "got setup ramrod\n");
		bp->state = BNX2X_STATE_OPEN;

	case (RAMROD_CMD_ID_ETH_HALT | BNX2X_STATE_CLOSING_WAIT4_HALT):
		DP(NETIF_MSG_IFDOWN, "got halt ramrod\n");
		bp->state = BNX2X_STATE_CLOSING_WAIT4_DELETE;
		fp->state = BNX2X_FP_STATE_HALTED;

	case (RAMROD_CMD_ID_ETH_CFC_DEL | BNX2X_STATE_CLOSING_WAIT4_HALT):
		DP(NETIF_MSG_IFDOWN, "got delete ramrod for MULTI[%d]\n", cid);
		bnx2x_fp(bp, cid, state) = BNX2X_FP_STATE_CLOSED;

	case (RAMROD_CMD_ID_ETH_CFC_DEL | BNX2X_STATE_OPEN):
		DP(NETIF_MSG_IFDOWN, "got delete ramrod for CID %d\n", cid);
		bnx2x_cnic_cfc_comp(bp, cid);

	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_OPEN):
	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_DIAG):
		DP(NETIF_MSG_IFUP, "got set mac ramrod\n");
		bp->set_mac_pending--;

	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_CLOSING_WAIT4_HALT):
	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_DISABLED):
		DP(NETIF_MSG_IFDOWN, "got (un)set mac ramrod\n");
		bp->set_mac_pending--;

		BNX2X_ERR("unexpected MC reply (%d) bp->state is %x\n",
			  command, bp->state);

	mb(); /* force bnx2x_wait_ramrod() to see the change */

static inline void bnx2x_free_rx_sge(struct bnx2x *bp,
				     struct bnx2x_fastpath *fp, u16 index)
	struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
	struct page *page = sw_buf->page;
	struct eth_rx_sge *sge = &fp->rx_sge_ring[index];

	/* Skip "next page" elements */

	pci_unmap_page(bp->pdev, pci_unmap_addr(sw_buf, mapping),
		       SGE_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);
	__free_pages(page, PAGES_PER_SGE_SHIFT);

	sw_buf->page = NULL;

static inline void bnx2x_free_rx_sge_range(struct bnx2x *bp,
					   struct bnx2x_fastpath *fp, int last)

	for (i = 0; i < last; i++)
		bnx2x_free_rx_sge(bp, fp, i);

static inline int bnx2x_alloc_rx_sge(struct bnx2x *bp,
				     struct bnx2x_fastpath *fp, u16 index)
	struct page *page = alloc_pages(GFP_ATOMIC, PAGES_PER_SGE_SHIFT);
	struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
	struct eth_rx_sge *sge = &fp->rx_sge_ring[index];

	if (unlikely(page == NULL))

	mapping = pci_map_page(bp->pdev, page, 0, SGE_PAGE_SIZE*PAGES_PER_SGE,
			       PCI_DMA_FROMDEVICE);
	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
		__free_pages(page, PAGES_PER_SGE_SHIFT);

	sw_buf->page = page;
	pci_unmap_addr_set(sw_buf, mapping, mapping);

	sge->addr_hi = cpu_to_le32(U64_HI(mapping));
	sge->addr_lo = cpu_to_le32(U64_LO(mapping));

static inline int bnx2x_alloc_rx_skb(struct bnx2x *bp,
				     struct bnx2x_fastpath *fp, u16 index)
	struct sk_buff *skb;
	struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
	struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];

	skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
	if (unlikely(skb == NULL))

	mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_size,
				 PCI_DMA_FROMDEVICE);
	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {

	pci_unmap_addr_set(rx_buf, mapping, mapping);

	rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

/* note that we are not allocating a new skb,
 * we are just moving one from cons to prod;
 * we are not creating a new mapping,
 * so there is no need to check for dma_mapping_error().
 */
static void bnx2x_reuse_rx_skb(struct bnx2x_fastpath *fp,
			       struct sk_buff *skb, u16 cons, u16 prod)
	struct bnx2x *bp = fp->bp;
	struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
	struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
	struct eth_rx_bd *cons_bd = &fp->rx_desc_ring[cons];
	struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];

	pci_dma_sync_single_for_device(bp->pdev,
				       pci_unmap_addr(cons_rx_buf, mapping),
				       RX_COPY_THRESH, PCI_DMA_FROMDEVICE);

	prod_rx_buf->skb = cons_rx_buf->skb;
	pci_unmap_addr_set(prod_rx_buf, mapping,
			   pci_unmap_addr(cons_rx_buf, mapping));
	*prod_bd = *cons_bd;

static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
	u16 last_max = fp->last_max_sge;

	if (SUB_S16(idx, last_max) > 0)
		fp->last_max_sge = idx;

static void bnx2x_clear_sge_mask_next_elems(struct bnx2x_fastpath *fp)

	for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
		int idx = RX_SGE_CNT * i - 1;

		for (j = 0; j < 2; j++) {
			SGE_MASK_CLEAR_BIT(fp, idx);

static void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
				  struct eth_fast_path_rx_cqe *fp_cqe)
	struct bnx2x *bp = fp->bp;
	u16 sge_len = SGE_PAGE_ALIGN(le16_to_cpu(fp_cqe->pkt_len) -
				     le16_to_cpu(fp_cqe->len_on_bd)) >>
	u16 last_max, last_elem, first_elem;

	/* First mark all used pages */
	for (i = 0; i < sge_len; i++)
		SGE_MASK_CLEAR_BIT(fp, RX_SGE(le16_to_cpu(fp_cqe->sgl[i])));

	DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
	   sge_len - 1, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));

	/* Here we assume that the last SGE index is the biggest */
	prefetch((void *)(fp->sge_mask));
	bnx2x_update_last_max_sge(fp, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));

	last_max = RX_SGE(fp->last_max_sge);
	last_elem = last_max >> RX_SGE_MASK_ELEM_SHIFT;
	first_elem = RX_SGE(fp->rx_sge_prod) >> RX_SGE_MASK_ELEM_SHIFT;

	/* If ring is not full */
	if (last_elem + 1 != first_elem)

	/* Now update the prod */
	for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
		if (likely(fp->sge_mask[i]))

		fp->sge_mask[i] = RX_SGE_MASK_ELEM_ONE_MASK;
		delta += RX_SGE_MASK_ELEM_SZ;

		fp->rx_sge_prod += delta;
		/* clear page-end entries */
		bnx2x_clear_sge_mask_next_elems(fp);

	DP(NETIF_MSG_RX_STATUS,
	   "fp->last_max_sge = %d fp->rx_sge_prod = %d\n",
	   fp->last_max_sge, fp->rx_sge_prod);

static inline void bnx2x_init_sge_ring_bit_mask(struct bnx2x_fastpath *fp)
	/* Set the mask to all 1-s: it's faster to compare to 0 than to 0xf-s */
	memset(fp->sge_mask, 0xff,
	       (NUM_RX_SGE >> RX_SGE_MASK_ELEM_SHIFT)*sizeof(u64));

	/* Clear the two last indices in the page to 1:
	   these are the indices that correspond to the "next" element,
	   hence will never be indicated and should be removed from
	   the calculations. */
	bnx2x_clear_sge_mask_next_elems(fp);

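/*
 * Editor's note: worked example of the mask bookkeeping above, assuming
 * RX_SGE_MASK_ELEM_SZ = 64 (one u64 per mask element).  Producing SGEs
 * 0..127 spans mask elements 0 and 1; bits are cleared as the FW consumes
 * pages, and a whole element is handed back to the producer only once all
 * 64 of its bits are set again — which is why the all-ones initialisation
 * above makes the hot-path check a cheap compare of fp->sge_mask[i] against
 * a full mask rather than per-index tracking.
 */
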
static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
			    struct sk_buff *skb, u16 cons, u16 prod)
	struct bnx2x *bp = fp->bp;
	struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
	struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
	struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];

	/* move empty skb from pool to prod and map it */
	prod_rx_buf->skb = fp->tpa_pool[queue].skb;
	mapping = pci_map_single(bp->pdev, fp->tpa_pool[queue].skb->data,
				 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
	pci_unmap_addr_set(prod_rx_buf, mapping, mapping);

	/* move partial skb from cons to pool (don't unmap yet) */
	fp->tpa_pool[queue] = *cons_rx_buf;

	/* mark bin state as start - print error if current state != stop */
	if (fp->tpa_state[queue] != BNX2X_TPA_STOP)
		BNX2X_ERR("start of bin not in stop [%d]\n", queue);

	fp->tpa_state[queue] = BNX2X_TPA_START;

	/* point prod_bd to new skb */
	prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

#ifdef BNX2X_STOP_ON_ERROR
	fp->tpa_queue_used |= (1 << queue);
#ifdef __powerpc64__
	DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n",
	DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
	   fp->tpa_queue_used);

static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			       struct sk_buff *skb,
			       struct eth_fast_path_rx_cqe *fp_cqe,
	struct sw_rx_page *rx_pg, old_rx_pg;
	u16 len_on_bd = le16_to_cpu(fp_cqe->len_on_bd);
	u32 i, frag_len, frag_size, pages;

	frag_size = le16_to_cpu(fp_cqe->pkt_len) - len_on_bd;
	pages = SGE_PAGE_ALIGN(frag_size) >> SGE_PAGE_SHIFT;

	/* This is needed in order to enable forwarding support */
		skb_shinfo(skb)->gso_size = min((u32)SGE_PAGE_SIZE,
					       max(frag_size, (u32)len_on_bd));

#ifdef BNX2X_STOP_ON_ERROR
	    min((u32)8, (u32)MAX_SKB_FRAGS) * SGE_PAGE_SIZE * PAGES_PER_SGE) {
		BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
		BNX2X_ERR("fp_cqe->pkt_len = %d fp_cqe->len_on_bd = %d\n",
			  fp_cqe->pkt_len, len_on_bd);

	/* Run through the SGL and compose the fragmented skb */
	for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
		u16 sge_idx = RX_SGE(le16_to_cpu(fp_cqe->sgl[j]));

		/* FW gives the indices of the SGE as if the ring is an array
		   (meaning that "next" element will consume 2 indices) */
		frag_len = min(frag_size, (u32)(SGE_PAGE_SIZE*PAGES_PER_SGE));
		rx_pg = &fp->rx_page_ring[sge_idx];

		/* If we fail to allocate a substitute page, we simply stop
		   where we are and drop the whole packet */
		err = bnx2x_alloc_rx_sge(bp, fp, sge_idx);
		if (unlikely(err)) {
			fp->eth_q_stats.rx_skb_alloc_failed++;

		/* Unmap the page as we are going to pass it to the stack */
		pci_unmap_page(bp->pdev, pci_unmap_addr(&old_rx_pg, mapping),
			       SGE_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);

		/* Add one frag and update the appropriate fields in the skb */
		skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);

		skb->data_len += frag_len;
		skb->truesize += frag_len;
		skb->len += frag_len;

		frag_size -= frag_len;

static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			   u16 queue, int pad, int len, union eth_rx_cqe *cqe,
	struct sw_rx_bd *rx_buf = &fp->tpa_pool[queue];
	struct sk_buff *skb = rx_buf->skb;
	struct sk_buff *new_skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);

	/* Unmap skb in the pool anyway, as we are going to change
	   pool entry status to BNX2X_TPA_STOP even if new skb allocation
	   fails. */
	pci_unmap_single(bp->pdev, pci_unmap_addr(rx_buf, mapping),
			 bp->rx_buf_size, PCI_DMA_FROMDEVICE);

	if (likely(new_skb)) {
		/* fix ip xsum and give it to the stack */
		/* (no need to map the new skb) */
			(le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
			 PARSING_FLAGS_VLAN);
		int is_not_hwaccel_vlan_cqe =
			(is_vlan_cqe && (!(bp->flags & HW_VLAN_RX_FLAG)));

		prefetch(((char *)(skb)) + 128);

#ifdef BNX2X_STOP_ON_ERROR
		if (pad + len > bp->rx_buf_size) {
			BNX2X_ERR("skb_put is about to fail... "
				  "pad %d len %d rx_buf_size %d\n",
				  pad, len, bp->rx_buf_size);

		skb_reserve(skb, pad);

		skb->protocol = eth_type_trans(skb, bp->dev);
		skb->ip_summed = CHECKSUM_UNNECESSARY;

			iph = (struct iphdr *)skb->data;

			/* If there is no Rx VLAN offloading -
			   take VLAN tag into an account */
			if (unlikely(is_not_hwaccel_vlan_cqe))
				iph = (struct iphdr *)((u8 *)iph + VLAN_HLEN);

			iph->check = ip_fast_csum((u8 *)iph, iph->ihl);

		if (!bnx2x_fill_frag_skb(bp, fp, skb,
					 &cqe->fast_path_cqe, cqe_idx)) {
			if ((bp->vlgrp != NULL) && is_vlan_cqe &&
			    (!is_not_hwaccel_vlan_cqe))
				vlan_hwaccel_receive_skb(skb, bp->vlgrp,
						le16_to_cpu(cqe->fast_path_cqe.
				netif_receive_skb(skb);
			DP(NETIF_MSG_RX_STATUS, "Failed to allocate new pages"
			   " - dropping packet!\n");

		/* put new skb in bin */
		fp->tpa_pool[queue].skb = new_skb;

		/* else drop the packet and keep the buffer in the bin */
		DP(NETIF_MSG_RX_STATUS,
		   "Failed to allocate new skb - dropping packet!\n");
		fp->eth_q_stats.rx_skb_alloc_failed++;

	fp->tpa_state[queue] = BNX2X_TPA_STOP;

static inline void bnx2x_update_rx_prod(struct bnx2x *bp,
					struct bnx2x_fastpath *fp,
					u16 bd_prod, u16 rx_comp_prod,
	struct ustorm_eth_rx_producers rx_prods = {0};

	/* Update producers */
	rx_prods.bd_prod = bd_prod;
	rx_prods.cqe_prod = rx_comp_prod;
	rx_prods.sge_prod = rx_sge_prod;

	/*
	 * Make sure that the BD and SGE data is updated before updating the
	 * producers since FW might read the BD/SGE right after the producer
	 * is updated.
	 * This is only applicable for weak-ordered memory model archs such
	 * as IA-64.  The following barrier is also mandatory since FW will
	 * assume BDs must have buffers.
	 */

	for (i = 0; i < sizeof(struct ustorm_eth_rx_producers)/4; i++)
		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_RX_PRODS_OFFSET(BP_PORT(bp), fp->cl_id) + i*4,
		       ((u32 *)&rx_prods)[i]);

	mmiowb(); /* keep prod updates ordered */

	DP(NETIF_MSG_RX_STATUS,
	   "queue[%d]: wrote bd_prod %u cqe_prod %u sge_prod %u\n",
	   fp->index, bd_prod, rx_comp_prod, rx_sge_prod);

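/*
 * Editor's note: the barriers discussed above live inside this helper, so a
 * caller's sequence is simply "fill BDs/SGEs, then publish".  A hedged
 * sketch (index handling simplified):
 */
#if 0
	fp->rx_desc_ring[RX_BD(bd_prod)] = bd;		/* fill descriptor  */
	bnx2x_update_rx_prod(bp, fp, bd_prod + 1,	/* publish; barrier */
			     sw_comp_prod, fp->rx_sge_prod); /* is internal */
#endif
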
static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
	struct bnx2x *bp = fp->bp;
	u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
	u16 hw_comp_cons, sw_comp_cons, sw_comp_prod;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))

	/* CQ "next element" is of the size of the regular element,
	   that's why it's ok here */
	hw_comp_cons = le16_to_cpu(*fp->rx_cons_sb);
	if ((hw_comp_cons & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)

	bd_cons = fp->rx_bd_cons;
	bd_prod = fp->rx_bd_prod;
	bd_prod_fw = bd_prod;
	sw_comp_cons = fp->rx_comp_cons;
	sw_comp_prod = fp->rx_comp_prod;

	/* Memory barrier necessary as speculative reads of the rx
	 * buffer can be ahead of the index in the status block
	 */

	DP(NETIF_MSG_RX_STATUS,
	   "queue[%d]: hw_comp_cons %u sw_comp_cons %u\n",
	   fp->index, hw_comp_cons, sw_comp_cons);

	while (sw_comp_cons != hw_comp_cons) {
		struct sw_rx_bd *rx_buf = NULL;
		struct sk_buff *skb;
		union eth_rx_cqe *cqe;

		comp_ring_cons = RCQ_BD(sw_comp_cons);
		bd_prod = RX_BD(bd_prod);
		bd_cons = RX_BD(bd_cons);

		/* Prefetch the page containing the BD descriptor
		   at producer's index.  It will be needed when new skb is
		   allocated */
		prefetch((void *)(PAGE_ALIGN((unsigned long)
					     (&fp->rx_desc_ring[bd_prod])) -

		cqe = &fp->rx_comp_ring[comp_ring_cons];
		cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;

		DP(NETIF_MSG_RX_STATUS, "CQE type %x err %x status %x"
		   " queue %x vlan %x len %u\n", CQE_TYPE(cqe_fp_flags),
		   cqe_fp_flags, cqe->fast_path_cqe.status_flags,
		   le32_to_cpu(cqe->fast_path_cqe.rss_hash_result),
		   le16_to_cpu(cqe->fast_path_cqe.vlan_tag),
		   le16_to_cpu(cqe->fast_path_cqe.pkt_len));

		/* is this a slowpath msg? */
		if (unlikely(CQE_TYPE(cqe_fp_flags))) {
			bnx2x_sp_event(fp, cqe);

		/* this is an rx packet */
			rx_buf = &fp->rx_buf_ring[bd_cons];

			len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
			pad = cqe->fast_path_cqe.placement_offset;

			/* If CQE is marked both TPA_START and TPA_END
			   it is a non-TPA CQE */
			if ((!fp->disable_tpa) &&
			    (TPA_TYPE(cqe_fp_flags) !=
			     (TPA_TYPE_START | TPA_TYPE_END))) {
				u16 queue = cqe->fast_path_cqe.queue_index;

				if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_START) {
					DP(NETIF_MSG_RX_STATUS,
					   "calling tpa_start on queue %d\n",

					bnx2x_tpa_start(fp, queue, skb,

				if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_END) {
					DP(NETIF_MSG_RX_STATUS,
					   "calling tpa_stop on queue %d\n",

					if (!BNX2X_RX_SUM_FIX(cqe))
						BNX2X_ERR("STOP on non-TCP "

					/* This is a size of the linear data
					   on this skb */
					len = le16_to_cpu(cqe->fast_path_cqe.
					bnx2x_tpa_stop(bp, fp, queue, pad,
						       len, cqe, comp_ring_cons);
#ifdef BNX2X_STOP_ON_ERROR

					bnx2x_update_sge_prod(fp,
							&cqe->fast_path_cqe);

			pci_dma_sync_single_for_device(bp->pdev,
					pci_unmap_addr(rx_buf, mapping),
						       pad + RX_COPY_THRESH,
						       PCI_DMA_FROMDEVICE);
			prefetch(((char *)(skb)) + 128);

			/* is this an error packet? */
			if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
				DP(NETIF_MSG_RX_ERR,
				   "ERROR flags %x rx packet %u\n",
				   cqe_fp_flags, sw_comp_cons);
				fp->eth_q_stats.rx_err_discard_pkt++;

			/* Since we don't have a jumbo ring
			 * copy small packets if mtu > 1500
			 */
			if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
			    (len <= RX_COPY_THRESH)) {
				struct sk_buff *new_skb;

				new_skb = netdev_alloc_skb(bp->dev,
				if (new_skb == NULL) {
					DP(NETIF_MSG_RX_ERR,
					   "ERROR packet dropped "
					   "because of alloc failure\n");
					fp->eth_q_stats.rx_skb_alloc_failed++;

				skb_copy_from_linear_data_offset(skb, pad,
						    new_skb->data + pad, len);
				skb_reserve(new_skb, pad);
				skb_put(new_skb, len);

				bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);

			if (likely(bnx2x_alloc_rx_skb(bp, fp, bd_prod) == 0)) {
				pci_unmap_single(bp->pdev,
					pci_unmap_addr(rx_buf, mapping),
						 PCI_DMA_FROMDEVICE);
				skb_reserve(skb, pad);

				DP(NETIF_MSG_RX_ERR,
				   "ERROR packet dropped because "
				   "of alloc failure\n");
				fp->eth_q_stats.rx_skb_alloc_failed++;

				bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);

			skb->protocol = eth_type_trans(skb, bp->dev);

			skb->ip_summed = CHECKSUM_NONE;
				if (likely(BNX2X_RX_CSUM_OK(cqe)))
					skb->ip_summed = CHECKSUM_UNNECESSARY;
					fp->eth_q_stats.hw_csum_err++;

		skb_record_rx_queue(skb, fp->index);

		if ((bp->vlgrp != NULL) && (bp->flags & HW_VLAN_RX_FLAG) &&
		    (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
		     PARSING_FLAGS_VLAN))
			vlan_hwaccel_receive_skb(skb, bp->vlgrp,
				le16_to_cpu(cqe->fast_path_cqe.vlan_tag));
			netif_receive_skb(skb);

		bd_cons = NEXT_RX_IDX(bd_cons);
		bd_prod = NEXT_RX_IDX(bd_prod);
		bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);

		sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
		sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);

		if (rx_pkt == budget)

	fp->rx_bd_cons = bd_cons;
	fp->rx_bd_prod = bd_prod_fw;
	fp->rx_comp_cons = sw_comp_cons;
	fp->rx_comp_prod = sw_comp_prod;

	/* Update producers */
	bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,

	fp->rx_pkt += rx_pkt;

static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
	struct bnx2x_fastpath *fp = fp_cookie;
	struct bnx2x *bp = fp->bp;

	/* Return here if interrupt is disabled */
	if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
		DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");

	DP(BNX2X_MSG_FP, "got an MSI-X interrupt on IDX:SB [%d:%d]\n",
	   fp->index, fp->sb_id);
	bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))

	/* Handle Rx or Tx according to MSI-X vector */
	if (fp->is_rx_queue) {
		prefetch(fp->rx_cons_sb);
		prefetch(&fp->status_blk->u_status_block.status_block_index);

		napi_schedule(&bnx2x_fp(bp, fp->index, napi));

		prefetch(fp->tx_cons_sb);
		prefetch(&fp->status_blk->c_status_block.status_block_index);

		bnx2x_update_fpsb_idx(fp);

		/* Re-enable interrupts */
		bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID,
			     le16_to_cpu(fp->fp_u_idx), IGU_INT_NOP, 1);
		bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID,
			     le16_to_cpu(fp->fp_c_idx), IGU_INT_ENABLE, 1);

static irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
	struct bnx2x *bp = netdev_priv(dev_instance);
	u16 status = bnx2x_ack_int(bp);

	/* Return here if interrupt is shared and it's not for us */
	if (unlikely(status == 0)) {
		DP(NETIF_MSG_INTR, "not our interrupt!\n");

	DP(NETIF_MSG_INTR, "got an interrupt status 0x%x\n", status);

	/* Return here if interrupt is disabled */
	if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
		DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))

	for (i = 0; i < BNX2X_NUM_QUEUES(bp); i++) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		mask = 0x2 << fp->sb_id;
		if (status & mask) {
			/* Handle Rx or Tx according to SB id */
			if (fp->is_rx_queue) {
				prefetch(fp->rx_cons_sb);
				prefetch(&fp->status_blk->u_status_block.
							status_block_index);

				napi_schedule(&bnx2x_fp(bp, fp->index, napi));

				prefetch(fp->tx_cons_sb);
				prefetch(&fp->status_blk->c_status_block.
							status_block_index);

				bnx2x_update_fpsb_idx(fp);

				/* Re-enable interrupts */
				bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID,
					     le16_to_cpu(fp->fp_u_idx),
				bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID,
					     le16_to_cpu(fp->fp_c_idx),

	mask = 0x2 << CNIC_SB_ID(bp);
	if (status & (mask | 0x1)) {
		struct cnic_ops *c_ops = NULL;

		c_ops = rcu_dereference(bp->cnic_ops);
			c_ops->cnic_handler(bp->cnic_data, NULL);

	if (unlikely(status & 0x1)) {
		queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);

	DP(NETIF_MSG_INTR, "got an unknown interrupt! (status %u)\n",

/* end of fast path */

static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event);

/*
 * General service functions
 */

static int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource)
	u32 resource_bit = (1 << resource);
	int func = BP_FUNC(bp);
	u32 hw_lock_control_reg;

	/* Validating that the resource is within range */
	if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
		   "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
		   resource, HW_LOCK_MAX_RESOURCE_VALUE);

		hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
		hw_lock_control_reg =
				(MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);

	/* Validating that the resource is not already taken */
	lock_status = REG_RD(bp, hw_lock_control_reg);
	if (lock_status & resource_bit) {
		DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n",
		   lock_status, resource_bit);

	/* Try for 5 seconds every 5ms */
	for (cnt = 0; cnt < 1000; cnt++) {
		/* Try to acquire the lock */
		REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
		lock_status = REG_RD(bp, hw_lock_control_reg);
		if (lock_status & resource_bit)

	DP(NETIF_MSG_HW, "Timeout\n");

static int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource)
	u32 resource_bit = (1 << resource);
	int func = BP_FUNC(bp);
	u32 hw_lock_control_reg;

	/* Validating that the resource is within range */
	if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
		   "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
		   resource, HW_LOCK_MAX_RESOURCE_VALUE);

		hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
		hw_lock_control_reg =
				(MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);

	/* Validating that the resource is currently taken */
	lock_status = REG_RD(bp, hw_lock_control_reg);
	if (!(lock_status & resource_bit)) {
		DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n",
		   lock_status, resource_bit);

	REG_WR(bp, hw_lock_control_reg, resource_bit);

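/*
 * Editor's note: illustrative pairing of the resource-lock helpers above (a
 * sketch, not a call site from this file).  The lock arbitrates access to
 * shared blocks such as the GPIO registers between the two ports/functions:
 */
#if 0
	rc = bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
	if (rc)
		return rc;
	/* ... touch MISC_REG_GPIO safely across both driver instances ... */
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
#endif
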
/* HW Lock for shared dual port PHYs */
static void bnx2x_acquire_phy_lock(struct bnx2x *bp)
	mutex_lock(&bp->port.phy_mutex);

	if (bp->port.need_hw_lock)
		bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);

static void bnx2x_release_phy_lock(struct bnx2x *bp)
	if (bp->port.need_hw_lock)
		bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);

	mutex_unlock(&bp->port.phy_mutex);

int bnx2x_get_gpio(struct bnx2x *bp, int gpio_num, u8 port)
	/* The GPIO should be swapped if swap register is set and active */
	int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
			 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
	int gpio_shift = gpio_num +
			(gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
	u32 gpio_mask = (1 << gpio_shift);

	if (gpio_num > MISC_REGISTERS_GPIO_3) {
		BNX2X_ERR("Invalid GPIO %d\n", gpio_num);

	/* read GPIO value */
	gpio_reg = REG_RD(bp, MISC_REG_GPIO);

	/* get the requested pin value */
	if ((gpio_reg & gpio_mask) == gpio_mask)

	DP(NETIF_MSG_LINK, "pin %d value 0x%x\n", gpio_num, value);

int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
	/* The GPIO should be swapped if swap register is set and active */
	int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
			 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
	int gpio_shift = gpio_num +
			(gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
	u32 gpio_mask = (1 << gpio_shift);

	if (gpio_num > MISC_REGISTERS_GPIO_3) {
		BNX2X_ERR("Invalid GPIO %d\n", gpio_num);

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
	/* read GPIO and mask except the float bits */
	gpio_reg = (REG_RD(bp, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT);

	case MISC_REGISTERS_GPIO_OUTPUT_LOW:
		DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output low\n",
		   gpio_num, gpio_shift);
		/* clear FLOAT and set CLR */
		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
		gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS);

	case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
		DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output high\n",
		   gpio_num, gpio_shift);
		/* clear FLOAT and set SET */
		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
		gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_SET_POS);

	case MISC_REGISTERS_GPIO_INPUT_HI_Z:
		DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> input\n",
		   gpio_num, gpio_shift);
		/* set FLOAT */
		gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);

	REG_WR(bp, MISC_REG_GPIO, gpio_reg);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);

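/*
 * Editor's note: illustrative use of the GPIO helper above, e.g. pulsing a
 * pin low and then releasing it to float (the choice of pin and the delay
 * are hypothetical):
 */
#if 0
	bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
		       MISC_REGISTERS_GPIO_OUTPUT_LOW, BP_PORT(bp));
	msleep(1);
	bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
		       MISC_REGISTERS_GPIO_INPUT_HI_Z, BP_PORT(bp));
#endif
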
int bnx2x_set_gpio_int(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
	/* The GPIO should be swapped if swap register is set and active */
	int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
			 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
	int gpio_shift = gpio_num +
			(gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
	u32 gpio_mask = (1 << gpio_shift);

	if (gpio_num > MISC_REGISTERS_GPIO_3) {
		BNX2X_ERR("Invalid GPIO %d\n", gpio_num);

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);

	gpio_reg = REG_RD(bp, MISC_REG_GPIO_INT);

	case MISC_REGISTERS_GPIO_INT_OUTPUT_CLR:
		DP(NETIF_MSG_LINK, "Clear GPIO INT %d (shift %d) -> "
		   "output low\n", gpio_num, gpio_shift);
		/* clear SET and set CLR */
		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
		gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);

	case MISC_REGISTERS_GPIO_INT_OUTPUT_SET:
		DP(NETIF_MSG_LINK, "Set GPIO INT %d (shift %d) -> "
		   "output high\n", gpio_num, gpio_shift);
		/* clear CLR and set SET */
		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
		gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);

	REG_WR(bp, MISC_REG_GPIO_INT, gpio_reg);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);

static int bnx2x_set_spio(struct bnx2x *bp, int spio_num, u32 mode)
	u32 spio_mask = (1 << spio_num);

	if ((spio_num < MISC_REGISTERS_SPIO_4) ||
	    (spio_num > MISC_REGISTERS_SPIO_7)) {
		BNX2X_ERR("Invalid SPIO %d\n", spio_num);

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
	/* read SPIO and mask except the float bits */
	spio_reg = (REG_RD(bp, MISC_REG_SPIO) & MISC_REGISTERS_SPIO_FLOAT);

	case MISC_REGISTERS_SPIO_OUTPUT_LOW:
		DP(NETIF_MSG_LINK, "Set SPIO %d -> output low\n", spio_num);
		/* clear FLOAT and set CLR */
		spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
		spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_CLR_POS);

	case MISC_REGISTERS_SPIO_OUTPUT_HIGH:
		DP(NETIF_MSG_LINK, "Set SPIO %d -> output high\n", spio_num);
		/* clear FLOAT and set SET */
		spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
		spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_SET_POS);

	case MISC_REGISTERS_SPIO_INPUT_HI_Z:
		DP(NETIF_MSG_LINK, "Set SPIO %d -> input\n", spio_num);
		/* set FLOAT */
		spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);

	REG_WR(bp, MISC_REG_SPIO, spio_reg);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);

static void bnx2x_calc_fc_adv(struct bnx2x *bp)
	switch (bp->link_vars.ieee_fc &
		MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK) {
	case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE:
		bp->port.advertising &= ~(ADVERTISED_Asym_Pause |

	case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH:
		bp->port.advertising |= (ADVERTISED_Asym_Pause |

	case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC:
		bp->port.advertising |= ADVERTISED_Asym_Pause;

		bp->port.advertising &= ~(ADVERTISED_Asym_Pause |

static void bnx2x_link_report(struct bnx2x *bp)
	if (bp->state == BNX2X_STATE_DISABLED) {
		netif_carrier_off(bp->dev);
		printk(KERN_ERR PFX "%s NIC Link is Down\n", bp->dev->name);

	if (bp->link_vars.link_up) {
		if (bp->state == BNX2X_STATE_OPEN)
			netif_carrier_on(bp->dev);
		printk(KERN_INFO PFX "%s NIC Link is Up, ", bp->dev->name);

		printk("%d Mbps ", bp->link_vars.line_speed);

		if (bp->link_vars.duplex == DUPLEX_FULL)
			printk("full duplex");
			printk("half duplex");

		if (bp->link_vars.flow_ctrl != BNX2X_FLOW_CTRL_NONE) {
			if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) {
				printk(", receive ");
				if (bp->link_vars.flow_ctrl &
					printk("& transmit ");
				printk(", transmit ");

			printk("flow control ON");

	} else { /* link_down */
		netif_carrier_off(bp->dev);
		printk(KERN_ERR PFX "%s NIC Link is Down\n", bp->dev->name);

static u8 bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode)
	if (!BP_NOMCP(bp)) {

		/* Initialize link parameters structure variables */
		/* It is recommended to turn off RX FC for jumbo frames
		   for better performance */
		if (bp->dev->mtu > 5000)
			bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_TX;
			bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;

		bnx2x_acquire_phy_lock(bp);

		if (load_mode == LOAD_DIAG)
			bp->link_params.loopback_mode = LOOPBACK_XGXS_10;

		rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars);

		bnx2x_release_phy_lock(bp);

		bnx2x_calc_fc_adv(bp);

		if (CHIP_REV_IS_SLOW(bp) && bp->link_vars.link_up) {
			bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
			bnx2x_link_report(bp);

	BNX2X_ERR("Bootcode is missing - can not initialize link\n");

static void bnx2x_link_set(struct bnx2x *bp)
	if (!BP_NOMCP(bp)) {
		bnx2x_acquire_phy_lock(bp);
		bnx2x_phy_init(&bp->link_params, &bp->link_vars);
		bnx2x_release_phy_lock(bp);

		bnx2x_calc_fc_adv(bp);
		BNX2X_ERR("Bootcode is missing - can not set link\n");

static void bnx2x__link_reset(struct bnx2x *bp)
	if (!BP_NOMCP(bp)) {
		bnx2x_acquire_phy_lock(bp);
		bnx2x_link_reset(&bp->link_params, &bp->link_vars, 1);
		bnx2x_release_phy_lock(bp);
		BNX2X_ERR("Bootcode is missing - can not reset link\n");

static u8 bnx2x_link_test(struct bnx2x *bp)

	bnx2x_acquire_phy_lock(bp);
	rc = bnx2x_test_link(&bp->link_params, &bp->link_vars);
	bnx2x_release_phy_lock(bp);

2265 static void bnx2x_init_port_minmax(struct bnx2x *bp)
2267 u32 r_param = bp->link_vars.line_speed / 8;
2268 u32 fair_periodic_timeout_usec;
2271 memset(&(bp->cmng.rs_vars), 0,
2272 sizeof(struct rate_shaping_vars_per_port));
2273 memset(&(bp->cmng.fair_vars), 0, sizeof(struct fairness_vars_per_port));
2275 /* 100 usec in SDM ticks = 25 since each tick is 4 usec */
2276 bp->cmng.rs_vars.rs_periodic_timeout = RS_PERIODIC_TIMEOUT_USEC / 4;
2278 /* this is the threshold below which no timer arming will occur;
2279 the 1.25 coefficient makes the threshold a little bigger than
2280 the real time, to compensate for timer inaccuracy */
2281 bp->cmng.rs_vars.rs_threshold =
2282 (RS_PERIODIC_TIMEOUT_USEC * r_param * 5) / 4;
2284 /* resolution of fairness timer */
2285 fair_periodic_timeout_usec = QM_ARB_BYTES / r_param;
2286 /* for 10G it is 1000 usec; for 1G it is 10000 usec */
2287 t_fair = T_FAIR_COEF / bp->link_vars.line_speed;
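2288 /* e.g. at 10G: r_param = 10000/8 = 1250 bytes/usec and t_fair = 1000 usec */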
2289 /* this is the threshold below which we won't arm the timer anymore */
2290 bp->cmng.fair_vars.fair_threshold = QM_ARB_BYTES;
2292 /* we multiply by 1e3/8 to get bytes/msec.
2293 We don't want the credits to exceed
2294 t_fair*FAIR_MEM (the algorithm resolution) */
2295 bp->cmng.fair_vars.upper_bound = r_param * t_fair * FAIR_MEM;
2296 /* since each tick is 4 usec */
2297 bp->cmng.fair_vars.fairness_timeout = fair_periodic_timeout_usec / 4;
2300 /* Calculates the sum of vn_min_rates.
2301 It's needed for further normalizing of the min_rates.
2302 Returns:
2303 sum of vn_min_rates, or
2305 0 - if all the min_rates are 0.
2306 In the latter case the fairness algorithm should be deactivated.
2307 If not all min_rates are zero then those that are zeroes will be set to 1. */
2309 static void bnx2x_calc_vn_weight_sum(struct bnx2x *bp)
2311 int all_zero = 1;
2312 int port = BP_PORT(bp);
2313 int vn;
2315 bp->vn_weight_sum = 0;
2316 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
2317 int func = 2*vn + port;
2318 u32 vn_cfg = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
2319 u32 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
2320 FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
2322 /* Skip hidden vns */
2323 if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE)
2324 continue;
2326 /* If min rate is zero - set it to 1 */
2327 if (!vn_min_rate)
2328 vn_min_rate = DEF_MIN_RATE;
2329 else
2330 all_zero = 0;
2332 bp->vn_weight_sum += vn_min_rate;
2335 /* ... only if all min rates are zeros - disable fairness */
2336 if (all_zero) {
2337 bp->cmng.flags.cmng_enables &=
2338 ~CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
2339 DP(NETIF_MSG_IFUP, "All MIN values are zeroes,"
2340 " fairness will be disabled\n");
2341 } else
2342 bp->cmng.flags.cmng_enables |=
2343 CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
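2344 /* Program one function's rate shaping (max) and fairness (min)
2345    parameters and store them to the XSTORM internal memory */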
2346 static void bnx2x_init_vn_minmax(struct bnx2x *bp, int func)
2348 struct rate_shaping_vars_per_vn m_rs_vn;
2349 struct fairness_vars_per_vn m_fair_vn;
2350 u32 vn_cfg = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
2351 u16 vn_min_rate, vn_max_rate;
2352 int i;
2354 /* If function is hidden - set min and max to zeroes */
2355 if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) {
2356 vn_min_rate = 0;
2357 vn_max_rate = 0;
2359 } else {
2360 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
2361 FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
2362 /* If min rate is zero - set it to 1 */
2363 if (!vn_min_rate)
2364 vn_min_rate = DEF_MIN_RATE;
2365 vn_max_rate = ((vn_cfg & FUNC_MF_CFG_MAX_BW_MASK) >>
2366 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
2369 "func %d: vn_min_rate %d vn_max_rate %d vn_weight_sum %d\n",
2370 func, vn_min_rate, vn_max_rate, bp->vn_weight_sum);
2372 memset(&m_rs_vn, 0, sizeof(struct rate_shaping_vars_per_vn));
2373 memset(&m_fair_vn, 0, sizeof(struct fairness_vars_per_vn));
2375 /* global vn counter - maximal Mbps for this vn */
2376 m_rs_vn.vn_counter.rate = vn_max_rate;
2378 /* quota - number of bytes transmitted in this period */
2379 m_rs_vn.vn_counter.quota =
2380 (vn_max_rate * RS_PERIODIC_TIMEOUT_USEC) / 8;
2382 if (bp->vn_weight_sum) {
2383 /* credit for each period of the fairness algorithm:
2384 number of bytes in T_FAIR (the vns share the port rate).
2385 vn_weight_sum should not be larger than 10000, thus
2386 T_FAIR_COEF / (8 * vn_weight_sum) will always be greater
2387 than zero */
2388 m_fair_vn.vn_credit_delta =
2389 max((u32)(vn_min_rate * (T_FAIR_COEF /
2390 (8 * bp->vn_weight_sum))),
2391 (u32)(bp->cmng.fair_vars.fair_threshold * 2));
2392 DP(NETIF_MSG_IFUP, "m_fair_vn.vn_credit_delta=%d\n",
2393 m_fair_vn.vn_credit_delta);
2396 /* Store it to internal memory */
2397 for (i = 0; i < sizeof(struct rate_shaping_vars_per_vn)/4; i++)
2398 REG_WR(bp, BAR_XSTRORM_INTMEM +
2399 XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func) + i * 4,
2400 ((u32 *)(&m_rs_vn))[i]);
2402 for (i = 0; i < sizeof(struct fairness_vars_per_vn)/4; i++)
2403 REG_WR(bp, BAR_XSTRORM_INTMEM +
2404 XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func) + i * 4,
2405 ((u32 *)(&m_fair_vn))[i]);
2409 /* This function is called upon link interrupt */
2410 static void bnx2x_link_attn(struct bnx2x *bp)
2412 /* Make sure that we are synced with the current statistics */
2413 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2415 bnx2x_link_update(&bp->link_params, &bp->link_vars);
2417 if (bp->link_vars.link_up) {
2419 /* dropless flow control */
2420 if (CHIP_IS_E1H(bp) && bp->dropless_fc) {
2421 int port = BP_PORT(bp);
2422 u32 pause_enabled = 0;
2424 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
2425 pause_enabled = 1;
2427 REG_WR(bp, BAR_USTRORM_INTMEM +
2428 USTORM_ETH_PAUSE_ENABLED_OFFSET(port),
2429 pause_enabled);
2432 if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
2433 struct host_port_stats *pstats;
2435 pstats = bnx2x_sp(bp, port_stats);
2436 /* reset old bmac stats */
2437 memset(&(pstats->mac_stx[0]), 0,
2438 sizeof(struct mac_stx));
2440 if ((bp->state == BNX2X_STATE_OPEN) ||
2441 (bp->state == BNX2X_STATE_DISABLED))
2442 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2445 /* indicate link status */
2446 bnx2x_link_report(bp);
2448 if (IS_E1HMF(bp)) {
2449 int port = BP_PORT(bp);
2450 int func;
2451 int vn;
2453 /* Set the attention towards other drivers on the same port */
2454 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
2455 if (vn == BP_E1HVN(bp))
2456 continue;
2458 func = ((vn << 1) | port);
2459 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
2460 (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
2463 if (bp->link_vars.link_up) {
2464 int i;
2466 /* Init rate shaping and fairness contexts */
2467 bnx2x_init_port_minmax(bp);
2469 for (vn = VN_0; vn < E1HVN_MAX; vn++)
2470 bnx2x_init_vn_minmax(bp, 2*vn + port);
2472 /* Store it to internal memory */
2473 for (i = 0;
2474 i < sizeof(struct cmng_struct_per_port) / 4; i++)
2475 REG_WR(bp, BAR_XSTRORM_INTMEM +
2476 XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i*4,
2477 ((u32 *)(&bp->cmng))[i]);
2482 static void bnx2x__link_status_update(struct bnx2x *bp)
2484 int func = BP_FUNC(bp);
2486 if (bp->state != BNX2X_STATE_OPEN)
2487 return;
2489 bnx2x_link_status_update(&bp->link_params, &bp->link_vars);
2491 if (bp->link_vars.link_up)
2492 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2493 else
2494 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2496 bnx2x_calc_vn_weight_sum(bp);
2498 /* indicate link status */
2499 bnx2x_link_report(bp);
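2500 /* called when this driver instance becomes the port management
2501    function (PMF): re-enable NIG attentions and take over statistics */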
2502 static void bnx2x_pmf_update(struct bnx2x *bp)
2504 int port = BP_PORT(bp);
2505 u32 val;
2507 bp->port.pmf = 1;
2508 DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
2510 /* enable nig attention */
2511 val = (0xff0f | (1 << (BP_E1HVN(bp) + 4)));
2512 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
2513 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
2515 bnx2x_stats_handle(bp, STATS_EVENT_PMF);
2523 * General service functions
2526 /* send the MCP a request, block until there is a reply */
2527 u32 bnx2x_fw_command(struct bnx2x *bp, u32 command)
2529 int func = BP_FUNC(bp);
2530 u32 seq = ++bp->fw_seq;
2531 u32 rc = 0;
2532 u32 cnt = 1;
2533 u8 delay = CHIP_REV_IS_SLOW(bp) ? 100 : 10;
2535 SHMEM_WR(bp, func_mb[func].drv_mb_header, (command | seq));
2536 DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB\n", (command | seq));
2539 /* let the FW do its magic ... */
2540 do {
2541 msleep(delay);
2542 rc = SHMEM_RD(bp, func_mb[func].fw_mb_header);
2544 /* Give the FW up to 2 seconds (200*10ms) */
2545 } while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 200));
2547 DP(BNX2X_MSG_MCP, "[after %d ms] read (%x) seq is (%x) from FW MB\n",
2548 cnt*delay, rc, seq);
2550 /* is this a reply to our command? */
2551 if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK))
2552 rc &= FW_MSG_CODE_MASK;
2553 else {
2554 /* FW BUG! */
2555 BNX2X_ERR("FW failed to respond!\n");
2556 bnx2x_fw_dump(bp);
2557 rc = 0;
2559 return rc;
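2561 /* a return of 0 therefore reads as "no valid reply from the MCP";
2562    callers compare the result against the expected FW_MSG_CODE_* value */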
2563 static void bnx2x_set_storm_rx_mode(struct bnx2x *bp);
2564 static void bnx2x_set_eth_mac_addr_e1h(struct bnx2x *bp, int set);
2565 static void bnx2x_set_rx_mode(struct net_device *dev);
2567 static void bnx2x_e1h_disable(struct bnx2x *bp)
2569 int port = BP_PORT(bp);
2570 int i;
2572 bp->rx_mode = BNX2X_RX_MODE_NONE;
2573 bnx2x_set_storm_rx_mode(bp);
2575 netif_tx_disable(bp->dev);
2576 bp->dev->trans_start = jiffies; /* prevent tx timeout */
2578 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
2580 bnx2x_set_eth_mac_addr_e1h(bp, 0);
2582 for (i = 0; i < MC_HASH_SIZE; i++)
2583 REG_WR(bp, MC_HASH_OFFSET(bp, i), 0);
2585 netif_carrier_off(bp->dev);
2588 static void bnx2x_e1h_enable(struct bnx2x *bp)
2590 int port = BP_PORT(bp);
2592 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
2594 bnx2x_set_eth_mac_addr_e1h(bp, 1);
2596 /* Tx queues should only be re-enabled */
2597 netif_tx_wake_all_queues(bp->dev);
2599 /* Initialize the receive filter. */
2600 bnx2x_set_rx_mode(bp->dev);
2603 static void bnx2x_update_min_max(struct bnx2x *bp)
2605 int port = BP_PORT(bp);
2606 int vn, i;
2608 /* Init rate shaping and fairness contexts */
2609 bnx2x_init_port_minmax(bp);
2611 bnx2x_calc_vn_weight_sum(bp);
2613 for (vn = VN_0; vn < E1HVN_MAX; vn++)
2614 bnx2x_init_vn_minmax(bp, 2*vn + port);
2616 if (bp->link_vars.link_up) {
2617 int func;
2619 /* Set the attention towards other drivers on the same port */
2620 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
2621 if (vn == BP_E1HVN(bp))
2622 continue;
2624 func = ((vn << 1) | port);
2625 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
2626 (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
2629 /* Store it to internal memory */
2630 for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++)
2631 REG_WR(bp, BAR_XSTRORM_INTMEM +
2632 XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i*4,
2633 ((u32 *)(&bp->cmng))[i]);
2637 static void bnx2x_dcc_event(struct bnx2x *bp, u32 dcc_event)
2639 DP(BNX2X_MSG_MCP, "dcc_event 0x%x\n", dcc_event);
2641 if (dcc_event & DRV_STATUS_DCC_DISABLE_ENABLE_PF) {
2643 if (bp->mf_config & FUNC_MF_CFG_FUNC_DISABLED) {
2644 DP(NETIF_MSG_IFDOWN, "mf_cfg function disabled\n");
2645 bp->state = BNX2X_STATE_DISABLED;
2647 bnx2x_e1h_disable(bp);
2648 } else {
2649 DP(NETIF_MSG_IFUP, "mf_cfg function enabled\n");
2650 bp->state = BNX2X_STATE_OPEN;
2652 bnx2x_e1h_enable(bp);
2654 dcc_event &= ~DRV_STATUS_DCC_DISABLE_ENABLE_PF;
2656 if (dcc_event & DRV_STATUS_DCC_BANDWIDTH_ALLOCATION) {
2658 bnx2x_update_min_max(bp);
2659 dcc_event &= ~DRV_STATUS_DCC_BANDWIDTH_ALLOCATION;
2662 /* Report results to MCP */
2663 if (dcc_event)
2664 bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_FAILURE);
2665 else
2666 bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_OK);
2669 /* must be called under the spq lock */
2670 static inline struct eth_spe *bnx2x_sp_get_next(struct bnx2x *bp)
2672 struct eth_spe *next_spe = bp->spq_prod_bd;
2674 if (bp->spq_prod_bd == bp->spq_last_bd) {
2675 bp->spq_prod_bd = bp->spq;
2676 bp->spq_prod_idx = 0;
2677 DP(NETIF_MSG_TIMER, "end of spq\n");
2678 } else {
2679 bp->spq_prod_bd++;
2680 bp->spq_prod_idx++;
2683 return next_spe;
2685 /* must be called under the spq lock */
2686 static inline void bnx2x_sp_prod_update(struct bnx2x *bp)
2688 int func = BP_FUNC(bp);
2690 /* Make sure that BD data is updated before writing the producer */
2691 wmb();
2693 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func),
2694 bp->spq_prod_idx);
2696 mmiowb();
2698 /* the slow path queue is odd since completions arrive on the fastpath ring */
2699 static int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
2700 u32 data_hi, u32 data_lo, int common)
2702 struct eth_spe *spe;
2704 DP(BNX2X_MSG_SP/*NETIF_MSG_TIMER*/,
2705 "SPQE (%x:%x) command %d hw_cid %x data (%x:%x) left %x\n",
2706 (u32)U64_HI(bp->spq_mapping), (u32)(U64_LO(bp->spq_mapping) +
2707 (void *)bp->spq_prod_bd - (void *)bp->spq), command,
2708 HW_CID(bp, cid), data_hi, data_lo, bp->spq_left);
2710 #ifdef BNX2X_STOP_ON_ERROR
2711 if (unlikely(bp->panic))
2715 spin_lock_bh(&bp->spq_lock);
2717 if (!bp->spq_left) {
2718 BNX2X_ERR("BUG! SPQ ring full!\n");
2719 spin_unlock_bh(&bp->spq_lock);
2720 bnx2x_panic();
2721 return -EBUSY;
2724 spe = bnx2x_sp_get_next(bp);
2726 /* CID needs port number to be encoded in it */
2727 spe->hdr.conn_and_cmd_data =
2728 cpu_to_le32(((command << SPE_HDR_CMD_ID_SHIFT) |
2729 HW_CID(bp, cid)));
2730 spe->hdr.type = cpu_to_le16(ETH_CONNECTION_TYPE);
2731 if (common)
2732 spe->hdr.type |=
2733 cpu_to_le16((1 << SPE_HDR_COMMON_RAMROD_SHIFT));
2735 spe->data.mac_config_addr.hi = cpu_to_le32(data_hi);
2736 spe->data.mac_config_addr.lo = cpu_to_le32(data_lo);
2738 bp->spq_left--;
2740 bnx2x_sp_prod_update(bp);
2741 spin_unlock_bh(&bp->spq_lock);
2743 return 0;
2745 /* acquire split MCP access lock register */
2746 static int bnx2x_acquire_alr(struct bnx2x *bp)
2748 u32 i, j, val;
2749 int rc = 0;
2751 might_sleep();
2752 i = 100;
2753 for (j = 0; j < i*10; j++) {
2754 val = (1UL << 31);
2755 REG_WR(bp, GRCBASE_MCP + 0x9c, val);
2756 val = REG_RD(bp, GRCBASE_MCP + 0x9c);
2757 if (val & (1L << 31))
2758 break;
2760 msleep(5);
2762 if (!(val & (1L << 31))) {
2763 BNX2X_ERR("Cannot acquire MCP access lock register\n");
2764 rc = -EBUSY;
2767 return rc;
2770 /* release split MCP access lock register */
2771 static void bnx2x_release_alr(struct bnx2x *bp)
2773 u32 val = 0;
2775 REG_WR(bp, GRCBASE_MCP + 0x9c, val);
2778 static inline u16 bnx2x_update_dsb_idx(struct bnx2x *bp)
2780 struct host_def_status_block *def_sb = bp->def_status_blk;
2781 u16 rc = 0;
2783 barrier(); /* status block is written to by the chip */
2784 if (bp->def_att_idx != def_sb->atten_status_block.attn_bits_index) {
2785 bp->def_att_idx = def_sb->atten_status_block.attn_bits_index;
2786 rc |= 1;
2788 if (bp->def_c_idx != def_sb->c_def_status_block.status_block_index) {
2789 bp->def_c_idx = def_sb->c_def_status_block.status_block_index;
2790 rc |= 2;
2792 if (bp->def_u_idx != def_sb->u_def_status_block.status_block_index) {
2793 bp->def_u_idx = def_sb->u_def_status_block.status_block_index;
2794 rc |= 4;
2796 if (bp->def_x_idx != def_sb->x_def_status_block.status_block_index) {
2797 bp->def_x_idx = def_sb->x_def_status_block.status_block_index;
2798 rc |= 8;
2800 if (bp->def_t_idx != def_sb->t_def_status_block.status_block_index) {
2801 bp->def_t_idx = def_sb->t_def_status_block.status_block_index;
2802 rc |= 16;
2805 return rc;
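2806 /* the returned mask (1 attn, 2 CSTORM, 4 USTORM, 8 XSTORM, 16 TSTORM)
2807    tells bnx2x_sp_task which status block indices actually changed */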
2808 * slow path service functions
2811 static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
2813 int port = BP_PORT(bp);
2814 u32 hc_addr = (HC_REG_COMMAND_REG + port*32 +
2815 COMMAND_REG_ATTN_BITS_SET);
2816 u32 aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
2817 MISC_REG_AEU_MASK_ATTN_FUNC_0;
2818 u32 nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 :
2819 NIG_REG_MASK_INTERRUPT_PORT0;
2820 u32 aeu_mask;
2821 u32 nig_mask = 0;
2823 if (bp->attn_state & asserted)
2824 BNX2X_ERR("IGU ERROR\n");
2826 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2827 aeu_mask = REG_RD(bp, aeu_addr);
2829 DP(NETIF_MSG_HW, "aeu_mask %x newly asserted %x\n",
2830 aeu_mask, asserted);
2831 aeu_mask &= ~(asserted & 0xff);
2832 DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
2834 REG_WR(bp, aeu_addr, aeu_mask);
2835 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2837 DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
2838 bp->attn_state |= asserted;
2839 DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
2841 if (asserted & ATTN_HARD_WIRED_MASK) {
2842 if (asserted & ATTN_NIG_FOR_FUNC) {
2844 bnx2x_acquire_phy_lock(bp);
2846 /* save nig interrupt mask */
2847 nig_mask = REG_RD(bp, nig_int_mask_addr);
2848 REG_WR(bp, nig_int_mask_addr, 0);
2850 bnx2x_link_attn(bp);
2852 /* handle unicore attn? */
2854 if (asserted & ATTN_SW_TIMER_4_FUNC)
2855 DP(NETIF_MSG_HW, "ATTN_SW_TIMER_4_FUNC!\n");
2857 if (asserted & GPIO_2_FUNC)
2858 DP(NETIF_MSG_HW, "GPIO_2_FUNC!\n");
2860 if (asserted & GPIO_3_FUNC)
2861 DP(NETIF_MSG_HW, "GPIO_3_FUNC!\n");
2863 if (asserted & GPIO_4_FUNC)
2864 DP(NETIF_MSG_HW, "GPIO_4_FUNC!\n");
2867 if (asserted & ATTN_GENERAL_ATTN_1) {
2868 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_1!\n");
2869 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_1, 0x0);
2871 if (asserted & ATTN_GENERAL_ATTN_2) {
2872 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_2!\n");
2873 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_2, 0x0);
2875 if (asserted & ATTN_GENERAL_ATTN_3) {
2876 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_3!\n");
2877 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_3, 0x0);
2880 if (asserted & ATTN_GENERAL_ATTN_4) {
2881 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_4!\n");
2882 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_4, 0x0);
2884 if (asserted & ATTN_GENERAL_ATTN_5) {
2885 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_5!\n");
2886 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_5, 0x0);
2888 if (asserted & ATTN_GENERAL_ATTN_6) {
2889 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_6!\n");
2890 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_6, 0x0);
2894 } /* if hardwired */
2896 DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
2897 asserted, hc_addr);
2898 REG_WR(bp, hc_addr, asserted);
2900 /* now set back the mask */
2901 if (asserted & ATTN_NIG_FOR_FUNC) {
2902 REG_WR(bp, nig_int_mask_addr, nig_mask);
2903 bnx2x_release_phy_lock(bp);
2907 static inline void bnx2x_fan_failure(struct bnx2x *bp)
2909 int port = BP_PORT(bp);
2911 /* mark the failure */
2912 bp->link_params.ext_phy_config &= ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
2913 bp->link_params.ext_phy_config |= PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE;
2914 SHMEM_WR(bp, dev_info.port_hw_config[port].external_phy_config,
2915 bp->link_params.ext_phy_config);
2917 /* log the failure */
2918 printk(KERN_ERR PFX "Fan Failure on Network Controller %s has caused"
2919 " the driver to shut down the card to prevent permanent"
2920 " damage. Please contact Dell Support for assistance\n",
2921 bp->dev->name);
2924 static inline void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
2926 int port = BP_PORT(bp);
2927 int reg_offset;
2928 u32 val, swap_val, swap_override;
2930 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
2931 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
2933 if (attn & AEU_INPUTS_ATTN_BITS_SPIO5) {
2935 val = REG_RD(bp, reg_offset);
2936 val &= ~AEU_INPUTS_ATTN_BITS_SPIO5;
2937 REG_WR(bp, reg_offset, val);
2939 BNX2X_ERR("SPIO5 hw attention\n");
2941 /* Fan failure attention */
2942 switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
2943 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
2944 /* Low power mode is controlled by GPIO 2 */
2945 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
2946 MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
2947 /* The PHY reset is controlled by GPIO 1 */
2948 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
2949 MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
2952 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
2953 /* The PHY reset is controlled by GPIO 1 */
2954 /* fake the port number to cancel the swap done in
2955 set_gpio() */
2956 swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
2957 swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
2958 port = (swap_val && swap_override) ^ 1;
2959 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
2960 MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
2966 bnx2x_fan_failure(bp);
2969 if (attn & (AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 |
2970 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1)) {
2971 bnx2x_acquire_phy_lock(bp);
2972 bnx2x_handle_module_detect_int(&bp->link_params);
2973 bnx2x_release_phy_lock(bp);
2976 if (attn & HW_INTERRUT_ASSERT_SET_0) {
2978 val = REG_RD(bp, reg_offset);
2979 val &= ~(attn & HW_INTERRUT_ASSERT_SET_0);
2980 REG_WR(bp, reg_offset, val);
2982 BNX2X_ERR("FATAL HW block attention set0 0x%x\n",
2983 (u32)(attn & HW_INTERRUT_ASSERT_SET_0));
2984 bnx2x_panic();
2988 static inline void bnx2x_attn_int_deasserted1(struct bnx2x *bp, u32 attn)
2992 if (attn & AEU_INPUTS_ATTN_BITS_DOORBELLQ_HW_INTERRUPT) {
2994 val = REG_RD(bp, DORQ_REG_DORQ_INT_STS_CLR);
2995 BNX2X_ERR("DB hw attention 0x%x\n", val);
2996 /* DORQ discard attention */
2997 if (val & 0x2)
2998 BNX2X_ERR("FATAL error from DORQ\n");
3001 if (attn & HW_INTERRUT_ASSERT_SET_1) {
3003 int port = BP_PORT(bp);
3004 int reg_offset;
3006 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 :
3007 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1);
3009 val = REG_RD(bp, reg_offset);
3010 val &= ~(attn & HW_INTERRUT_ASSERT_SET_1);
3011 REG_WR(bp, reg_offset, val);
3013 BNX2X_ERR("FATAL HW block attention set1 0x%x\n",
3014 (u32)(attn & HW_INTERRUT_ASSERT_SET_1));
3015 bnx2x_panic();
3019 static inline void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn)
3023 if (attn & AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) {
3025 val = REG_RD(bp, CFC_REG_CFC_INT_STS_CLR);
3026 BNX2X_ERR("CFC hw attention 0x%x\n", val);
3027 /* CFC error attention */
3028 if (val & 0x2)
3029 BNX2X_ERR("FATAL error from CFC\n");
3032 if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) {
3034 val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_0);
3035 BNX2X_ERR("PXP hw attention 0x%x\n", val);
3036 /* RQ_USDMDP_FIFO_OVERFLOW */
3037 if (val & 0x18000)
3038 BNX2X_ERR("FATAL error from PXP\n");
3041 if (attn & HW_INTERRUT_ASSERT_SET_2) {
3043 int port = BP_PORT(bp);
3044 int reg_offset;
3046 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 :
3047 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2);
3049 val = REG_RD(bp, reg_offset);
3050 val &= ~(attn & HW_INTERRUT_ASSERT_SET_2);
3051 REG_WR(bp, reg_offset, val);
3053 BNX2X_ERR("FATAL HW block attention set2 0x%x\n",
3054 (u32)(attn & HW_INTERRUT_ASSERT_SET_2));
3055 bnx2x_panic();
3059 static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
3061 u32 val;
3063 if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) {
3065 if (attn & BNX2X_PMF_LINK_ASSERT) {
3066 int func = BP_FUNC(bp);
3068 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
3069 bp->mf_config = SHMEM_RD(bp,
3070 mf_cfg.func_mf_config[func].config);
3071 val = SHMEM_RD(bp, func_mb[func].drv_status);
3072 if (val & DRV_STATUS_DCC_EVENT_MASK)
3074 (val & DRV_STATUS_DCC_EVENT_MASK));
3075 bnx2x__link_status_update(bp);
3076 if ((bp->port.pmf == 0) && (val & DRV_STATUS_PMF))
3077 bnx2x_pmf_update(bp);
3079 } else if (attn & BNX2X_MC_ASSERT_BITS) {
3081 BNX2X_ERR("MC assert!\n");
3082 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_10, 0);
3083 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_9, 0);
3084 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_8, 0);
3085 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_7, 0);
3088 } else if (attn & BNX2X_MCP_ASSERT) {
3090 BNX2X_ERR("MCP assert!\n");
3091 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_11, 0);
3092 bnx2x_fw_dump(bp);
3094 } else
3095 BNX2X_ERR("Unknown HW assert! (attn 0x%x)\n", attn);
3098 if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) {
3099 BNX2X_ERR("LATCHED attention 0x%08x (masked)\n", attn);
3100 if (attn & BNX2X_GRC_TIMEOUT) {
3101 val = CHIP_IS_E1H(bp) ?
3102 REG_RD(bp, MISC_REG_GRC_TIMEOUT_ATTN) : 0;
3103 BNX2X_ERR("GRC time-out 0x%08x\n", val);
3105 if (attn & BNX2X_GRC_RSV) {
3106 val = CHIP_IS_E1H(bp) ?
3107 REG_RD(bp, MISC_REG_GRC_RSV_ATTN) : 0;
3108 BNX2X_ERR("GRC reserved 0x%08x\n", val);
3110 REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff);
3114 static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
3116 struct attn_route attn;
3117 struct attn_route group_mask;
3118 int port = BP_PORT(bp);
3119 int index;
3120 u32 reg_addr;
3121 u32 val;
3122 u32 aeu_mask;
3124 /* need to take HW lock because MCP or other port might also
3125 try to handle this event */
3126 bnx2x_acquire_alr(bp);
3128 attn.sig[0] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
3129 attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
3130 attn.sig[2] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
3131 attn.sig[3] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
3132 DP(NETIF_MSG_HW, "attn: %08x %08x %08x %08x\n",
3133 attn.sig[0], attn.sig[1], attn.sig[2], attn.sig[3]);
3135 for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
3136 if (deasserted & (1 << index)) {
3137 group_mask = bp->attn_group[index];
3139 DP(NETIF_MSG_HW, "group[%d]: %08x %08x %08x %08x\n",
3140 index, group_mask.sig[0], group_mask.sig[1],
3141 group_mask.sig[2], group_mask.sig[3]);
3143 bnx2x_attn_int_deasserted3(bp,
3144 attn.sig[3] & group_mask.sig[3]);
3145 bnx2x_attn_int_deasserted1(bp,
3146 attn.sig[1] & group_mask.sig[1]);
3147 bnx2x_attn_int_deasserted2(bp,
3148 attn.sig[2] & group_mask.sig[2]);
3149 bnx2x_attn_int_deasserted0(bp,
3150 attn.sig[0] & group_mask.sig[0]);
3152 if ((attn.sig[0] & group_mask.sig[0] &
3153 HW_PRTY_ASSERT_SET_0) ||
3154 (attn.sig[1] & group_mask.sig[1] &
3155 HW_PRTY_ASSERT_SET_1) ||
3156 (attn.sig[2] & group_mask.sig[2] &
3157 HW_PRTY_ASSERT_SET_2))
3158 BNX2X_ERR("FATAL HW block parity attention\n");
3162 bnx2x_release_alr(bp);
3164 reg_addr = (HC_REG_COMMAND_REG + port*32 + COMMAND_REG_ATTN_BITS_CLR);
3166 val = ~deasserted;
3167 DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
3168 val, reg_addr);
3169 REG_WR(bp, reg_addr, val);
3171 if (~bp->attn_state & deasserted)
3172 BNX2X_ERR("IGU ERROR\n");
3174 reg_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
3175 MISC_REG_AEU_MASK_ATTN_FUNC_0;
3177 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
3178 aeu_mask = REG_RD(bp, reg_addr);
3180 DP(NETIF_MSG_HW, "aeu_mask %x newly deasserted %x\n",
3181 aeu_mask, deasserted);
3182 aeu_mask |= (deasserted & 0xff);
3183 DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
3185 REG_WR(bp, reg_addr, aeu_mask);
3186 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
3188 DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
3189 bp->attn_state &= ~deasserted;
3190 DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
3193 static void bnx2x_attn_int(struct bnx2x *bp)
3195 /* read local copy of bits */
3196 u32 attn_bits = le32_to_cpu(bp->def_status_blk->atten_status_block.
3197 attn_bits);
3198 u32 attn_ack = le32_to_cpu(bp->def_status_blk->atten_status_block.
3199 attn_bits_ack);
3200 u32 attn_state = bp->attn_state;
3202 /* look for changed bits */
3203 u32 asserted = attn_bits & ~attn_ack & ~attn_state;
3204 u32 deasserted = ~attn_bits & attn_ack & attn_state;
3207 "attn_bits %x attn_ack %x asserted %x deasserted %x\n",
3208 attn_bits, attn_ack, asserted, deasserted);
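3209 /* a bit on which attn_bits and attn_ack agree must also match attn_state */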
3210 if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state))
3211 BNX2X_ERR("BAD attention state\n");
3213 /* handle bits that were raised */
3214 if (asserted)
3215 bnx2x_attn_int_asserted(bp, asserted);
3217 if (deasserted)
3218 bnx2x_attn_int_deasserted(bp, deasserted);
3221 static void bnx2x_sp_task(struct work_struct *work)
3223 struct bnx2x *bp = container_of(work, struct bnx2x, sp_task.work);
3224 u16 status;
3227 /* Return here if interrupt is disabled */
3228 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
3229 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
3230 return;
3233 status = bnx2x_update_dsb_idx(bp);
3234 /* if (status == 0) */
3235 /* BNX2X_ERR("spurious slowpath interrupt!\n"); */
3237 DP(NETIF_MSG_INTR, "got a slowpath interrupt (updated %x)\n", status);
3239 /* HW attentions */
3240 if (status & 0x1)
3241 bnx2x_attn_int(bp);
3243 bnx2x_ack_sb(bp, DEF_SB_ID, ATTENTION_ID, le16_to_cpu(bp->def_att_idx),
3244 IGU_INT_NOP, 1);
3245 bnx2x_ack_sb(bp, DEF_SB_ID, USTORM_ID, le16_to_cpu(bp->def_u_idx),
3246 IGU_INT_NOP, 1);
3247 bnx2x_ack_sb(bp, DEF_SB_ID, CSTORM_ID, le16_to_cpu(bp->def_c_idx),
3248 IGU_INT_NOP, 1);
3249 bnx2x_ack_sb(bp, DEF_SB_ID, XSTORM_ID, le16_to_cpu(bp->def_x_idx),
3250 IGU_INT_NOP, 1);
3251 bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, le16_to_cpu(bp->def_t_idx),
3252 IGU_INT_ENABLE, 1);
3256 static irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
3258 struct net_device *dev = dev_instance;
3259 struct bnx2x *bp = netdev_priv(dev);
3261 /* Return here if interrupt is disabled */
3262 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
3263 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
3264 return IRQ_HANDLED;
3267 bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, 0, IGU_INT_DISABLE, 0);
3269 #ifdef BNX2X_STOP_ON_ERROR
3270 if (unlikely(bp->panic))
3271 return IRQ_HANDLED;
3272 #endif
3274 #ifdef BCM_CNIC
3276 struct cnic_ops *c_ops;
3278 rcu_read_lock();
3279 c_ops = rcu_dereference(bp->cnic_ops);
3280 if (c_ops)
3281 c_ops->cnic_handler(bp->cnic_data, NULL);
3282 rcu_read_unlock();
3284 #endif
3285 queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
3287 return IRQ_HANDLED;
3290 /* end of slow path */
3294 /****************************************************************************
3296 ****************************************************************************/
3298 /* sum[hi:lo] += add[hi:lo] */
3299 #define ADD_64(s_hi, a_hi, s_lo, a_lo) \
3300 do { \
3301 s_lo += a_lo; \
3302 s_hi += a_hi + ((s_lo < a_lo) ? 1 : 0); \
3303 } while (0)
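3304 /* the (s_lo < a_lo) test after the add detects unsigned wrap-around, i.e. the carry */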
3305 /* difference = minuend - subtrahend */
3306 #define DIFF_64(d_hi, m_hi, s_hi, d_lo, m_lo, s_lo) \
3307 do { \
3308 if (m_lo < s_lo) { \
3309 /* underflow */ \
3310 d_hi = m_hi - s_hi; \
3311 if (d_hi > 0) { \
3312 /* we can 'loan' 1 */ \
3313 d_hi--; \
3314 d_lo = m_lo + (UINT_MAX - s_lo) + 1; \
3315 } else { \
3316 /* m_hi <= s_hi */ \
3317 d_hi = 0; \
3318 d_lo = 0; \
3319 } \
3320 } else { \
3321 /* m_lo >= s_lo */ \
3322 if (m_hi < s_hi) { \
3323 d_hi = 0; \
3324 d_lo = 0; \
3325 } else { \
3326 /* m_hi >= s_hi */ \
3327 d_hi = m_hi - s_hi; \
3328 d_lo = m_lo - s_lo; \
3329 } \
3330 } \
3331 } while (0)
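3332 /* e.g. 0x1:0x0 minus 0x0:0x1 borrows from the high dword: d_hi = 0, d_lo = UINT_MAX */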
3333 #define UPDATE_STAT64(s, t) \
3335 DIFF_64(diff.hi, new->s##_hi, pstats->mac_stx[0].t##_hi, \
3336 diff.lo, new->s##_lo, pstats->mac_stx[0].t##_lo); \
3337 pstats->mac_stx[0].t##_hi = new->s##_hi; \
3338 pstats->mac_stx[0].t##_lo = new->s##_lo; \
3339 ADD_64(pstats->mac_stx[1].t##_hi, diff.hi, \
3340 pstats->mac_stx[1].t##_lo, diff.lo); \
3343 #define UPDATE_STAT64_NIG(s, t) \
3345 DIFF_64(diff.hi, new->s##_hi, old->s##_hi, \
3346 diff.lo, new->s##_lo, old->s##_lo); \
3347 ADD_64(estats->t##_hi, diff.hi, \
3348 estats->t##_lo, diff.lo); \
3351 /* sum[hi:lo] += add */
3352 #define ADD_EXTEND_64(s_hi, s_lo, a) \
3353 do { \
3354 s_lo += a; \
3355 s_hi += (s_lo < a) ? 1 : 0; \
3356 } while (0)
3358 #define UPDATE_EXTEND_STAT(s) \
3359 do { \
3360 ADD_EXTEND_64(pstats->mac_stx[1].s##_hi, \
3361 pstats->mac_stx[1].s##_lo, \
3362 new->s); \
3363 } while (0)
3365 #define UPDATE_EXTEND_TSTAT(s, t) \
3367 diff = le32_to_cpu(tclient->s) - le32_to_cpu(old_tclient->s); \
3368 old_tclient->s = tclient->s; \
3369 ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3372 #define UPDATE_EXTEND_USTAT(s, t) \
3374 diff = le32_to_cpu(uclient->s) - le32_to_cpu(old_uclient->s); \
3375 old_uclient->s = uclient->s; \
3376 ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3379 #define UPDATE_EXTEND_XSTAT(s, t) \
3381 diff = le32_to_cpu(xclient->s) - le32_to_cpu(old_xclient->s); \
3382 old_xclient->s = xclient->s; \
3383 ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3386 /* minuend -= subtrahend */
3387 #define SUB_64(m_hi, s_hi, m_lo, s_lo) \
3389 DIFF_64(m_hi, m_hi, s_hi, m_lo, m_lo, s_lo); \
3392 /* minuend[hi:lo] -= subtrahend */
3393 #define SUB_EXTEND_64(m_hi, m_lo, s) \
3395 SUB_64(m_hi, 0, m_lo, s); \
3398 #define SUB_EXTEND_USTAT(s, t) \
3400 diff = le32_to_cpu(uclient->s) - le32_to_cpu(old_uclient->s); \
3401 SUB_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3405 * General service functions
3408 static inline long bnx2x_hilo(u32 *hiref)
3410 u32 lo = *(hiref + 1);
3411 #if (BITS_PER_LONG == 64)
3412 u32 hi = *hiref;
3414 return HILO_U64(hi, lo);
3415 #else
3416 return lo;
3417 #endif
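3419 /* e.g. on 64-bit, hi=1 lo=2 yields 0x100000002; on 32-bit only the
3420    low dword is returned */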
3421 * Init service functions
3424 static void bnx2x_storm_stats_post(struct bnx2x *bp)
3426 if (!bp->stats_pending) {
3427 struct eth_query_ramrod_data ramrod_data = {0};
3428 int i, rc;
3430 ramrod_data.drv_counter = bp->stats_counter++;
3431 ramrod_data.collect_port = bp->port.pmf ? 1 : 0;
3432 for_each_queue(bp, i)
3433 ramrod_data.ctr_id_vector |= (1 << bp->fp[i].cl_id);
3435 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_STAT_QUERY, 0,
3436 ((u32 *)&ramrod_data)[1],
3437 ((u32 *)&ramrod_data)[0], 0);
3439 /* stats ramrod has its own slot on the spq */
3440 if (!rc)
3441 bp->stats_pending = 1;
3446 static void bnx2x_hw_stats_post(struct bnx2x *bp)
3448 struct dmae_command *dmae = &bp->stats_dmae;
3449 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3451 *stats_comp = DMAE_COMP_VAL;
3452 if (CHIP_REV_IS_SLOW(bp))
3453 return;
3456 if (bp->executer_idx) {
3457 int loader_idx = PMF_DMAE_C(bp);
3459 memset(dmae, 0, sizeof(struct dmae_command));
3461 dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3462 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3463 DMAE_CMD_DST_RESET |
3464 #ifdef __BIG_ENDIAN
3465 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3466 #else
3467 DMAE_CMD_ENDIANITY_DW_SWAP |
3468 #endif
3469 (BP_PORT(bp) ? DMAE_CMD_PORT_1 :
3471 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3472 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, dmae[0]));
3473 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, dmae[0]));
3474 dmae->dst_addr_lo = (DMAE_REG_CMD_MEM +
3475 sizeof(struct dmae_command) *
3476 (loader_idx + 1)) >> 2;
3477 dmae->dst_addr_hi = 0;
3478 dmae->len = sizeof(struct dmae_command) >> 2;
3481 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx + 1] >> 2;
3482 dmae->comp_addr_hi = 0;
3486 bnx2x_post_dmae(bp, dmae, loader_idx);
3488 } else if (bp->func_stx) {
3490 bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
3494 static int bnx2x_stats_comp(struct bnx2x *bp)
3496 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3497 int cnt = 10;
3499 might_sleep();
3500 while (*stats_comp != DMAE_COMP_VAL) {
3501 if (!cnt) {
3502 BNX2X_ERR("timeout waiting for stats finished\n");
3503 break;
3504 }
3505 cnt--;
3506 msleep(1);
3509 return 1;
3512 * Statistics service functions
3515 static void bnx2x_stats_pmf_update(struct bnx2x *bp)
3517 struct dmae_command *dmae;
3518 u32 opcode;
3519 int loader_idx = PMF_DMAE_C(bp);
3520 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3523 if (!IS_E1HMF(bp) || !bp->port.pmf || !bp->port.port_stx) {
3524 BNX2X_ERR("BUG!\n");
3525 return;
3528 bp->executer_idx = 0;
3530 opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3532 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3533 #ifdef __BIG_ENDIAN
3534 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3535 #else
3536 DMAE_CMD_ENDIANITY_DW_SWAP |
3537 #endif
3538 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3539 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3541 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3542 dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
3543 dmae->src_addr_lo = bp->port.port_stx >> 2;
3544 dmae->src_addr_hi = 0;
3545 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3546 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3547 dmae->len = DMAE_LEN32_RD_MAX;
3548 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3549 dmae->comp_addr_hi = 0;
3552 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3553 dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
3554 dmae->src_addr_lo = (bp->port.port_stx >> 2) + DMAE_LEN32_RD_MAX;
3555 dmae->src_addr_hi = 0;
3556 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats) +
3557 DMAE_LEN32_RD_MAX * 4);
3558 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats) +
3559 DMAE_LEN32_RD_MAX * 4);
3560 dmae->len = (sizeof(struct host_port_stats) >> 2) - DMAE_LEN32_RD_MAX;
3561 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3562 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3563 dmae->comp_val = DMAE_COMP_VAL;
3566 bnx2x_hw_stats_post(bp);
3567 bnx2x_stats_comp(bp);
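3568 /* the port stats are read in two chunks above because a single DMAE
3569    transfer is limited to DMAE_LEN32_RD_MAX dwords */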
3570 static void bnx2x_port_stats_init(struct bnx2x *bp)
3572 struct dmae_command *dmae;
3573 int port = BP_PORT(bp);
3574 int vn = BP_E1HVN(bp);
3575 u32 opcode;
3576 int loader_idx = PMF_DMAE_C(bp);
3577 u32 mac_addr;
3578 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3581 if (!bp->link_vars.link_up || !bp->port.pmf) {
3582 BNX2X_ERR("BUG!\n");
3583 return;
3586 bp->executer_idx = 0;
3589 opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3590 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3591 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3592 #ifdef __BIG_ENDIAN
3593 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3594 #else
3595 DMAE_CMD_ENDIANITY_DW_SWAP |
3596 #endif
3597 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3598 (vn << DMAE_CMD_E1HVN_SHIFT));
3600 if (bp->port.port_stx) {
3602 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3603 dmae->opcode = opcode;
3604 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3605 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3606 dmae->dst_addr_lo = bp->port.port_stx >> 2;
3607 dmae->dst_addr_hi = 0;
3608 dmae->len = sizeof(struct host_port_stats) >> 2;
3609 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3610 dmae->comp_addr_hi = 0;
3614 if (bp->func_stx) {
3616 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3617 dmae->opcode = opcode;
3618 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
3619 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
3620 dmae->dst_addr_lo = bp->func_stx >> 2;
3621 dmae->dst_addr_hi = 0;
3622 dmae->len = sizeof(struct host_func_stats) >> 2;
3623 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3624 dmae->comp_addr_hi = 0;
3629 opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3630 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3631 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3632 #ifdef __BIG_ENDIAN
3633 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3634 #else
3635 DMAE_CMD_ENDIANITY_DW_SWAP |
3636 #endif
3637 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3638 (vn << DMAE_CMD_E1HVN_SHIFT));
3640 if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
3642 mac_addr = (port ? NIG_REG_INGRESS_BMAC1_MEM :
3643 NIG_REG_INGRESS_BMAC0_MEM);
3645 /* BIGMAC_REGISTER_TX_STAT_GTPKT ..
3646 BIGMAC_REGISTER_TX_STAT_GTBYT */
3647 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3648 dmae->opcode = opcode;
3649 dmae->src_addr_lo = (mac_addr +
3650 BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
3651 dmae->src_addr_hi = 0;
3652 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
3653 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
3654 dmae->len = (8 + BIGMAC_REGISTER_TX_STAT_GTBYT -
3655 BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
3656 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3657 dmae->comp_addr_hi = 0;
3660 /* BIGMAC_REGISTER_RX_STAT_GR64 ..
3661 BIGMAC_REGISTER_RX_STAT_GRIPJ */
3662 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3663 dmae->opcode = opcode;
3664 dmae->src_addr_lo = (mac_addr +
3665 BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
3666 dmae->src_addr_hi = 0;
3667 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
3668 offsetof(struct bmac_stats, rx_stat_gr64_lo));
3669 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
3670 offsetof(struct bmac_stats, rx_stat_gr64_lo));
3671 dmae->len = (8 + BIGMAC_REGISTER_RX_STAT_GRIPJ -
3672 BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
3673 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3674 dmae->comp_addr_hi = 0;
3677 } else if (bp->link_vars.mac_type == MAC_TYPE_EMAC) {
3679 mac_addr = (port ? GRCBASE_EMAC1 : GRCBASE_EMAC0);
3681 /* EMAC_REG_EMAC_RX_STAT_AC (EMAC_REG_EMAC_RX_STAT_AC_COUNT)*/
3682 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3683 dmae->opcode = opcode;
3684 dmae->src_addr_lo = (mac_addr +
3685 EMAC_REG_EMAC_RX_STAT_AC) >> 2;
3686 dmae->src_addr_hi = 0;
3687 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
3688 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
3689 dmae->len = EMAC_REG_EMAC_RX_STAT_AC_COUNT;
3690 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3691 dmae->comp_addr_hi = 0;
3694 /* EMAC_REG_EMAC_RX_STAT_AC_28 */
3695 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3696 dmae->opcode = opcode;
3697 dmae->src_addr_lo = (mac_addr +
3698 EMAC_REG_EMAC_RX_STAT_AC_28) >> 2;
3699 dmae->src_addr_hi = 0;
3700 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
3701 offsetof(struct emac_stats, rx_stat_falsecarriererrors));
3702 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
3703 offsetof(struct emac_stats, rx_stat_falsecarriererrors));
3704 dmae->len = 1;
3705 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3706 dmae->comp_addr_hi = 0;
3709 /* EMAC_REG_EMAC_TX_STAT_AC (EMAC_REG_EMAC_TX_STAT_AC_COUNT)*/
3710 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3711 dmae->opcode = opcode;
3712 dmae->src_addr_lo = (mac_addr +
3713 EMAC_REG_EMAC_TX_STAT_AC) >> 2;
3714 dmae->src_addr_hi = 0;
3715 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
3716 offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
3717 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
3718 offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
3719 dmae->len = EMAC_REG_EMAC_TX_STAT_AC_COUNT;
3720 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3721 dmae->comp_addr_hi = 0;
3726 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3727 dmae->opcode = opcode;
3728 dmae->src_addr_lo = (port ? NIG_REG_STAT1_BRB_DISCARD :
3729 NIG_REG_STAT0_BRB_DISCARD) >> 2;
3730 dmae->src_addr_hi = 0;
3731 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats));
3732 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats));
3733 dmae->len = (sizeof(struct nig_stats) - 4*sizeof(u32)) >> 2;
3734 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3735 dmae->comp_addr_hi = 0;
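3736 /* the last 4 dwords of nig_stats (the egress_mac_pkt0/1 hi:lo pairs)
3737    are fetched by the two separate transfers below */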
3738 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3739 dmae->opcode = opcode;
3740 dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT0 :
3741 NIG_REG_STAT0_EGRESS_MAC_PKT0) >> 2;
3742 dmae->src_addr_hi = 0;
3743 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
3744 offsetof(struct nig_stats, egress_mac_pkt0_lo));
3745 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
3746 offsetof(struct nig_stats, egress_mac_pkt0_lo));
3747 dmae->len = (2*sizeof(u32)) >> 2;
3748 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3749 dmae->comp_addr_hi = 0;
3752 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3753 dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3754 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
3755 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3756 #ifdef __BIG_ENDIAN
3757 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3758 #else
3759 DMAE_CMD_ENDIANITY_DW_SWAP |
3760 #endif
3761 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3762 (vn << DMAE_CMD_E1HVN_SHIFT));
3763 dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT1 :
3764 NIG_REG_STAT0_EGRESS_MAC_PKT1) >> 2;
3765 dmae->src_addr_hi = 0;
3766 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
3767 offsetof(struct nig_stats, egress_mac_pkt1_lo));
3768 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
3769 offsetof(struct nig_stats, egress_mac_pkt1_lo));
3770 dmae->len = (2*sizeof(u32)) >> 2;
3771 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3772 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3773 dmae->comp_val = DMAE_COMP_VAL;
3778 static void bnx2x_func_stats_init(struct bnx2x *bp)
3780 struct dmae_command *dmae = &bp->stats_dmae;
3781 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3784 if (!bp->func_stx) {
3785 BNX2X_ERR("BUG!\n");
3786 return;
3789 bp->executer_idx = 0;
3790 memset(dmae, 0, sizeof(struct dmae_command));
3792 dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3793 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
3794 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3795 #ifdef __BIG_ENDIAN
3796 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3797 #else
3798 DMAE_CMD_ENDIANITY_DW_SWAP |
3799 #endif
3800 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3801 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3802 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
3803 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
3804 dmae->dst_addr_lo = bp->func_stx >> 2;
3805 dmae->dst_addr_hi = 0;
3806 dmae->len = sizeof(struct host_func_stats) >> 2;
3807 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3808 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3809 dmae->comp_val = DMAE_COMP_VAL;
3814 static void bnx2x_stats_start(struct bnx2x *bp)
3816 if (bp->port.pmf)
3817 bnx2x_port_stats_init(bp);
3819 else if (bp->func_stx)
3820 bnx2x_func_stats_init(bp);
3822 bnx2x_hw_stats_post(bp);
3823 bnx2x_storm_stats_post(bp);
3826 static void bnx2x_stats_pmf_start(struct bnx2x *bp)
3828 bnx2x_stats_comp(bp);
3829 bnx2x_stats_pmf_update(bp);
3830 bnx2x_stats_start(bp);
3833 static void bnx2x_stats_restart(struct bnx2x *bp)
3835 bnx2x_stats_comp(bp);
3836 bnx2x_stats_start(bp);
3839 static void bnx2x_bmac_stats_update(struct bnx2x *bp)
3841 struct bmac_stats *new = bnx2x_sp(bp, mac_stats.bmac_stats);
3842 struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3843 struct bnx2x_eth_stats *estats = &bp->eth_stats;
3845 struct {
3846 u32 lo;
3847 u32 hi;
3848 } diff;
3849 UPDATE_STAT64(rx_stat_grerb, rx_stat_ifhcinbadoctets);
3850 UPDATE_STAT64(rx_stat_grfcs, rx_stat_dot3statsfcserrors);
3851 UPDATE_STAT64(rx_stat_grund, rx_stat_etherstatsundersizepkts);
3852 UPDATE_STAT64(rx_stat_grovr, rx_stat_dot3statsframestoolong);
3853 UPDATE_STAT64(rx_stat_grfrg, rx_stat_etherstatsfragments);
3854 UPDATE_STAT64(rx_stat_grjbr, rx_stat_etherstatsjabbers);
3855 UPDATE_STAT64(rx_stat_grxcf, rx_stat_maccontrolframesreceived);
3856 UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffstateentered);
3857 UPDATE_STAT64(rx_stat_grxpf, rx_stat_bmac_xpf);
3858 UPDATE_STAT64(tx_stat_gtxpf, tx_stat_outxoffsent);
3859 UPDATE_STAT64(tx_stat_gtxpf, tx_stat_flowcontroldone);
3860 UPDATE_STAT64(tx_stat_gt64, tx_stat_etherstatspkts64octets);
3861 UPDATE_STAT64(tx_stat_gt127,
3862 tx_stat_etherstatspkts65octetsto127octets);
3863 UPDATE_STAT64(tx_stat_gt255,
3864 tx_stat_etherstatspkts128octetsto255octets);
3865 UPDATE_STAT64(tx_stat_gt511,
3866 tx_stat_etherstatspkts256octetsto511octets);
3867 UPDATE_STAT64(tx_stat_gt1023,
3868 tx_stat_etherstatspkts512octetsto1023octets);
3869 UPDATE_STAT64(tx_stat_gt1518,
3870 tx_stat_etherstatspkts1024octetsto1522octets);
3871 UPDATE_STAT64(tx_stat_gt2047, tx_stat_bmac_2047);
3872 UPDATE_STAT64(tx_stat_gt4095, tx_stat_bmac_4095);
3873 UPDATE_STAT64(tx_stat_gt9216, tx_stat_bmac_9216);
3874 UPDATE_STAT64(tx_stat_gt16383, tx_stat_bmac_16383);
3875 UPDATE_STAT64(tx_stat_gterr,
3876 tx_stat_dot3statsinternalmactransmiterrors);
3877 UPDATE_STAT64(tx_stat_gtufl, tx_stat_bmac_ufl);
3879 estats->pause_frames_received_hi =
3880 pstats->mac_stx[1].rx_stat_bmac_xpf_hi;
3881 estats->pause_frames_received_lo =
3882 pstats->mac_stx[1].rx_stat_bmac_xpf_lo;
3884 estats->pause_frames_sent_hi =
3885 pstats->mac_stx[1].tx_stat_outxoffsent_hi;
3886 estats->pause_frames_sent_lo =
3887 pstats->mac_stx[1].tx_stat_outxoffsent_lo;
3890 static void bnx2x_emac_stats_update(struct bnx2x *bp)
3892 struct emac_stats *new = bnx2x_sp(bp, mac_stats.emac_stats);
3893 struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3894 struct bnx2x_eth_stats *estats = &bp->eth_stats;
3896 UPDATE_EXTEND_STAT(rx_stat_ifhcinbadoctets);
3897 UPDATE_EXTEND_STAT(tx_stat_ifhcoutbadoctets);
3898 UPDATE_EXTEND_STAT(rx_stat_dot3statsfcserrors);
3899 UPDATE_EXTEND_STAT(rx_stat_dot3statsalignmenterrors);
3900 UPDATE_EXTEND_STAT(rx_stat_dot3statscarriersenseerrors);
3901 UPDATE_EXTEND_STAT(rx_stat_falsecarriererrors);
3902 UPDATE_EXTEND_STAT(rx_stat_etherstatsundersizepkts);
3903 UPDATE_EXTEND_STAT(rx_stat_dot3statsframestoolong);
3904 UPDATE_EXTEND_STAT(rx_stat_etherstatsfragments);
3905 UPDATE_EXTEND_STAT(rx_stat_etherstatsjabbers);
3906 UPDATE_EXTEND_STAT(rx_stat_maccontrolframesreceived);
3907 UPDATE_EXTEND_STAT(rx_stat_xoffstateentered);
3908 UPDATE_EXTEND_STAT(rx_stat_xonpauseframesreceived);
3909 UPDATE_EXTEND_STAT(rx_stat_xoffpauseframesreceived);
3910 UPDATE_EXTEND_STAT(tx_stat_outxonsent);
3911 UPDATE_EXTEND_STAT(tx_stat_outxoffsent);
3912 UPDATE_EXTEND_STAT(tx_stat_flowcontroldone);
3913 UPDATE_EXTEND_STAT(tx_stat_etherstatscollisions);
3914 UPDATE_EXTEND_STAT(tx_stat_dot3statssinglecollisionframes);
3915 UPDATE_EXTEND_STAT(tx_stat_dot3statsmultiplecollisionframes);
3916 UPDATE_EXTEND_STAT(tx_stat_dot3statsdeferredtransmissions);
3917 UPDATE_EXTEND_STAT(tx_stat_dot3statsexcessivecollisions);
3918 UPDATE_EXTEND_STAT(tx_stat_dot3statslatecollisions);
3919 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts64octets);
3920 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts65octetsto127octets);
3921 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts128octetsto255octets);
3922 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts256octetsto511octets);
3923 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts512octetsto1023octets);
3924 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts1024octetsto1522octets);
3925 UPDATE_EXTEND_STAT(tx_stat_etherstatspktsover1522octets);
3926 UPDATE_EXTEND_STAT(tx_stat_dot3statsinternalmactransmiterrors);
3928 estats->pause_frames_received_hi =
3929 pstats->mac_stx[1].rx_stat_xonpauseframesreceived_hi;
3930 estats->pause_frames_received_lo =
3931 pstats->mac_stx[1].rx_stat_xonpauseframesreceived_lo;
3932 ADD_64(estats->pause_frames_received_hi,
3933 pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_hi,
3934 estats->pause_frames_received_lo,
3935 pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_lo);
3937 estats->pause_frames_sent_hi =
3938 pstats->mac_stx[1].tx_stat_outxonsent_hi;
3939 estats->pause_frames_sent_lo =
3940 pstats->mac_stx[1].tx_stat_outxonsent_lo;
3941 ADD_64(estats->pause_frames_sent_hi,
3942 pstats->mac_stx[1].tx_stat_outxoffsent_hi,
3943 estats->pause_frames_sent_lo,
3944 pstats->mac_stx[1].tx_stat_outxoffsent_lo);
3947 static int bnx2x_hw_stats_update(struct bnx2x *bp)
3949 struct nig_stats *new = bnx2x_sp(bp, nig_stats);
3950 struct nig_stats *old = &(bp->port.old_nig_stats);
3951 struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3952 struct bnx2x_eth_stats *estats = &bp->eth_stats;
3954 struct {
3955 u32 lo;
3956 u32 hi;
3957 } diff;
3958 u32 nig_timer_max;
3959 if (bp->link_vars.mac_type == MAC_TYPE_BMAC)
3960 bnx2x_bmac_stats_update(bp);
3962 else if (bp->link_vars.mac_type == MAC_TYPE_EMAC)
3963 bnx2x_emac_stats_update(bp);
3965 else { /* unreached */
3966 BNX2X_ERR("stats updated by DMAE but no MAC active\n");
3967 return -1;
3970 ADD_EXTEND_64(pstats->brb_drop_hi, pstats->brb_drop_lo,
3971 new->brb_discard - old->brb_discard);
3972 ADD_EXTEND_64(estats->brb_truncate_hi, estats->brb_truncate_lo,
3973 new->brb_truncate - old->brb_truncate);
3975 UPDATE_STAT64_NIG(egress_mac_pkt0,
3976 etherstatspkts1024octetsto1522octets);
3977 UPDATE_STAT64_NIG(egress_mac_pkt1, etherstatspktsover1522octets);
3979 memcpy(old, new, sizeof(struct nig_stats));
3981 memcpy(&(estats->rx_stat_ifhcinbadoctets_hi), &(pstats->mac_stx[1]),
3982 sizeof(struct mac_stx));
3983 estats->brb_drop_hi = pstats->brb_drop_hi;
3984 estats->brb_drop_lo = pstats->brb_drop_lo;
3986 pstats->host_port_stats_start = ++pstats->host_port_stats_end;
3988 nig_timer_max = SHMEM_RD(bp, port_mb[BP_PORT(bp)].stat_nig_timer);
3989 if (nig_timer_max != estats->nig_timer_max) {
3990 estats->nig_timer_max = nig_timer_max;
3991 BNX2X_ERR("NIG timer max (%u)\n", estats->nig_timer_max);
3994 return 0;
3997 static int bnx2x_storm_stats_update(struct bnx2x *bp)
3999 struct eth_stats_query *stats = bnx2x_sp(bp, fw_stats);
4000 struct tstorm_per_port_stats *tport =
4001 &stats->tstorm_common.port_statistics;
4002 struct host_func_stats *fstats = bnx2x_sp(bp, func_stats);
4003 struct bnx2x_eth_stats *estats = &bp->eth_stats;
4004 int i;
4006 memcpy(&(fstats->total_bytes_received_hi),
4007 &(bnx2x_sp(bp, func_stats_base)->total_bytes_received_hi),
4008 sizeof(struct host_func_stats) - 2*sizeof(u32));
4009 estats->error_bytes_received_hi = 0;
4010 estats->error_bytes_received_lo = 0;
4011 estats->etherstatsoverrsizepkts_hi = 0;
4012 estats->etherstatsoverrsizepkts_lo = 0;
4013 estats->no_buff_discard_hi = 0;
4014 estats->no_buff_discard_lo = 0;
4016 for_each_rx_queue(bp, i) {
4017 struct bnx2x_fastpath *fp = &bp->fp[i];
4018 int cl_id = fp->cl_id;
4019 struct tstorm_per_client_stats *tclient =
4020 &stats->tstorm_common.client_statistics[cl_id];
4021 struct tstorm_per_client_stats *old_tclient = &fp->old_tclient;
4022 struct ustorm_per_client_stats *uclient =
4023 &stats->ustorm_common.client_statistics[cl_id];
4024 struct ustorm_per_client_stats *old_uclient = &fp->old_uclient;
4025 struct xstorm_per_client_stats *xclient =
4026 &stats->xstorm_common.client_statistics[cl_id];
4027 struct xstorm_per_client_stats *old_xclient = &fp->old_xclient;
4028 struct bnx2x_eth_q_stats *qstats = &fp->eth_q_stats;
4029 u32 diff;
4031 /* are storm stats valid? */
4032 if ((u16)(le16_to_cpu(xclient->stats_counter) + 1) !=
4033 bp->stats_counter) {
4034 DP(BNX2X_MSG_STATS, "[%d] stats not updated by xstorm"
4035 " xstorm counter (%d) != stats_counter (%d)\n",
4036 i, xclient->stats_counter, bp->stats_counter);
4037 return -1;
4039 if ((u16)(le16_to_cpu(tclient->stats_counter) + 1) !=
4040 bp->stats_counter) {
4041 DP(BNX2X_MSG_STATS, "[%d] stats not updated by tstorm"
4042 " tstorm counter (%d) != stats_counter (%d)\n",
4043 i, tclient->stats_counter, bp->stats_counter);
4044 return -1;
4046 if ((u16)(le16_to_cpu(uclient->stats_counter) + 1) !=
4047 bp->stats_counter) {
4048 DP(BNX2X_MSG_STATS, "[%d] stats not updated by ustorm"
4049 " ustorm counter (%d) != stats_counter (%d)\n",
4050 i, uclient->stats_counter, bp->stats_counter);
4051 return -1;
4054 qstats->total_bytes_received_hi =
4055 le32_to_cpu(tclient->rcv_broadcast_bytes.hi);
4056 qstats->total_bytes_received_lo =
4057 le32_to_cpu(tclient->rcv_broadcast_bytes.lo);
4059 ADD_64(qstats->total_bytes_received_hi,
4060 le32_to_cpu(tclient->rcv_multicast_bytes.hi),
4061 qstats->total_bytes_received_lo,
4062 le32_to_cpu(tclient->rcv_multicast_bytes.lo));
4064 ADD_64(qstats->total_bytes_received_hi,
4065 le32_to_cpu(tclient->rcv_unicast_bytes.hi),
4066 qstats->total_bytes_received_lo,
4067 le32_to_cpu(tclient->rcv_unicast_bytes.lo));
4069 qstats->valid_bytes_received_hi =
4070 qstats->total_bytes_received_hi;
4071 qstats->valid_bytes_received_lo =
4072 qstats->total_bytes_received_lo;
4074 qstats->error_bytes_received_hi =
4075 le32_to_cpu(tclient->rcv_error_bytes.hi);
4076 qstats->error_bytes_received_lo =
4077 le32_to_cpu(tclient->rcv_error_bytes.lo);
4079 ADD_64(qstats->total_bytes_received_hi,
4080 qstats->error_bytes_received_hi,
4081 qstats->total_bytes_received_lo,
4082 qstats->error_bytes_received_lo);
4084 UPDATE_EXTEND_TSTAT(rcv_unicast_pkts,
4085 total_unicast_packets_received);
4086 UPDATE_EXTEND_TSTAT(rcv_multicast_pkts,
4087 total_multicast_packets_received);
4088 UPDATE_EXTEND_TSTAT(rcv_broadcast_pkts,
4089 total_broadcast_packets_received);
4090 UPDATE_EXTEND_TSTAT(packets_too_big_discard,
4091 etherstatsoverrsizepkts);
4092 UPDATE_EXTEND_TSTAT(no_buff_discard, no_buff_discard);
4094 SUB_EXTEND_USTAT(ucast_no_buff_pkts,
4095 total_unicast_packets_received);
4096 SUB_EXTEND_USTAT(mcast_no_buff_pkts,
4097 total_multicast_packets_received);
4098 SUB_EXTEND_USTAT(bcast_no_buff_pkts,
4099 total_broadcast_packets_received);
4100 UPDATE_EXTEND_USTAT(ucast_no_buff_pkts, no_buff_discard);
4101 UPDATE_EXTEND_USTAT(mcast_no_buff_pkts, no_buff_discard);
4102 UPDATE_EXTEND_USTAT(bcast_no_buff_pkts, no_buff_discard);
4104 qstats->total_bytes_transmitted_hi =
4105 le32_to_cpu(xclient->unicast_bytes_sent.hi);
4106 qstats->total_bytes_transmitted_lo =
4107 le32_to_cpu(xclient->unicast_bytes_sent.lo);
4109 ADD_64(qstats->total_bytes_transmitted_hi,
4110 le32_to_cpu(xclient->multicast_bytes_sent.hi),
4111 qstats->total_bytes_transmitted_lo,
4112 le32_to_cpu(xclient->multicast_bytes_sent.lo));
4114 ADD_64(qstats->total_bytes_transmitted_hi,
4115 le32_to_cpu(xclient->broadcast_bytes_sent.hi),
4116 qstats->total_bytes_transmitted_lo,
4117 le32_to_cpu(xclient->broadcast_bytes_sent.lo));
4119 UPDATE_EXTEND_XSTAT(unicast_pkts_sent,
4120 total_unicast_packets_transmitted);
4121 UPDATE_EXTEND_XSTAT(multicast_pkts_sent,
4122 total_multicast_packets_transmitted);
4123 UPDATE_EXTEND_XSTAT(broadcast_pkts_sent,
4124 total_broadcast_packets_transmitted);
4126 old_tclient->checksum_discard = tclient->checksum_discard;
4127 old_tclient->ttl0_discard = tclient->ttl0_discard;
4129 ADD_64(fstats->total_bytes_received_hi,
4130 qstats->total_bytes_received_hi,
4131 fstats->total_bytes_received_lo,
4132 qstats->total_bytes_received_lo);
4133 ADD_64(fstats->total_bytes_transmitted_hi,
4134 qstats->total_bytes_transmitted_hi,
4135 fstats->total_bytes_transmitted_lo,
4136 qstats->total_bytes_transmitted_lo);
4137 ADD_64(fstats->total_unicast_packets_received_hi,
4138 qstats->total_unicast_packets_received_hi,
4139 fstats->total_unicast_packets_received_lo,
4140 qstats->total_unicast_packets_received_lo);
4141 ADD_64(fstats->total_multicast_packets_received_hi,
4142 qstats->total_multicast_packets_received_hi,
4143 fstats->total_multicast_packets_received_lo,
4144 qstats->total_multicast_packets_received_lo);
4145 ADD_64(fstats->total_broadcast_packets_received_hi,
4146 qstats->total_broadcast_packets_received_hi,
4147 fstats->total_broadcast_packets_received_lo,
4148 qstats->total_broadcast_packets_received_lo);
4149 ADD_64(fstats->total_unicast_packets_transmitted_hi,
4150 qstats->total_unicast_packets_transmitted_hi,
4151 fstats->total_unicast_packets_transmitted_lo,
4152 qstats->total_unicast_packets_transmitted_lo);
4153 ADD_64(fstats->total_multicast_packets_transmitted_hi,
4154 qstats->total_multicast_packets_transmitted_hi,
4155 fstats->total_multicast_packets_transmitted_lo,
4156 qstats->total_multicast_packets_transmitted_lo);
4157 ADD_64(fstats->total_broadcast_packets_transmitted_hi,
4158 qstats->total_broadcast_packets_transmitted_hi,
4159 fstats->total_broadcast_packets_transmitted_lo,
4160 qstats->total_broadcast_packets_transmitted_lo);
4161 ADD_64(fstats->valid_bytes_received_hi,
4162 qstats->valid_bytes_received_hi,
4163 fstats->valid_bytes_received_lo,
4164 qstats->valid_bytes_received_lo);
4166 ADD_64(estats->error_bytes_received_hi,
4167 qstats->error_bytes_received_hi,
4168 estats->error_bytes_received_lo,
4169 qstats->error_bytes_received_lo);
4170 ADD_64(estats->etherstatsoverrsizepkts_hi,
4171 qstats->etherstatsoverrsizepkts_hi,
4172 estats->etherstatsoverrsizepkts_lo,
4173 qstats->etherstatsoverrsizepkts_lo);
4174 ADD_64(estats->no_buff_discard_hi, qstats->no_buff_discard_hi,
4175 estats->no_buff_discard_lo, qstats->no_buff_discard_lo);
4178 ADD_64(fstats->total_bytes_received_hi,
4179 estats->rx_stat_ifhcinbadoctets_hi,
4180 fstats->total_bytes_received_lo,
4181 estats->rx_stat_ifhcinbadoctets_lo);
4183 memcpy(estats, &(fstats->total_bytes_received_hi),
4184 sizeof(struct host_func_stats) - 2*sizeof(u32));
4186 ADD_64(estats->etherstatsoverrsizepkts_hi,
4187 estats->rx_stat_dot3statsframestoolong_hi,
4188 estats->etherstatsoverrsizepkts_lo,
4189 estats->rx_stat_dot3statsframestoolong_lo);
4190 ADD_64(estats->error_bytes_received_hi,
4191 estats->rx_stat_ifhcinbadoctets_hi,
4192 estats->error_bytes_received_lo,
4193 estats->rx_stat_ifhcinbadoctets_lo);
4195 if (bp->port.pmf) {
4196 estats->mac_filter_discard =
4197 le32_to_cpu(tport->mac_filter_discard);
4198 estats->xxoverflow_discard =
4199 le32_to_cpu(tport->xxoverflow_discard);
4200 estats->brb_truncate_discard =
4201 le32_to_cpu(tport->brb_truncate_discard);
4202 estats->mac_discard = le32_to_cpu(tport->mac_discard);
4205 fstats->host_func_stats_start = ++fstats->host_func_stats_end;
4207 bp->stats_pending = 0;
4209 return 0;
4212 static void bnx2x_net_stats_update(struct bnx2x *bp)
4214 struct bnx2x_eth_stats *estats = &bp->eth_stats;
4215 struct net_device_stats *nstats = &bp->dev->stats;
4216 int i;
4218 nstats->rx_packets =
4219 bnx2x_hilo(&estats->total_unicast_packets_received_hi) +
4220 bnx2x_hilo(&estats->total_multicast_packets_received_hi) +
4221 bnx2x_hilo(&estats->total_broadcast_packets_received_hi);
4223 nstats->tx_packets =
4224 bnx2x_hilo(&estats->total_unicast_packets_transmitted_hi) +
4225 bnx2x_hilo(&estats->total_multicast_packets_transmitted_hi) +
4226 bnx2x_hilo(&estats->total_broadcast_packets_transmitted_hi);
4228 nstats->rx_bytes = bnx2x_hilo(&estats->total_bytes_received_hi);
4230 nstats->tx_bytes = bnx2x_hilo(&estats->total_bytes_transmitted_hi);
4232 nstats->rx_dropped = estats->mac_discard;
4233 for_each_rx_queue(bp, i)
4234 nstats->rx_dropped +=
4235 le32_to_cpu(bp->fp[i].old_tclient.checksum_discard);
4237 nstats->tx_dropped = 0;
4239 nstats->multicast =
4240 bnx2x_hilo(&estats->total_multicast_packets_received_hi);
4242 nstats->collisions =
4243 bnx2x_hilo(&estats->tx_stat_etherstatscollisions_hi);
4245 nstats->rx_length_errors =
4246 bnx2x_hilo(&estats->rx_stat_etherstatsundersizepkts_hi) +
4247 bnx2x_hilo(&estats->etherstatsoverrsizepkts_hi);
4248 nstats->rx_over_errors = bnx2x_hilo(&estats->brb_drop_hi) +
4249 bnx2x_hilo(&estats->brb_truncate_hi);
4250 nstats->rx_crc_errors =
4251 bnx2x_hilo(&estats->rx_stat_dot3statsfcserrors_hi);
4252 nstats->rx_frame_errors =
4253 bnx2x_hilo(&estats->rx_stat_dot3statsalignmenterrors_hi);
4254 nstats->rx_fifo_errors = bnx2x_hilo(&estats->no_buff_discard_hi);
4255 nstats->rx_missed_errors = estats->xxoverflow_discard;
4257 nstats->rx_errors = nstats->rx_length_errors +
4258 nstats->rx_over_errors +
4259 nstats->rx_crc_errors +
4260 nstats->rx_frame_errors +
4261 nstats->rx_fifo_errors +
4262 nstats->rx_missed_errors;
4264 nstats->tx_aborted_errors =
4265 bnx2x_hilo(&estats->tx_stat_dot3statslatecollisions_hi) +
4266 bnx2x_hilo(&estats->tx_stat_dot3statsexcessivecollisions_hi);
4267 nstats->tx_carrier_errors =
4268 bnx2x_hilo(&estats->rx_stat_dot3statscarriersenseerrors_hi);
4269 nstats->tx_fifo_errors = 0;
4270 nstats->tx_heartbeat_errors = 0;
4271 nstats->tx_window_errors = 0;
4273 nstats->tx_errors = nstats->tx_aborted_errors +
4274 nstats->tx_carrier_errors +
4275 bnx2x_hilo(&estats->tx_stat_dot3statsinternalmactransmiterrors_hi);
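/* The bnx2x_hilo() helper used throughout this function folds a {hi,lo}
 * register pair into one scalar. A minimal sketch of the idea (the
 * driver's real helper is defined earlier in this file):
 *
 *	u32 lo = *(hiref + 1);		the _lo word follows the _hi word
 *	return ((u64)(*hiref) << 32) + lo;
 */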
4278 static void bnx2x_drv_stats_update(struct bnx2x *bp)
4280 struct bnx2x_eth_stats *estats = &bp->eth_stats;
4281 int i;
4283 estats->driver_xoff = 0;
4284 estats->rx_err_discard_pkt = 0;
4285 estats->rx_skb_alloc_failed = 0;
4286 estats->hw_csum_err = 0;
4287 for_each_rx_queue(bp, i) {
4288 struct bnx2x_eth_q_stats *qstats = &bp->fp[i].eth_q_stats;
4290 estats->driver_xoff += qstats->driver_xoff;
4291 estats->rx_err_discard_pkt += qstats->rx_err_discard_pkt;
4292 estats->rx_skb_alloc_failed += qstats->rx_skb_alloc_failed;
4293 estats->hw_csum_err += qstats->hw_csum_err;
4297 static void bnx2x_stats_update(struct bnx2x *bp)
4299 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
4301 if (*stats_comp != DMAE_COMP_VAL)
4302 return;
4304 if (bp->port.pmf)
4305 bnx2x_hw_stats_update(bp);
4307 if (bnx2x_storm_stats_update(bp) && (bp->stats_pending++ == 3)) {
4308 BNX2X_ERR("storm stats were not updated for 3 times\n");
4309 bnx2x_panic();
4310 return;
4311 }
4313 bnx2x_net_stats_update(bp);
4314 bnx2x_drv_stats_update(bp);
4316 if (bp->msglevel & NETIF_MSG_TIMER) {
4317 struct bnx2x_fastpath *fp0_rx = bp->fp;
4318 struct bnx2x_fastpath *fp0_tx = &(bp->fp[bp->num_rx_queues]);
4319 struct tstorm_per_client_stats *old_tclient =
4320 &bp->fp->old_tclient;
4321 struct bnx2x_eth_q_stats *qstats = &bp->fp->eth_q_stats;
4322 struct bnx2x_eth_stats *estats = &bp->eth_stats;
4323 struct net_device_stats *nstats = &bp->dev->stats;
4326 printk(KERN_DEBUG "%s:\n", bp->dev->name);
4327 printk(KERN_DEBUG " tx avail (%4x) tx hc idx (%x)"
4329 bnx2x_tx_avail(fp0_tx),
4330 le16_to_cpu(*fp0_tx->tx_cons_sb), nstats->tx_packets);
4331 printk(KERN_DEBUG " rx usage (%4x) rx hc idx (%x)"
4333 (u16)(le16_to_cpu(*fp0_rx->rx_cons_sb) -
4334 fp0_rx->rx_comp_cons),
4335 le16_to_cpu(*fp0_rx->rx_cons_sb), nstats->rx_packets);
4336 printk(KERN_DEBUG " %s (Xoff events %u) brb drops %u "
4337 "brb truncate %u\n",
4338 (netif_queue_stopped(bp->dev) ? "Xoff" : "Xon"),
4339 qstats->driver_xoff,
4340 estats->brb_drop_lo, estats->brb_truncate_lo);
4341 printk(KERN_DEBUG "tstats: checksum_discard %u "
4342 "packets_too_big_discard %lu no_buff_discard %lu "
4343 "mac_discard %u mac_filter_discard %u "
4344 "xxovrflow_discard %u brb_truncate_discard %u "
4345 "ttl0_discard %u\n",
4346 le32_to_cpu(old_tclient->checksum_discard),
4347 bnx2x_hilo(&qstats->etherstatsoverrsizepkts_hi),
4348 bnx2x_hilo(&qstats->no_buff_discard_hi),
4349 estats->mac_discard, estats->mac_filter_discard,
4350 estats->xxoverflow_discard, estats->brb_truncate_discard,
4351 le32_to_cpu(old_tclient->ttl0_discard));
4353 for_each_queue(bp, i) {
4354 printk(KERN_DEBUG "[%d]: %lu\t%lu\t%lu\n", i,
4355 bnx2x_fp(bp, i, tx_pkt),
4356 bnx2x_fp(bp, i, rx_pkt),
4357 bnx2x_fp(bp, i, rx_calls));
4361 bnx2x_hw_stats_post(bp);
4362 bnx2x_storm_stats_post(bp);
4365 static void bnx2x_port_stats_stop(struct bnx2x *bp)
4367 struct dmae_command *dmae;
4368 u32 opcode;
4369 int loader_idx = PMF_DMAE_C(bp);
4370 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
4372 bp->executer_idx = 0;
4374 opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
4375 DMAE_CMD_C_ENABLE |
4376 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
4377 #ifdef __BIG_ENDIAN
4378 DMAE_CMD_ENDIANITY_B_DW_SWAP |
4379 #else
4380 DMAE_CMD_ENDIANITY_DW_SWAP |
4381 #endif
4382 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
4383 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
4385 if (bp->port.port_stx) {
4387 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4388 if (bp->func_stx)
4389 dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
4390 else
4391 dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
4392 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
4393 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
4394 dmae->dst_addr_lo = bp->port.port_stx >> 2;
4395 dmae->dst_addr_hi = 0;
4396 dmae->len = sizeof(struct host_port_stats) >> 2;
4397 if (bp->func_stx) {
4398 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
4399 dmae->comp_addr_hi = 0;
4400 dmae->comp_val = 1;
4401 } else {
4402 dmae->comp_addr_lo =
4403 U64_LO(bnx2x_sp_mapping(bp, stats_comp));
4404 dmae->comp_addr_hi =
4405 U64_HI(bnx2x_sp_mapping(bp, stats_comp));
4406 dmae->comp_val = DMAE_COMP_VAL;
4408 *stats_comp = 0;
4409 }
4410 }
4412 if (bp->func_stx) {
4414 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4415 dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
4416 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
4417 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
4418 dmae->dst_addr_lo = bp->func_stx >> 2;
4419 dmae->dst_addr_hi = 0;
4420 dmae->len = sizeof(struct host_func_stats) >> 2;
4421 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
4422 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
4423 dmae->comp_val = DMAE_COMP_VAL;
4425 *stats_comp = 0;
4426 }
4429 static void bnx2x_stats_stop(struct bnx2x *bp)
4431 int update = 0;
4433 bnx2x_stats_comp(bp);
4435 if (bp->port.pmf)
4436 update = (bnx2x_hw_stats_update(bp) == 0);
4438 update |= (bnx2x_storm_stats_update(bp) == 0);
4440 if (update) {
4441 bnx2x_net_stats_update(bp);
4443 if (bp->port.pmf)
4444 bnx2x_port_stats_stop(bp);
4446 bnx2x_hw_stats_post(bp);
4447 bnx2x_stats_comp(bp);
4448 }
4451 static void bnx2x_stats_do_nothing(struct bnx2x *bp)
4455 static const struct {
4456 void (*action)(struct bnx2x *bp);
4457 enum bnx2x_stats_state next_state;
4458 } bnx2x_stats_stm[STATS_STATE_MAX][STATS_EVENT_MAX] = {
4459 /* state	event */
4460 {
4461 /* DISABLED PMF */ {bnx2x_stats_pmf_update, STATS_STATE_DISABLED},
4462 /* LINK_UP */ {bnx2x_stats_start, STATS_STATE_ENABLED},
4463 /* UPDATE */ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED},
4464 /* STOP */ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED}
4465 },
4466 {
4467 /* ENABLED PMF */ {bnx2x_stats_pmf_start, STATS_STATE_ENABLED},
4468 /* LINK_UP */ {bnx2x_stats_restart, STATS_STATE_ENABLED},
4469 /* UPDATE */ {bnx2x_stats_update, STATS_STATE_ENABLED},
4470 /* STOP */ {bnx2x_stats_stop, STATS_STATE_DISABLED}
4471 }
4472 };
4474 static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event)
4476 enum bnx2x_stats_state state = bp->stats_state;
4478 bnx2x_stats_stm[state][event].action(bp);
4479 bp->stats_state = bnx2x_stats_stm[state][event].next_state;
4481 /* Make sure the state has been "changed" */
4482 smp_wmb();
4484 if ((event != STATS_EVENT_UPDATE) || (bp->msglevel & NETIF_MSG_TIMER))
4485 DP(BNX2X_MSG_STATS, "state %d -> event %d -> state %d\n",
4486 state, event, bp->stats_state);
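/* Example of the dispatch above: starting from STATS_STATE_DISABLED, a
 * STATS_EVENT_LINK_UP runs bnx2x_stats_start() and moves the machine to
 * STATS_STATE_ENABLED; the periodic timer then feeds it UPDATE events:
 *
 *	bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
 *	bnx2x_stats_handle(bp, STATS_EVENT_UPDATE);
 */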
4489 static void bnx2x_port_stats_base_init(struct bnx2x *bp)
4491 struct dmae_command *dmae;
4492 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
4495 if (!bp->port.pmf || !bp->port.port_stx) {
4496 BNX2X_ERR("BUG!\n");
4500 bp->executer_idx = 0;
4502 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4503 dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
4504 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
4505 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
4506 #ifdef __BIG_ENDIAN
4507 DMAE_CMD_ENDIANITY_B_DW_SWAP |
4508 #else
4509 DMAE_CMD_ENDIANITY_DW_SWAP |
4510 #endif
4511 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
4512 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
4513 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
4514 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
4515 dmae->dst_addr_lo = bp->port.port_stx >> 2;
4516 dmae->dst_addr_hi = 0;
4517 dmae->len = sizeof(struct host_port_stats) >> 2;
4518 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
4519 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
4520 dmae->comp_val = DMAE_COMP_VAL;
4523 bnx2x_hw_stats_post(bp);
4524 bnx2x_stats_comp(bp);
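/* This is the DMAE pattern used by all the stats helpers here: queue one
 * or more dmae_command entries, kick them with bnx2x_hw_stats_post(), and
 * then poll the stats_comp word until the engine writes DMAE_COMP_VAL
 * into it (bnx2x_stats_comp() does the polling). */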
4527 static void bnx2x_func_stats_base_init(struct bnx2x *bp)
4529 int vn, vn_max = IS_E1HMF(bp) ? E1HVN_MAX : E1VN_MAX;
4530 int port = BP_PORT(bp);
4531 int func_stx;
4535 if (!bp->port.pmf || !bp->func_stx) {
4536 BNX2X_ERR("BUG!\n");
4540 /* save our func_stx */
4541 func_stx = bp->func_stx;
4543 for (vn = VN_0; vn < vn_max; vn++) {
4544 int func = 2*vn + port;
4546 bp->func_stx = SHMEM_RD(bp, func_mb[func].fw_mb_param);
4547 bnx2x_func_stats_init(bp);
4548 bnx2x_hw_stats_post(bp);
4549 bnx2x_stats_comp(bp);
4552 /* restore our func_stx */
4553 bp->func_stx = func_stx;
4556 static void bnx2x_func_stats_base_update(struct bnx2x *bp)
4558 struct dmae_command *dmae = &bp->stats_dmae;
4559 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
4562 if (!bp->func_stx) {
4563 BNX2X_ERR("BUG!\n");
4567 bp->executer_idx = 0;
4568 memset(dmae, 0, sizeof(struct dmae_command));
4570 dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
4571 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
4572 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
4573 #ifdef __BIG_ENDIAN
4574 DMAE_CMD_ENDIANITY_B_DW_SWAP |
4575 #else
4576 DMAE_CMD_ENDIANITY_DW_SWAP |
4577 #endif
4578 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
4579 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
4580 dmae->src_addr_lo = bp->func_stx >> 2;
4581 dmae->src_addr_hi = 0;
4582 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats_base));
4583 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats_base));
4584 dmae->len = sizeof(struct host_func_stats) >> 2;
4585 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
4586 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
4587 dmae->comp_val = DMAE_COMP_VAL;
4590 bnx2x_hw_stats_post(bp);
4591 bnx2x_stats_comp(bp);
4594 static void bnx2x_stats_init(struct bnx2x *bp)
4596 int port = BP_PORT(bp);
4597 int func = BP_FUNC(bp);
4598 int i;
4600 bp->stats_pending = 0;
4601 bp->executer_idx = 0;
4602 bp->stats_counter = 0;
4604 /* port and func stats for management */
4605 if (!BP_NOMCP(bp)) {
4606 bp->port.port_stx = SHMEM_RD(bp, port_mb[port].port_stx);
4607 bp->func_stx = SHMEM_RD(bp, func_mb[func].fw_mb_param);
4608 } else {
4610 bp->port.port_stx = 0;
4611 bp->func_stx = 0;
4612 }
4613 DP(BNX2X_MSG_STATS, "port_stx 0x%x func_stx 0x%x\n",
4614 bp->port.port_stx, bp->func_stx);
4617 memset(&(bp->port.old_nig_stats), 0, sizeof(struct nig_stats));
4618 bp->port.old_nig_stats.brb_discard =
4619 REG_RD(bp, NIG_REG_STAT0_BRB_DISCARD + port*0x38);
4620 bp->port.old_nig_stats.brb_truncate =
4621 REG_RD(bp, NIG_REG_STAT0_BRB_TRUNCATE + port*0x38);
4622 REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT0 + port*0x50,
4623 &(bp->port.old_nig_stats.egress_mac_pkt0_lo), 2);
4624 REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT1 + port*0x50,
4625 &(bp->port.old_nig_stats.egress_mac_pkt1_lo), 2);
4627 /* function stats */
4628 for_each_queue(bp, i) {
4629 struct bnx2x_fastpath *fp = &bp->fp[i];
4631 memset(&fp->old_tclient, 0,
4632 sizeof(struct tstorm_per_client_stats));
4633 memset(&fp->old_uclient, 0,
4634 sizeof(struct ustorm_per_client_stats));
4635 memset(&fp->old_xclient, 0,
4636 sizeof(struct xstorm_per_client_stats));
4637 memset(&fp->eth_q_stats, 0, sizeof(struct bnx2x_eth_q_stats));
4640 memset(&bp->dev->stats, 0, sizeof(struct net_device_stats));
4641 memset(&bp->eth_stats, 0, sizeof(struct bnx2x_eth_stats));
4643 bp->stats_state = STATS_STATE_DISABLED;
4645 if (bp->port.pmf) {
4646 if (bp->port.port_stx)
4647 bnx2x_port_stats_base_init(bp);
4649 if (bp->func_stx)
4650 bnx2x_func_stats_base_init(bp);
4652 } else if (bp->func_stx)
4653 bnx2x_func_stats_base_update(bp);
4656 static void bnx2x_timer(unsigned long data)
4658 struct bnx2x *bp = (struct bnx2x *) data;
4660 if (!netif_running(bp->dev))
4661 return;
4663 if (atomic_read(&bp->intr_sem) != 0)
4664 goto timer_restart;
4666 if (poll) {
4667 struct bnx2x_fastpath *fp = &bp->fp[0];
4668 int rc;
4670 bnx2x_tx_int(fp);
4671 rc = bnx2x_rx_int(fp, 1000);
4672 }
4674 if (!BP_NOMCP(bp)) {
4675 int func = BP_FUNC(bp);
4679 ++bp->fw_drv_pulse_wr_seq;
4680 bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK;
4681 /* TBD - add SYSTEM_TIME */
4682 drv_pulse = bp->fw_drv_pulse_wr_seq;
4683 SHMEM_WR(bp, func_mb[func].drv_pulse_mb, drv_pulse);
4685 mcp_pulse = (SHMEM_RD(bp, func_mb[func].mcp_pulse_mb) &
4686 MCP_PULSE_SEQ_MASK);
4687 /* The delta between driver pulse and mcp response
4688 * should be 1 (before mcp response) or 0 (after mcp response)
4689 */
4690 if ((drv_pulse != mcp_pulse) &&
4691 (drv_pulse != ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK))) {
4692 /* someone lost a heartbeat... */
4693 BNX2X_ERR("drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
4694 drv_pulse, mcp_pulse);
4698 if ((bp->state == BNX2X_STATE_OPEN) ||
4699 (bp->state == BNX2X_STATE_DISABLED))
4700 bnx2x_stats_handle(bp, STATS_EVENT_UPDATE);
4702 timer_restart:
4703 mod_timer(&bp->timer, jiffies + bp->current_interval);
4706 /* end of Statistics */
4711 * nic init service functions
4714 static void bnx2x_zero_sb(struct bnx2x *bp, int sb_id)
4716 int port = BP_PORT(bp);
4719 bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
4720 CSTORM_SB_HOST_STATUS_BLOCK_U_OFFSET(port, sb_id), 0,
4721 CSTORM_SB_STATUS_BLOCK_U_SIZE / 4);
4722 bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
4723 CSTORM_SB_HOST_STATUS_BLOCK_C_OFFSET(port, sb_id), 0,
4724 CSTORM_SB_STATUS_BLOCK_C_SIZE / 4);
4727 static void bnx2x_init_sb(struct bnx2x *bp, struct host_status_block *sb,
4728 dma_addr_t mapping, int sb_id)
4730 int port = BP_PORT(bp);
4731 int func = BP_FUNC(bp);
4736 section = ((u64)mapping) + offsetof(struct host_status_block,
4737 u_status_block);
4738 sb->u_status_block.status_block_id = sb_id;
4740 REG_WR(bp, BAR_CSTRORM_INTMEM +
4741 CSTORM_SB_HOST_SB_ADDR_U_OFFSET(port, sb_id), U64_LO(section));
4742 REG_WR(bp, BAR_CSTRORM_INTMEM +
4743 ((CSTORM_SB_HOST_SB_ADDR_U_OFFSET(port, sb_id)) + 4),
4744 U64_HI(section));
4745 REG_WR8(bp, BAR_CSTRORM_INTMEM + FP_USB_FUNC_OFF +
4746 CSTORM_SB_HOST_STATUS_BLOCK_U_OFFSET(port, sb_id), func);
4748 for (index = 0; index < HC_USTORM_SB_NUM_INDICES; index++)
4749 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4750 CSTORM_SB_HC_DISABLE_U_OFFSET(port, sb_id, index), 1);
4753 section = ((u64)mapping) + offsetof(struct host_status_block,
4754 c_status_block);
4755 sb->c_status_block.status_block_id = sb_id;
4757 REG_WR(bp, BAR_CSTRORM_INTMEM +
4758 CSTORM_SB_HOST_SB_ADDR_C_OFFSET(port, sb_id), U64_LO(section));
4759 REG_WR(bp, BAR_CSTRORM_INTMEM +
4760 ((CSTORM_SB_HOST_SB_ADDR_C_OFFSET(port, sb_id)) + 4),
4761 U64_HI(section));
4762 REG_WR8(bp, BAR_CSTRORM_INTMEM + FP_CSB_FUNC_OFF +
4763 CSTORM_SB_HOST_STATUS_BLOCK_C_OFFSET(port, sb_id), func);
4765 for (index = 0; index < HC_CSTORM_SB_NUM_INDICES; index++)
4766 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4767 CSTORM_SB_HC_DISABLE_C_OFFSET(port, sb_id, index), 1);
4769 bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
4772 static void bnx2x_zero_def_sb(struct bnx2x *bp)
4774 int func = BP_FUNC(bp);
4776 bnx2x_init_fill(bp, TSEM_REG_FAST_MEMORY +
4777 TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4778 sizeof(struct tstorm_def_status_block)/4);
4779 bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
4780 CSTORM_DEF_SB_HOST_STATUS_BLOCK_U_OFFSET(func), 0,
4781 sizeof(struct cstorm_def_status_block_u)/4);
4782 bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
4783 CSTORM_DEF_SB_HOST_STATUS_BLOCK_C_OFFSET(func), 0,
4784 sizeof(struct cstorm_def_status_block_c)/4);
4785 bnx2x_init_fill(bp, XSEM_REG_FAST_MEMORY +
4786 XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4787 sizeof(struct xstorm_def_status_block)/4);
4790 static void bnx2x_init_def_sb(struct bnx2x *bp,
4791 struct host_def_status_block *def_sb,
4792 dma_addr_t mapping, int sb_id)
4794 int port = BP_PORT(bp);
4795 int func = BP_FUNC(bp);
4796 int index, val, reg_offset;
4800 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4801 atten_status_block);
4802 def_sb->atten_status_block.status_block_id = sb_id;
4806 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
4807 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
4809 for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
4810 bp->attn_group[index].sig[0] = REG_RD(bp,
4811 reg_offset + 0x10*index);
4812 bp->attn_group[index].sig[1] = REG_RD(bp,
4813 reg_offset + 0x4 + 0x10*index);
4814 bp->attn_group[index].sig[2] = REG_RD(bp,
4815 reg_offset + 0x8 + 0x10*index);
4816 bp->attn_group[index].sig[3] = REG_RD(bp,
4817 reg_offset + 0xc + 0x10*index);
4820 reg_offset = (port ? HC_REG_ATTN_MSG1_ADDR_L :
4821 HC_REG_ATTN_MSG0_ADDR_L);
4823 REG_WR(bp, reg_offset, U64_LO(section));
4824 REG_WR(bp, reg_offset + 4, U64_HI(section));
4826 reg_offset = (port ? HC_REG_ATTN_NUM_P1 : HC_REG_ATTN_NUM_P0);
4828 val = REG_RD(bp, reg_offset);
4829 val |= sb_id;
4830 REG_WR(bp, reg_offset, val);
4833 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4834 u_def_status_block);
4835 def_sb->u_def_status_block.status_block_id = sb_id;
4837 REG_WR(bp, BAR_CSTRORM_INTMEM +
4838 CSTORM_DEF_SB_HOST_SB_ADDR_U_OFFSET(func), U64_LO(section));
4839 REG_WR(bp, BAR_CSTRORM_INTMEM +
4840 ((CSTORM_DEF_SB_HOST_SB_ADDR_U_OFFSET(func)) + 4),
4841 U64_HI(section));
4842 REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_USB_FUNC_OFF +
4843 CSTORM_DEF_SB_HOST_STATUS_BLOCK_U_OFFSET(func), func);
4845 for (index = 0; index < HC_USTORM_DEF_SB_NUM_INDICES; index++)
4846 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4847 CSTORM_DEF_SB_HC_DISABLE_U_OFFSET(func, index), 1);
4850 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4851 c_def_status_block);
4852 def_sb->c_def_status_block.status_block_id = sb_id;
4854 REG_WR(bp, BAR_CSTRORM_INTMEM +
4855 CSTORM_DEF_SB_HOST_SB_ADDR_C_OFFSET(func), U64_LO(section));
4856 REG_WR(bp, BAR_CSTRORM_INTMEM +
4857 ((CSTORM_DEF_SB_HOST_SB_ADDR_C_OFFSET(func)) + 4),
4858 U64_HI(section));
4859 REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_CSB_FUNC_OFF +
4860 CSTORM_DEF_SB_HOST_STATUS_BLOCK_C_OFFSET(func), func);
4862 for (index = 0; index < HC_CSTORM_DEF_SB_NUM_INDICES; index++)
4863 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4864 CSTORM_DEF_SB_HC_DISABLE_C_OFFSET(func, index), 1);
4867 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4868 t_def_status_block);
4869 def_sb->t_def_status_block.status_block_id = sb_id;
4871 REG_WR(bp, BAR_TSTRORM_INTMEM +
4872 TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
4873 REG_WR(bp, BAR_TSTRORM_INTMEM +
4874 ((TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
4875 U64_HI(section));
4876 REG_WR8(bp, BAR_TSTRORM_INTMEM + DEF_TSB_FUNC_OFF +
4877 TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4879 for (index = 0; index < HC_TSTORM_DEF_SB_NUM_INDICES; index++)
4880 REG_WR16(bp, BAR_TSTRORM_INTMEM +
4881 TSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
4884 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4885 x_def_status_block);
4886 def_sb->x_def_status_block.status_block_id = sb_id;
4888 REG_WR(bp, BAR_XSTRORM_INTMEM +
4889 XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
4890 REG_WR(bp, BAR_XSTRORM_INTMEM +
4891 ((XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
4892 U64_HI(section));
4893 REG_WR8(bp, BAR_XSTRORM_INTMEM + DEF_XSB_FUNC_OFF +
4894 XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4896 for (index = 0; index < HC_XSTORM_DEF_SB_NUM_INDICES; index++)
4897 REG_WR16(bp, BAR_XSTRORM_INTMEM +
4898 XSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
4900 bp->stats_pending = 0;
4901 bp->set_mac_pending = 0;
4903 bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
4906 static void bnx2x_update_coalesce(struct bnx2x *bp)
4908 int port = BP_PORT(bp);
4911 for_each_queue(bp, i) {
4912 int sb_id = bp->fp[i].sb_id;
4914 /* HC_INDEX_U_ETH_RX_CQ_CONS */
4915 REG_WR8(bp, BAR_CSTRORM_INTMEM +
4916 CSTORM_SB_HC_TIMEOUT_U_OFFSET(port, sb_id,
4917 U_SB_ETH_RX_CQ_INDEX),
4918 bp->rx_ticks/12);
4919 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4920 CSTORM_SB_HC_DISABLE_U_OFFSET(port, sb_id,
4921 U_SB_ETH_RX_CQ_INDEX),
4922 (bp->rx_ticks/12) ? 0 : 1);
4924 /* HC_INDEX_C_ETH_TX_CQ_CONS */
4925 REG_WR8(bp, BAR_CSTRORM_INTMEM +
4926 CSTORM_SB_HC_TIMEOUT_C_OFFSET(port, sb_id,
4927 C_SB_ETH_TX_CQ_INDEX),
4928 bp->tx_ticks/12);
4929 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4930 CSTORM_SB_HC_DISABLE_C_OFFSET(port, sb_id,
4931 C_SB_ETH_TX_CQ_INDEX),
4932 (bp->tx_ticks/12) ? 0 : 1);
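/* The /12 above converts the usec-based rx_ticks/tx_ticks settings into
 * the 12us units the storm HC timeout registers take; the paired
 * HC_DISABLE word switches the index off entirely whenever the timeout
 * would round down to zero. */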
4936 static inline void bnx2x_free_tpa_pool(struct bnx2x *bp,
4937 struct bnx2x_fastpath *fp, int last)
4941 for (i = 0; i < last; i++) {
4942 struct sw_rx_bd *rx_buf = &(fp->tpa_pool[i]);
4943 struct sk_buff *skb = rx_buf->skb;
4945 if (skb == NULL) {
4946 DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i);
4947 continue;
4948 }
4950 if (fp->tpa_state[i] == BNX2X_TPA_START)
4951 pci_unmap_single(bp->pdev,
4952 pci_unmap_addr(rx_buf, mapping),
4953 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
4955 dev_kfree_skb(skb);
4956 rx_buf->skb = NULL;
4960 static void bnx2x_init_rx_rings(struct bnx2x *bp)
4962 int func = BP_FUNC(bp);
4963 int max_agg_queues = CHIP_IS_E1(bp) ? ETH_MAX_AGGREGATION_QUEUES_E1 :
4964 ETH_MAX_AGGREGATION_QUEUES_E1H;
4965 u16 ring_prod, cqe_ring_prod;
4968 bp->rx_buf_size = bp->dev->mtu + ETH_OVREHEAD + BNX2X_RX_ALIGN;
4970 "mtu %d rx_buf_size %d\n", bp->dev->mtu, bp->rx_buf_size);
4972 if (bp->flags & TPA_ENABLE_FLAG) {
4974 for_each_rx_queue(bp, j) {
4975 struct bnx2x_fastpath *fp = &bp->fp[j];
4977 for (i = 0; i < max_agg_queues; i++) {
4978 fp->tpa_pool[i].skb =
4979 netdev_alloc_skb(bp->dev, bp->rx_buf_size);
4980 if (!fp->tpa_pool[i].skb) {
4981 BNX2X_ERR("Failed to allocate TPA "
4982 "skb pool for queue[%d] - "
4983 "disabling TPA on this "
4985 bnx2x_free_tpa_pool(bp, fp, i);
4986 fp->disable_tpa = 1;
4989 pci_unmap_addr_set((struct sw_rx_bd *)
4990 &bp->fp->tpa_pool[i],
4992 fp->tpa_state[i] = BNX2X_TPA_STOP;
4997 for_each_rx_queue(bp, j) {
4998 struct bnx2x_fastpath *fp = &bp->fp[j];
5001 fp->rx_cons_sb = BNX2X_RX_SB_INDEX;
5002 fp->rx_bd_cons_sb = BNX2X_RX_SB_BD_INDEX;
5004 /* Mark queue as Rx */
5005 fp->is_rx_queue = 1;
5007 /* "next page" elements initialization */
5009 for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
5010 struct eth_rx_sge *sge;
5012 sge = &fp->rx_sge_ring[RX_SGE_CNT * i - 2];
5013 sge->addr_hi =
5014 cpu_to_le32(U64_HI(fp->rx_sge_mapping +
5015 BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
5016 sge->addr_lo =
5017 cpu_to_le32(U64_LO(fp->rx_sge_mapping +
5018 BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
5021 bnx2x_init_sge_ring_bit_mask(fp);
5024 for (i = 1; i <= NUM_RX_RINGS; i++) {
5025 struct eth_rx_bd *rx_bd;
5027 rx_bd = &fp->rx_desc_ring[RX_DESC_CNT * i - 2];
5028 rx_bd->addr_hi =
5029 cpu_to_le32(U64_HI(fp->rx_desc_mapping +
5030 BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
5031 rx_bd->addr_lo =
5032 cpu_to_le32(U64_LO(fp->rx_desc_mapping +
5033 BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
5037 for (i = 1; i <= NUM_RCQ_RINGS; i++) {
5038 struct eth_rx_cqe_next_page *nextpg;
5040 nextpg = (struct eth_rx_cqe_next_page *)
5041 &fp->rx_comp_ring[RCQ_DESC_CNT * i - 1];
5042 nextpg->addr_hi =
5043 cpu_to_le32(U64_HI(fp->rx_comp_mapping +
5044 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
5045 nextpg->addr_lo =
5046 cpu_to_le32(U64_LO(fp->rx_comp_mapping +
5047 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
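/* All three rings use the same linking trick: the tail entries of each
 * BCM_PAGE_SIZE page hold the 64-bit bus address of the next page, split
 * into hi/lo words, e.g. for the RCQ (sketch of the computation above):
 *
 *	dma_addr_t next = fp->rx_comp_mapping +
 *			  BCM_PAGE_SIZE * (i % NUM_RCQ_RINGS);
 *	nextpg->addr_hi = cpu_to_le32(U64_HI(next));
 *	nextpg->addr_lo = cpu_to_le32(U64_LO(next));
 */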
5050 /* Allocate SGEs and initialize the ring elements */
5051 for (i = 0, ring_prod = 0;
5052 i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {
5054 if (bnx2x_alloc_rx_sge(bp, fp, ring_prod) < 0) {
5055 BNX2X_ERR("was only able to allocate "
5057 BNX2X_ERR("disabling TPA for queue[%d]\n", j);
5058 /* Cleanup already allocated elements */
5059 bnx2x_free_rx_sge_range(bp, fp, ring_prod);
5060 bnx2x_free_tpa_pool(bp, fp, max_agg_queues);
5061 fp->disable_tpa = 1;
5062 ring_prod = 0;
5063 break;
5064 }
5065 ring_prod = NEXT_SGE_IDX(ring_prod);
5067 fp->rx_sge_prod = ring_prod;
5069 /* Allocate BDs and initialize BD ring */
5070 fp->rx_comp_cons = 0;
5071 cqe_ring_prod = ring_prod = 0;
5072 for (i = 0; i < bp->rx_ring_size; i++) {
5073 if (bnx2x_alloc_rx_skb(bp, fp, ring_prod) < 0) {
5074 BNX2X_ERR("was only able to allocate "
5075 "%d rx skbs on queue[%d]\n", i, j);
5076 fp->eth_q_stats.rx_skb_alloc_failed++;
5077 break;
5078 }
5079 ring_prod = NEXT_RX_IDX(ring_prod);
5080 cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
5081 WARN_ON(ring_prod <= i);
5084 fp->rx_bd_prod = ring_prod;
5085 /* must not have more available CQEs than BDs */
5086 fp->rx_comp_prod = min((u16)(NUM_RCQ_RINGS*RCQ_DESC_CNT),
5088 fp->rx_pkt = fp->rx_calls = 0;
5090 /* Warning!
5091 * this will generate an interrupt (to the TSTORM)
5092 * must only be done after chip is initialized
5093 */
5094 bnx2x_update_rx_prod(bp, fp, ring_prod, fp->rx_comp_prod,
5095 fp->rx_sge_prod);
5097 if (j != 0)
5098 continue;
5099 REG_WR(bp, BAR_USTRORM_INTMEM +
5100 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
5101 U64_LO(fp->rx_comp_mapping));
5102 REG_WR(bp, BAR_USTRORM_INTMEM +
5103 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
5104 U64_HI(fp->rx_comp_mapping));
5108 static void bnx2x_init_tx_ring(struct bnx2x *bp)
5112 for_each_tx_queue(bp, j) {
5113 struct bnx2x_fastpath *fp = &bp->fp[j];
5115 for (i = 1; i <= NUM_TX_RINGS; i++) {
5116 struct eth_tx_next_bd *tx_next_bd =
5117 &fp->tx_desc_ring[TX_DESC_CNT * i - 1].next_bd;
5119 tx_next_bd->addr_hi =
5120 cpu_to_le32(U64_HI(fp->tx_desc_mapping +
5121 BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
5122 tx_next_bd->addr_lo =
5123 cpu_to_le32(U64_LO(fp->tx_desc_mapping +
5124 BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
5127 fp->tx_db.data.header.header = DOORBELL_HDR_DB_TYPE;
5128 fp->tx_db.data.zero_fill1 = 0;
5129 fp->tx_db.data.prod = 0;
5131 fp->tx_pkt_prod = 0;
5132 fp->tx_pkt_cons = 0;
5133 fp->tx_bd_prod = 0;
5134 fp->tx_bd_cons = 0;
5135 fp->tx_cons_sb = BNX2X_TX_SB_INDEX;
5139 /* clean tx statistics */
5140 for_each_rx_queue(bp, i)
5141 bnx2x_fp(bp, i, tx_pkt) = 0;
5144 static void bnx2x_init_sp_ring(struct bnx2x *bp)
5146 int func = BP_FUNC(bp);
5148 spin_lock_init(&bp->spq_lock);
5150 bp->spq_left = MAX_SPQ_PENDING;
5151 bp->spq_prod_idx = 0;
5152 bp->dsb_sp_prod = BNX2X_SP_DSB_INDEX;
5153 bp->spq_prod_bd = bp->spq;
5154 bp->spq_last_bd = bp->spq_prod_bd + MAX_SP_DESC_CNT;
5156 REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func),
5157 U64_LO(bp->spq_mapping));
5158 REG_WR(bp,
5159 XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func) + 4,
5160 U64_HI(bp->spq_mapping));
5162 REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PROD_OFFSET(func),
5163 bp->spq_prod_idx);
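/* Publishing the producer to XSTORM lets the firmware consume slowpath
 * (ramrod) entries as soon as bnx2x_sp_post() advances bp->spq_prod_idx. */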
5166 static void bnx2x_init_context(struct bnx2x *bp)
5170 for_each_rx_queue(bp, i) {
5171 struct eth_context *context = bnx2x_sp(bp, context[i].eth);
5172 struct bnx2x_fastpath *fp = &bp->fp[i];
5173 u8 cl_id = fp->cl_id;
5175 context->ustorm_st_context.common.sb_index_numbers =
5176 BNX2X_RX_SB_INDEX_NUM;
5177 context->ustorm_st_context.common.clientId = cl_id;
5178 context->ustorm_st_context.common.status_block_id = fp->sb_id;
5179 context->ustorm_st_context.common.flags =
5180 (USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_MC_ALIGNMENT |
5181 USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_STATISTICS);
5182 context->ustorm_st_context.common.statistics_counter_id =
5183 cl_id;
5184 context->ustorm_st_context.common.mc_alignment_log_size =
5185 BNX2X_RX_ALIGN_SHIFT;
5186 context->ustorm_st_context.common.bd_buff_size =
5187 bp->rx_buf_size;
5188 context->ustorm_st_context.common.bd_page_base_hi =
5189 U64_HI(fp->rx_desc_mapping);
5190 context->ustorm_st_context.common.bd_page_base_lo =
5191 U64_LO(fp->rx_desc_mapping);
5192 if (!fp->disable_tpa) {
5193 context->ustorm_st_context.common.flags |=
5194 USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_TPA;
5195 context->ustorm_st_context.common.sge_buff_size =
5196 (u16)min((u32)SGE_PAGE_SIZE*PAGES_PER_SGE,
5197 (u32)0xffff);
5198 context->ustorm_st_context.common.sge_page_base_hi =
5199 U64_HI(fp->rx_sge_mapping);
5200 context->ustorm_st_context.common.sge_page_base_lo =
5201 U64_LO(fp->rx_sge_mapping);
5203 context->ustorm_st_context.common.max_sges_for_packet =
5204 SGE_PAGE_ALIGN(bp->dev->mtu) >> SGE_PAGE_SHIFT;
5205 context->ustorm_st_context.common.max_sges_for_packet =
5206 ((context->ustorm_st_context.common.
5207 max_sges_for_packet + PAGES_PER_SGE - 1) &
5208 (~(PAGES_PER_SGE - 1))) >> PAGES_PER_SGE_SHIFT;
5211 context->ustorm_ag_context.cdu_usage =
5212 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
5213 CDU_REGION_NUMBER_UCM_AG,
5214 ETH_CONNECTION_TYPE);
5216 context->xstorm_ag_context.cdu_reserved =
5217 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
5218 CDU_REGION_NUMBER_XCM_AG,
5219 ETH_CONNECTION_TYPE);
5222 for_each_tx_queue(bp, i) {
5223 struct bnx2x_fastpath *fp = &bp->fp[i];
5224 struct eth_context *context =
5225 bnx2x_sp(bp, context[i - bp->num_rx_queues].eth);
5227 context->cstorm_st_context.sb_index_number =
5228 C_SB_ETH_TX_CQ_INDEX;
5229 context->cstorm_st_context.status_block_id = fp->sb_id;
5231 context->xstorm_st_context.tx_bd_page_base_hi =
5232 U64_HI(fp->tx_desc_mapping);
5233 context->xstorm_st_context.tx_bd_page_base_lo =
5234 U64_LO(fp->tx_desc_mapping);
5235 context->xstorm_st_context.statistics_data = (fp->cl_id |
5236 XSTORM_ETH_ST_CONTEXT_STATISTICS_ENABLE);
5240 static void bnx2x_init_ind_table(struct bnx2x *bp)
5242 int func = BP_FUNC(bp);
5245 if (bp->multi_mode == ETH_RSS_MODE_DISABLED)
5246 return;
5248 DP(NETIF_MSG_IFUP,
5249 "Initializing indirection table multi_mode %d\n", bp->multi_mode);
5250 for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++)
5251 REG_WR8(bp, BAR_TSTRORM_INTMEM +
5252 TSTORM_INDIRECTION_TABLE_OFFSET(func) + i,
5253 bp->fp->cl_id + (i % bp->num_rx_queues));
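/* Example: with 4 Rx queues and a leading cl_id of 0 the table is written
 * as 0,1,2,3,0,1,2,3,... so consecutive RSS hash buckets land round-robin
 * on the Rx clients. */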
5256 static void bnx2x_set_client_config(struct bnx2x *bp)
5258 struct tstorm_eth_client_config tstorm_client = {0};
5259 int port = BP_PORT(bp);
5262 tstorm_client.mtu = bp->dev->mtu;
5263 tstorm_client.config_flags =
5264 (TSTORM_ETH_CLIENT_CONFIG_STATSITICS_ENABLE |
5265 TSTORM_ETH_CLIENT_CONFIG_E1HOV_REM_ENABLE);
5267 if (bp->rx_mode && bp->vlgrp && (bp->flags & HW_VLAN_RX_FLAG)) {
5268 tstorm_client.config_flags |=
5269 TSTORM_ETH_CLIENT_CONFIG_VLAN_REM_ENABLE;
5270 DP(NETIF_MSG_IFUP, "vlan removal enabled\n");
5274 for_each_queue(bp, i) {
5275 tstorm_client.statistics_counter_id = bp->fp[i].cl_id;
5277 REG_WR(bp, BAR_TSTRORM_INTMEM +
5278 TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id),
5279 ((u32 *)&tstorm_client)[0]);
5280 REG_WR(bp, BAR_TSTRORM_INTMEM +
5281 TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id) + 4,
5282 ((u32 *)&tstorm_client)[1]);
5285 DP(BNX2X_MSG_OFF, "tstorm_client: 0x%08x 0x%08x\n",
5286 ((u32 *)&tstorm_client)[0], ((u32 *)&tstorm_client)[1]);
5289 static void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
5291 struct tstorm_eth_mac_filter_config tstorm_mac_filter = {0};
5292 int mode = bp->rx_mode;
5293 int mask = bp->rx_mode_cl_mask;
5294 int func = BP_FUNC(bp);
5295 int port = BP_PORT(bp);
5297 /* All but management unicast packets should pass to the host as well */
5298 u32 llh_mask =
5299 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_BRCST |
5300 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_MLCST |
5301 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_VLAN |
5302 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_NO_VLAN;
5304 DP(NETIF_MSG_IFUP, "rx mode %d mask 0x%x\n", mode, mask);
5306 switch (mode) {
5307 case BNX2X_RX_MODE_NONE: /* no Rx */
5308 tstorm_mac_filter.ucast_drop_all = mask;
5309 tstorm_mac_filter.mcast_drop_all = mask;
5310 tstorm_mac_filter.bcast_drop_all = mask;
5311 break;
5313 case BNX2X_RX_MODE_NORMAL:
5314 tstorm_mac_filter.bcast_accept_all = mask;
5315 break;
5317 case BNX2X_RX_MODE_ALLMULTI:
5318 tstorm_mac_filter.mcast_accept_all = mask;
5319 tstorm_mac_filter.bcast_accept_all = mask;
5320 break;
5322 case BNX2X_RX_MODE_PROMISC:
5323 tstorm_mac_filter.ucast_accept_all = mask;
5324 tstorm_mac_filter.mcast_accept_all = mask;
5325 tstorm_mac_filter.bcast_accept_all = mask;
5326 /* pass management unicast packets as well */
5327 llh_mask |= NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_UNCST;
5328 break;
5330 default:
5331 BNX2X_ERR("BAD rx mode (%d)\n", mode);
5332 break;
5333 }
5335 REG_WR(bp,
5336 (port ? NIG_REG_LLH1_BRB1_DRV_MASK : NIG_REG_LLH0_BRB1_DRV_MASK),
5337 llh_mask);
5339 for (i = 0; i < sizeof(struct tstorm_eth_mac_filter_config)/4; i++) {
5340 REG_WR(bp, BAR_TSTRORM_INTMEM +
5341 TSTORM_MAC_FILTER_CONFIG_OFFSET(func) + i * 4,
5342 ((u32 *)&tstorm_mac_filter)[i]);
5344 /* DP(NETIF_MSG_IFUP, "tstorm_mac_filter[%d]: 0x%08x\n", i,
5345 ((u32 *)&tstorm_mac_filter)[i]); */
5348 if (mode != BNX2X_RX_MODE_NONE)
5349 bnx2x_set_client_config(bp);
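/* Note: the "mask" written into the drop/accept fields above is the
 * per-client bit set up in bnx2x_init_internal_func() (1 << BP_L_ID(bp)),
 * so each function only flips filtering for its own clients. */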
5352 static void bnx2x_init_internal_common(struct bnx2x *bp)
5356 /* Zero this manually as its initialization is
5357 currently missing in the initTool */
5358 for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++)
5359 REG_WR(bp, BAR_USTRORM_INTMEM +
5360 USTORM_AGG_DATA_OFFSET + i * 4, 0);
5363 static void bnx2x_init_internal_port(struct bnx2x *bp)
5365 int port = BP_PORT(bp);
5367 REG_WR(bp,
5368 BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_U_OFFSET(port), BNX2X_BTR);
5369 REG_WR(bp,
5370 BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_C_OFFSET(port), BNX2X_BTR);
5371 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
5372 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
5375 static void bnx2x_init_internal_func(struct bnx2x *bp)
5377 struct tstorm_eth_function_common_config tstorm_config = {0};
5378 struct stats_indication_flags stats_flags = {0};
5379 int port = BP_PORT(bp);
5380 int func = BP_FUNC(bp);
5385 if (is_multi(bp)) {
5386 tstorm_config.config_flags = MULTI_FLAGS(bp);
5387 tstorm_config.rss_result_mask = MULTI_MASK;
5388 }
5390 /* Enable TPA if needed */
5391 if (bp->flags & TPA_ENABLE_FLAG)
5392 tstorm_config.config_flags |=
5393 TSTORM_ETH_FUNCTION_COMMON_CONFIG_ENABLE_TPA;
5395 if (IS_E1HMF(bp))
5396 tstorm_config.config_flags |=
5397 TSTORM_ETH_FUNCTION_COMMON_CONFIG_E1HOV_IN_CAM;
5399 tstorm_config.leading_client_id = BP_L_ID(bp);
5401 REG_WR(bp, BAR_TSTRORM_INTMEM +
5402 TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(func),
5403 (*(u32 *)&tstorm_config));
5405 bp->rx_mode = BNX2X_RX_MODE_NONE; /* no rx until link is up */
5406 bp->rx_mode_cl_mask = (1 << BP_L_ID(bp));
5407 bnx2x_set_storm_rx_mode(bp);
5409 for_each_queue(bp, i) {
5410 u8 cl_id = bp->fp[i].cl_id;
5412 /* reset xstorm per client statistics */
5413 offset = BAR_XSTRORM_INTMEM +
5414 XSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
5415 for (j = 0;
5416 j < sizeof(struct xstorm_per_client_stats) / 4; j++)
5417 REG_WR(bp, offset + j*4, 0);
5419 /* reset tstorm per client statistics */
5420 offset = BAR_TSTRORM_INTMEM +
5421 TSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
5422 for (j = 0;
5423 j < sizeof(struct tstorm_per_client_stats) / 4; j++)
5424 REG_WR(bp, offset + j*4, 0);
5426 /* reset ustorm per client statistics */
5427 offset = BAR_USTRORM_INTMEM +
5428 USTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
5429 for (j = 0;
5430 j < sizeof(struct ustorm_per_client_stats) / 4; j++)
5431 REG_WR(bp, offset + j*4, 0);
5434 /* Init statistics related context */
5435 stats_flags.collect_eth = 1;
5437 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func),
5438 ((u32 *)&stats_flags)[0]);
5439 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func) + 4,
5440 ((u32 *)&stats_flags)[1]);
5442 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func),
5443 ((u32 *)&stats_flags)[0]);
5444 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func) + 4,
5445 ((u32 *)&stats_flags)[1]);
5447 REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(func),
5448 ((u32 *)&stats_flags)[0]);
5449 REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(func) + 4,
5450 ((u32 *)&stats_flags)[1]);
5452 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func),
5453 ((u32 *)&stats_flags)[0]);
5454 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func) + 4,
5455 ((u32 *)&stats_flags)[1]);
5457 REG_WR(bp, BAR_XSTRORM_INTMEM +
5458 XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
5459 U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
5460 REG_WR(bp, BAR_XSTRORM_INTMEM +
5461 XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
5462 U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
5464 REG_WR(bp, BAR_TSTRORM_INTMEM +
5465 TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
5466 U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
5467 REG_WR(bp, BAR_TSTRORM_INTMEM +
5468 TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
5469 U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
5471 REG_WR(bp, BAR_USTRORM_INTMEM +
5472 USTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
5473 U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
5474 REG_WR(bp, BAR_USTRORM_INTMEM +
5475 USTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
5476 U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
5478 if (CHIP_IS_E1H(bp)) {
5479 REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNCTION_MODE_OFFSET,
5480 IS_E1HMF(bp));
5481 REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNCTION_MODE_OFFSET,
5482 IS_E1HMF(bp));
5483 REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNCTION_MODE_OFFSET,
5484 IS_E1HMF(bp));
5485 REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNCTION_MODE_OFFSET,
5486 IS_E1HMF(bp));
5488 REG_WR16(bp, BAR_XSTRORM_INTMEM + XSTORM_E1HOV_OFFSET(func),
5489 bp->e1hov);
5490 }
5492 /* Init CQ ring mapping and aggregation size, the FW limit is 8 frags */
5493 max_agg_size =
5494 min((u32)(min((u32)8, (u32)MAX_SKB_FRAGS) *
5495 SGE_PAGE_SIZE * PAGES_PER_SGE),
5496 (u32)0xffff);
5497 for_each_rx_queue(bp, i) {
5498 struct bnx2x_fastpath *fp = &bp->fp[i];
5500 REG_WR(bp, BAR_USTRORM_INTMEM +
5501 USTORM_CQE_PAGE_BASE_OFFSET(port, fp->cl_id),
5502 U64_LO(fp->rx_comp_mapping));
5503 REG_WR(bp, BAR_USTRORM_INTMEM +
5504 USTORM_CQE_PAGE_BASE_OFFSET(port, fp->cl_id) + 4,
5505 U64_HI(fp->rx_comp_mapping));
5508 REG_WR(bp, BAR_USTRORM_INTMEM +
5509 USTORM_CQE_PAGE_NEXT_OFFSET(port, fp->cl_id),
5510 U64_LO(fp->rx_comp_mapping + BCM_PAGE_SIZE));
5511 REG_WR(bp, BAR_USTRORM_INTMEM +
5512 USTORM_CQE_PAGE_NEXT_OFFSET(port, fp->cl_id) + 4,
5513 U64_HI(fp->rx_comp_mapping + BCM_PAGE_SIZE));
5515 REG_WR16(bp, BAR_USTRORM_INTMEM +
5516 USTORM_MAX_AGG_SIZE_OFFSET(port, fp->cl_id),
5517 max_agg_size);
5520 /* dropless flow control */
5521 if (CHIP_IS_E1H(bp)) {
5522 struct ustorm_eth_rx_pause_data_e1h rx_pause = {0};
5524 rx_pause.bd_thr_low = 250;
5525 rx_pause.cqe_thr_low = 250;
5527 rx_pause.sge_thr_low = 0;
5528 rx_pause.bd_thr_high = 350;
5529 rx_pause.cqe_thr_high = 350;
5530 rx_pause.sge_thr_high = 0;
5532 for_each_rx_queue(bp, i) {
5533 struct bnx2x_fastpath *fp = &bp->fp[i];
5535 if (!fp->disable_tpa) {
5536 rx_pause.sge_thr_low = 150;
5537 rx_pause.sge_thr_high = 250;
5541 offset = BAR_USTRORM_INTMEM +
5542 USTORM_ETH_RING_PAUSE_DATA_OFFSET(port,
5543 fp->cl_id);
5544 for (j = 0;
5545 j < sizeof(struct ustorm_eth_rx_pause_data_e1h)/4;
5546 j++)
5547 REG_WR(bp, offset + j*4,
5548 ((u32 *)&rx_pause)[j]);
5552 memset(&(bp->cmng), 0, sizeof(struct cmng_struct_per_port));
5554 /* Init rate shaping and fairness contexts */
5555 if (IS_E1HMF(bp)) {
5556 int vn;
5558 /* During init there is no active link
5559 Until link is up, set link rate to 10Gbps */
5560 bp->link_vars.line_speed = SPEED_10000;
5561 bnx2x_init_port_minmax(bp);
5563 if (!BP_NOMCP(bp))
5564 bp->mf_config =
5565 SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
5566 bnx2x_calc_vn_weight_sum(bp);
5568 for (vn = VN_0; vn < E1HVN_MAX; vn++)
5569 bnx2x_init_vn_minmax(bp, 2*vn + port);
5571 /* Enable rate shaping and fairness */
5572 bp->cmng.flags.cmng_enables |=
5573 CMNG_FLAGS_PER_PORT_RATE_SHAPING_VN;
5575 } else {
5576 /* rate shaping and fairness are disabled */
5577 DP(NETIF_MSG_IFUP,
5578 "single function mode minmax will be disabled\n");
5579 }
5582 /* Store it to internal memory */
5583 if (bp->port.pmf)
5584 for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++)
5585 REG_WR(bp, BAR_XSTRORM_INTMEM +
5586 XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i * 4,
5587 ((u32 *)(&bp->cmng))[i]);
5590 static void bnx2x_init_internal(struct bnx2x *bp, u32 load_code)
5592 switch (load_code) {
5593 case FW_MSG_CODE_DRV_LOAD_COMMON:
5594 bnx2x_init_internal_common(bp);
5595 /* no break */
5597 case FW_MSG_CODE_DRV_LOAD_PORT:
5598 bnx2x_init_internal_port(bp);
5599 /* no break */
5601 case FW_MSG_CODE_DRV_LOAD_FUNCTION:
5602 bnx2x_init_internal_func(bp);
5603 break;
5605 default:
5606 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
5607 break;
5608 }
5611 static void bnx2x_nic_init(struct bnx2x *bp, u32 load_code)
5615 for_each_queue(bp, i) {
5616 struct bnx2x_fastpath *fp = &bp->fp[i];
5619 fp->state = BNX2X_FP_STATE_CLOSED;
5621 fp->cl_id = BP_L_ID(bp) + i;
5622 #ifdef BCM_CNIC
5623 fp->sb_id = fp->cl_id + 1;
5624 #else
5625 fp->sb_id = fp->cl_id;
5626 #endif
5627 /* Suitable Rx and Tx SBs are served by the same client */
5628 if (i >= bp->num_rx_queues)
5629 fp->cl_id -= bp->num_rx_queues;
5631 "queue[%d]: bnx2x_init_sb(%p,%p) cl_id %d sb %d\n",
5632 i, bp, fp->status_blk, fp->cl_id, fp->sb_id);
5633 bnx2x_init_sb(bp, fp->status_blk, fp->status_blk_mapping,
5634 fp->sb_id);
5635 bnx2x_update_fpsb_idx(fp);
5638 /* ensure status block indices were read */
5639 rmb();
5642 bnx2x_init_def_sb(bp, bp->def_status_blk, bp->def_status_blk_mapping,
5643 DEF_SB_ID);
5644 bnx2x_update_dsb_idx(bp);
5645 bnx2x_update_coalesce(bp);
5646 bnx2x_init_rx_rings(bp);
5647 bnx2x_init_tx_ring(bp);
5648 bnx2x_init_sp_ring(bp);
5649 bnx2x_init_context(bp);
5650 bnx2x_init_internal(bp, load_code);
5651 bnx2x_init_ind_table(bp);
5652 bnx2x_stats_init(bp);
5654 /* At this point, we are ready for interrupts */
5655 atomic_set(&bp->intr_sem, 0);
5657 /* flush all before enabling interrupts */
5658 mb();
5659 mmiowb();
5661 bnx2x_int_enable(bp);
5663 /* Check for SPIO5 */
5664 bnx2x_attn_int_deasserted0(bp,
5665 REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + BP_PORT(bp)*4) &
5666 AEU_INPUTS_ATTN_BITS_SPIO5);
5669 /* end of nic init */
5672 * gzip service functions
5675 static int bnx2x_gunzip_init(struct bnx2x *bp)
5677 bp->gunzip_buf = pci_alloc_consistent(bp->pdev, FW_BUF_SIZE,
5678 &bp->gunzip_mapping);
5679 if (bp->gunzip_buf == NULL)
5680 goto gunzip_nomem1;
5682 bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL);
5683 if (bp->strm == NULL)
5684 goto gunzip_nomem2;
5686 bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(),
5687 GFP_KERNEL);
5688 if (bp->strm->workspace == NULL)
5689 goto gunzip_nomem3;
5691 return 0;
5693 gunzip_nomem3:
5694 kfree(bp->strm);
5695 bp->strm = NULL;
5697 gunzip_nomem2:
5698 pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
5699 bp->gunzip_mapping);
5700 bp->gunzip_buf = NULL;
5702 gunzip_nomem1:
5703 printk(KERN_ERR PFX "%s: Cannot allocate firmware buffer for"
5704 " un-compression\n", bp->dev->name);
5705 return -ENOMEM;
5708 static void bnx2x_gunzip_end(struct bnx2x *bp)
5710 kfree(bp->strm->workspace);
5712 kfree(bp->strm);
5713 bp->strm = NULL;
5715 if (bp->gunzip_buf) {
5716 pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
5717 bp->gunzip_mapping);
5718 bp->gunzip_buf = NULL;
5722 static int bnx2x_gunzip(struct bnx2x *bp, const u8 *zbuf, int len)
5726 /* check gzip header */
5727 if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED)) {
5728 BNX2X_ERR("Bad gzip header\n");
5736 if (zbuf[3] & FNAME)
5737 while ((zbuf[n++] != 0) && (n < len));
5739 bp->strm->next_in = (typeof(bp->strm->next_in))zbuf + n;
5740 bp->strm->avail_in = len - n;
5741 bp->strm->next_out = bp->gunzip_buf;
5742 bp->strm->avail_out = FW_BUF_SIZE;
5744 rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
5745 if (rc != Z_OK)
5746 return rc;
5748 rc = zlib_inflate(bp->strm, Z_FINISH);
5749 if ((rc != Z_OK) && (rc != Z_STREAM_END))
5750 printk(KERN_ERR PFX "%s: Firmware decompression error: %s\n",
5751 bp->dev->name, bp->strm->msg);
5753 bp->gunzip_outlen = (FW_BUF_SIZE - bp->strm->avail_out);
5754 if (bp->gunzip_outlen & 0x3)
5755 printk(KERN_ERR PFX "%s: Firmware decompression error:"
5756 " gunzip_outlen (%d) not aligned\n",
5757 bp->dev->name, bp->gunzip_outlen);
5758 bp->gunzip_outlen >>= 2;
5760 zlib_inflateEnd(bp->strm);
5762 if (rc == Z_STREAM_END)
5763 return 0;
5764 else
5765 return rc;
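/* Typical use (sketch; the real callers live in the init code that loads
 * the firmware sections): decompress one gzipped blob into gunzip_buf and
 * then write bp->gunzip_outlen dwords from it to the chip:
 *
 *	if (bnx2x_gunzip(bp, zbuf, len) == 0)
 *		... write bp->gunzip_outlen dwords from bp->gunzip_buf ...
 */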
5768 /* nic load/unload */
5771 * General service functions
5774 /* send a NIG loopback debug packet */
5775 static void bnx2x_lb_pckt(struct bnx2x *bp)
5779 /* Ethernet source and destination addresses */
5780 wb_write[0] = 0x55555555;
5781 wb_write[1] = 0x55555555;
5782 wb_write[2] = 0x20; /* SOP */
5783 REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
5785 /* NON-IP protocol */
5786 wb_write[0] = 0x09000000;
5787 wb_write[1] = 0x55555555;
5788 wb_write[2] = 0x10; /* EOP, eop_bvalid = 0 */
5789 REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
5792 /* some of the internal memories
5793 * are not directly readable from the driver
5794 * to test them we send debug packets
5795 */
5796 static int bnx2x_int_mem_test(struct bnx2x *bp)
5797 int factor;
5798 int count, i;
5799 u32 val = 0;
5802 if (CHIP_REV_IS_FPGA(bp))
5803 factor = 120;
5804 else if (CHIP_REV_IS_EMUL(bp))
5805 factor = 200;
5806 else
5807 factor = 1;
5809 DP(NETIF_MSG_HW, "start part1\n");
5811 /* Disable inputs of parser neighbor blocks */
5812 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
5813 REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
5814 REG_WR(bp, CFC_REG_DEBUG0, 0x1);
5815 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
5817 /* Write 0 to parser credits for CFC search request */
5818 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
5820 /* send Ethernet packet */
5823 /* TODO do i reset NIG statistic? */
5824 /* Wait until NIG register shows 1 packet of size 0x10 */
5825 count = 1000 * factor;
5828 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5829 val = *bnx2x_sp(bp, wb_data[0]);
5837 BNX2X_ERR("NIG timeout val = 0x%x\n", val);
5841 /* Wait until PRS register shows 1 packet */
5842 count = 1000 * factor;
5844 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5852 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
5856 /* Reset and init BRB, PRS */
5857 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
5858 msleep(50);
5859 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
5860 msleep(50);
5861 bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
5862 bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
5864 DP(NETIF_MSG_HW, "part2\n");
5866 /* Disable inputs of parser neighbor blocks */
5867 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
5868 REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
5869 REG_WR(bp, CFC_REG_DEBUG0, 0x1);
5870 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
5872 /* Write 0 to parser credits for CFC search request */
5873 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
5875 /* send 10 Ethernet packets */
5876 for (i = 0; i < 10; i++)
5879 /* Wait until NIG register shows 10 + 1
5880 packets of size 11*0x10 = 0xb0 */
5881 count = 1000 * factor;
5884 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5885 val = *bnx2x_sp(bp, wb_data[0]);
5893 BNX2X_ERR("NIG timeout val = 0x%x\n", val);
5897 /* Wait until PRS register shows 2 packets */
5898 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5900 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
5902 /* Write 1 to parser credits for CFC search request */
5903 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x1);
5905 /* Wait until PRS register shows 3 packets */
5906 msleep(10 * factor);
5907 /* Wait until NIG register shows 1 packet of size 0x10 */
5908 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5910 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
5912 /* clear NIG EOP FIFO */
5913 for (i = 0; i < 11; i++)
5914 REG_RD(bp, NIG_REG_INGRESS_EOP_LB_FIFO);
5915 val = REG_RD(bp, NIG_REG_INGRESS_EOP_LB_EMPTY);
5917 BNX2X_ERR("clear of NIG failed\n");
5921 /* Reset and init BRB, PRS, NIG */
5922 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
5923 msleep(50);
5924 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
5925 msleep(50);
5926 bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
5927 bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
5930 REG_WR(bp, PRS_REG_NIC_MODE, 1);
5933 /* Enable inputs of parser neighbor blocks */
5934 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x7fffffff);
5935 REG_WR(bp, TCM_REG_PRS_IFEN, 0x1);
5936 REG_WR(bp, CFC_REG_DEBUG0, 0x0);
5937 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x1);
5939 DP(NETIF_MSG_HW, "done\n");
5944 static void enable_blocks_attention(struct bnx2x *bp)
5946 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
5947 REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0);
5948 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
5949 REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
5950 REG_WR(bp, QM_REG_QM_INT_MASK, 0);
5951 REG_WR(bp, TM_REG_TM_INT_MASK, 0);
5952 REG_WR(bp, XSDM_REG_XSDM_INT_MASK_0, 0);
5953 REG_WR(bp, XSDM_REG_XSDM_INT_MASK_1, 0);
5954 REG_WR(bp, XCM_REG_XCM_INT_MASK, 0);
5955 /* REG_WR(bp, XSEM_REG_XSEM_INT_MASK_0, 0); */
5956 /* REG_WR(bp, XSEM_REG_XSEM_INT_MASK_1, 0); */
5957 REG_WR(bp, USDM_REG_USDM_INT_MASK_0, 0);
5958 REG_WR(bp, USDM_REG_USDM_INT_MASK_1, 0);
5959 REG_WR(bp, UCM_REG_UCM_INT_MASK, 0);
5960 /* REG_WR(bp, USEM_REG_USEM_INT_MASK_0, 0); */
5961 /* REG_WR(bp, USEM_REG_USEM_INT_MASK_1, 0); */
5962 REG_WR(bp, GRCBASE_UPB + PB_REG_PB_INT_MASK, 0);
5963 REG_WR(bp, CSDM_REG_CSDM_INT_MASK_0, 0);
5964 REG_WR(bp, CSDM_REG_CSDM_INT_MASK_1, 0);
5965 REG_WR(bp, CCM_REG_CCM_INT_MASK, 0);
5966 /* REG_WR(bp, CSEM_REG_CSEM_INT_MASK_0, 0); */
5967 /* REG_WR(bp, CSEM_REG_CSEM_INT_MASK_1, 0); */
5968 if (CHIP_REV_IS_FPGA(bp))
5969 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x580000);
5971 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x480000);
5972 REG_WR(bp, TSDM_REG_TSDM_INT_MASK_0, 0);
5973 REG_WR(bp, TSDM_REG_TSDM_INT_MASK_1, 0);
5974 REG_WR(bp, TCM_REG_TCM_INT_MASK, 0);
5975 /* REG_WR(bp, TSEM_REG_TSEM_INT_MASK_0, 0); */
5976 /* REG_WR(bp, TSEM_REG_TSEM_INT_MASK_1, 0); */
5977 REG_WR(bp, CDU_REG_CDU_INT_MASK, 0);
5978 REG_WR(bp, DMAE_REG_DMAE_INT_MASK, 0);
5979 /* REG_WR(bp, MISC_REG_MISC_INT_MASK, 0); */
5980 REG_WR(bp, PBF_REG_PBF_INT_MASK, 0X18); /* bit 3,4 masked */
5984 static void bnx2x_reset_common(struct bnx2x *bp)
5987 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
5988 0xd3ffff7f);
5989 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, 0x1403);
5992 static void bnx2x_init_pxp(struct bnx2x *bp)
5994 u16 devctl;
5995 int r_order, w_order;
5997 pci_read_config_word(bp->pdev,
5998 bp->pcie_cap + PCI_EXP_DEVCTL, &devctl);
5999 DP(NETIF_MSG_HW, "read 0x%x from devctl\n", devctl);
6000 w_order = ((devctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5);
6001 if (bp->mrrs == -1)
6002 r_order = ((devctl & PCI_EXP_DEVCTL_READRQ) >> 12);
6003 else {
6004 DP(NETIF_MSG_HW, "force read order to %d\n", bp->mrrs);
6005 r_order = bp->mrrs;
6006 }
6008 bnx2x_init_pxp_arb(bp, r_order, w_order);
6011 static void bnx2x_setup_fan_failure_detection(struct bnx2x *bp)
6017 val = SHMEM_RD(bp, dev_info.shared_hw_config.config2) &
6018 SHARED_HW_CFG_FAN_FAILURE_MASK;
6020 if (val == SHARED_HW_CFG_FAN_FAILURE_ENABLED)
6021 is_required = 1;
6023 /*
6024 * The fan failure mechanism is usually related to the PHY type since
6025 * the power consumption of the board is affected by the PHY. Currently,
6026 * fan is required for most designs with SFX7101, BCM8727 and BCM8481.
6027 */
6028 else if (val == SHARED_HW_CFG_FAN_FAILURE_PHY_TYPE)
6029 for (port = PORT_0; port < PORT_MAX; port++) {
6030 u32 phy_type =
6031 SHMEM_RD(bp, dev_info.port_hw_config[port].
6032 external_phy_config) &
6033 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
6035 is_required |= ((phy_type ==
6036 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101) ||
6037 (phy_type ==
6038 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727) ||
6039 (phy_type ==
6040 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481));
6041 }
6043 DP(NETIF_MSG_HW, "fan detection setting: %d\n", is_required);
6045 if (is_required == 0)
6046 return;
6048 /* Fan failure is indicated by SPIO 5 */
6049 bnx2x_set_spio(bp, MISC_REGISTERS_SPIO_5,
6050 MISC_REGISTERS_SPIO_INPUT_HI_Z);
6052 /* set to active low mode */
6053 val = REG_RD(bp, MISC_REG_SPIO_INT);
6054 val |= ((1 << MISC_REGISTERS_SPIO_5) <<
6055 MISC_REGISTERS_SPIO_INT_OLD_SET_POS);
6056 REG_WR(bp, MISC_REG_SPIO_INT, val);
6058 /* enable interrupt to signal the IGU */
6059 val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN);
6060 val |= (1 << MISC_REGISTERS_SPIO_5);
6061 REG_WR(bp, MISC_REG_SPIO_EVENT_EN, val);
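/* With SPIO5 armed as above, a fan failure surfaces as an AEU attention
 * (AEU_INPUTS_ATTN_BITS_SPIO5) and is handled in
 * bnx2x_attn_int_deasserted0(); see also the explicit SPIO5 check at the
 * end of bnx2x_nic_init() above. */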
6064 static int bnx2x_init_common(struct bnx2x *bp)
6066 u32 val, i;
6071 DP(BNX2X_MSG_MCP, "starting common init func %d\n", BP_FUNC(bp));
6073 bnx2x_reset_common(bp);
6074 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0xffffffff);
6075 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, 0xfffc);
6077 bnx2x_init_block(bp, MISC_BLOCK, COMMON_STAGE);
6078 if (CHIP_IS_E1H(bp))
6079 REG_WR(bp, MISC_REG_E1HMF_MODE, IS_E1HMF(bp));
6081 REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x100);
6082 msleep(30);
6083 REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x0);
6085 bnx2x_init_block(bp, PXP_BLOCK, COMMON_STAGE);
6086 if (CHIP_IS_E1(bp)) {
6087 /* enable HW interrupt from PXP on USDM overflow
6088 bit 16 on INT_MASK_0 */
6089 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
6090 }
6092 bnx2x_init_block(bp, PXP2_BLOCK, COMMON_STAGE);
6095 #ifdef __BIG_ENDIAN
6096 REG_WR(bp, PXP2_REG_RQ_QM_ENDIAN_M, 1);
6097 REG_WR(bp, PXP2_REG_RQ_TM_ENDIAN_M, 1);
6098 REG_WR(bp, PXP2_REG_RQ_SRC_ENDIAN_M, 1);
6099 REG_WR(bp, PXP2_REG_RQ_CDU_ENDIAN_M, 1);
6100 REG_WR(bp, PXP2_REG_RQ_DBG_ENDIAN_M, 1);
6101 /* make sure this value is 0 */
6102 REG_WR(bp, PXP2_REG_RQ_HC_ENDIAN_M, 0);
6104 /* REG_WR(bp, PXP2_REG_RD_PBF_SWAP_MODE, 1); */
6105 REG_WR(bp, PXP2_REG_RD_QM_SWAP_MODE, 1);
6106 REG_WR(bp, PXP2_REG_RD_TM_SWAP_MODE, 1);
6107 REG_WR(bp, PXP2_REG_RD_SRC_SWAP_MODE, 1);
6108 REG_WR(bp, PXP2_REG_RD_CDURD_SWAP_MODE, 1);
6109 #endif
6111 REG_WR(bp, PXP2_REG_RQ_CDU_P_SIZE, 2);
6112 #ifdef BCM_CNIC
6113 REG_WR(bp, PXP2_REG_RQ_TM_P_SIZE, 5);
6114 REG_WR(bp, PXP2_REG_RQ_QM_P_SIZE, 5);
6115 REG_WR(bp, PXP2_REG_RQ_SRC_P_SIZE, 5);
6116 #endif
6118 if (CHIP_REV_IS_FPGA(bp) && CHIP_IS_E1H(bp))
6119 REG_WR(bp, PXP2_REG_PGL_TAGS_LIMIT, 0x1);
6121 /* let the HW do its magic ... */
6122 msleep(100);
6123 /* finish PXP init */
6124 val = REG_RD(bp, PXP2_REG_RQ_CFG_DONE);
6125 if (val != 1) {
6126 BNX2X_ERR("PXP2 CFG failed\n");
6127 return -EBUSY;
6128 }
6129 val = REG_RD(bp, PXP2_REG_RD_INIT_DONE);
6130 if (val != 1) {
6131 BNX2X_ERR("PXP2 RD_INIT failed\n");
6132 return -EBUSY;
6133 }
6135 REG_WR(bp, PXP2_REG_RQ_DISABLE_INPUTS, 0);
6136 REG_WR(bp, PXP2_REG_RD_DISABLE_INPUTS, 0);
6138 bnx2x_init_block(bp, DMAE_BLOCK, COMMON_STAGE);
6140 /* clean the DMAE memory */
6141 bp->dmae_ready = 1;
6142 bnx2x_init_fill(bp, TSEM_REG_PRAM, 0, 8);
6144 bnx2x_init_block(bp, TCM_BLOCK, COMMON_STAGE);
6145 bnx2x_init_block(bp, UCM_BLOCK, COMMON_STAGE);
6146 bnx2x_init_block(bp, CCM_BLOCK, COMMON_STAGE);
6147 bnx2x_init_block(bp, XCM_BLOCK, COMMON_STAGE);
6149 bnx2x_read_dmae(bp, XSEM_REG_PASSIVE_BUFFER, 3);
6150 bnx2x_read_dmae(bp, CSEM_REG_PASSIVE_BUFFER, 3);
6151 bnx2x_read_dmae(bp, TSEM_REG_PASSIVE_BUFFER, 3);
6152 bnx2x_read_dmae(bp, USEM_REG_PASSIVE_BUFFER, 3);
6154 bnx2x_init_block(bp, QM_BLOCK, COMMON_STAGE);
6159 for (i = 0; i < 64; i++) {
6160 REG_WR(bp, QM_REG_BASEADDR + i*4, 1024 * 4 * (i%16));
6161 bnx2x_init_ind_wr(bp, QM_REG_PTRTBL + i*8, wb_write, 2);
6163 if (CHIP_IS_E1H(bp)) {
6164 REG_WR(bp, QM_REG_BASEADDR_EXT_A + i*4, 1024*4*(i%16));
6165 bnx2x_init_ind_wr(bp, QM_REG_PTRTBL_EXT_A + i*8,
6166 wb_write, 2);
6167 }
6168 }
6170 /* soft reset pulse */
6171 REG_WR(bp, QM_REG_SOFT_RESET, 1);
6172 REG_WR(bp, QM_REG_SOFT_RESET, 0);
6175 bnx2x_init_block(bp, TIMERS_BLOCK, COMMON_STAGE);
6178 bnx2x_init_block(bp, DQ_BLOCK, COMMON_STAGE);
6179 REG_WR(bp, DORQ_REG_DPM_CID_OFST, BCM_PAGE_SHIFT);
6180 if (!CHIP_REV_IS_SLOW(bp)) {
6181 /* enable hw interrupt from doorbell Q */
6182 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
6183 }
6185 bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
6186 bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
6187 REG_WR(bp, PRS_REG_A_PRSU_20, 0xf);
6188 #ifndef BCM_CNIC
6189 /* set NIC mode */
6190 REG_WR(bp, PRS_REG_NIC_MODE, 1);
6191 #endif
6192 if (CHIP_IS_E1H(bp))
6193 REG_WR(bp, PRS_REG_E1HOV_MODE, IS_E1HMF(bp));
6195 bnx2x_init_block(bp, TSDM_BLOCK, COMMON_STAGE);
6196 bnx2x_init_block(bp, CSDM_BLOCK, COMMON_STAGE);
6197 bnx2x_init_block(bp, USDM_BLOCK, COMMON_STAGE);
6198 bnx2x_init_block(bp, XSDM_BLOCK, COMMON_STAGE);
6200 bnx2x_init_fill(bp, TSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
6201 bnx2x_init_fill(bp, USEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
6202 bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
6203 bnx2x_init_fill(bp, XSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
6205 bnx2x_init_block(bp, TSEM_BLOCK, COMMON_STAGE);
6206 bnx2x_init_block(bp, USEM_BLOCK, COMMON_STAGE);
6207 bnx2x_init_block(bp, CSEM_BLOCK, COMMON_STAGE);
6208 bnx2x_init_block(bp, XSEM_BLOCK, COMMON_STAGE);
6210 /* sync semi rtc */
6211 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
6212 0x80000000);
6213 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
6214 0x80000000);
6216 bnx2x_init_block(bp, UPB_BLOCK, COMMON_STAGE);
6217 bnx2x_init_block(bp, XPB_BLOCK, COMMON_STAGE);
6218 bnx2x_init_block(bp, PBF_BLOCK, COMMON_STAGE);
6220 REG_WR(bp, SRC_REG_SOFT_RST, 1);
6221 for (i = SRC_REG_KEYRSS0_0; i <= SRC_REG_KEYRSS1_9; i += 4) {
6222 REG_WR(bp, i, 0xc0cac01a);
6223 /* TODO: replace with something meaningful */
6224 }
6225 bnx2x_init_block(bp, SRCH_BLOCK, COMMON_STAGE);
6226 #ifdef BCM_CNIC
6227 REG_WR(bp, SRC_REG_KEYSEARCH_0, 0x63285672);
6228 REG_WR(bp, SRC_REG_KEYSEARCH_1, 0x24b8f2cc);
6229 REG_WR(bp, SRC_REG_KEYSEARCH_2, 0x223aef9b);
6230 REG_WR(bp, SRC_REG_KEYSEARCH_3, 0x26001e3a);
6231 REG_WR(bp, SRC_REG_KEYSEARCH_4, 0x7ae91116);
6232 REG_WR(bp, SRC_REG_KEYSEARCH_5, 0x5ce5230b);
6233 REG_WR(bp, SRC_REG_KEYSEARCH_6, 0x298d8adf);
6234 REG_WR(bp, SRC_REG_KEYSEARCH_7, 0x6eb0ff09);
6235 REG_WR(bp, SRC_REG_KEYSEARCH_8, 0x1830f82f);
6236 REG_WR(bp, SRC_REG_KEYSEARCH_9, 0x01e46be7);
6237 #endif
6238 REG_WR(bp, SRC_REG_SOFT_RST, 0);
6240 if (sizeof(union cdu_context) != 1024)
6241 /* we currently assume that a context is 1024 bytes */
6242 printk(KERN_ALERT PFX "please adjust the size of"
6243 " cdu_context(%ld)\n", (long)sizeof(union cdu_context));
6245 bnx2x_init_block(bp, CDU_BLOCK, COMMON_STAGE);
6246 val = (4 << 24) + (0 << 12) + 1024;
6247 REG_WR(bp, CDU_REG_CDU_GLOBAL_PARAMS, val);
6249 bnx2x_init_block(bp, CFC_BLOCK, COMMON_STAGE);
6250 REG_WR(bp, CFC_REG_INIT_REG, 0x7FF);
6251 /* enable context validation interrupt from CFC */
6252 REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
6254 /* set the thresholds to prevent CFC/CDU race */
6255 REG_WR(bp, CFC_REG_DEBUG0, 0x20020000);
6257 bnx2x_init_block(bp, HC_BLOCK, COMMON_STAGE);
6258 bnx2x_init_block(bp, MISC_AEU_BLOCK, COMMON_STAGE);
6260 bnx2x_init_block(bp, PXPCS_BLOCK, COMMON_STAGE);
6261 /* Reset PCIE errors for debug */
6262 REG_WR(bp, 0x2814, 0xffffffff);
6263 REG_WR(bp, 0x3820, 0xffffffff);
6265 bnx2x_init_block(bp, EMAC0_BLOCK, COMMON_STAGE);
6266 bnx2x_init_block(bp, EMAC1_BLOCK, COMMON_STAGE);
6267 bnx2x_init_block(bp, DBU_BLOCK, COMMON_STAGE);
6268 bnx2x_init_block(bp, DBG_BLOCK, COMMON_STAGE);
6270 bnx2x_init_block(bp, NIG_BLOCK, COMMON_STAGE);
6271 if (CHIP_IS_E1H(bp)) {
6272 REG_WR(bp, NIG_REG_LLH_MF_MODE, IS_E1HMF(bp));
6273 REG_WR(bp, NIG_REG_LLH_E1HOV_MODE, IS_E1HMF(bp));
6276 if (CHIP_REV_IS_SLOW(bp))
6277 msleep(200);
6279 /* finish CFC init */
6280 val = reg_poll(bp, CFC_REG_LL_INIT_DONE, 1, 100, 10);
6281 if (val != 1) {
6282 BNX2X_ERR("CFC LL_INIT failed\n");
6283 return -EBUSY;
6284 }
6285 val = reg_poll(bp, CFC_REG_AC_INIT_DONE, 1, 100, 10);
6286 if (val != 1) {
6287 BNX2X_ERR("CFC AC_INIT failed\n");
6288 return -EBUSY;
6289 }
6290 val = reg_poll(bp, CFC_REG_CAM_INIT_DONE, 1, 100, 10);
6291 if (val != 1) {
6292 BNX2X_ERR("CFC CAM_INIT failed\n");
6293 return -EBUSY;
6294 }
6295 REG_WR(bp, CFC_REG_DEBUG0, 0);
6297 /* read NIG statistic
6298 to see if this is our first up since powerup */
6299 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
6300 val = *bnx2x_sp(bp, wb_data[0]);
6302 /* do internal memory self test */
6303 if ((CHIP_IS_E1(bp)) && (val == 0) && bnx2x_int_mem_test(bp)) {
6304 BNX2X_ERR("internal mem self test failed\n");
6308 switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
6309 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
6310 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
6311 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
6312 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
6313 bp->port.need_hw_lock = 1;
6320 bnx2x_setup_fan_failure_detection(bp);
6322 /* clear PXP2 attentions */
6323 REG_RD(bp, PXP2_REG_PXP2_INT_STS_CLR_0);
6325 enable_blocks_attention(bp);
6327 if (!BP_NOMCP(bp)) {
6328 bnx2x_acquire_phy_lock(bp);
6329 bnx2x_common_init_phy(bp, bp->common.shmem_base);
6330 bnx2x_release_phy_lock(bp);
6332 BNX2X_ERR("Bootcode is missing - can not initialize link\n");
6337 static int bnx2x_init_port(struct bnx2x *bp)
6339 int port = BP_PORT(bp);
6340 int init_stage = port ? PORT1_STAGE : PORT0_STAGE;
6344 DP(BNX2X_MSG_MCP, "starting port init port %x\n", port);
6346 REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
6348 bnx2x_init_block(bp, PXP_BLOCK, init_stage);
6349 bnx2x_init_block(bp, PXP2_BLOCK, init_stage);
6351 bnx2x_init_block(bp, TCM_BLOCK, init_stage);
6352 bnx2x_init_block(bp, UCM_BLOCK, init_stage);
6353 bnx2x_init_block(bp, CCM_BLOCK, init_stage);
6354 bnx2x_init_block(bp, XCM_BLOCK, init_stage);
6357 REG_WR(bp, QM_REG_CONNNUM_0 + port*4, 1024/16 - 1);
6359 bnx2x_init_block(bp, TIMERS_BLOCK, init_stage);
6360 REG_WR(bp, TM_REG_LIN0_SCAN_TIME + port*4, 20);
6361 REG_WR(bp, TM_REG_LIN0_MAX_ACTIVE_CID + port*4, 31);
6363 bnx2x_init_block(bp, DQ_BLOCK, init_stage);
6365 bnx2x_init_block(bp, BRB1_BLOCK, init_stage);
6366 if (CHIP_REV_IS_SLOW(bp) && !CHIP_IS_E1H(bp)) {
6367 /* no pause for emulation and FPGA */
6372 low = ((bp->flags & ONE_PORT_FLAG) ? 160 : 246);
6373 else if (bp->dev->mtu > 4096) {
6374 if (bp->flags & ONE_PORT_FLAG)
6378 /* (24*1024 + val*4)/256 */
6379 low = 96 + (val/64) + ((val % 64) ? 1 : 0);
6382 low = ((bp->flags & ONE_PORT_FLAG) ? 80 : 160);
6383 high = low + 56; /* 14*1024/256 */
6385 REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_0 + port*4, low);
6386 REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_0 + port*4, high);
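/* Note: the BRB thresholds appear to be counted in 256-byte blocks:
 * with the default two-port values above, low = 160 blocks (40KB) and
 * high = low + 56 blocks (14KB more); the MTU > 4096 branch computes
 * low as (24*1024 + val*4)/256 rounded up, with val presumably
 * holding the device MTU.
 */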
6389 bnx2x_init_block(bp, PRS_BLOCK, init_stage);
6391 bnx2x_init_block(bp, TSDM_BLOCK, init_stage);
6392 bnx2x_init_block(bp, CSDM_BLOCK, init_stage);
6393 bnx2x_init_block(bp, USDM_BLOCK, init_stage);
6394 bnx2x_init_block(bp, XSDM_BLOCK, init_stage);
6396 bnx2x_init_block(bp, TSEM_BLOCK, init_stage);
6397 bnx2x_init_block(bp, USEM_BLOCK, init_stage);
6398 bnx2x_init_block(bp, CSEM_BLOCK, init_stage);
6399 bnx2x_init_block(bp, XSEM_BLOCK, init_stage);
6401 bnx2x_init_block(bp, UPB_BLOCK, init_stage);
6402 bnx2x_init_block(bp, XPB_BLOCK, init_stage);
6404 bnx2x_init_block(bp, PBF_BLOCK, init_stage);
6406 /* configure PBF to work without PAUSE mtu 9000 */
6407 REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 0);
6409 /* update threshold */
6410 REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, (9040/16));
6411 /* update init credit */
6412 REG_WR(bp, PBF_REG_P0_INIT_CRD + port*4, (9040/16) + 553 - 22);
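/* Note: 9040 is presumably 9000 bytes of MTU plus L2 overhead; both
 * the arbitration threshold and the initial credit appear to be
 * counted in 16-byte units, hence the division by 16.
 */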
6415 REG_WR(bp, PBF_REG_INIT_P0 + port*4, 1);
6417 REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0);
6420 bnx2x_init_block(bp, SRCH_BLOCK, init_stage);
6422 bnx2x_init_block(bp, CDU_BLOCK, init_stage);
6423 bnx2x_init_block(bp, CFC_BLOCK, init_stage);
6425 if (CHIP_IS_E1(bp)) {
6426 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
6427 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
6429 bnx2x_init_block(bp, HC_BLOCK, init_stage);
6431 bnx2x_init_block(bp, MISC_AEU_BLOCK, init_stage);
6432 /* init aeu_mask_attn_func_0/1:
6433 * - SF mode: bits 3-7 are masked. only bits 0-2 are in use
6434 * - MF mode: bit 3 is masked. bits 0-2 are in use as in SF
6435 * bits 4-7 are used for "per vn group attention" */
6436 REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4,
6437 (IS_E1HMF(bp) ? 0xF7 : 0x7));
6439 bnx2x_init_block(bp, PXPCS_BLOCK, init_stage);
6440 bnx2x_init_block(bp, EMAC0_BLOCK, init_stage);
6441 bnx2x_init_block(bp, EMAC1_BLOCK, init_stage);
6442 bnx2x_init_block(bp, DBU_BLOCK, init_stage);
6443 bnx2x_init_block(bp, DBG_BLOCK, init_stage);
6445 bnx2x_init_block(bp, NIG_BLOCK, init_stage);
6447 REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1);
6449 if (CHIP_IS_E1H(bp)) {
6450 /* 0x2 disable e1hov, 0x1 enable */
6451 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK_MF + port*4,
6452 (IS_E1HMF(bp) ? 0x1 : 0x2));
6455 REG_WR(bp, NIG_REG_LLFC_ENABLE_0 + port*4, 0);
6456 REG_WR(bp, NIG_REG_LLFC_OUT_EN_0 + port*4, 0);
6457 REG_WR(bp, NIG_REG_PAUSE_ENABLE_0 + port*4, 1);
6461 bnx2x_init_block(bp, MCP_BLOCK, init_stage);
6462 bnx2x_init_block(bp, DMAE_BLOCK, init_stage);
6464 switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
6465 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
6467 u32 swap_val, swap_override, aeu_gpio_mask, offset;
6469 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_3,
6470 MISC_REGISTERS_GPIO_INPUT_HI_Z, port);
6472 /* The GPIO should be swapped if the swap register is
6473 set and active */
6474 swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
6475 swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
6477 /* Select function upon port-swap configuration */
6479 offset = MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0;
6480 aeu_gpio_mask = (swap_val && swap_override) ?
6481 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1 :
6482 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0;
6484 offset = MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0;
6485 aeu_gpio_mask = (swap_val && swap_override) ?
6486 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 :
6487 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1;
6489 val = REG_RD(bp, offset);
6490 /* add GPIO3 to group */
6491 val |= aeu_gpio_mask;
6492 REG_WR(bp, offset, val);
6496 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
6497 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
6498 /* add SPIO 5 to group 0 */
6500 u32 reg_addr = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
6501 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
6502 val = REG_RD(bp, reg_addr);
6503 val |= AEU_INPUTS_ATTN_BITS_SPIO5;
6504 REG_WR(bp, reg_addr, val);
6512 bnx2x__link_reset(bp);
6517 #define ILT_PER_FUNC (768/2)
6518 #define FUNC_ILT_BASE(func) (func * ILT_PER_FUNC)
6519 /* the phys address is shifted right 12 bits and has a
6520 1=valid bit added at the 53rd bit
6521 then since this is a wide register(TM)
6522 we split it into two 32 bit writes
6523 */
6524 #define ONCHIP_ADDR1(x) ((u32)(((u64)x >> 12) & 0xFFFFFFFF))
6525 #define ONCHIP_ADDR2(x) ((u32)((1 << 20) | ((u64)x >> 44)))
6526 #define PXP_ONE_ILT(x) (((x) << 10) | x)
6527 #define PXP_ILT_RANGE(f, l) (((l) << 10) | f)
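/* Worked example (hypothetical address): for a DMA address of
 * 0x80000000, ONCHIP_ADDR1() = low 32 bits of (0x80000000 >> 12)
 * = 0x00080000, and ONCHIP_ADDR2() = (1 << 20) | (0x80000000 >> 44)
 * = 0x00100000, i.e. only the valid bit set; the two values are then
 * written as the halves of one wide (64-bit) register.
 */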
6530 #define CNIC_ILT_LINES 127
6531 #define CNIC_CTX_PER_ILT 16
6533 #define CNIC_ILT_LINES 0
6536 static void bnx2x_ilt_wr(struct bnx2x *bp, u32 index, dma_addr_t addr)
6540 if (CHIP_IS_E1H(bp))
6541 reg = PXP2_REG_RQ_ONCHIP_AT_B0 + index*8;
6543 reg = PXP2_REG_RQ_ONCHIP_AT + index*8;
6545 bnx2x_wb_wr(bp, reg, ONCHIP_ADDR1(addr), ONCHIP_ADDR2(addr));
6548 static int bnx2x_init_func(struct bnx2x *bp)
6550 int port = BP_PORT(bp);
6551 int func = BP_FUNC(bp);
6555 DP(BNX2X_MSG_MCP, "starting func init func %x\n", func);
6557 /* set MSI reconfigure capability */
6558 addr = (port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0);
6559 val = REG_RD(bp, addr);
6560 val |= HC_CONFIG_0_REG_MSI_ATTN_EN_0;
6561 REG_WR(bp, addr, val);
6563 i = FUNC_ILT_BASE(func);
6565 bnx2x_ilt_wr(bp, i, bnx2x_sp_mapping(bp, context));
6566 if (CHIP_IS_E1H(bp)) {
6567 REG_WR(bp, PXP2_REG_RQ_CDU_FIRST_ILT, i);
6568 REG_WR(bp, PXP2_REG_RQ_CDU_LAST_ILT, i + CNIC_ILT_LINES);
6570 REG_WR(bp, PXP2_REG_PSWRQ_CDU0_L2P + func*4,
6571 PXP_ILT_RANGE(i, i + CNIC_ILT_LINES));
6574 i += 1 + CNIC_ILT_LINES;
6575 bnx2x_ilt_wr(bp, i, bp->timers_mapping);
6577 REG_WR(bp, PXP2_REG_PSWRQ_TM0_L2P + func*4, PXP_ONE_ILT(i));
6579 REG_WR(bp, PXP2_REG_RQ_TM_FIRST_ILT, i);
6580 REG_WR(bp, PXP2_REG_RQ_TM_LAST_ILT, i);
6584 bnx2x_ilt_wr(bp, i, bp->qm_mapping);
6586 REG_WR(bp, PXP2_REG_PSWRQ_QM0_L2P + func*4, PXP_ONE_ILT(i));
6588 REG_WR(bp, PXP2_REG_RQ_QM_FIRST_ILT, i);
6589 REG_WR(bp, PXP2_REG_RQ_QM_LAST_ILT, i);
6593 bnx2x_ilt_wr(bp, i, bp->t1_mapping);
6595 REG_WR(bp, PXP2_REG_PSWRQ_SRC0_L2P + func*4, PXP_ONE_ILT(i));
6597 REG_WR(bp, PXP2_REG_RQ_SRC_FIRST_ILT, i);
6598 REG_WR(bp, PXP2_REG_RQ_SRC_LAST_ILT, i);
6601 /* tell the searcher where the T2 table is */
6602 REG_WR(bp, SRC_REG_COUNTFREE0 + port*4, 16*1024/64);
6604 bnx2x_wb_wr(bp, SRC_REG_FIRSTFREE0 + port*16,
6605 U64_LO(bp->t2_mapping), U64_HI(bp->t2_mapping));
6607 bnx2x_wb_wr(bp, SRC_REG_LASTFREE0 + port*16,
6608 U64_LO((u64)bp->t2_mapping + 16*1024 - 64),
6609 U64_HI((u64)bp->t2_mapping + 16*1024 - 64));
6611 REG_WR(bp, SRC_REG_NUMBER_HASH_BITS0 + port*4, 10);
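/* Note: this programs the searcher's free list: 16K/64 = 256 elements
 * of 64 bytes each (COUNTFREE), with FIRSTFREE and LASTFREE holding
 * the physical addresses of the first and last elements; the 10 hash
 * bits match the 1024 connections assumed elsewhere in this file.
 */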
6614 if (CHIP_IS_E1H(bp)) {
6615 bnx2x_init_block(bp, MISC_BLOCK, FUNC0_STAGE + func);
6616 bnx2x_init_block(bp, TCM_BLOCK, FUNC0_STAGE + func);
6617 bnx2x_init_block(bp, UCM_BLOCK, FUNC0_STAGE + func);
6618 bnx2x_init_block(bp, CCM_BLOCK, FUNC0_STAGE + func);
6619 bnx2x_init_block(bp, XCM_BLOCK, FUNC0_STAGE + func);
6620 bnx2x_init_block(bp, TSEM_BLOCK, FUNC0_STAGE + func);
6621 bnx2x_init_block(bp, USEM_BLOCK, FUNC0_STAGE + func);
6622 bnx2x_init_block(bp, CSEM_BLOCK, FUNC0_STAGE + func);
6623 bnx2x_init_block(bp, XSEM_BLOCK, FUNC0_STAGE + func);
6625 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
6626 REG_WR(bp, NIG_REG_LLH0_FUNC_VLAN_ID + port*8, bp->e1hov);
6629 /* HC init per function */
6630 if (CHIP_IS_E1H(bp)) {
6631 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
6633 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
6634 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
6636 bnx2x_init_block(bp, HC_BLOCK, FUNC0_STAGE + func);
6638 /* Reset PCIE errors for debug */
6639 REG_WR(bp, 0x2114, 0xffffffff);
6640 REG_WR(bp, 0x2120, 0xffffffff);
6645 static int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
6649 DP(BNX2X_MSG_MCP, "function %d load_code %x\n",
6650 BP_FUNC(bp), load_code);
6653 mutex_init(&bp->dmae_mutex);
6654 rc = bnx2x_gunzip_init(bp);
6658 switch (load_code) {
6659 case FW_MSG_CODE_DRV_LOAD_COMMON:
6660 rc = bnx2x_init_common(bp);
6665 case FW_MSG_CODE_DRV_LOAD_PORT:
6667 rc = bnx2x_init_port(bp);
6672 case FW_MSG_CODE_DRV_LOAD_FUNCTION:
6674 rc = bnx2x_init_func(bp);
6680 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
6684 if (!BP_NOMCP(bp)) {
6685 int func = BP_FUNC(bp);
6687 bp->fw_drv_pulse_wr_seq =
6688 (SHMEM_RD(bp, func_mb[func].drv_pulse_mb) &
6689 DRV_PULSE_SEQ_MASK);
6690 DP(BNX2X_MSG_MCP, "drv_pulse 0x%x\n", bp->fw_drv_pulse_wr_seq);
6693 /* this needs to be done before gunzip end */
6694 bnx2x_zero_def_sb(bp);
6695 for_each_queue(bp, i)
6696 bnx2x_zero_sb(bp, BP_L_ID(bp) + i);
6698 bnx2x_zero_sb(bp, BP_L_ID(bp) + i);
6702 bnx2x_gunzip_end(bp);
6707 static void bnx2x_free_mem(struct bnx2x *bp)
6710 #define BNX2X_PCI_FREE(x, y, size) \
6713 pci_free_consistent(bp->pdev, size, x, y); \
6719 #define BNX2X_FREE(x) \
6731 for_each_queue(bp, i) {
6734 BNX2X_PCI_FREE(bnx2x_fp(bp, i, status_blk),
6735 bnx2x_fp(bp, i, status_blk_mapping),
6736 sizeof(struct host_status_block));
6739 for_each_rx_queue(bp, i) {
6741 /* fastpath rx rings: rx_buf rx_desc rx_comp */
6742 BNX2X_FREE(bnx2x_fp(bp, i, rx_buf_ring));
6743 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_desc_ring),
6744 bnx2x_fp(bp, i, rx_desc_mapping),
6745 sizeof(struct eth_rx_bd) * NUM_RX_BD);
6747 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_comp_ring),
6748 bnx2x_fp(bp, i, rx_comp_mapping),
6749 sizeof(struct eth_fast_path_rx_cqe) *
6753 BNX2X_FREE(bnx2x_fp(bp, i, rx_page_ring));
6754 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_sge_ring),
6755 bnx2x_fp(bp, i, rx_sge_mapping),
6756 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
6759 for_each_tx_queue(bp, i) {
6761 /* fastpath tx rings: tx_buf tx_desc */
6762 BNX2X_FREE(bnx2x_fp(bp, i, tx_buf_ring));
6763 BNX2X_PCI_FREE(bnx2x_fp(bp, i, tx_desc_ring),
6764 bnx2x_fp(bp, i, tx_desc_mapping),
6765 sizeof(union eth_tx_bd_types) * NUM_TX_BD);
6767 /* end of fastpath */
6769 BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping,
6770 sizeof(struct host_def_status_block));
6772 BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping,
6773 sizeof(struct bnx2x_slowpath));
6776 BNX2X_PCI_FREE(bp->t1, bp->t1_mapping, 64*1024);
6777 BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, 16*1024);
6778 BNX2X_PCI_FREE(bp->timers, bp->timers_mapping, 8*1024);
6779 BNX2X_PCI_FREE(bp->qm, bp->qm_mapping, 128*1024);
6780 BNX2X_PCI_FREE(bp->cnic_sb, bp->cnic_sb_mapping,
6781 sizeof(struct host_status_block));
6783 BNX2X_PCI_FREE(bp->spq, bp->spq_mapping, BCM_PAGE_SIZE);
6785 #undef BNX2X_PCI_FREE
6789 static int bnx2x_alloc_mem(struct bnx2x *bp)
6792 #define BNX2X_PCI_ALLOC(x, y, size) \
6794 x = pci_alloc_consistent(bp->pdev, size, y); \
6796 goto alloc_mem_err; \
6797 memset(x, 0, size); \
6800 #define BNX2X_ALLOC(x, size) \
6802 x = vmalloc(size); \
6804 goto alloc_mem_err; \
6805 memset(x, 0, size); \
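/* Note: BNX2X_PCI_ALLOC hands out zeroed DMA-coherent memory that the
 * chip itself reads and writes (status blocks, BD rings), while
 * BNX2X_ALLOC uses vmalloc() for host-only shadow structures such as
 * the sw_rx_bd/sw_tx_bd rings below.
 */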
6812 for_each_queue(bp, i) {
6813 bnx2x_fp(bp, i, bp) = bp;
6816 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, status_blk),
6817 &bnx2x_fp(bp, i, status_blk_mapping),
6818 sizeof(struct host_status_block));
6821 for_each_rx_queue(bp, i) {
6823 /* fastpath rx rings: rx_buf rx_desc rx_comp */
6824 BNX2X_ALLOC(bnx2x_fp(bp, i, rx_buf_ring),
6825 sizeof(struct sw_rx_bd) * NUM_RX_BD);
6826 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_desc_ring),
6827 &bnx2x_fp(bp, i, rx_desc_mapping),
6828 sizeof(struct eth_rx_bd) * NUM_RX_BD);
6830 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_comp_ring),
6831 &bnx2x_fp(bp, i, rx_comp_mapping),
6832 sizeof(struct eth_fast_path_rx_cqe) *
6836 BNX2X_ALLOC(bnx2x_fp(bp, i, rx_page_ring),
6837 sizeof(struct sw_rx_page) * NUM_RX_SGE);
6838 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_sge_ring),
6839 &bnx2x_fp(bp, i, rx_sge_mapping),
6840 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
6843 for_each_tx_queue(bp, i) {
6845 /* fastpath tx rings: tx_buf tx_desc */
6846 BNX2X_ALLOC(bnx2x_fp(bp, i, tx_buf_ring),
6847 sizeof(struct sw_tx_bd) * NUM_TX_BD);
6848 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, tx_desc_ring),
6849 &bnx2x_fp(bp, i, tx_desc_mapping),
6850 sizeof(union eth_tx_bd_types) * NUM_TX_BD);
6852 /* end of fastpath */
6854 BNX2X_PCI_ALLOC(bp->def_status_blk, &bp->def_status_blk_mapping,
6855 sizeof(struct host_def_status_block));
6857 BNX2X_PCI_ALLOC(bp->slowpath, &bp->slowpath_mapping,
6858 sizeof(struct bnx2x_slowpath));
6861 BNX2X_PCI_ALLOC(bp->t1, &bp->t1_mapping, 64*1024);
6863 /* allocate searcher T2 table
6864 we allocate 1/4 of alloc num for T2
6865 (which is not entered into the ILT) */
6866 BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, 16*1024);
6868 /* Initialize T2 (for 1024 connections) */
6869 for (i = 0; i < 16*1024; i += 64)
6870 *(u64 *)((char *)bp->t2 + i + 56) = bp->t2_mapping + i + 64;
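/* Note: the loop above chains the T2 elements into a free list - the
 * last 8 bytes of each 64-byte element hold the physical address of
 * the element that follows it.
 */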
6872 /* Timer block array (8*MAX_CONN) phys uncached for now 1024 conns */
6873 BNX2X_PCI_ALLOC(bp->timers, &bp->timers_mapping, 8*1024);
6875 /* QM queues (128*MAX_CONN) */
6876 BNX2X_PCI_ALLOC(bp->qm, &bp->qm_mapping, 128*1024);
6878 BNX2X_PCI_ALLOC(bp->cnic_sb, &bp->cnic_sb_mapping,
6879 sizeof(struct host_status_block));
6882 /* Slow path ring */
6883 BNX2X_PCI_ALLOC(bp->spq, &bp->spq_mapping, BCM_PAGE_SIZE);
6891 #undef BNX2X_PCI_ALLOC
6895 static void bnx2x_free_tx_skbs(struct bnx2x *bp)
6899 for_each_tx_queue(bp, i) {
6900 struct bnx2x_fastpath *fp = &bp->fp[i];
6902 u16 bd_cons = fp->tx_bd_cons;
6903 u16 sw_prod = fp->tx_pkt_prod;
6904 u16 sw_cons = fp->tx_pkt_cons;
6906 while (sw_cons != sw_prod) {
6907 bd_cons = bnx2x_free_tx_pkt(bp, fp, TX_BD(sw_cons));
6913 static void bnx2x_free_rx_skbs(struct bnx2x *bp)
6917 for_each_rx_queue(bp, j) {
6918 struct bnx2x_fastpath *fp = &bp->fp[j];
6920 for (i = 0; i < NUM_RX_BD; i++) {
6921 struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
6922 struct sk_buff *skb = rx_buf->skb;
6927 pci_unmap_single(bp->pdev,
6928 pci_unmap_addr(rx_buf, mapping),
6929 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
6934 if (!fp->disable_tpa)
6935 bnx2x_free_tpa_pool(bp, fp, CHIP_IS_E1(bp) ?
6936 ETH_MAX_AGGREGATION_QUEUES_E1 :
6937 ETH_MAX_AGGREGATION_QUEUES_E1H);
6941 static void bnx2x_free_skbs(struct bnx2x *bp)
6943 bnx2x_free_tx_skbs(bp);
6944 bnx2x_free_rx_skbs(bp);
6947 static void bnx2x_free_msix_irqs(struct bnx2x *bp)
6951 free_irq(bp->msix_table[0].vector, bp->dev);
6952 DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
6953 bp->msix_table[0].vector);
6958 for_each_queue(bp, i) {
6959 DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq "
6960 "state %x\n", i, bp->msix_table[i + offset].vector,
6961 bnx2x_fp(bp, i, state));
6963 free_irq(bp->msix_table[i + offset].vector, &bp->fp[i]);
6967 static void bnx2x_free_irq(struct bnx2x *bp)
6969 if (bp->flags & USING_MSIX_FLAG) {
6970 bnx2x_free_msix_irqs(bp);
6971 pci_disable_msix(bp->pdev);
6972 bp->flags &= ~USING_MSIX_FLAG;
6974 } else if (bp->flags & USING_MSI_FLAG) {
6975 free_irq(bp->pdev->irq, bp->dev);
6976 pci_disable_msi(bp->pdev);
6977 bp->flags &= ~USING_MSI_FLAG;
6980 free_irq(bp->pdev->irq, bp->dev);
6983 static int bnx2x_enable_msix(struct bnx2x *bp)
6985 int i, rc, offset = 1;
6988 bp->msix_table[0].entry = igu_vec;
6989 DP(NETIF_MSG_IFUP, "msix_table[0].entry = %d (slowpath)\n", igu_vec);
6992 igu_vec = BP_L_ID(bp) + offset;
6993 bp->msix_table[1].entry = igu_vec;
6994 DP(NETIF_MSG_IFUP, "msix_table[1].entry = %d (CNIC)\n", igu_vec);
6997 for_each_queue(bp, i) {
6998 igu_vec = BP_L_ID(bp) + offset + i;
6999 bp->msix_table[i + offset].entry = igu_vec;
7000 DP(NETIF_MSG_IFUP, "msix_table[%d].entry = %d "
7001 "(fastpath #%u)\n", i + offset, igu_vec, i);
7004 rc = pci_enable_msix(bp->pdev, &bp->msix_table[0],
7005 BNX2X_NUM_QUEUES(bp) + offset);
7007 DP(NETIF_MSG_IFUP, "MSI-X is not attainable rc %d\n", rc);
7011 bp->flags |= USING_MSIX_FLAG;
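/* Note on the vector layout built above: entry 0 carries the slowpath
 * (default status block) interrupt, entry 1 appears to be reserved for
 * CNIC when that support is compiled in, and the remaining entries map
 * one IGU vector per fastpath queue.
 */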
7016 static int bnx2x_req_msix_irqs(struct bnx2x *bp)
7018 int i, rc, offset = 1;
7020 rc = request_irq(bp->msix_table[0].vector, bnx2x_msix_sp_int, 0,
7021 bp->dev->name, bp->dev);
7023 BNX2X_ERR("request sp irq failed\n");
7030 for_each_queue(bp, i) {
7031 struct bnx2x_fastpath *fp = &bp->fp[i];
7033 if (i < bp->num_rx_queues)
7034 sprintf(fp->name, "%s-rx-%d", bp->dev->name, i);
7036 sprintf(fp->name, "%s-tx-%d",
7037 bp->dev->name, i - bp->num_rx_queues);
7039 rc = request_irq(bp->msix_table[i + offset].vector,
7040 bnx2x_msix_fp_int, 0, fp->name, fp);
7042 BNX2X_ERR("request fp #%d irq failed rc %d\n", i, rc);
7043 bnx2x_free_msix_irqs(bp);
7047 fp->state = BNX2X_FP_STATE_IRQ;
7050 i = BNX2X_NUM_QUEUES(bp);
7051 printk(KERN_INFO PFX "%s: using MSI-X IRQs: sp %d fp[%d] %d"
7053 bp->dev->name, bp->msix_table[0].vector,
7054 0, bp->msix_table[offset].vector,
7055 i - 1, bp->msix_table[offset + i - 1].vector);
7060 static int bnx2x_enable_msi(struct bnx2x *bp)
7064 rc = pci_enable_msi(bp->pdev);
7066 DP(NETIF_MSG_IFUP, "MSI is not attainable\n");
7069 bp->flags |= USING_MSI_FLAG;
7074 static int bnx2x_req_irq(struct bnx2x *bp)
7076 unsigned long flags;
7079 if (bp->flags & USING_MSI_FLAG)
7082 flags = IRQF_SHARED;
7084 rc = request_irq(bp->pdev->irq, bnx2x_interrupt, flags,
7085 bp->dev->name, bp->dev);
7087 bnx2x_fp(bp, 0, state) = BNX2X_FP_STATE_IRQ;
7092 static void bnx2x_napi_enable(struct bnx2x *bp)
7096 for_each_rx_queue(bp, i)
7097 napi_enable(&bnx2x_fp(bp, i, napi));
7100 static void bnx2x_napi_disable(struct bnx2x *bp)
7104 for_each_rx_queue(bp, i)
7105 napi_disable(&bnx2x_fp(bp, i, napi));
7108 static void bnx2x_netif_start(struct bnx2x *bp)
7112 intr_sem = atomic_dec_and_test(&bp->intr_sem);
7113 smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */
7116 if (netif_running(bp->dev)) {
7117 bnx2x_napi_enable(bp);
7118 bnx2x_int_enable(bp);
7119 if (bp->state == BNX2X_STATE_OPEN)
7120 netif_tx_wake_all_queues(bp->dev);
7125 static void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
7127 bnx2x_int_disable_sync(bp, disable_hw);
7128 bnx2x_napi_disable(bp);
7129 netif_tx_disable(bp->dev);
7130 bp->dev->trans_start = jiffies; /* prevent tx timeout */
7134 * Init service functions
7138 * Sets a MAC in a CAM for a few L2 Clients for E1 chip
7140 * @param bp driver descriptor
7141 * @param set set or clear an entry (1 or 0)
7142 * @param mac pointer to a buffer containing a MAC
7143 * @param cl_bit_vec bit vector of clients to register a MAC for
7144 * @param cam_offset offset in a CAM to use
7145 * @param with_bcast set broadcast MAC as well
7147 static void bnx2x_set_mac_addr_e1_gen(struct bnx2x *bp, int set, u8 *mac,
7148 u32 cl_bit_vec, u8 cam_offset,
7151 struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
7152 int port = BP_PORT(bp);
7154 /* CAM allocation
7155 * unicasts 0-31:port0 32-63:port1
7156 * multicast 64-127:port0 128-191:port1
7157 */
7158 config->hdr.length = 1 + (with_bcast ? 1 : 0);
7159 config->hdr.offset = cam_offset;
7160 config->hdr.client_id = 0xff;
7161 config->hdr.reserved1 = 0;
7164 config->config_table[0].cam_entry.msb_mac_addr =
7165 swab16(*(u16 *)&mac[0]);
7166 config->config_table[0].cam_entry.middle_mac_addr =
7167 swab16(*(u16 *)&mac[2]);
7168 config->config_table[0].cam_entry.lsb_mac_addr =
7169 swab16(*(u16 *)&mac[4]);
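/* Note: each 16-bit CAM field holds two MAC bytes in big-endian order;
 * on a little-endian host, a (hypothetical) MAC of 00:11:22:33:44:55
 * yields msb 0x0011, middle 0x2233, lsb 0x4455.
 */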
7170 config->config_table[0].cam_entry.flags = cpu_to_le16(port);
7172 config->config_table[0].target_table_entry.flags = 0;
7174 CAM_INVALIDATE(config->config_table[0]);
7175 config->config_table[0].target_table_entry.clients_bit_vector =
7176 cpu_to_le32(cl_bit_vec);
7177 config->config_table[0].target_table_entry.vlan_id = 0;
7179 DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x)\n",
7180 (set ? "setting" : "clearing"),
7181 config->config_table[0].cam_entry.msb_mac_addr,
7182 config->config_table[0].cam_entry.middle_mac_addr,
7183 config->config_table[0].cam_entry.lsb_mac_addr);
7187 config->config_table[1].cam_entry.msb_mac_addr =
7188 cpu_to_le16(0xffff);
7189 config->config_table[1].cam_entry.middle_mac_addr =
7190 cpu_to_le16(0xffff);
7191 config->config_table[1].cam_entry.lsb_mac_addr =
7192 cpu_to_le16(0xffff);
7193 config->config_table[1].cam_entry.flags = cpu_to_le16(port);
7195 config->config_table[1].target_table_entry.flags =
7196 TSTORM_CAM_TARGET_TABLE_ENTRY_BROADCAST;
7198 CAM_INVALIDATE(config->config_table[1]);
7199 config->config_table[1].target_table_entry.clients_bit_vector =
7200 cpu_to_le32(cl_bit_vec);
7201 config->config_table[1].target_table_entry.vlan_id = 0;
7204 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
7205 U64_HI(bnx2x_sp_mapping(bp, mac_config)),
7206 U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
7210 * Sets a MAC in a CAM for a few L2 Clients for E1H chip
7212 * @param bp driver descriptor
7213 * @param set set or clear an entry (1 or 0)
7214 * @param mac pointer to a buffer containing a MAC
7215 * @param cl_bit_vec bit vector of clients to register a MAC for
7216 * @param cam_offset offset in a CAM to use
7218 static void bnx2x_set_mac_addr_e1h_gen(struct bnx2x *bp, int set, u8 *mac,
7219 u32 cl_bit_vec, u8 cam_offset)
7221 struct mac_configuration_cmd_e1h *config =
7222 (struct mac_configuration_cmd_e1h *)bnx2x_sp(bp, mac_config);
7224 config->hdr.length = 1;
7225 config->hdr.offset = cam_offset;
7226 config->hdr.client_id = 0xff;
7227 config->hdr.reserved1 = 0;
7230 config->config_table[0].msb_mac_addr =
7231 swab16(*(u16 *)&mac[0]);
7232 config->config_table[0].middle_mac_addr =
7233 swab16(*(u16 *)&mac[2]);
7234 config->config_table[0].lsb_mac_addr =
7235 swab16(*(u16 *)&mac[4]);
7236 config->config_table[0].clients_bit_vector =
7237 cpu_to_le32(cl_bit_vec);
7238 config->config_table[0].vlan_id = 0;
7239 config->config_table[0].e1hov_id = cpu_to_le16(bp->e1hov);
7241 config->config_table[0].flags = BP_PORT(bp);
7243 config->config_table[0].flags =
7244 MAC_CONFIGURATION_ENTRY_E1H_ACTION_TYPE;
7246 DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x) E1HOV %d CLID mask %d\n",
7247 (set ? "setting" : "clearing"),
7248 config->config_table[0].msb_mac_addr,
7249 config->config_table[0].middle_mac_addr,
7250 config->config_table[0].lsb_mac_addr, bp->e1hov, cl_bit_vec);
7252 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
7253 U64_HI(bnx2x_sp_mapping(bp, mac_config)),
7254 U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
7257 static int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
7258 int *state_p, int poll)
7260 /* can take a while if any port is running */
7263 DP(NETIF_MSG_IFUP, "%s for state to become %x on IDX [%d]\n",
7264 poll ? "polling" : "waiting", state, idx);
7269 bnx2x_rx_int(bp->fp, 10);
7270 /* if index is different from 0
7271 * the reply for some commands will
7272 * be on the non default queue
7273 */
7275 bnx2x_rx_int(&bp->fp[idx], 10);
7278 mb(); /* state is changed by bnx2x_sp_event() */
7279 if (*state_p == state) {
7280 #ifdef BNX2X_STOP_ON_ERROR
7281 DP(NETIF_MSG_IFUP, "exit (cnt %d)\n", 5000 - cnt);
7293 BNX2X_ERR("timeout %s for state %x on IDX [%d]\n",
7294 poll ? "polling" : "waiting", state, idx);
7295 #ifdef BNX2X_STOP_ON_ERROR
7302 static void bnx2x_set_eth_mac_addr_e1h(struct bnx2x *bp, int set)
7304 bp->set_mac_pending++;
7307 bnx2x_set_mac_addr_e1h_gen(bp, set, bp->dev->dev_addr,
7308 (1 << bp->fp->cl_id), BP_FUNC(bp));
7310 /* Wait for a completion */
7311 bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending, set ? 0 : 1);
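/* Note: set_mac_pending is presumably decremented back to zero from
 * bnx2x_sp_event() when the SET_MAC ramrod completes; polling (last
 * argument 1) is used on the clear path, which typically runs during
 * unload when interrupts may already be disabled.
 */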
7314 static void bnx2x_set_eth_mac_addr_e1(struct bnx2x *bp, int set)
7316 bp->set_mac_pending++;
7319 bnx2x_set_mac_addr_e1_gen(bp, set, bp->dev->dev_addr,
7320 (1 << bp->fp->cl_id), (BP_PORT(bp) ? 32 : 0),
7323 /* Wait for a completion */
7324 bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending, set ? 0 : 1);
7329 * Set iSCSI MAC(s) at the next entries in the CAM after the ETH
7330 * MAC(s). This function will wait until the ramrod completion
7333 * @param bp driver handle
7334 * @param set set or clear the CAM entry
7336 * @return 0 on success, -ENODEV if the ramrod doesn't return.
7337 */
7338 static int bnx2x_set_iscsi_eth_mac_addr(struct bnx2x *bp, int set)
7340 u32 cl_bit_vec = (1 << BCM_ISCSI_ETH_CL_ID);
7342 bp->set_mac_pending++;
7345 /* Send a SET_MAC ramrod */
7347 bnx2x_set_mac_addr_e1_gen(bp, set, bp->iscsi_mac,
7348 cl_bit_vec, (BP_PORT(bp) ? 32 : 0) + 2,
7351 /* CAM allocation for E1H
7352 * unicasts: by func number
7353 * multicast: 20+FUNC*20, 20 each
7354 */
7355 bnx2x_set_mac_addr_e1h_gen(bp, set, bp->iscsi_mac,
7356 cl_bit_vec, E1H_FUNC_MAX + BP_FUNC(bp));
7358 /* Wait for a completion when setting */
7359 bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending, set ? 0 : 1);
7365 static int bnx2x_setup_leading(struct bnx2x *bp)
7369 /* reset IGU state */
7370 bnx2x_ack_sb(bp, bp->fp[0].sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
7373 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_SETUP, 0, 0, 0, 0);
7375 /* Wait for completion */
7376 rc = bnx2x_wait_ramrod(bp, BNX2X_STATE_OPEN, 0, &(bp->state), 0);
7381 static int bnx2x_setup_multi(struct bnx2x *bp, int index)
7383 struct bnx2x_fastpath *fp = &bp->fp[index];
7385 /* reset IGU state */
7386 bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
7389 fp->state = BNX2X_FP_STATE_OPENING;
7390 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CLIENT_SETUP, index, 0,
7393 /* Wait for completion */
7394 return bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_OPEN, index,
7398 static int bnx2x_poll(struct napi_struct *napi, int budget);
7400 static void bnx2x_set_int_mode_msix(struct bnx2x *bp, int *num_rx_queues_out,
7401 int *num_tx_queues_out)
7403 int _num_rx_queues = 0, _num_tx_queues = 0;
7405 switch (bp->multi_mode) {
7406 case ETH_RSS_MODE_DISABLED:
7411 case ETH_RSS_MODE_REGULAR:
7413 _num_rx_queues = min_t(u32, num_rx_queues,
7414 BNX2X_MAX_QUEUES(bp));
7416 _num_rx_queues = min_t(u32, num_online_cpus(),
7417 BNX2X_MAX_QUEUES(bp));
7420 _num_tx_queues = min_t(u32, num_tx_queues,
7421 BNX2X_MAX_QUEUES(bp));
7423 _num_tx_queues = min_t(u32, num_online_cpus(),
7424 BNX2X_MAX_QUEUES(bp));
7426 /* There must not be more Tx queues than Rx queues */
7427 if (_num_tx_queues > _num_rx_queues) {
7428 BNX2X_ERR("number of tx queues (%d) > "
7429 "number of rx queues (%d)"
7430 " defaulting to %d\n",
7431 _num_tx_queues, _num_rx_queues,
7433 _num_tx_queues = _num_rx_queues;
7444 *num_rx_queues_out = _num_rx_queues;
7445 *num_tx_queues_out = _num_tx_queues;
7448 static int bnx2x_set_int_mode(struct bnx2x *bp)
7455 bp->num_rx_queues = 1;
7456 bp->num_tx_queues = 1;
7457 DP(NETIF_MSG_IFUP, "set number of queues to 1\n");
7462 /* Set interrupt mode according to bp->multi_mode value */
7463 bnx2x_set_int_mode_msix(bp, &bp->num_rx_queues,
7464 &bp->num_tx_queues);
7466 DP(NETIF_MSG_IFUP, "set number of queues to: rx %d tx %d\n",
7467 bp->num_rx_queues, bp->num_tx_queues);
7469 /* if we can't use MSI-X we only need one fp,
7470 * so try to enable MSI-X with the requested number of fp's
7471 * and fallback to MSI or legacy INTx with one fp
7473 rc = bnx2x_enable_msix(bp);
7475 /* failed to enable MSI-X */
7477 BNX2X_ERR("Multi requested but failed to "
7478 "enable MSI-X (rx %d tx %d), "
7479 "set number of queues to 1\n",
7480 bp->num_rx_queues, bp->num_tx_queues);
7481 bp->num_rx_queues = 1;
7482 bp->num_tx_queues = 1;
7486 bp->dev->real_num_tx_queues = bp->num_tx_queues;
7491 static int bnx2x_cnic_notify(struct bnx2x *bp, int cmd);
7492 static void bnx2x_setup_cnic_irq_info(struct bnx2x *bp);
7495 /* must be called with rtnl_lock */
7496 static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
7501 #ifdef BNX2X_STOP_ON_ERROR
7502 if (unlikely(bp->panic))
7506 bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;
7508 rc = bnx2x_set_int_mode(bp);
7510 if (bnx2x_alloc_mem(bp))
7513 for_each_rx_queue(bp, i)
7514 bnx2x_fp(bp, i, disable_tpa) =
7515 ((bp->flags & TPA_ENABLE_FLAG) == 0);
7517 for_each_rx_queue(bp, i)
7518 netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
7521 bnx2x_napi_enable(bp);
7523 if (bp->flags & USING_MSIX_FLAG) {
7524 rc = bnx2x_req_msix_irqs(bp);
7526 pci_disable_msix(bp->pdev);
7530 /* Fall back to INTx if MSI-X could not be enabled due to lack
7531 of memory (in bnx2x_set_int_mode()) */
7532 if ((rc != -ENOMEM) && (int_mode != INT_MODE_INTx))
7533 bnx2x_enable_msi(bp);
7535 rc = bnx2x_req_irq(bp);
7537 BNX2X_ERR("IRQ request failed rc %d, aborting\n", rc);
7538 if (bp->flags & USING_MSI_FLAG)
7539 pci_disable_msi(bp->pdev);
7542 if (bp->flags & USING_MSI_FLAG) {
7543 bp->dev->irq = bp->pdev->irq;
7544 printk(KERN_INFO PFX "%s: using MSI IRQ %d\n",
7545 bp->dev->name, bp->pdev->irq);
7549 /* Send LOAD_REQUEST command to MCP
7550 Returns the type of LOAD command:
7551 if it is the first port to be initialized
7552 common blocks should be initialized, otherwise - not
7554 if (!BP_NOMCP(bp)) {
7555 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ);
7557 BNX2X_ERR("MCP response failure, aborting\n");
7561 if (load_code == FW_MSG_CODE_DRV_LOAD_REFUSED) {
7562 rc = -EBUSY; /* other port in diagnostic mode */
7567 int port = BP_PORT(bp);
7569 DP(NETIF_MSG_IFUP, "NO MCP - load counts %d, %d, %d\n",
7570 load_count[0], load_count[1], load_count[2]);
7572 load_count[1 + port]++;
7573 DP(NETIF_MSG_IFUP, "NO MCP - new load counts %d, %d, %d\n",
7574 load_count[0], load_count[1], load_count[2]);
7575 if (load_count[0] == 1)
7576 load_code = FW_MSG_CODE_DRV_LOAD_COMMON;
7577 else if (load_count[1 + port] == 1)
7578 load_code = FW_MSG_CODE_DRV_LOAD_PORT;
7580 load_code = FW_MSG_CODE_DRV_LOAD_FUNCTION;
7583 if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
7584 (load_code == FW_MSG_CODE_DRV_LOAD_PORT))
7588 DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
7591 rc = bnx2x_init_hw(bp, load_code);
7593 BNX2X_ERR("HW init failed, aborting\n");
7597 /* Setup NIC internals and enable interrupts */
7598 bnx2x_nic_init(bp, load_code);
7600 if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) &&
7601 (bp->common.shmem2_base))
7602 SHMEM2_WR(bp, dcc_support,
7603 (SHMEM_DCC_SUPPORT_DISABLE_ENABLE_PF_TLV |
7604 SHMEM_DCC_SUPPORT_BANDWIDTH_ALLOCATION_TLV));
7606 /* Send LOAD_DONE command to MCP */
7607 if (!BP_NOMCP(bp)) {
7608 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE);
7610 BNX2X_ERR("MCP response failure, aborting\n");
7616 bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;
7618 rc = bnx2x_setup_leading(bp);
7620 BNX2X_ERR("Setup leading failed!\n");
7621 #ifndef BNX2X_STOP_ON_ERROR
7629 if (CHIP_IS_E1H(bp))
7630 if (bp->mf_config & FUNC_MF_CFG_FUNC_DISABLED) {
7631 DP(NETIF_MSG_IFUP, "mf_cfg function disabled\n");
7632 bp->state = BNX2X_STATE_DISABLED;
7635 if (bp->state == BNX2X_STATE_OPEN) {
7637 /* Enable Timer scan */
7638 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + BP_PORT(bp)*4, 1);
7640 for_each_nondefault_queue(bp, i) {
7641 rc = bnx2x_setup_multi(bp, i);
7651 bnx2x_set_eth_mac_addr_e1(bp, 1);
7653 bnx2x_set_eth_mac_addr_e1h(bp, 1);
7655 /* Set iSCSI L2 MAC */
7656 mutex_lock(&bp->cnic_mutex);
7657 if (bp->cnic_eth_dev.drv_state & CNIC_DRV_STATE_REGD) {
7658 bnx2x_set_iscsi_eth_mac_addr(bp, 1);
7659 bp->cnic_flags |= BNX2X_CNIC_FLAG_MAC_SET;
7661 mutex_unlock(&bp->cnic_mutex);
7666 bnx2x_initial_phy_init(bp, load_mode);
7668 /* Start fast path */
7669 switch (load_mode) {
7671 if (bp->state == BNX2X_STATE_OPEN) {
7672 /* Tx queues should only be re-enabled */
7673 netif_tx_wake_all_queues(bp->dev);
7675 /* Initialize the receive filter. */
7676 bnx2x_set_rx_mode(bp->dev);
7680 netif_tx_start_all_queues(bp->dev);
7681 if (bp->state != BNX2X_STATE_OPEN)
7682 netif_tx_disable(bp->dev);
7683 /* Initialize the receive filter. */
7684 bnx2x_set_rx_mode(bp->dev);
7688 /* Initialize the receive filter. */
7689 bnx2x_set_rx_mode(bp->dev);
7690 bp->state = BNX2X_STATE_DIAG;
7698 bnx2x__link_status_update(bp);
7700 /* start the timer */
7701 mod_timer(&bp->timer, jiffies + bp->current_interval);
7704 bnx2x_setup_cnic_irq_info(bp);
7705 if (bp->state == BNX2X_STATE_OPEN)
7706 bnx2x_cnic_notify(bp, CNIC_CTL_START_CMD);
7713 /* Disable Timer scan */
7714 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + BP_PORT(bp)*4, 0);
7717 bnx2x_int_disable_sync(bp, 1);
7718 if (!BP_NOMCP(bp)) {
7719 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP);
7720 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
7723 /* Free SKBs, SGEs, TPA pool and driver internals */
7724 bnx2x_free_skbs(bp);
7725 for_each_rx_queue(bp, i)
7726 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
7731 bnx2x_napi_disable(bp);
7732 for_each_rx_queue(bp, i)
7733 netif_napi_del(&bnx2x_fp(bp, i, napi));
7739 static int bnx2x_stop_multi(struct bnx2x *bp, int index)
7741 struct bnx2x_fastpath *fp = &bp->fp[index];
7744 /* halt the connection */
7745 fp->state = BNX2X_FP_STATE_HALTING;
7746 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, index, 0, fp->cl_id, 0);
7748 /* Wait for completion */
7749 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, index,
7751 if (rc) /* timeout */
7754 /* delete cfc entry */
7755 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CFC_DEL, index, 0, 0, 1);
7757 /* Wait for completion */
7758 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_CLOSED, index,
7763 static int bnx2x_stop_leading(struct bnx2x *bp)
7765 __le16 dsb_sp_prod_idx;
7766 /* if the other port is handling traffic,
7767 this can take a lot of time */
7773 /* Send HALT ramrod */
7774 bp->fp[0].state = BNX2X_FP_STATE_HALTING;
7775 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, 0, 0, bp->fp->cl_id, 0);
7777 /* Wait for completion */
7778 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, 0,
7779 &(bp->fp[0].state), 1);
7780 if (rc) /* timeout */
7783 dsb_sp_prod_idx = *bp->dsb_sp_prod;
7785 /* Send PORT_DELETE ramrod */
7786 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_DEL, 0, 0, 0, 1);
7788 /* Wait for completion to arrive on default status block
7789 we are going to reset the chip anyway
7790 so there is not much to do if this times out
7791 */
7792 while (dsb_sp_prod_idx == *bp->dsb_sp_prod) {
7794 DP(NETIF_MSG_IFDOWN, "timeout waiting for port del "
7795 "dsb_sp_prod 0x%x != dsb_sp_prod_idx 0x%x\n",
7796 *bp->dsb_sp_prod, dsb_sp_prod_idx);
7797 #ifdef BNX2X_STOP_ON_ERROR
7805 rmb(); /* Refresh the dsb_sp_prod */
7807 bp->state = BNX2X_STATE_CLOSING_WAIT4_UNLOAD;
7808 bp->fp[0].state = BNX2X_FP_STATE_CLOSED;
7813 static void bnx2x_reset_func(struct bnx2x *bp)
7815 int port = BP_PORT(bp);
7816 int func = BP_FUNC(bp);
7820 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
7821 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
7824 /* Disable Timer scan */
7825 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 0);
7826 /*
7827 * Wait for at least 10ms and up to 2 seconds for the timers scan to
7828 * complete
7829 */
7830 for (i = 0; i < 200; i++) {
7832 if (!REG_RD(bp, TM_REG_LIN0_SCAN_ON + port*4))
7837 base = FUNC_ILT_BASE(func);
7838 for (i = base; i < base + ILT_PER_FUNC; i++)
7839 bnx2x_ilt_wr(bp, i, 0);
7842 static void bnx2x_reset_port(struct bnx2x *bp)
7844 int port = BP_PORT(bp);
7847 REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
7849 /* Do not rcv packets to BRB */
7850 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK + port*4, 0x0);
7851 /* Do not direct rcv packets that are not for MCP to the BRB */
7852 REG_WR(bp, (port ? NIG_REG_LLH1_BRB1_NOT_MCP :
7853 NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
7856 REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, 0);
7859 /* Check for BRB port occupancy */
7860 val = REG_RD(bp, BRB1_REG_PORT_NUM_OCC_BLOCKS_0 + port*4);
7862 DP(NETIF_MSG_IFDOWN,
7863 "BRB1 is not empty %d blocks are occupied\n", val);
7865 /* TODO: Close Doorbell port? */
7868 static void bnx2x_reset_chip(struct bnx2x *bp, u32 reset_code)
7870 DP(BNX2X_MSG_MCP, "function %d reset_code %x\n",
7871 BP_FUNC(bp), reset_code);
7873 switch (reset_code) {
7874 case FW_MSG_CODE_DRV_UNLOAD_COMMON:
7875 bnx2x_reset_port(bp);
7876 bnx2x_reset_func(bp);
7877 bnx2x_reset_common(bp);
7880 case FW_MSG_CODE_DRV_UNLOAD_PORT:
7881 bnx2x_reset_port(bp);
7882 bnx2x_reset_func(bp);
7885 case FW_MSG_CODE_DRV_UNLOAD_FUNCTION:
7886 bnx2x_reset_func(bp);
7890 BNX2X_ERR("Unknown reset_code (0x%x) from MCP\n", reset_code);
7895 /* must be called with rtnl_lock */
7896 static int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
7898 int port = BP_PORT(bp);
7903 bnx2x_cnic_notify(bp, CNIC_CTL_STOP_CMD);
7905 bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
7907 /* Set "drop all" */
7908 bp->rx_mode = BNX2X_RX_MODE_NONE;
7909 bnx2x_set_storm_rx_mode(bp);
7911 /* Disable HW interrupts, NAPI and Tx */
7912 bnx2x_netif_stop(bp, 1);
7914 del_timer_sync(&bp->timer);
7915 SHMEM_WR(bp, func_mb[BP_FUNC(bp)].drv_pulse_mb,
7916 (DRV_PULSE_ALWAYS_ALIVE | bp->fw_drv_pulse_wr_seq));
7917 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
7922 /* Wait until tx fastpath tasks complete */
7923 for_each_tx_queue(bp, i) {
7924 struct bnx2x_fastpath *fp = &bp->fp[i];
7927 while (bnx2x_has_tx_work_unload(fp)) {
7931 BNX2X_ERR("timeout waiting for queue[%d]\n",
7933 #ifdef BNX2X_STOP_ON_ERROR
7944 /* Give HW time to discard old tx messages */
7947 if (CHIP_IS_E1(bp)) {
7948 struct mac_configuration_cmd *config =
7949 bnx2x_sp(bp, mcast_config);
7951 bnx2x_set_eth_mac_addr_e1(bp, 0);
7953 for (i = 0; i < config->hdr.length; i++)
7954 CAM_INVALIDATE(config->config_table[i]);
7956 config->hdr.length = i;
7957 if (CHIP_REV_IS_SLOW(bp))
7958 config->hdr.offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
7960 config->hdr.offset = BNX2X_MAX_MULTICAST*(1 + port);
7961 config->hdr.client_id = bp->fp->cl_id;
7962 config->hdr.reserved1 = 0;
7964 bp->set_mac_pending++;
7967 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
7968 U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
7969 U64_LO(bnx2x_sp_mapping(bp, mcast_config)), 0);
7972 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
7974 bnx2x_set_eth_mac_addr_e1h(bp, 0);
7976 for (i = 0; i < MC_HASH_SIZE; i++)
7977 REG_WR(bp, MC_HASH_OFFSET(bp, i), 0);
7979 REG_WR(bp, MISC_REG_E1HMF_MODE, 0);
7982 /* Clear iSCSI L2 MAC */
7983 mutex_lock(&bp->cnic_mutex);
7984 if (bp->cnic_flags & BNX2X_CNIC_FLAG_MAC_SET) {
7985 bnx2x_set_iscsi_eth_mac_addr(bp, 0);
7986 bp->cnic_flags &= ~BNX2X_CNIC_FLAG_MAC_SET;
7988 mutex_unlock(&bp->cnic_mutex);
7991 if (unload_mode == UNLOAD_NORMAL)
7992 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
7994 else if (bp->flags & NO_WOL_FLAG)
7995 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP;
7998 u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
7999 u8 *mac_addr = bp->dev->dev_addr;
8001 /* The mac address is written to entries 1-4 to
8002 preserve entry 0 which is used by the PMF */
8003 u8 entry = (BP_E1HVN(bp) + 1)*8;
8005 val = (mac_addr[0] << 8) | mac_addr[1];
8006 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry, val);
8008 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
8009 (mac_addr[4] << 8) | mac_addr[5];
8010 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry + 4, val);
8012 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN;
8015 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
8017 /* Close multi and leading connections
8018 Completions for ramrods are collected in a synchronous way */
8019 for_each_nondefault_queue(bp, i)
8020 if (bnx2x_stop_multi(bp, i))
8023 rc = bnx2x_stop_leading(bp);
8025 BNX2X_ERR("Stop leading failed!\n");
8026 #ifdef BNX2X_STOP_ON_ERROR
8035 reset_code = bnx2x_fw_command(bp, reset_code);
8037 DP(NETIF_MSG_IFDOWN, "NO MCP - load counts %d, %d, %d\n",
8038 load_count[0], load_count[1], load_count[2]);
8040 load_count[1 + port]--;
8041 DP(NETIF_MSG_IFDOWN, "NO MCP - new load counts %d, %d, %d\n",
8042 load_count[0], load_count[1], load_count[2]);
8043 if (load_count[0] == 0)
8044 reset_code = FW_MSG_CODE_DRV_UNLOAD_COMMON;
8045 else if (load_count[1 + port] == 0)
8046 reset_code = FW_MSG_CODE_DRV_UNLOAD_PORT;
8048 reset_code = FW_MSG_CODE_DRV_UNLOAD_FUNCTION;
8051 if ((reset_code == FW_MSG_CODE_DRV_UNLOAD_COMMON) ||
8052 (reset_code == FW_MSG_CODE_DRV_UNLOAD_PORT))
8053 bnx2x__link_reset(bp);
8055 /* Reset the chip */
8056 bnx2x_reset_chip(bp, reset_code);
8058 /* Report UNLOAD_DONE to MCP */
8060 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
8064 /* Free SKBs, SGEs, TPA pool and driver internals */
8065 bnx2x_free_skbs(bp);
8066 for_each_rx_queue(bp, i)
8067 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
8068 for_each_rx_queue(bp, i)
8069 netif_napi_del(&bnx2x_fp(bp, i, napi));
8072 bp->state = BNX2X_STATE_CLOSED;
8074 netif_carrier_off(bp->dev);
8079 static void bnx2x_reset_task(struct work_struct *work)
8081 struct bnx2x *bp = container_of(work, struct bnx2x, reset_task);
8083 #ifdef BNX2X_STOP_ON_ERROR
8084 BNX2X_ERR("reset task called but STOP_ON_ERROR defined"
8085 " so reset not done to allow debug dump,\n"
8086 " you will need to reboot when done\n");
8092 if (!netif_running(bp->dev))
8093 goto reset_task_exit;
8095 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
8096 bnx2x_nic_load(bp, LOAD_NORMAL);
8102 /* end of nic load/unload */
8107 * Init service functions
8110 static inline u32 bnx2x_get_pretend_reg(struct bnx2x *bp, int func)
8113 case 0: return PXP2_REG_PGL_PRETEND_FUNC_F0;
8114 case 1: return PXP2_REG_PGL_PRETEND_FUNC_F1;
8115 case 2: return PXP2_REG_PGL_PRETEND_FUNC_F2;
8116 case 3: return PXP2_REG_PGL_PRETEND_FUNC_F3;
8117 case 4: return PXP2_REG_PGL_PRETEND_FUNC_F4;
8118 case 5: return PXP2_REG_PGL_PRETEND_FUNC_F5;
8119 case 6: return PXP2_REG_PGL_PRETEND_FUNC_F6;
8120 case 7: return PXP2_REG_PGL_PRETEND_FUNC_F7;
8122 BNX2X_ERR("Unsupported function index: %d\n", func);
8127 static void bnx2x_undi_int_disable_e1h(struct bnx2x *bp, int orig_func)
8129 u32 reg = bnx2x_get_pretend_reg(bp, orig_func), new_val;
8131 /* Flush all outstanding writes */
8134 /* Pretend to be function 0 */
8136 /* Flush the GRC transaction (in the chip) */
8137 new_val = REG_RD(bp, reg);
8139 BNX2X_ERR("Hmmm... Pretend register wasn't updated: (0,%d)!\n",
8144 /* From now on we are in the "like-E1" mode */
8145 bnx2x_int_disable(bp);
8147 /* Flush all outstanding writes */
8150 /* Restore the original function settings */
8151 REG_WR(bp, reg, orig_func);
8152 new_val = REG_RD(bp, reg);
8153 if (new_val != orig_func) {
8154 BNX2X_ERR("Hmmm... Pretend register wasn't updated: (%d,%d)!\n",
8155 orig_func, new_val);
8160 static inline void bnx2x_undi_int_disable(struct bnx2x *bp, int func)
8162 if (CHIP_IS_E1H(bp))
8163 bnx2x_undi_int_disable_e1h(bp, func);
8165 bnx2x_int_disable(bp);
8168 static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
8172 /* Check if there is any driver already loaded */
8173 val = REG_RD(bp, MISC_REG_UNPREPARED);
8175 /* Check if it is the UNDI driver
8176 * UNDI driver initializes CID offset for normal bell to 0x7
8177 */
8178 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
8179 val = REG_RD(bp, DORQ_REG_NORM_CID_OFST);
8181 u32 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
8183 int func = BP_FUNC(bp);
8187 /* clear the UNDI indication */
8188 REG_WR(bp, DORQ_REG_NORM_CID_OFST, 0);
8190 BNX2X_DEV_INFO("UNDI is active! reset device\n");
8192 /* try unload UNDI on port 0 */
8195 (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
8196 DRV_MSG_SEQ_NUMBER_MASK);
8197 reset_code = bnx2x_fw_command(bp, reset_code);
8199 /* if UNDI is loaded on the other port */
8200 if (reset_code != FW_MSG_CODE_DRV_UNLOAD_COMMON) {
8202 /* send "DONE" for previous unload */
8203 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
8205 /* unload UNDI on port 1 */
8208 (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
8209 DRV_MSG_SEQ_NUMBER_MASK);
8210 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
8212 bnx2x_fw_command(bp, reset_code);
8215 /* now it's safe to release the lock */
8216 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
8218 bnx2x_undi_int_disable(bp, func);
8220 /* close input traffic and wait for it */
8221 /* Do not rcv packets to BRB */
8223 (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_DRV_MASK :
8224 NIG_REG_LLH0_BRB1_DRV_MASK), 0x0);
8225 /* Do not direct rcv packets that are not for MCP to
8226 the BRB */
8228 (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_NOT_MCP :
8229 NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
8232 (BP_PORT(bp) ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
8233 MISC_REG_AEU_MASK_ATTN_FUNC_0), 0);
8236 /* save NIG port swap info */
8237 swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
8238 swap_en = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
8241 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
8244 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
8246 /* take the NIG out of reset and restore swap values */
8248 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
8249 MISC_REGISTERS_RESET_REG_1_RST_NIG);
8250 REG_WR(bp, NIG_REG_PORT_SWAP, swap_val);
8251 REG_WR(bp, NIG_REG_STRAP_OVERRIDE, swap_en);
8253 /* send unload done to the MCP */
8254 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
8256 /* restore our func and fw_seq */
8259 (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
8260 DRV_MSG_SEQ_NUMBER_MASK);
8263 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
8267 static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
8269 u32 val, val2, val3, val4, id;
8272 /* Get the chip revision id and number. */
8273 /* chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3 */
8274 val = REG_RD(bp, MISC_REG_CHIP_NUM);
8275 id = ((val & 0xffff) << 16);
8276 val = REG_RD(bp, MISC_REG_CHIP_REV);
8277 id |= ((val & 0xf) << 12);
8278 val = REG_RD(bp, MISC_REG_CHIP_METAL);
8279 id |= ((val & 0xff) << 4);
8280 val = REG_RD(bp, MISC_REG_BOND_ID);
8282 bp->common.chip_id = id;
8283 bp->link_params.chip_id = bp->common.chip_id;
8284 BNX2X_DEV_INFO("chip ID is 0x%x\n", id);
8286 val = (REG_RD(bp, 0x2874) & 0x55);
8287 if ((bp->common.chip_id & 0x1) ||
8288 (CHIP_IS_E1(bp) && val) || (CHIP_IS_E1H(bp) && (val == 0x55))) {
8289 bp->flags |= ONE_PORT_FLAG;
8290 BNX2X_DEV_INFO("single port device\n");
8293 val = REG_RD(bp, MCP_REG_MCPR_NVM_CFG4);
8294 bp->common.flash_size = (NVRAM_1MB_SIZE <<
8295 (val & MCPR_NVM_CFG4_FLASH_SIZE));
8296 BNX2X_DEV_INFO("flash_size 0x%x (%d)\n",
8297 bp->common.flash_size, bp->common.flash_size);
8299 bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
8300 bp->common.shmem2_base = REG_RD(bp, MISC_REG_GENERIC_CR_0);
8301 bp->link_params.shmem_base = bp->common.shmem_base;
8302 BNX2X_DEV_INFO("shmem offset 0x%x shmem2 offset 0x%x\n",
8303 bp->common.shmem_base, bp->common.shmem2_base);
8305 if (!bp->common.shmem_base ||
8306 (bp->common.shmem_base < 0xA0000) ||
8307 (bp->common.shmem_base >= 0xC0000)) {
8308 BNX2X_DEV_INFO("MCP not active\n");
8309 bp->flags |= NO_MCP_FLAG;
8313 val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
8314 if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
8315 != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
8316 BNX2X_ERR("BAD MCP validity signature\n");
8318 bp->common.hw_config = SHMEM_RD(bp, dev_info.shared_hw_config.config);
8319 BNX2X_DEV_INFO("hw_config 0x%08x\n", bp->common.hw_config);
8321 bp->link_params.hw_led_mode = ((bp->common.hw_config &
8322 SHARED_HW_CFG_LED_MODE_MASK) >>
8323 SHARED_HW_CFG_LED_MODE_SHIFT);
8325 bp->link_params.feature_config_flags = 0;
8326 val = SHMEM_RD(bp, dev_info.shared_feature_config.config);
8327 if (val & SHARED_FEAT_CFG_OVERRIDE_PREEMPHASIS_CFG_ENABLED)
8328 bp->link_params.feature_config_flags |=
8329 FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
8331 bp->link_params.feature_config_flags &=
8332 ~FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
8334 val = SHMEM_RD(bp, dev_info.bc_rev) >> 8;
8335 bp->common.bc_ver = val;
8336 BNX2X_DEV_INFO("bc_ver %X\n", val);
8337 if (val < BNX2X_BC_VER) {
8338 /* for now only warn
8339 * later we might need to enforce this */
8340 BNX2X_ERR("This driver needs bc_ver %X but found %X,"
8341 " please upgrade BC\n", BNX2X_BC_VER, val);
8343 bp->link_params.feature_config_flags |=
8344 (val >= REQ_BC_VER_4_VRFY_OPT_MDL) ?
8345 FEATURE_CONFIG_BC_SUPPORTS_OPT_MDL_VRFY : 0;
8347 if (BP_E1HVN(bp) == 0) {
8348 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_PMC, &pmc);
8349 bp->flags |= (pmc & PCI_PM_CAP_PME_D3cold) ? 0 : NO_WOL_FLAG;
8351 /* no WOL capability for E1HVN != 0 */
8352 bp->flags |= NO_WOL_FLAG;
8354 BNX2X_DEV_INFO("%sWoL capable\n",
8355 (bp->flags & NO_WOL_FLAG) ? "not " : "");
8357 val = SHMEM_RD(bp, dev_info.shared_hw_config.part_num);
8358 val2 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[4]);
8359 val3 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[8]);
8360 val4 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[12]);
8362 printk(KERN_INFO PFX "part number %X-%X-%X-%X\n",
8363 val, val2, val3, val4);
8366 static void __devinit bnx2x_link_settings_supported(struct bnx2x *bp,
8369 int port = BP_PORT(bp);
8372 switch (switch_cfg) {
8374 BNX2X_DEV_INFO("switch_cfg 0x%x (1G)\n", switch_cfg);
8377 SERDES_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
8378 switch (ext_phy_type) {
8379 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT:
8380 BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
8383 bp->port.supported |= (SUPPORTED_10baseT_Half |
8384 SUPPORTED_10baseT_Full |
8385 SUPPORTED_100baseT_Half |
8386 SUPPORTED_100baseT_Full |
8387 SUPPORTED_1000baseT_Full |
8388 SUPPORTED_2500baseX_Full |
8393 SUPPORTED_Asym_Pause);
8396 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_BCM5482:
8397 BNX2X_DEV_INFO("ext_phy_type 0x%x (5482)\n",
8400 bp->port.supported |= (SUPPORTED_10baseT_Half |
8401 SUPPORTED_10baseT_Full |
8402 SUPPORTED_100baseT_Half |
8403 SUPPORTED_100baseT_Full |
8404 SUPPORTED_1000baseT_Full |
8409 SUPPORTED_Asym_Pause);
8413 BNX2X_ERR("NVRAM config error. "
8414 "BAD SerDes ext_phy_config 0x%x\n",
8415 bp->link_params.ext_phy_config);
8419 bp->port.phy_addr = REG_RD(bp, NIG_REG_SERDES0_CTRL_PHY_ADDR +
8421 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
8424 case SWITCH_CFG_10G:
8425 BNX2X_DEV_INFO("switch_cfg 0x%x (10G)\n", switch_cfg);
8428 XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
8429 switch (ext_phy_type) {
8430 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
8431 BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
8434 bp->port.supported |= (SUPPORTED_10baseT_Half |
8435 SUPPORTED_10baseT_Full |
8436 SUPPORTED_100baseT_Half |
8437 SUPPORTED_100baseT_Full |
8438 SUPPORTED_1000baseT_Full |
8439 SUPPORTED_2500baseX_Full |
8440 SUPPORTED_10000baseT_Full |
8445 SUPPORTED_Asym_Pause);
8448 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
8449 BNX2X_DEV_INFO("ext_phy_type 0x%x (8072)\n",
8452 bp->port.supported |= (SUPPORTED_10000baseT_Full |
8453 SUPPORTED_1000baseT_Full |
8457 SUPPORTED_Asym_Pause);
8460 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
8461 BNX2X_DEV_INFO("ext_phy_type 0x%x (8073)\n",
8464 bp->port.supported |= (SUPPORTED_10000baseT_Full |
8465 SUPPORTED_2500baseX_Full |
8466 SUPPORTED_1000baseT_Full |
8470 SUPPORTED_Asym_Pause);
8473 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
8474 BNX2X_DEV_INFO("ext_phy_type 0x%x (8705)\n",
8477 bp->port.supported |= (SUPPORTED_10000baseT_Full |
8480 SUPPORTED_Asym_Pause);
8483 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
8484 BNX2X_DEV_INFO("ext_phy_type 0x%x (8706)\n",
8487 bp->port.supported |= (SUPPORTED_10000baseT_Full |
8488 SUPPORTED_1000baseT_Full |
8491 SUPPORTED_Asym_Pause);
8494 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
8495 BNX2X_DEV_INFO("ext_phy_type 0x%x (8726)\n",
8498 bp->port.supported |= (SUPPORTED_10000baseT_Full |
8499 SUPPORTED_1000baseT_Full |
8503 SUPPORTED_Asym_Pause);
8506 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
8507 BNX2X_DEV_INFO("ext_phy_type 0x%x (8727)\n",
8510 bp->port.supported |= (SUPPORTED_10000baseT_Full |
8511 SUPPORTED_1000baseT_Full |
8515 SUPPORTED_Asym_Pause);
8518 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
8519 BNX2X_DEV_INFO("ext_phy_type 0x%x (SFX7101)\n",
8522 bp->port.supported |= (SUPPORTED_10000baseT_Full |
8526 SUPPORTED_Asym_Pause);
8529 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481:
8530 BNX2X_DEV_INFO("ext_phy_type 0x%x (BCM8481)\n",
8533 bp->port.supported |= (SUPPORTED_10baseT_Half |
8534 SUPPORTED_10baseT_Full |
8535 SUPPORTED_100baseT_Half |
8536 SUPPORTED_100baseT_Full |
8537 SUPPORTED_1000baseT_Full |
8538 SUPPORTED_10000baseT_Full |
8542 SUPPORTED_Asym_Pause);
8545 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
8546 BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
8547 bp->link_params.ext_phy_config);
8551 BNX2X_ERR("NVRAM config error. "
8552 "BAD XGXS ext_phy_config 0x%x\n",
8553 bp->link_params.ext_phy_config);
8557 bp->port.phy_addr = REG_RD(bp, NIG_REG_XGXS0_CTRL_PHY_ADDR +
8559 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
8564 BNX2X_ERR("BAD switch_cfg link_config 0x%x\n",
8565 bp->port.link_config);
8568 bp->link_params.phy_addr = bp->port.phy_addr;
8570 /* mask what we support according to speed_cap_mask */
8571 if (!(bp->link_params.speed_cap_mask &
8572 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF))
8573 bp->port.supported &= ~SUPPORTED_10baseT_Half;
8575 if (!(bp->link_params.speed_cap_mask &
8576 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL))
8577 bp->port.supported &= ~SUPPORTED_10baseT_Full;
8579 if (!(bp->link_params.speed_cap_mask &
8580 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF))
8581 bp->port.supported &= ~SUPPORTED_100baseT_Half;
8583 if (!(bp->link_params.speed_cap_mask &
8584 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL))
8585 bp->port.supported &= ~SUPPORTED_100baseT_Full;
8587 if (!(bp->link_params.speed_cap_mask &
8588 PORT_HW_CFG_SPEED_CAPABILITY_D0_1G))
8589 bp->port.supported &= ~(SUPPORTED_1000baseT_Half |
8590 SUPPORTED_1000baseT_Full);
8592 if (!(bp->link_params.speed_cap_mask &
8593 PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G))
8594 bp->port.supported &= ~SUPPORTED_2500baseX_Full;
8596 if (!(bp->link_params.speed_cap_mask &
8597 PORT_HW_CFG_SPEED_CAPABILITY_D0_10G))
8598 bp->port.supported &= ~SUPPORTED_10000baseT_Full;
8600 BNX2X_DEV_INFO("supported 0x%x\n", bp->port.supported);
8603 static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
8604 {
8605 bp->link_params.req_duplex = DUPLEX_FULL;
8607 switch (bp->port.link_config & PORT_FEATURE_LINK_SPEED_MASK) {
8608 case PORT_FEATURE_LINK_SPEED_AUTO:
8609 if (bp->port.supported & SUPPORTED_Autoneg) {
8610 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
8611 bp->port.advertising = bp->port.supported;
8613 u32 ext_phy_type =
8614 XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
8616 if ((ext_phy_type ==
8617 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705) ||
8618 (ext_phy_type ==
8619 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706)) {
8620 /* force 10G, no AN */
8621 bp->link_params.req_line_speed = SPEED_10000;
8622 bp->port.advertising =
8623 (ADVERTISED_10000baseT_Full |
8624 ADVERTISED_FIBRE);
8625 }
8626 } else {
8627 BNX2X_ERR("NVRAM config error. "
8628 "Invalid link_config 0x%x"
8629 " Autoneg not supported\n",
8630 bp->port.link_config);
8631 return;
8632 }
8633 break;
8635 case PORT_FEATURE_LINK_SPEED_10M_FULL:
8636 if (bp->port.supported & SUPPORTED_10baseT_Full) {
8637 bp->link_params.req_line_speed = SPEED_10;
8638 bp->port.advertising = (ADVERTISED_10baseT_Full |
8639 ADVERTISED_TP);
8640 } else {
8641 BNX2X_ERR("NVRAM config error. "
8642 "Invalid link_config 0x%x"
8643 " speed_cap_mask 0x%x\n",
8644 bp->port.link_config,
8645 bp->link_params.speed_cap_mask);
8646 return;
8647 }
8648 break;
8650 case PORT_FEATURE_LINK_SPEED_10M_HALF:
8651 if (bp->port.supported & SUPPORTED_10baseT_Half) {
8652 bp->link_params.req_line_speed = SPEED_10;
8653 bp->link_params.req_duplex = DUPLEX_HALF;
8654 bp->port.advertising = (ADVERTISED_10baseT_Half |
8655 ADVERTISED_TP);
8656 } else {
8657 BNX2X_ERR("NVRAM config error. "
8658 "Invalid link_config 0x%x"
8659 " speed_cap_mask 0x%x\n",
8660 bp->port.link_config,
8661 bp->link_params.speed_cap_mask);
8662 return;
8663 }
8664 break;
8666 case PORT_FEATURE_LINK_SPEED_100M_FULL:
8667 if (bp->port.supported & SUPPORTED_100baseT_Full) {
8668 bp->link_params.req_line_speed = SPEED_100;
8669 bp->port.advertising = (ADVERTISED_100baseT_Full |
8670 ADVERTISED_TP);
8671 } else {
8672 BNX2X_ERR("NVRAM config error. "
8673 "Invalid link_config 0x%x"
8674 " speed_cap_mask 0x%x\n",
8675 bp->port.link_config,
8676 bp->link_params.speed_cap_mask);
8677 return;
8678 }
8679 break;
8681 case PORT_FEATURE_LINK_SPEED_100M_HALF:
8682 if (bp->port.supported & SUPPORTED_100baseT_Half) {
8683 bp->link_params.req_line_speed = SPEED_100;
8684 bp->link_params.req_duplex = DUPLEX_HALF;
8685 bp->port.advertising = (ADVERTISED_100baseT_Half |
8686 ADVERTISED_TP);
8687 } else {
8688 BNX2X_ERR("NVRAM config error. "
8689 "Invalid link_config 0x%x"
8690 " speed_cap_mask 0x%x\n",
8691 bp->port.link_config,
8692 bp->link_params.speed_cap_mask);
8693 return;
8694 }
8695 break;
8697 case PORT_FEATURE_LINK_SPEED_1G:
8698 if (bp->port.supported & SUPPORTED_1000baseT_Full) {
8699 bp->link_params.req_line_speed = SPEED_1000;
8700 bp->port.advertising = (ADVERTISED_1000baseT_Full |
8701 ADVERTISED_TP);
8702 } else {
8703 BNX2X_ERR("NVRAM config error. "
8704 "Invalid link_config 0x%x"
8705 " speed_cap_mask 0x%x\n",
8706 bp->port.link_config,
8707 bp->link_params.speed_cap_mask);
8708 return;
8709 }
8710 break;
8712 case PORT_FEATURE_LINK_SPEED_2_5G:
8713 if (bp->port.supported & SUPPORTED_2500baseX_Full) {
8714 bp->link_params.req_line_speed = SPEED_2500;
8715 bp->port.advertising = (ADVERTISED_2500baseX_Full |
8716 ADVERTISED_TP);
8717 } else {
8718 BNX2X_ERR("NVRAM config error. "
8719 "Invalid link_config 0x%x"
8720 " speed_cap_mask 0x%x\n",
8721 bp->port.link_config,
8722 bp->link_params.speed_cap_mask);
8723 return;
8724 }
8725 break;
8727 case PORT_FEATURE_LINK_SPEED_10G_CX4:
8728 case PORT_FEATURE_LINK_SPEED_10G_KX4:
8729 case PORT_FEATURE_LINK_SPEED_10G_KR:
8730 if (bp->port.supported & SUPPORTED_10000baseT_Full) {
8731 bp->link_params.req_line_speed = SPEED_10000;
8732 bp->port.advertising = (ADVERTISED_10000baseT_Full |
8733 ADVERTISED_FIBRE);
8734 } else {
8735 BNX2X_ERR("NVRAM config error. "
8736 "Invalid link_config 0x%x"
8737 " speed_cap_mask 0x%x\n",
8738 bp->port.link_config,
8739 bp->link_params.speed_cap_mask);
8740 return;
8741 }
8742 break;
8744 default:
8745 BNX2X_ERR("NVRAM config error. "
8746 "BAD link speed link_config 0x%x\n",
8747 bp->port.link_config);
8748 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
8749 bp->port.advertising = bp->port.supported;
8750 break;
8751 }
8753 bp->link_params.req_flow_ctrl = (bp->port.link_config &
8754 PORT_FEATURE_FLOW_CONTROL_MASK);
8755 if ((bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO) &&
8756 !(bp->port.supported & SUPPORTED_Autoneg))
8757 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_NONE;
8759 BNX2X_DEV_INFO("req_line_speed %d req_duplex %d req_flow_ctrl 0x%x"
8760 " advertising 0x%x\n",
8761 bp->link_params.req_line_speed,
8762 bp->link_params.req_duplex,
8763 bp->link_params.req_flow_ctrl, bp->port.advertising);
8764 }
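8765 /* pack the two shmem MAC words into a byte array in network (big-endian) order */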
8766 static void __devinit bnx2x_set_mac_buf(u8 *mac_buf, u32 mac_lo, u16 mac_hi)
8767 {
8768 mac_hi = cpu_to_be16(mac_hi);
8769 mac_lo = cpu_to_be32(mac_lo);
8770 memcpy(mac_buf, &mac_hi, sizeof(mac_hi));
8771 memcpy(mac_buf + sizeof(mac_hi), &mac_lo, sizeof(mac_lo));
8772 }
8774 static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp)
8775 {
8776 int port = BP_PORT(bp);
8777 u32 val, val2;
8778 u32 config;
8779 u16 i;
8780 u32 ext_phy_type;
8782 bp->link_params.bp = bp;
8783 bp->link_params.port = port;
8785 bp->link_params.lane_config =
8786 SHMEM_RD(bp, dev_info.port_hw_config[port].lane_config);
8787 bp->link_params.ext_phy_config =
8789 dev_info.port_hw_config[port].external_phy_config);
8790 /* BCM8727_NOC => BCM8727 no over current */
8791 if (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config) ==
8792 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727_NOC) {
8793 bp->link_params.ext_phy_config &=
8794 ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
8795 bp->link_params.ext_phy_config |=
8796 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727;
8797 bp->link_params.feature_config_flags |=
8798 FEATURE_CONFIG_BCM8727_NOC;
8801 bp->link_params.speed_cap_mask =
8803 dev_info.port_hw_config[port].speed_capability_mask);
8805 bp->port.link_config =
8806 SHMEM_RD(bp, dev_info.port_feature_config[port].link_config);
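8807 /* each shmem dword packs two 16-bit equalizer values; they are split below */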
8808 /* Get the rx and tx xgxs configuration for all 4 lanes */
8809 for (i = 0; i < 2; i++) {
8810 val = SHMEM_RD(bp,
8811 dev_info.port_hw_config[port].xgxs_config_rx[i<<1]);
8812 bp->link_params.xgxs_config_rx[i << 1] = ((val>>16) & 0xffff);
8813 bp->link_params.xgxs_config_rx[(i << 1) + 1] = (val & 0xffff);
8815 val = SHMEM_RD(bp,
8816 dev_info.port_hw_config[port].xgxs_config_tx[i<<1]);
8817 bp->link_params.xgxs_config_tx[i << 1] = ((val>>16) & 0xffff);
8818 bp->link_params.xgxs_config_tx[(i << 1) + 1] = (val & 0xffff);
8819 }
8821 /* If the device is capable of WoL, set the default state according
8822 * to the HW
8823 */
8824 config = SHMEM_RD(bp, dev_info.port_feature_config[port].config);
8825 bp->wol = (!(bp->flags & NO_WOL_FLAG) &&
8826 (config & PORT_FEATURE_WOL_ENABLED));
8828 BNX2X_DEV_INFO("lane_config 0x%08x ext_phy_config 0x%08x"
8829 " speed_cap_mask 0x%08x link_config 0x%08x\n",
8830 bp->link_params.lane_config,
8831 bp->link_params.ext_phy_config,
8832 bp->link_params.speed_cap_mask, bp->port.link_config);
8834 bp->link_params.switch_cfg |= (bp->port.link_config &
8835 PORT_FEATURE_CONNECTED_SWITCH_MASK);
8836 bnx2x_link_settings_supported(bp, bp->link_params.switch_cfg);
8838 bnx2x_link_settings_requested(bp);
8840 /*
8841 * If connected directly, work with the internal PHY; otherwise, work
8842 * with the external PHY
8843 */
8844 ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
8845 if (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT)
8846 bp->mdio.prtad = bp->link_params.phy_addr;
8848 else if ((ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE) &&
8849 (ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN))
8850 bp->mdio.prtad =
8851 XGXS_EXT_PHY_ADDR(bp->link_params.ext_phy_config);
8853 val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper);
8854 val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower);
8855 bnx2x_set_mac_buf(bp->dev->dev_addr, val, val2);
8856 memcpy(bp->link_params.mac_addr, bp->dev->dev_addr, ETH_ALEN);
8857 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
8859 #ifdef BCM_CNIC
8860 val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].iscsi_mac_upper);
8861 val = SHMEM_RD(bp, dev_info.port_hw_config[port].iscsi_mac_lower);
8862 bnx2x_set_mac_buf(bp->iscsi_mac, val, val2);
8863 #endif
8864 }
8866 static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
8867 {
8868 int func = BP_FUNC(bp);
8869 u32 val, val2;
8870 int rc = 0;
8872 bnx2x_get_common_hwinfo(bp);
8874 bp->e1hov = 0;
8875 bp->e1hmf = 0;
8876 if (CHIP_IS_E1H(bp)) {
8877 bp->mf_config =
8878 SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
8880 val = (SHMEM_RD(bp, mf_cfg.func_mf_config[FUNC_0].e1hov_tag) &
8881 FUNC_MF_CFG_E1HOV_TAG_MASK);
8882 if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT)
8883 bp->e1hmf = 1;
8884 BNX2X_DEV_INFO("%s function mode\n",
8885 IS_E1HMF(bp) ? "multi" : "single");
8887 if (IS_E1HMF(bp)) {
8888 val = (SHMEM_RD(bp, mf_cfg.func_mf_config[func].
8889 e1hov_tag) &
8890 FUNC_MF_CFG_E1HOV_TAG_MASK);
8891 if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
8892 bp->e1hov = val;
8893 BNX2X_DEV_INFO("E1HOV for func %d is %d "
8894 "(0x%04x)\n",
8895 func, bp->e1hov, bp->e1hov);
8896 } else {
8897 BNX2X_ERR("!!! No valid E1HOV for func %d,"
8898 " aborting\n", func);
8899 rc = -EPERM;
8900 }
8901 } else {
8902 if (BP_E1HVN(bp)) {
8903 BNX2X_ERR("!!! VN %d in single function mode,"
8904 " aborting\n", BP_E1HVN(bp));
8905 rc = -EPERM;
8906 }
8907 }
8908 }
8910 if (!BP_NOMCP(bp)) {
8911 bnx2x_get_port_hwinfo(bp);
8913 bp->fw_seq = (SHMEM_RD(bp, func_mb[func].drv_mb_header) &
8914 DRV_MSG_SEQ_NUMBER_MASK);
8915 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
8916 }
8918 if (IS_E1HMF(bp)) {
8919 val2 = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_upper);
8920 val = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_lower);
8921 if ((val2 != FUNC_MF_CFG_UPPERMAC_DEFAULT) &&
8922 (val != FUNC_MF_CFG_LOWERMAC_DEFAULT)) {
8923 bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
8924 bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
8925 bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
8926 bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
8927 bp->dev->dev_addr[4] = (u8)(val >> 8 & 0xff);
8928 bp->dev->dev_addr[5] = (u8)(val & 0xff);
8929 memcpy(bp->link_params.mac_addr, bp->dev->dev_addr,
8930 ETH_ALEN);
8931 memcpy(bp->dev->perm_addr, bp->dev->dev_addr,
8932 ETH_ALEN);
8933 }
8935 return rc;
8936 }
8938 if (BP_NOMCP(bp)) {
8939 /* only supposed to happen on emulation/FPGA */
8940 BNX2X_ERR("warning: random MAC workaround active\n");
8941 random_ether_addr(bp->dev->dev_addr);
8942 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
8943 }
8945 return rc;
8946 }
8948 static int __devinit bnx2x_init_bp(struct bnx2x *bp)
8949 {
8950 int func = BP_FUNC(bp);
8951 int timer_interval;
8952 int rc;
8954 /* Disable interrupt handling until HW is initialized */
8955 atomic_set(&bp->intr_sem, 1);
8956 smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */
8958 mutex_init(&bp->port.phy_mutex);
8959 #ifdef BCM_CNIC
8960 mutex_init(&bp->cnic_mutex);
8961 #endif
8963 INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task);
8964 INIT_WORK(&bp->reset_task, bnx2x_reset_task);
8966 rc = bnx2x_get_hwinfo(bp);
8968 /* need to reset chip if undi was active */
8969 if (!BP_NOMCP(bp))
8970 bnx2x_undi_unload(bp);
8972 if (CHIP_REV_IS_FPGA(bp))
8973 printk(KERN_ERR PFX "FPGA detected\n");
8975 if (BP_NOMCP(bp) && (func == 0))
8976 printk(KERN_ERR PFX
8977 "MCP disabled, must load devices in order!\n");
8979 /* Set multi queue mode */
8980 if ((multi_mode != ETH_RSS_MODE_DISABLED) &&
8981 ((int_mode == INT_MODE_INTx) || (int_mode == INT_MODE_MSI))) {
8982 printk(KERN_ERR PFX
8983 "Multi disabled since int_mode requested is not MSI-X\n");
8984 multi_mode = ETH_RSS_MODE_DISABLED;
8985 }
8986 bp->multi_mode = multi_mode;
8988 /* Set TPA flags */
8989 if (disable_tpa) {
8991 bp->flags &= ~TPA_ENABLE_FLAG;
8992 bp->dev->features &= ~NETIF_F_LRO;
8993 } else {
8994 bp->flags |= TPA_ENABLE_FLAG;
8995 bp->dev->features |= NETIF_F_LRO;
8996 }
8998 if (CHIP_IS_E1(bp))
8999 bp->dropless_fc = 0;
9000 else
9001 bp->dropless_fc = dropless_fc;
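9003 /* dropless (pause-based) flow control is only honoured on E1H; E1 forces it off */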
9005 bp->tx_ring_size = MAX_TX_AVAIL;
9006 bp->rx_ring_size = MAX_RX_AVAIL;
9013 timer_interval = (CHIP_REV_IS_SLOW(bp) ? 5*HZ : HZ);
9014 bp->current_interval = (poll ? poll : timer_interval);
9016 init_timer(&bp->timer);
9017 bp->timer.expires = jiffies + bp->current_interval;
9018 bp->timer.data = (unsigned long) bp;
9019 bp->timer.function = bnx2x_timer;
9021 return rc;
9022 }
9024 /*
9025 * ethtool service functions
9026 */
9028 /* All ethtool functions called with rtnl_lock */
9030 static int bnx2x_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
9031 {
9032 struct bnx2x *bp = netdev_priv(dev);
9034 cmd->supported = bp->port.supported;
9035 cmd->advertising = bp->port.advertising;
9037 if (netif_carrier_ok(dev)) {
9038 cmd->speed = bp->link_vars.line_speed;
9039 cmd->duplex = bp->link_vars.duplex;
9040 if (IS_E1HMF(bp)) {
9041 u16 vn_max_rate;
9043 vn_max_rate =
9044 ((bp->mf_config & FUNC_MF_CFG_MAX_BW_MASK) >>
9045 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
9046 if (vn_max_rate < cmd->speed)
9047 cmd->speed = vn_max_rate;
9048 }
9049 } else {
9050 cmd->speed = -1;
9051 cmd->duplex = -1;
9052 }
9054 if (bp->link_params.switch_cfg == SWITCH_CFG_10G) {
9055 u32 ext_phy_type =
9056 XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
9058 switch (ext_phy_type) {
9059 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
9060 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
9061 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
9062 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
9063 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
9064 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
9065 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
9066 cmd->port = PORT_FIBRE;
9067 break;
9069 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
9070 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481:
9071 cmd->port = PORT_TP;
9072 break;
9074 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
9075 BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
9076 bp->link_params.ext_phy_config);
9077 break;
9079 default:
9080 DP(NETIF_MSG_LINK, "BAD XGXS ext_phy_config 0x%x\n",
9081 bp->link_params.ext_phy_config);
9082 break;
9083 }
9084 } else
9085 cmd->port = PORT_TP;
9087 cmd->phy_address = bp->mdio.prtad;
9088 cmd->transceiver = XCVR_INTERNAL;
9090 if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
9091 cmd->autoneg = AUTONEG_ENABLE;
9092 else
9093 cmd->autoneg = AUTONEG_DISABLE;
9095 cmd->maxtxpkt = 0;
9096 cmd->maxrxpkt = 0;
9098 DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
9099 DP_LEVEL " supported 0x%x advertising 0x%x speed %d\n"
9100 DP_LEVEL " duplex %d port %d phy_address %d transceiver %d\n"
9101 DP_LEVEL " autoneg %d maxtxpkt %d maxrxpkt %d\n",
9102 cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
9103 cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
9104 cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);
9106 return 0;
9107 }
9109 static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
9110 {
9111 struct bnx2x *bp = netdev_priv(dev);
9112 u32 advertising;
9114 if (IS_E1HMF(bp))
9115 return 0;
9117 DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
9118 DP_LEVEL " supported 0x%x advertising 0x%x speed %d\n"
9119 DP_LEVEL " duplex %d port %d phy_address %d transceiver %d\n"
9120 DP_LEVEL " autoneg %d maxtxpkt %d maxrxpkt %d\n",
9121 cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
9122 cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
9123 cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);
9125 if (cmd->autoneg == AUTONEG_ENABLE) {
9126 if (!(bp->port.supported & SUPPORTED_Autoneg)) {
9127 DP(NETIF_MSG_LINK, "Autoneg not supported\n");
9128 return -EINVAL;
9129 }
9131 /* advertise the requested speed and duplex if supported */
9132 cmd->advertising &= bp->port.supported;
9134 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
9135 bp->link_params.req_duplex = DUPLEX_FULL;
9136 bp->port.advertising |= (ADVERTISED_Autoneg |
9137 cmd->advertising);
9139 } else { /* forced speed */
9140 /* advertise the requested speed and duplex if supported */
9141 switch (cmd->speed) {
9142 case SPEED_10:
9143 if (cmd->duplex == DUPLEX_FULL) {
9144 if (!(bp->port.supported &
9145 SUPPORTED_10baseT_Full)) {
9146 DP(NETIF_MSG_LINK,
9147 "10M full not supported\n");
9148 return -EINVAL;
9149 }
9151 advertising = (ADVERTISED_10baseT_Full |
9152 ADVERTISED_TP);
9153 } else {
9154 if (!(bp->port.supported &
9155 SUPPORTED_10baseT_Half)) {
9156 DP(NETIF_MSG_LINK,
9157 "10M half not supported\n");
9158 return -EINVAL;
9159 }
9161 advertising = (ADVERTISED_10baseT_Half |
9162 ADVERTISED_TP);
9163 }
9164 break;
9166 case SPEED_100:
9167 if (cmd->duplex == DUPLEX_FULL) {
9168 if (!(bp->port.supported &
9169 SUPPORTED_100baseT_Full)) {
9170 DP(NETIF_MSG_LINK,
9171 "100M full not supported\n");
9172 return -EINVAL;
9173 }
9175 advertising = (ADVERTISED_100baseT_Full |
9176 ADVERTISED_TP);
9177 } else {
9178 if (!(bp->port.supported &
9179 SUPPORTED_100baseT_Half)) {
9180 DP(NETIF_MSG_LINK,
9181 "100M half not supported\n");
9182 return -EINVAL;
9183 }
9185 advertising = (ADVERTISED_100baseT_Half |
9186 ADVERTISED_TP);
9187 }
9188 break;
9190 case SPEED_1000:
9191 if (cmd->duplex != DUPLEX_FULL) {
9192 DP(NETIF_MSG_LINK, "1G half not supported\n");
9193 return -EINVAL;
9194 }
9196 if (!(bp->port.supported & SUPPORTED_1000baseT_Full)) {
9197 DP(NETIF_MSG_LINK, "1G full not supported\n");
9198 return -EINVAL;
9199 }
9201 advertising = (ADVERTISED_1000baseT_Full |
9202 ADVERTISED_TP);
9203 break;
9205 case SPEED_2500:
9206 if (cmd->duplex != DUPLEX_FULL) {
9207 DP(NETIF_MSG_LINK,
9208 "2.5G half not supported\n");
9209 return -EINVAL;
9210 }
9212 if (!(bp->port.supported & SUPPORTED_2500baseX_Full)) {
9213 DP(NETIF_MSG_LINK,
9214 "2.5G full not supported\n");
9215 return -EINVAL;
9216 }
9218 advertising = (ADVERTISED_2500baseX_Full |
9219 ADVERTISED_TP);
9220 break;
9222 case SPEED_10000:
9223 if (cmd->duplex != DUPLEX_FULL) {
9224 DP(NETIF_MSG_LINK, "10G half not supported\n");
9225 return -EINVAL;
9226 }
9228 if (!(bp->port.supported & SUPPORTED_10000baseT_Full)) {
9229 DP(NETIF_MSG_LINK, "10G full not supported\n");
9230 return -EINVAL;
9231 }
9233 advertising = (ADVERTISED_10000baseT_Full |
9234 ADVERTISED_FIBRE);
9235 break;
9237 default:
9238 DP(NETIF_MSG_LINK, "Unsupported speed\n");
9239 return -EINVAL;
9240 }
9242 bp->link_params.req_line_speed = cmd->speed;
9243 bp->link_params.req_duplex = cmd->duplex;
9244 bp->port.advertising = advertising;
9245 }
9247 DP(NETIF_MSG_LINK, "req_line_speed %d\n"
9248 DP_LEVEL " req_duplex %d advertising 0x%x\n",
9249 bp->link_params.req_line_speed, bp->link_params.req_duplex,
9250 bp->port.advertising);
9252 if (netif_running(dev)) {
9253 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
9254 bnx2x_link_set(bp);
9255 }
9257 return 0;
9258 }
9260 #define IS_E1_ONLINE(info) (((info) & RI_E1_ONLINE) == RI_E1_ONLINE)
9261 #define IS_E1H_ONLINE(info) (((info) & RI_E1H_ONLINE) == RI_E1H_ONLINE)
9263 static int bnx2x_get_regs_len(struct net_device *dev)
9264 {
9265 struct bnx2x *bp = netdev_priv(dev);
9266 int regdump_len = 0;
9267 int i;
9269 if (CHIP_IS_E1(bp)) {
9270 for (i = 0; i < REGS_COUNT; i++)
9271 if (IS_E1_ONLINE(reg_addrs[i].info))
9272 regdump_len += reg_addrs[i].size;
9274 for (i = 0; i < WREGS_COUNT_E1; i++)
9275 if (IS_E1_ONLINE(wreg_addrs_e1[i].info))
9276 regdump_len += wreg_addrs_e1[i].size *
9277 (1 + wreg_addrs_e1[i].read_regs_count);
9279 } else { /* E1H */
9280 for (i = 0; i < REGS_COUNT; i++)
9281 if (IS_E1H_ONLINE(reg_addrs[i].info))
9282 regdump_len += reg_addrs[i].size;
9284 for (i = 0; i < WREGS_COUNT_E1H; i++)
9285 if (IS_E1H_ONLINE(wreg_addrs_e1h[i].info))
9286 regdump_len += wreg_addrs_e1h[i].size *
9287 (1 + wreg_addrs_e1h[i].read_regs_count);
9289 }
9290 regdump_len += sizeof(struct dump_hdr);
9292 return regdump_len;
9293 }
9295 static void bnx2x_get_regs(struct net_device *dev,
9296 struct ethtool_regs *regs, void *_p)
9297 {
9298 u32 *p = _p, i, j;
9299 struct bnx2x *bp = netdev_priv(dev);
9300 struct dump_hdr dump_hdr = {0};
9302 regs->version = 0;
9303 memset(p, 0, regs->len);
9305 if (!netif_running(bp->dev))
9306 return;
9308 dump_hdr.hdr_size = (sizeof(struct dump_hdr) / 4) - 1;
9309 dump_hdr.dump_sign = dump_sign_all;
9310 dump_hdr.xstorm_waitp = REG_RD(bp, XSTORM_WAITP_ADDR);
9311 dump_hdr.tstorm_waitp = REG_RD(bp, TSTORM_WAITP_ADDR);
9312 dump_hdr.ustorm_waitp = REG_RD(bp, USTORM_WAITP_ADDR);
9313 dump_hdr.cstorm_waitp = REG_RD(bp, CSTORM_WAITP_ADDR);
9314 dump_hdr.info = CHIP_IS_E1(bp) ? RI_E1_ONLINE : RI_E1H_ONLINE;
9316 memcpy(p, &dump_hdr, sizeof(struct dump_hdr));
9317 p += dump_hdr.hdr_size + 1;
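9318 /* hdr_size is in dwords minus one, so skip hdr_size + 1 dwords of header */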
9319 if (CHIP_IS_E1(bp)) {
9320 for (i = 0; i < REGS_COUNT; i++)
9321 if (IS_E1_ONLINE(reg_addrs[i].info))
9322 for (j = 0; j < reg_addrs[i].size; j++)
9323 *p++ = REG_RD(bp,
9324 reg_addrs[i].addr + j*4);
9326 } else { /* E1H */
9327 for (i = 0; i < REGS_COUNT; i++)
9328 if (IS_E1H_ONLINE(reg_addrs[i].info))
9329 for (j = 0; j < reg_addrs[i].size; j++)
9330 *p++ = REG_RD(bp,
9331 reg_addrs[i].addr + j*4);
9332 }
9333 }
9335 #define PHY_FW_VER_LEN 10
9337 static void bnx2x_get_drvinfo(struct net_device *dev,
9338 struct ethtool_drvinfo *info)
9339 {
9340 struct bnx2x *bp = netdev_priv(dev);
9341 u8 phy_fw_ver[PHY_FW_VER_LEN];
9343 strcpy(info->driver, DRV_MODULE_NAME);
9344 strcpy(info->version, DRV_MODULE_VERSION);
9346 phy_fw_ver[0] = '\0';
9347 if (bp->port.pmf) {
9348 bnx2x_acquire_phy_lock(bp);
9349 bnx2x_get_ext_phy_fw_version(&bp->link_params,
9350 (bp->state != BNX2X_STATE_CLOSED),
9351 phy_fw_ver, PHY_FW_VER_LEN);
9352 bnx2x_release_phy_lock(bp);
9353 }
9355 snprintf(info->fw_version, 32, "BC:%d.%d.%d%s%s",
9356 (bp->common.bc_ver & 0xff0000) >> 16,
9357 (bp->common.bc_ver & 0xff00) >> 8,
9358 (bp->common.bc_ver & 0xff),
9359 ((phy_fw_ver[0] != '\0') ? " PHY:" : ""), phy_fw_ver);
9360 strcpy(info->bus_info, pci_name(bp->pdev));
9361 info->n_stats = BNX2X_NUM_STATS;
9362 info->testinfo_len = BNX2X_NUM_TESTS;
9363 info->eedump_len = bp->common.flash_size;
9364 info->regdump_len = bnx2x_get_regs_len(dev);
9365 }
9367 static void bnx2x_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
9368 {
9369 struct bnx2x *bp = netdev_priv(dev);
9371 if (bp->flags & NO_WOL_FLAG) {
9372 wol->supported = 0;
9373 wol->wolopts = 0;
9374 } else {
9375 wol->supported = WAKE_MAGIC;
9376 if (bp->wol)
9377 wol->wolopts = WAKE_MAGIC;
9378 else
9379 wol->wolopts = 0;
9380 }
9381 memset(&wol->sopass, 0, sizeof(wol->sopass));
9382 }
9384 static int bnx2x_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
9385 {
9386 struct bnx2x *bp = netdev_priv(dev);
9388 if (wol->wolopts & ~WAKE_MAGIC)
9389 return -EINVAL;
9391 if (wol->wolopts & WAKE_MAGIC) {
9392 if (bp->flags & NO_WOL_FLAG)
9393 return -EINVAL;
9395 bp->wol = 1;
9396 } else
9397 bp->wol = 0;
9399 return 0;
9400 }
9402 static u32 bnx2x_get_msglevel(struct net_device *dev)
9403 {
9404 struct bnx2x *bp = netdev_priv(dev);
9406 return bp->msglevel;
9407 }
9409 static void bnx2x_set_msglevel(struct net_device *dev, u32 level)
9410 {
9411 struct bnx2x *bp = netdev_priv(dev);
9413 if (capable(CAP_NET_ADMIN))
9414 bp->msglevel = level;
9415 }
9417 static int bnx2x_nway_reset(struct net_device *dev)
9418 {
9419 struct bnx2x *bp = netdev_priv(dev);
9421 if (!bp->port.pmf)
9422 return 0;
9424 if (netif_running(dev)) {
9425 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
9426 bnx2x_link_set(bp);
9427 }
9429 return 0;
9430 }
9432 static u32 bnx2x_get_link(struct net_device *dev)
9433 {
9434 struct bnx2x *bp = netdev_priv(dev);
9436 return bp->link_vars.link_up;
9437 }
9439 static int bnx2x_get_eeprom_len(struct net_device *dev)
9440 {
9441 struct bnx2x *bp = netdev_priv(dev);
9443 return bp->common.flash_size;
9444 }
9446 static int bnx2x_acquire_nvram_lock(struct bnx2x *bp)
9447 {
9448 int port = BP_PORT(bp);
9449 int count, i;
9450 u32 val = 0;
9452 /* adjust timeout for emulation/FPGA */
9453 count = NVRAM_TIMEOUT_COUNT;
9454 if (CHIP_REV_IS_SLOW(bp))
9455 count *= 100;
9457 /* request access to nvram interface */
9458 REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
9459 (MCPR_NVM_SW_ARB_ARB_REQ_SET1 << port));
9461 for (i = 0; i < count*10; i++) {
9462 val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
9463 if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))
9464 break;
9466 udelay(5);
9467 }
9469 if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))) {
9470 DP(BNX2X_MSG_NVM, "cannot get access to nvram interface\n");
9471 return -EBUSY;
9472 }
9474 return 0;
9475 }
9477 static int bnx2x_release_nvram_lock(struct bnx2x *bp)
9478 {
9479 int port = BP_PORT(bp);
9480 int count, i;
9481 u32 val = 0;
9483 /* adjust timeout for emulation/FPGA */
9484 count = NVRAM_TIMEOUT_COUNT;
9485 if (CHIP_REV_IS_SLOW(bp))
9486 count *= 100;
9488 /* relinquish nvram interface */
9489 REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
9490 (MCPR_NVM_SW_ARB_ARB_REQ_CLR1 << port));
9492 for (i = 0; i < count*10; i++) {
9493 val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
9494 if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)))
9495 break;
9497 udelay(5);
9498 }
9500 if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)) {
9501 DP(BNX2X_MSG_NVM, "cannot free access to nvram interface\n");
9502 return -EBUSY;
9503 }
9505 return 0;
9506 }
9508 static void bnx2x_enable_nvram_access(struct bnx2x *bp)
9509 {
9510 u32 val;
9512 val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
9514 /* enable both bits, even on read */
9515 REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
9516 (val | MCPR_NVM_ACCESS_ENABLE_EN |
9517 MCPR_NVM_ACCESS_ENABLE_WR_EN));
9518 }
9520 static void bnx2x_disable_nvram_access(struct bnx2x *bp)
9521 {
9522 u32 val;
9524 val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
9526 /* disable both bits, even after read */
9527 REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
9528 (val & ~(MCPR_NVM_ACCESS_ENABLE_EN |
9529 MCPR_NVM_ACCESS_ENABLE_WR_EN)));
9530 }
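9531 /* read a single dword through the MCP NVRAM command interface, polling for DONE */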
9532 static int bnx2x_nvram_read_dword(struct bnx2x *bp, u32 offset, __be32 *ret_val,
9533 u32 cmd_flags)
9534 {
9535 int count, i, rc;
9536 u32 val;
9538 /* build the command word */
9539 cmd_flags |= MCPR_NVM_COMMAND_DOIT;
9541 /* need to clear DONE bit separately */
9542 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);
9544 /* address of the NVRAM to read from */
9545 REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
9546 (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));
9548 /* issue a read command */
9549 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);
9551 /* adjust timeout for emulation/FPGA */
9552 count = NVRAM_TIMEOUT_COUNT;
9553 if (CHIP_REV_IS_SLOW(bp))
9554 count *= 100;
9556 /* wait for completion */
9557 *ret_val = 0;
9558 rc = -EBUSY;
9559 for (i = 0; i < count; i++) {
9560 udelay(5);
9561 val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
9563 if (val & MCPR_NVM_COMMAND_DONE) {
9564 val = REG_RD(bp, MCP_REG_MCPR_NVM_READ);
9565 /* we read nvram data in cpu order,
9566 * but ethtool sees it as an array of bytes;
9567 * converting to big-endian preserves the flash byte order */
9568 *ret_val = cpu_to_be32(val);
9569 rc = 0;
9570 break;
9571 }
9572 }
9574 return rc;
9575 }
9577 static int bnx2x_nvram_read(struct bnx2x *bp, u32 offset, u8 *ret_buf,
9578 int buf_size)
9579 {
9580 int rc;
9581 u32 cmd_flags;
9582 __be32 val;
9584 if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
9585 DP(BNX2X_MSG_NVM,
9586 "Invalid parameter: offset 0x%x buf_size 0x%x\n",
9587 offset, buf_size);
9588 return -EINVAL;
9589 }
9591 if (offset + buf_size > bp->common.flash_size) {
9592 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
9593 " buf_size (0x%x) > flash_size (0x%x)\n",
9594 offset, buf_size, bp->common.flash_size);
9595 return -EINVAL;
9596 }
9598 /* request access to nvram interface */
9599 rc = bnx2x_acquire_nvram_lock(bp);
9600 if (rc)
9601 return rc;
9603 /* enable access to nvram interface */
9604 bnx2x_enable_nvram_access(bp);
9606 /* read the first word(s) */
9607 cmd_flags = MCPR_NVM_COMMAND_FIRST;
9608 while ((buf_size > sizeof(u32)) && (rc == 0)) {
9609 rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
9610 memcpy(ret_buf, &val, 4);
9612 /* advance to the next dword */
9613 offset += sizeof(u32);
9614 ret_buf += sizeof(u32);
9615 buf_size -= sizeof(u32);
9616 cmd_flags = 0;
9617 }
9619 if (rc == 0) {
9620 cmd_flags |= MCPR_NVM_COMMAND_LAST;
9621 rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
9622 memcpy(ret_buf, &val, 4);
9623 }
9625 /* disable access to nvram interface */
9626 bnx2x_disable_nvram_access(bp);
9627 bnx2x_release_nvram_lock(bp);
9629 return rc;
9630 }
9632 static int bnx2x_get_eeprom(struct net_device *dev,
9633 struct ethtool_eeprom *eeprom, u8 *eebuf)
9634 {
9635 struct bnx2x *bp = netdev_priv(dev);
9636 int rc;
9638 if (!netif_running(dev))
9639 return -EAGAIN;
9641 DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
9642 DP_LEVEL " magic 0x%x offset 0x%x (%d) len 0x%x (%d)\n",
9643 eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
9644 eeprom->len, eeprom->len);
9646 /* parameters already validated in ethtool_get_eeprom */
9648 rc = bnx2x_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
9650 return rc;
9651 }
9653 static int bnx2x_nvram_write_dword(struct bnx2x *bp, u32 offset, u32 val,
9654 u32 cmd_flags)
9655 {
9656 int count, i, rc;
9658 /* build the command word */
9659 cmd_flags |= MCPR_NVM_COMMAND_DOIT | MCPR_NVM_COMMAND_WR;
9661 /* need to clear DONE bit separately */
9662 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);
9664 /* write the data */
9665 REG_WR(bp, MCP_REG_MCPR_NVM_WRITE, val);
9667 /* address of the NVRAM to write to */
9668 REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
9669 (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));
9671 /* issue the write command */
9672 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);
9674 /* adjust timeout for emulation/FPGA */
9675 count = NVRAM_TIMEOUT_COUNT;
9676 if (CHIP_REV_IS_SLOW(bp))
9677 count *= 100;
9679 /* wait for completion */
9680 rc = -EBUSY;
9681 for (i = 0; i < count; i++) {
9682 udelay(5);
9683 val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
9684 if (val & MCPR_NVM_COMMAND_DONE) {
9685 rc = 0;
9686 break;
9687 }
9688 }
9690 return rc;
9691 }
9693 #define BYTE_OFFSET(offset) (8 * (offset & 0x03))
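9694 /* bit shift of a byte within its aligned dword, e.g. BYTE_OFFSET(0x102) == 16 */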
9695 static int bnx2x_nvram_write1(struct bnx2x *bp, u32 offset, u8 *data_buf,
9696 int buf_size)
9697 {
9698 int rc;
9699 u32 cmd_flags;
9700 u32 align_offset;
9701 __be32 val;
9703 if (offset + buf_size > bp->common.flash_size) {
9704 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
9705 " buf_size (0x%x) > flash_size (0x%x)\n",
9706 offset, buf_size, bp->common.flash_size);
9707 return -EINVAL;
9708 }
9710 /* request access to nvram interface */
9711 rc = bnx2x_acquire_nvram_lock(bp);
9712 if (rc)
9713 return rc;
9715 /* enable access to nvram interface */
9716 bnx2x_enable_nvram_access(bp);
9718 cmd_flags = (MCPR_NVM_COMMAND_FIRST | MCPR_NVM_COMMAND_LAST);
9719 align_offset = (offset & ~0x03);
9720 rc = bnx2x_nvram_read_dword(bp, align_offset, &val, cmd_flags);
9722 if (rc == 0) {
9723 val &= ~(0xff << BYTE_OFFSET(offset));
9724 val |= (*data_buf << BYTE_OFFSET(offset));
9726 /* nvram data is returned as an array of bytes
9727 * convert it back to cpu order */
9728 val = be32_to_cpu(val);
9730 rc = bnx2x_nvram_write_dword(bp, align_offset, val,
9731 cmd_flags);
9732 }
9734 /* disable access to nvram interface */
9735 bnx2x_disable_nvram_access(bp);
9736 bnx2x_release_nvram_lock(bp);
9738 return rc;
9739 }
9741 static int bnx2x_nvram_write(struct bnx2x *bp, u32 offset, u8 *data_buf,
9742 int buf_size)
9743 {
9744 int rc;
9745 u32 cmd_flags;
9746 u32 val;
9747 u32 written_so_far;
9749 if (buf_size == 1) /* ethtool */
9750 return bnx2x_nvram_write1(bp, offset, data_buf, buf_size);
9752 if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
9753 DP(BNX2X_MSG_NVM,
9754 "Invalid parameter: offset 0x%x buf_size 0x%x\n",
9755 offset, buf_size);
9756 return -EINVAL;
9757 }
9759 if (offset + buf_size > bp->common.flash_size) {
9760 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
9761 " buf_size (0x%x) > flash_size (0x%x)\n",
9762 offset, buf_size, bp->common.flash_size);
9763 return -EINVAL;
9764 }
9766 /* request access to nvram interface */
9767 rc = bnx2x_acquire_nvram_lock(bp);
9768 if (rc)
9769 return rc;
9771 /* enable access to nvram interface */
9772 bnx2x_enable_nvram_access(bp);
9774 written_so_far = 0;
9775 cmd_flags = MCPR_NVM_COMMAND_FIRST;
9776 while ((written_so_far < buf_size) && (rc == 0)) {
9777 if (written_so_far == (buf_size - sizeof(u32)))
9778 cmd_flags |= MCPR_NVM_COMMAND_LAST;
9779 else if (((offset + 4) % NVRAM_PAGE_SIZE) == 0)
9780 cmd_flags |= MCPR_NVM_COMMAND_LAST;
9781 else if ((offset % NVRAM_PAGE_SIZE) == 0)
9782 cmd_flags |= MCPR_NVM_COMMAND_FIRST;
9784 memcpy(&val, data_buf, 4);
9786 rc = bnx2x_nvram_write_dword(bp, offset, val, cmd_flags);
9788 /* advance to the next dword */
9789 offset += sizeof(u32);
9790 data_buf += sizeof(u32);
9791 written_so_far += sizeof(u32);
9792 cmd_flags = 0;
9793 }
9795 /* disable access to nvram interface */
9796 bnx2x_disable_nvram_access(bp);
9797 bnx2x_release_nvram_lock(bp);
9799 return rc;
9800 }
9802 static int bnx2x_set_eeprom(struct net_device *dev,
9803 struct ethtool_eeprom *eeprom, u8 *eebuf)
9804 {
9805 struct bnx2x *bp = netdev_priv(dev);
9806 int port = BP_PORT(bp);
9807 int rc = 0;
9809 if (!netif_running(dev))
9810 return -EAGAIN;
9812 DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
9813 DP_LEVEL " magic 0x%x offset 0x%x (%d) len 0x%x (%d)\n",
9814 eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
9815 eeprom->len, eeprom->len);
9817 /* parameters already validated in ethtool_set_eeprom */
9819 /* PHY eeprom can be accessed only by the PMF */
9820 if ((eeprom->magic >= 0x50485900) && (eeprom->magic <= 0x504859FF) &&
9821 !bp->port.pmf)
9822 return -EINVAL;
9824 if (eeprom->magic == 0x50485950) {
9825 /* 'PHYP' (0x50485950): prepare phy for FW upgrade */
9826 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
9828 bnx2x_acquire_phy_lock(bp);
9829 rc |= bnx2x_link_reset(&bp->link_params,
9830 &bp->link_vars, 0);
9831 if (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config) ==
9832 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101)
9833 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_0,
9834 MISC_REGISTERS_GPIO_HIGH, port);
9835 bnx2x_release_phy_lock(bp);
9836 bnx2x_link_report(bp);
9838 } else if (eeprom->magic == 0x50485952) {
9839 /* 'PHYR' (0x50485952): re-init link after FW upgrade */
9840 if ((bp->state == BNX2X_STATE_OPEN) ||
9841 (bp->state == BNX2X_STATE_DISABLED)) {
9842 bnx2x_acquire_phy_lock(bp);
9843 rc |= bnx2x_link_reset(&bp->link_params,
9844 &bp->link_vars, 1);
9846 rc |= bnx2x_phy_init(&bp->link_params,
9847 &bp->link_vars);
9848 bnx2x_release_phy_lock(bp);
9849 bnx2x_calc_fc_adv(bp);
9850 }
9851 } else if (eeprom->magic == 0x53985943) {
9852 /* 'PHYC' (0x53985943): PHY FW upgrade completed */
9853 if (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config) ==
9854 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101) {
9855 u8 ext_phy_addr =
9856 XGXS_EXT_PHY_ADDR(bp->link_params.ext_phy_config);
9858 /* DSP Remove Download Mode */
9859 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_0,
9860 MISC_REGISTERS_GPIO_LOW, port);
9862 bnx2x_acquire_phy_lock(bp);
9864 bnx2x_sfx7101_sp_sw_reset(bp, port, ext_phy_addr);
9866 /* wait 0.5 sec to allow it to run */
9867 msleep(500);
9868 bnx2x_ext_phy_hw_reset(bp, port);
9869 msleep(500);
9870 bnx2x_release_phy_lock(bp);
9871 }
9872 } else
9873 rc = bnx2x_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
9875 return rc;
9876 }
9878 static int bnx2x_get_coalesce(struct net_device *dev,
9879 struct ethtool_coalesce *coal)
9880 {
9881 struct bnx2x *bp = netdev_priv(dev);
9883 memset(coal, 0, sizeof(struct ethtool_coalesce));
9885 coal->rx_coalesce_usecs = bp->rx_ticks;
9886 coal->tx_coalesce_usecs = bp->tx_ticks;
9888 return 0;
9889 }
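9890 /* the HC coalescing timer counts 12-usec hardware ticks, at most 0xf0 of them */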
9891 #define BNX2X_MAX_COALES_TOUT (0xf0*12) /* Maximal coalescing timeout in us */
9892 static int bnx2x_set_coalesce(struct net_device *dev,
9893 struct ethtool_coalesce *coal)
9894 {
9895 struct bnx2x *bp = netdev_priv(dev);
9897 bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
9898 if (bp->rx_ticks > BNX2X_MAX_COALES_TOUT)
9899 bp->rx_ticks = BNX2X_MAX_COALES_TOUT;
9901 bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
9902 if (bp->tx_ticks > BNX2X_MAX_COALES_TOUT)
9903 bp->tx_ticks = BNX2X_MAX_COALES_TOUT;
9905 if (netif_running(dev))
9906 bnx2x_update_coalesce(bp);
9908 return 0;
9909 }
9911 static void bnx2x_get_ringparam(struct net_device *dev,
9912 struct ethtool_ringparam *ering)
9913 {
9914 struct bnx2x *bp = netdev_priv(dev);
9916 ering->rx_max_pending = MAX_RX_AVAIL;
9917 ering->rx_mini_max_pending = 0;
9918 ering->rx_jumbo_max_pending = 0;
9920 ering->rx_pending = bp->rx_ring_size;
9921 ering->rx_mini_pending = 0;
9922 ering->rx_jumbo_pending = 0;
9924 ering->tx_max_pending = MAX_TX_AVAIL;
9925 ering->tx_pending = bp->tx_ring_size;
9926 }
9928 static int bnx2x_set_ringparam(struct net_device *dev,
9929 struct ethtool_ringparam *ering)
9930 {
9931 struct bnx2x *bp = netdev_priv(dev);
9932 int rc = 0;
9934 if ((ering->rx_pending > MAX_RX_AVAIL) ||
9935 (ering->tx_pending > MAX_TX_AVAIL) ||
9936 (ering->tx_pending <= MAX_SKB_FRAGS + 4))
9937 return -EINVAL;
9939 bp->rx_ring_size = ering->rx_pending;
9940 bp->tx_ring_size = ering->tx_pending;
9942 if (netif_running(dev)) {
9943 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
9944 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
9945 }
9947 return rc;
9948 }
9950 static void bnx2x_get_pauseparam(struct net_device *dev,
9951 struct ethtool_pauseparam *epause)
9952 {
9953 struct bnx2x *bp = netdev_priv(dev);
9955 epause->autoneg = (bp->link_params.req_flow_ctrl ==
9956 BNX2X_FLOW_CTRL_AUTO) &&
9957 (bp->link_params.req_line_speed == SPEED_AUTO_NEG);
9959 epause->rx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) ==
9960 BNX2X_FLOW_CTRL_RX);
9961 epause->tx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX) ==
9962 BNX2X_FLOW_CTRL_TX);
9964 DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
9965 DP_LEVEL " autoneg %d rx_pause %d tx_pause %d\n",
9966 epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
9967 }
9969 static int bnx2x_set_pauseparam(struct net_device *dev,
9970 struct ethtool_pauseparam *epause)
9971 {
9972 struct bnx2x *bp = netdev_priv(dev);
9974 if (IS_E1HMF(bp))
9975 return 0;
9977 DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
9978 DP_LEVEL " autoneg %d rx_pause %d tx_pause %d\n",
9979 epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
9981 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO;
9983 if (epause->rx_pause)
9984 bp->link_params.req_flow_ctrl |= BNX2X_FLOW_CTRL_RX;
9986 if (epause->tx_pause)
9987 bp->link_params.req_flow_ctrl |= BNX2X_FLOW_CTRL_TX;
9989 if (bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO)
9990 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_NONE;
9992 if (epause->autoneg) {
9993 if (!(bp->port.supported & SUPPORTED_Autoneg)) {
9994 DP(NETIF_MSG_LINK, "autoneg not supported\n");
9995 return -EINVAL;
9996 }
9998 if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
9999 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO;
10000 }
10002 DP(NETIF_MSG_LINK,
10003 "req_flow_ctrl 0x%x\n", bp->link_params.req_flow_ctrl);
10005 if (netif_running(dev)) {
10006 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
10007 bnx2x_link_set(bp);
10008 }
10010 return 0;
10011 }
10013 static int bnx2x_set_flags(struct net_device *dev, u32 data)
10014 {
10015 struct bnx2x *bp = netdev_priv(dev);
10016 int changed = 0;
10017 int rc = 0;
10019 /* TPA requires Rx CSUM offloading */
10020 if ((data & ETH_FLAG_LRO) && bp->rx_csum) {
10021 if (!(dev->features & NETIF_F_LRO)) {
10022 dev->features |= NETIF_F_LRO;
10023 bp->flags |= TPA_ENABLE_FLAG;
10024 changed = 1;
10025 }
10027 } else if (dev->features & NETIF_F_LRO) {
10028 dev->features &= ~NETIF_F_LRO;
10029 bp->flags &= ~TPA_ENABLE_FLAG;
10030 changed = 1;
10031 }
10033 if (changed && netif_running(dev)) {
10034 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
10035 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
10036 }
10038 return rc;
10039 }
10041 static u32 bnx2x_get_rx_csum(struct net_device *dev)
10042 {
10043 struct bnx2x *bp = netdev_priv(dev);
10045 return bp->rx_csum;
10046 }
10048 static int bnx2x_set_rx_csum(struct net_device *dev, u32 data)
10049 {
10050 struct bnx2x *bp = netdev_priv(dev);
10051 int rc = 0;
10053 bp->rx_csum = data;
10055 /* Disable TPA when Rx CSUM is disabled; otherwise all
10056 TPA'ed packets will be discarded due to a wrong TCP CSUM */
10057 if (!data) {
10058 u32 flags = ethtool_op_get_flags(dev);
10060 rc = bnx2x_set_flags(dev, (flags & ~ETH_FLAG_LRO));
10061 }
10063 return rc;
10064 }
10066 static int bnx2x_set_tso(struct net_device *dev, u32 data)
10067 {
10068 if (data) {
10069 dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
10070 dev->features |= NETIF_F_TSO6;
10071 } else {
10072 dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO_ECN);
10073 dev->features &= ~NETIF_F_TSO6;
10074 }
10076 return 0;
10077 }
10079 static const struct {
10080 char string[ETH_GSTRING_LEN];
10081 } bnx2x_tests_str_arr[BNX2X_NUM_TESTS] = {
10082 { "register_test (offline)" },
10083 { "memory_test (offline)" },
10084 { "loopback_test (offline)" },
10085 { "nvram_test (online)" },
10086 { "interrupt_test (online)" },
10087 { "link_test (online)" },
10088 { "idle check (online)" }
10091 static int bnx2x_test_registers(struct bnx2x *bp)
10092 {
10093 int idx, i, rc = -ENODEV;
10094 u32 wr_val = 0;
10095 int port = BP_PORT(bp);
10096 static const struct {
10097 u32 offset0;
10098 u32 offset1;
10099 u32 mask;
10100 } reg_tbl[] = {
10101 /* 0 */ { BRB1_REG_PAUSE_LOW_THRESHOLD_0, 4, 0x000003ff },
10102 { DORQ_REG_DB_ADDR0, 4, 0xffffffff },
10103 { HC_REG_AGG_INT_0, 4, 0x000003ff },
10104 { PBF_REG_MAC_IF0_ENABLE, 4, 0x00000001 },
10105 { PBF_REG_P0_INIT_CRD, 4, 0x000007ff },
10106 { PRS_REG_CID_PORT_0, 4, 0x00ffffff },
10107 { PXP2_REG_PSWRQ_CDU0_L2P, 4, 0x000fffff },
10108 { PXP2_REG_RQ_CDU0_EFIRST_MEM_ADDR, 8, 0x0003ffff },
10109 { PXP2_REG_PSWRQ_TM0_L2P, 4, 0x000fffff },
10110 { PXP2_REG_RQ_USDM0_EFIRST_MEM_ADDR, 8, 0x0003ffff },
10111 /* 10 */ { PXP2_REG_PSWRQ_TSDM0_L2P, 4, 0x000fffff },
10112 { QM_REG_CONNNUM_0, 4, 0x000fffff },
10113 { TM_REG_LIN0_MAX_ACTIVE_CID, 4, 0x0003ffff },
10114 { SRC_REG_KEYRSS0_0, 40, 0xffffffff },
10115 { SRC_REG_KEYRSS0_7, 40, 0xffffffff },
10116 { XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD00, 4, 0x00000001 },
10117 { XCM_REG_WU_DA_CNT_CMD00, 4, 0x00000003 },
10118 { XCM_REG_GLB_DEL_ACK_MAX_CNT_0, 4, 0x000000ff },
10119 { NIG_REG_LLH0_T_BIT, 4, 0x00000001 },
10120 { NIG_REG_EMAC0_IN_EN, 4, 0x00000001 },
10121 /* 20 */ { NIG_REG_BMAC0_IN_EN, 4, 0x00000001 },
10122 { NIG_REG_XCM0_OUT_EN, 4, 0x00000001 },
10123 { NIG_REG_BRB0_OUT_EN, 4, 0x00000001 },
10124 { NIG_REG_LLH0_XCM_MASK, 4, 0x00000007 },
10125 { NIG_REG_LLH0_ACPI_PAT_6_LEN, 68, 0x000000ff },
10126 { NIG_REG_LLH0_ACPI_PAT_0_CRC, 68, 0xffffffff },
10127 { NIG_REG_LLH0_DEST_MAC_0_0, 160, 0xffffffff },
10128 { NIG_REG_LLH0_DEST_IP_0_1, 160, 0xffffffff },
10129 { NIG_REG_LLH0_IPV4_IPV6_0, 160, 0x00000001 },
10130 { NIG_REG_LLH0_DEST_UDP_0, 160, 0x0000ffff },
10131 /* 30 */ { NIG_REG_LLH0_DEST_TCP_0, 160, 0x0000ffff },
10132 { NIG_REG_LLH0_VLAN_ID_0, 160, 0x00000fff },
10133 { NIG_REG_XGXS_SERDES0_MODE_SEL, 4, 0x00000001 },
10134 { NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0, 4, 0x00000001 },
10135 { NIG_REG_STATUS_INTERRUPT_PORT0, 4, 0x07ffffff },
10136 { NIG_REG_XGXS0_CTRL_EXTREMOTEMDIOST, 24, 0x00000001 },
10137 { NIG_REG_SERDES0_CTRL_PHY_ADDR, 16, 0x0000001f },
10139 { 0xffffffff, 0, 0x00000000 }
10140 };
10142 if (!netif_running(bp->dev))
10143 return rc;
10145 /* Repeat the test twice:
10146 First by writing 0x00000000, second by writing 0xffffffff */
10147 for (idx = 0; idx < 2; idx++) {
10148 switch (idx) {
10149 case 0:
10150 wr_val = 0;
10151 break;
10153 case 1:
10154 wr_val = 0xffffffff;
10155 break;
10156 }
10158 for (i = 0; reg_tbl[i].offset0 != 0xffffffff; i++) {
10159 u32 offset, mask, save_val, val;
10161 offset = reg_tbl[i].offset0 + port*reg_tbl[i].offset1;
10162 mask = reg_tbl[i].mask;
10164 save_val = REG_RD(bp, offset);
10166 REG_WR(bp, offset, wr_val);
10167 val = REG_RD(bp, offset);
10169 /* Restore the original register's value */
10170 REG_WR(bp, offset, save_val);
10172 /* verify that the read value matches the expected value */
10173 if ((val & mask) != (wr_val & mask))
10174 goto test_reg_exit;
10175 }
10176 }
10178 rc = 0;
10180 test_reg_exit:
10181 return rc;
10182 }
10184 static int bnx2x_test_memory(struct bnx2x *bp)
10185 {
10186 int i, j, rc = -ENODEV;
10187 u32 val;
10188 static const struct {
10189 u32 offset;
10190 int size;
10191 } mem_tbl[] = {
10192 { CCM_REG_XX_DESCR_TABLE, CCM_REG_XX_DESCR_TABLE_SIZE },
10193 { CFC_REG_ACTIVITY_COUNTER, CFC_REG_ACTIVITY_COUNTER_SIZE },
10194 { CFC_REG_LINK_LIST, CFC_REG_LINK_LIST_SIZE },
10195 { DMAE_REG_CMD_MEM, DMAE_REG_CMD_MEM_SIZE },
10196 { TCM_REG_XX_DESCR_TABLE, TCM_REG_XX_DESCR_TABLE_SIZE },
10197 { UCM_REG_XX_DESCR_TABLE, UCM_REG_XX_DESCR_TABLE_SIZE },
10198 { XCM_REG_XX_DESCR_TABLE, XCM_REG_XX_DESCR_TABLE_SIZE },
10200 { 0xffffffff, 0 }
10201 };
10202 static const struct {
10203 char *name;
10204 u32 offset;
10205 u32 e1_mask;
10206 u32 e1h_mask;
10207 } prty_tbl[] = {
10208 { "CCM_PRTY_STS", CCM_REG_CCM_PRTY_STS, 0x3ffc0, 0 },
10209 { "CFC_PRTY_STS", CFC_REG_CFC_PRTY_STS, 0x2, 0x2 },
10210 { "DMAE_PRTY_STS", DMAE_REG_DMAE_PRTY_STS, 0, 0 },
10211 { "TCM_PRTY_STS", TCM_REG_TCM_PRTY_STS, 0x3ffc0, 0 },
10212 { "UCM_PRTY_STS", UCM_REG_UCM_PRTY_STS, 0x3ffc0, 0 },
10213 { "XCM_PRTY_STS", XCM_REG_XCM_PRTY_STS, 0x3ffc1, 0 },
10215 { NULL, 0xffffffff, 0, 0 }
10216 };
10218 if (!netif_running(bp->dev))
10219 return rc;
10221 /* Go through all the memories */
10222 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++)
10223 for (j = 0; j < mem_tbl[i].size; j++)
10224 REG_RD(bp, mem_tbl[i].offset + j*4);
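10225 /* plain reads are enough: parity errors latch into the *_PRTY_STS registers */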
10226 /* Check the parity status */
10227 for (i = 0; prty_tbl[i].offset != 0xffffffff; i++) {
10228 val = REG_RD(bp, prty_tbl[i].offset);
10229 if ((CHIP_IS_E1(bp) && (val & ~(prty_tbl[i].e1_mask))) ||
10230 (CHIP_IS_E1H(bp) && (val & ~(prty_tbl[i].e1h_mask)))) {
10231 DP(NETIF_MSG_HW,
10232 "%s is 0x%x\n", prty_tbl[i].name, val);
10233 goto test_mem_exit;
10234 }
10235 }
10237 rc = 0;
10239 test_mem_exit:
10240 return rc;
10241 }
10243 static void bnx2x_wait_for_link(struct bnx2x *bp, u8 link_up)
10244 {
10245 int cnt = 1000;
10247 if (link_up)
10248 while (bnx2x_link_test(bp) && cnt--)
10249 msleep(10);
10250 }
10252 static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode, u8 link_up)
10253 {
10254 unsigned int pkt_size, num_pkts, i;
10255 struct sk_buff *skb;
10256 unsigned char *packet;
10257 struct bnx2x_fastpath *fp_rx = &bp->fp[0];
10258 struct bnx2x_fastpath *fp_tx = &bp->fp[bp->num_rx_queues];
10259 u16 tx_start_idx, tx_idx;
10260 u16 rx_start_idx, rx_idx;
10261 u16 pkt_prod, bd_prod;
10262 struct sw_tx_bd *tx_buf;
10263 struct eth_tx_start_bd *tx_start_bd;
10264 struct eth_tx_parse_bd *pbd = NULL;
10265 dma_addr_t mapping;
10266 union eth_rx_cqe *cqe;
10267 u8 cqe_fp_flags;
10268 struct sw_rx_bd *rx_buf;
10269 u16 len;
10270 int rc = -ENODEV;
10272 /* check the loopback mode */
10273 switch (loopback_mode) {
10274 case BNX2X_PHY_LOOPBACK:
10275 if (bp->link_params.loopback_mode != LOOPBACK_XGXS_10)
10276 return -EINVAL;
10277 break;
10278 case BNX2X_MAC_LOOPBACK:
10279 bp->link_params.loopback_mode = LOOPBACK_BMAC;
10280 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
10281 break;
10282 default:
10283 return -EINVAL;
10284 }
10286 /* prepare the loopback packet */
10287 pkt_size = (((bp->dev->mtu < ETH_MAX_PACKET_SIZE) ?
10288 bp->dev->mtu : ETH_MAX_PACKET_SIZE) + ETH_HLEN);
10289 skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
10290 if (!skb) {
10291 rc = -ENOMEM;
10292 goto test_loopback_exit;
10293 }
10294 packet = skb_put(skb, pkt_size);
10295 memcpy(packet, bp->dev->dev_addr, ETH_ALEN);
10296 memset(packet + ETH_ALEN, 0, ETH_ALEN);
10297 memset(packet + 2*ETH_ALEN, 0x77, (ETH_HLEN - 2*ETH_ALEN));
10298 for (i = ETH_HLEN; i < pkt_size; i++)
10299 packet[i] = (unsigned char) (i & 0xff);
10301 /* send the loopback packet */
10302 num_pkts = 0;
10303 tx_start_idx = le16_to_cpu(*fp_tx->tx_cons_sb);
10304 rx_start_idx = le16_to_cpu(*fp_rx->rx_cons_sb);
10306 pkt_prod = fp_tx->tx_pkt_prod++;
10307 tx_buf = &fp_tx->tx_buf_ring[TX_BD(pkt_prod)];
10308 tx_buf->first_bd = fp_tx->tx_bd_prod;
10309 tx_buf->skb = skb;
10310 tx_buf->flags = 0;
10312 bd_prod = TX_BD(fp_tx->tx_bd_prod);
10313 tx_start_bd = &fp_tx->tx_desc_ring[bd_prod].start_bd;
10314 mapping = pci_map_single(bp->pdev, skb->data,
10315 skb_headlen(skb), PCI_DMA_TODEVICE);
10316 tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
10317 tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
10318 tx_start_bd->nbd = cpu_to_le16(2); /* start + pbd */
10319 tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb));
10320 tx_start_bd->vlan = cpu_to_le16(pkt_prod);
10321 tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
10322 tx_start_bd->general_data = ((UNICAST_ADDRESS <<
10323 ETH_TX_START_BD_ETH_ADDR_TYPE_SHIFT) | 1);
10325 /* turn on parsing and get a BD */
10326 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
10327 pbd = &fp_tx->tx_desc_ring[bd_prod].parse_bd;
10329 memset(pbd, 0, sizeof(struct eth_tx_parse_bd));
10331 wmb();
10333 fp_tx->tx_db.data.prod += 2;
10334 barrier();
10335 DOORBELL(bp, fp_tx->index - bp->num_rx_queues, fp_tx->tx_db.raw);
10337 mmiowb();
10339 num_pkts++;
10340 fp_tx->tx_bd_prod += 2; /* start + pbd */
10341 bp->dev->trans_start = jiffies;
10343 udelay(100);
10345 tx_idx = le16_to_cpu(*fp_tx->tx_cons_sb);
10346 if (tx_idx != tx_start_idx + num_pkts)
10347 goto test_loopback_exit;
10349 rx_idx = le16_to_cpu(*fp_rx->rx_cons_sb);
10350 if (rx_idx != rx_start_idx + num_pkts)
10351 goto test_loopback_exit;
10353 cqe = &fp_rx->rx_comp_ring[RCQ_BD(fp_rx->rx_comp_cons)];
10354 cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
10355 if (CQE_TYPE(cqe_fp_flags) || (cqe_fp_flags & ETH_RX_ERROR_FALGS))
10356 goto test_loopback_rx_exit;
10358 len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
10359 if (len != pkt_size)
10360 goto test_loopback_rx_exit;
10362 rx_buf = &fp_rx->rx_buf_ring[RX_BD(fp_rx->rx_bd_cons)];
10363 skb = rx_buf->skb;
10364 skb_reserve(skb, cqe->fast_path_cqe.placement_offset);
10365 for (i = ETH_HLEN; i < pkt_size; i++)
10366 if (*(skb->data + i) != (unsigned char) (i & 0xff))
10367 goto test_loopback_rx_exit;
10369 rc = 0;
10371 test_loopback_rx_exit:
10373 fp_rx->rx_bd_cons = NEXT_RX_IDX(fp_rx->rx_bd_cons);
10374 fp_rx->rx_bd_prod = NEXT_RX_IDX(fp_rx->rx_bd_prod);
10375 fp_rx->rx_comp_cons = NEXT_RCQ_IDX(fp_rx->rx_comp_cons);
10376 fp_rx->rx_comp_prod = NEXT_RCQ_IDX(fp_rx->rx_comp_prod);
10378 /* Update producers */
10379 bnx2x_update_rx_prod(bp, fp_rx, fp_rx->rx_bd_prod, fp_rx->rx_comp_prod,
10380 fp_rx->rx_sge_prod);
10382 test_loopback_exit:
10383 bp->link_params.loopback_mode = LOOPBACK_NONE;
10385 return rc;
10386 }
10388 static int bnx2x_test_loopback(struct bnx2x *bp, u8 link_up)
10389 {
10390 int rc = 0, res;
10392 if (!netif_running(bp->dev))
10393 return BNX2X_LOOPBACK_FAILED;
10395 bnx2x_netif_stop(bp, 1);
10396 bnx2x_acquire_phy_lock(bp);
10398 res = bnx2x_run_loopback(bp, BNX2X_PHY_LOOPBACK, link_up);
10399 if (res) {
10400 DP(NETIF_MSG_PROBE, " PHY loopback failed (res %d)\n", res);
10401 rc |= BNX2X_PHY_LOOPBACK_FAILED;
10402 }
10404 res = bnx2x_run_loopback(bp, BNX2X_MAC_LOOPBACK, link_up);
10405 if (res) {
10406 DP(NETIF_MSG_PROBE, " MAC loopback failed (res %d)\n", res);
10407 rc |= BNX2X_MAC_LOOPBACK_FAILED;
10408 }
10410 bnx2x_release_phy_lock(bp);
10411 bnx2x_netif_start(bp);
10413 return rc;
10414 }
10416 #define CRC32_RESIDUAL 0xdebb20e3
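10417 /* CRC32 over a block that includes its stored CRC leaves this constant residual */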
10418 static int bnx2x_test_nvram(struct bnx2x *bp)
10419 {
10420 static const struct {
10421 int offset;
10422 int size;
10423 } nvram_tbl[] = {
10424 { 0, 0x14 }, /* bootstrap */
10425 { 0x14, 0xec }, /* dir */
10426 { 0x100, 0x350 }, /* manuf_info */
10427 { 0x450, 0xf0 }, /* feature_info */
10428 { 0x640, 0x64 }, /* upgrade_key_info */
10429 { 0x6a4, 0x64 }, /* upgrade_key_info */
10430 { 0x708, 0x70 }, /* manuf_key_info */
10431 { 0x778, 0x70 }, /* manuf_key_info */
10432 { 0, 0 }
10433 };
10434 __be32 buf[0x350 / 4];
10435 u8 *data = (u8 *)buf;
10436 int i, rc;
10437 u32 magic, crc;
10439 rc = bnx2x_nvram_read(bp, 0, data, 4);
10440 if (rc) {
10441 DP(NETIF_MSG_PROBE, "magic value read (rc %d)\n", rc);
10442 goto test_nvram_exit;
10443 }
10445 magic = be32_to_cpu(buf[0]);
10446 if (magic != 0x669955aa) {
10447 DP(NETIF_MSG_PROBE, "magic value (0x%08x)\n", magic);
10449 goto test_nvram_exit;
10452 for (i = 0; nvram_tbl[i].size; i++) {
10454 rc = bnx2x_nvram_read(bp, nvram_tbl[i].offset, data,
10455 nvram_tbl[i].size);
10456 if (rc) {
10457 DP(NETIF_MSG_PROBE,
10458 "nvram_tbl[%d] read data (rc %d)\n", i, rc);
10459 goto test_nvram_exit;
10460 }
10462 crc = ether_crc_le(nvram_tbl[i].size, data);
10463 if (crc != CRC32_RESIDUAL) {
10464 DP(NETIF_MSG_PROBE,
10465 "nvram_tbl[%d] crc value (0x%08x)\n", i, crc);
10467 goto test_nvram_exit;
10475 static int bnx2x_test_intr(struct bnx2x *bp)
10477 struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
10478 int i, rc;
10480 if (!netif_running(bp->dev))
10481 return -ENODEV;
10483 config->hdr.length = 0;
10484 if (CHIP_IS_E1(bp))
10485 config->hdr.offset = (BP_PORT(bp) ? 32 : 0);
10487 config->hdr.offset = BP_FUNC(bp);
10488 config->hdr.client_id = bp->fp->cl_id;
10489 config->hdr.reserved1 = 0;
10491 bp->set_mac_pending++;
10492 smp_wmb();
10493 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
10494 U64_HI(bnx2x_sp_mapping(bp, mac_config)),
10495 U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
10497 for (i = 0; i < 10; i++) {
10498 if (!bp->set_mac_pending)
10499 break;
10500 smp_rmb();
10501 msleep_interruptible(10);
10502 }
10504 if (i == 10)
10505 rc = -ENODEV;
10507 return rc;
10508 }
10510 static void bnx2x_self_test(struct net_device *dev,
10511 struct ethtool_test *etest, u64 *buf)
10512 {
10513 struct bnx2x *bp = netdev_priv(dev);
10515 memset(buf, 0, sizeof(u64) * BNX2X_NUM_TESTS);
10517 if (!netif_running(dev))
10518 return;
10520 /* offline tests are not supported in MF mode */
10521 if (IS_E1HMF(bp))
10522 etest->flags &= ~ETH_TEST_FL_OFFLINE;
10524 if (etest->flags & ETH_TEST_FL_OFFLINE) {
10525 int port = BP_PORT(bp);
10526 u32 val;
10527 u8 link_up;
10529 /* save current value of input enable for TX port IF */
10530 val = REG_RD(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4);
10531 /* disable input for TX port IF */
10532 REG_WR(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4, 0);
10534 link_up = bp->link_vars.link_up;
10535 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
10536 bnx2x_nic_load(bp, LOAD_DIAG);
10537 /* wait until link state is restored */
10538 bnx2x_wait_for_link(bp, link_up);
10540 if (bnx2x_test_registers(bp) != 0) {
10541 buf[0] = 1;
10542 etest->flags |= ETH_TEST_FL_FAILED;
10543 }
10544 if (bnx2x_test_memory(bp) != 0) {
10545 buf[1] = 1;
10546 etest->flags |= ETH_TEST_FL_FAILED;
10547 }
10548 buf[2] = bnx2x_test_loopback(bp, link_up);
10549 if (buf[2] != 0)
10550 etest->flags |= ETH_TEST_FL_FAILED;
10552 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
10554 /* restore input for TX port IF */
10555 REG_WR(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4, val);
10557 bnx2x_nic_load(bp, LOAD_NORMAL);
10558 /* wait until link state is restored */
10559 bnx2x_wait_for_link(bp, link_up);
10561 if (bnx2x_test_nvram(bp) != 0) {
10562 buf[3] = 1;
10563 etest->flags |= ETH_TEST_FL_FAILED;
10564 }
10565 if (bnx2x_test_intr(bp) != 0) {
10566 buf[4] = 1;
10567 etest->flags |= ETH_TEST_FL_FAILED;
10568 }
10570 if (bnx2x_link_test(bp) != 0) {
10571 buf[5] = 1;
10572 etest->flags |= ETH_TEST_FL_FAILED;
10573 }
10575 #ifdef BNX2X_EXTRA_DEBUG
10576 bnx2x_panic_dump(bp);
10577 #endif
10578 }
10580 static const struct {
10581 long offset;
10582 int size;
10583 u8 string[ETH_GSTRING_LEN];
10584 } bnx2x_q_stats_arr[BNX2X_NUM_Q_STATS] = {
10585 /* 1 */ { Q_STATS_OFFSET32(total_bytes_received_hi), 8, "[%d]: rx_bytes" },
10586 { Q_STATS_OFFSET32(error_bytes_received_hi),
10587 8, "[%d]: rx_error_bytes" },
10588 { Q_STATS_OFFSET32(total_unicast_packets_received_hi),
10589 8, "[%d]: rx_ucast_packets" },
10590 { Q_STATS_OFFSET32(total_multicast_packets_received_hi),
10591 8, "[%d]: rx_mcast_packets" },
10592 { Q_STATS_OFFSET32(total_broadcast_packets_received_hi),
10593 8, "[%d]: rx_bcast_packets" },
10594 { Q_STATS_OFFSET32(no_buff_discard_hi), 8, "[%d]: rx_discards" },
10595 { Q_STATS_OFFSET32(rx_err_discard_pkt),
10596 4, "[%d]: rx_phy_ip_err_discards"},
10597 { Q_STATS_OFFSET32(rx_skb_alloc_failed),
10598 4, "[%d]: rx_skb_alloc_discard" },
10599 { Q_STATS_OFFSET32(hw_csum_err), 4, "[%d]: rx_csum_offload_errors" },
10601 /* 10 */{ Q_STATS_OFFSET32(total_bytes_transmitted_hi), 8, "[%d]: tx_bytes" },
10602 { Q_STATS_OFFSET32(total_unicast_packets_transmitted_hi),
10603 8, "[%d]: tx_packets" }
10606 static const struct {
10610 #define STATS_FLAGS_PORT 1
10611 #define STATS_FLAGS_FUNC 2
10612 #define STATS_FLAGS_BOTH (STATS_FLAGS_FUNC | STATS_FLAGS_PORT)
10613 u8 string[ETH_GSTRING_LEN];
10614 } bnx2x_stats_arr[BNX2X_NUM_STATS] = {
10615 /* 1 */ { STATS_OFFSET32(total_bytes_received_hi),
10616 8, STATS_FLAGS_BOTH, "rx_bytes" },
10617 { STATS_OFFSET32(error_bytes_received_hi),
10618 8, STATS_FLAGS_BOTH, "rx_error_bytes" },
10619 { STATS_OFFSET32(total_unicast_packets_received_hi),
10620 8, STATS_FLAGS_BOTH, "rx_ucast_packets" },
10621 { STATS_OFFSET32(total_multicast_packets_received_hi),
10622 8, STATS_FLAGS_BOTH, "rx_mcast_packets" },
10623 { STATS_OFFSET32(total_broadcast_packets_received_hi),
10624 8, STATS_FLAGS_BOTH, "rx_bcast_packets" },
10625 { STATS_OFFSET32(rx_stat_dot3statsfcserrors_hi),
10626 8, STATS_FLAGS_PORT, "rx_crc_errors" },
10627 { STATS_OFFSET32(rx_stat_dot3statsalignmenterrors_hi),
10628 8, STATS_FLAGS_PORT, "rx_align_errors" },
10629 { STATS_OFFSET32(rx_stat_etherstatsundersizepkts_hi),
10630 8, STATS_FLAGS_PORT, "rx_undersize_packets" },
10631 { STATS_OFFSET32(etherstatsoverrsizepkts_hi),
10632 8, STATS_FLAGS_PORT, "rx_oversize_packets" },
10633 /* 10 */{ STATS_OFFSET32(rx_stat_etherstatsfragments_hi),
10634 8, STATS_FLAGS_PORT, "rx_fragments" },
10635 { STATS_OFFSET32(rx_stat_etherstatsjabbers_hi),
10636 8, STATS_FLAGS_PORT, "rx_jabbers" },
10637 { STATS_OFFSET32(no_buff_discard_hi),
10638 8, STATS_FLAGS_BOTH, "rx_discards" },
10639 { STATS_OFFSET32(mac_filter_discard),
10640 4, STATS_FLAGS_PORT, "rx_filtered_packets" },
10641 { STATS_OFFSET32(xxoverflow_discard),
10642 4, STATS_FLAGS_PORT, "rx_fw_discards" },
10643 { STATS_OFFSET32(brb_drop_hi),
10644 8, STATS_FLAGS_PORT, "rx_brb_discard" },
10645 { STATS_OFFSET32(brb_truncate_hi),
10646 8, STATS_FLAGS_PORT, "rx_brb_truncate" },
10647 { STATS_OFFSET32(pause_frames_received_hi),
10648 8, STATS_FLAGS_PORT, "rx_pause_frames" },
10649 { STATS_OFFSET32(rx_stat_maccontrolframesreceived_hi),
10650 8, STATS_FLAGS_PORT, "rx_mac_ctrl_frames" },
10651 { STATS_OFFSET32(nig_timer_max),
10652 4, STATS_FLAGS_PORT, "rx_constant_pause_events" },
10653 /* 20 */{ STATS_OFFSET32(rx_err_discard_pkt),
10654 4, STATS_FLAGS_BOTH, "rx_phy_ip_err_discards"},
10655 { STATS_OFFSET32(rx_skb_alloc_failed),
10656 4, STATS_FLAGS_BOTH, "rx_skb_alloc_discard" },
10657 { STATS_OFFSET32(hw_csum_err),
10658 4, STATS_FLAGS_BOTH, "rx_csum_offload_errors" },
10660 { STATS_OFFSET32(total_bytes_transmitted_hi),
10661 8, STATS_FLAGS_BOTH, "tx_bytes" },
10662 { STATS_OFFSET32(tx_stat_ifhcoutbadoctets_hi),
10663 8, STATS_FLAGS_PORT, "tx_error_bytes" },
10664 { STATS_OFFSET32(total_unicast_packets_transmitted_hi),
10665 8, STATS_FLAGS_BOTH, "tx_packets" },
10666 { STATS_OFFSET32(tx_stat_dot3statsinternalmactransmiterrors_hi),
10667 8, STATS_FLAGS_PORT, "tx_mac_errors" },
10668 { STATS_OFFSET32(rx_stat_dot3statscarriersenseerrors_hi),
10669 8, STATS_FLAGS_PORT, "tx_carrier_errors" },
10670 { STATS_OFFSET32(tx_stat_dot3statssinglecollisionframes_hi),
10671 8, STATS_FLAGS_PORT, "tx_single_collisions" },
10672 { STATS_OFFSET32(tx_stat_dot3statsmultiplecollisionframes_hi),
10673 8, STATS_FLAGS_PORT, "tx_multi_collisions" },
10674 /* 30 */{ STATS_OFFSET32(tx_stat_dot3statsdeferredtransmissions_hi),
10675 8, STATS_FLAGS_PORT, "tx_deferred" },
10676 { STATS_OFFSET32(tx_stat_dot3statsexcessivecollisions_hi),
10677 8, STATS_FLAGS_PORT, "tx_excess_collisions" },
10678 { STATS_OFFSET32(tx_stat_dot3statslatecollisions_hi),
10679 8, STATS_FLAGS_PORT, "tx_late_collisions" },
10680 { STATS_OFFSET32(tx_stat_etherstatscollisions_hi),
10681 8, STATS_FLAGS_PORT, "tx_total_collisions" },
10682 { STATS_OFFSET32(tx_stat_etherstatspkts64octets_hi),
10683 8, STATS_FLAGS_PORT, "tx_64_byte_packets" },
10684 { STATS_OFFSET32(tx_stat_etherstatspkts65octetsto127octets_hi),
10685 8, STATS_FLAGS_PORT, "tx_65_to_127_byte_packets" },
10686 { STATS_OFFSET32(tx_stat_etherstatspkts128octetsto255octets_hi),
10687 8, STATS_FLAGS_PORT, "tx_128_to_255_byte_packets" },
10688 { STATS_OFFSET32(tx_stat_etherstatspkts256octetsto511octets_hi),
10689 8, STATS_FLAGS_PORT, "tx_256_to_511_byte_packets" },
10690 { STATS_OFFSET32(tx_stat_etherstatspkts512octetsto1023octets_hi),
10691 8, STATS_FLAGS_PORT, "tx_512_to_1023_byte_packets" },
10692 { STATS_OFFSET32(etherstatspkts1024octetsto1522octets_hi),
10693 8, STATS_FLAGS_PORT, "tx_1024_to_1522_byte_packets" },
10694 /* 40 */{ STATS_OFFSET32(etherstatspktsover1522octets_hi),
10695 8, STATS_FLAGS_PORT, "tx_1523_to_9022_byte_packets" },
10696 { STATS_OFFSET32(pause_frames_sent_hi),
10697 8, STATS_FLAGS_PORT, "tx_pause_frames" }
10700 #define IS_PORT_STAT(i) \
10701 ((bnx2x_stats_arr[i].flags & STATS_FLAGS_BOTH) == STATS_FLAGS_PORT)
10702 #define IS_FUNC_STAT(i) (bnx2x_stats_arr[i].flags & STATS_FLAGS_FUNC)
10703 #define IS_E1HMF_MODE_STAT(bp) \
10704 (IS_E1HMF(bp) && !(bp->msglevel & BNX2X_MSG_STATS))
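10705 /* E1HMF hides port-wide stats from per-function output unless BNX2X_MSG_STATS is set */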
10706 static int bnx2x_get_sset_count(struct net_device *dev, int stringset)
10707 {
10708 struct bnx2x *bp = netdev_priv(dev);
10709 int i, num_stats;
10711 switch(stringset) {
10712 case ETH_SS_STATS:
10713 if (is_multi(bp)) {
10714 num_stats = BNX2X_NUM_Q_STATS * bp->num_rx_queues;
10715 if (!IS_E1HMF_MODE_STAT(bp))
10716 num_stats += BNX2X_NUM_STATS;
10717 } else {
10718 if (IS_E1HMF_MODE_STAT(bp)) {
10719 num_stats = 0;
10720 for (i = 0; i < BNX2X_NUM_STATS; i++)
10721 if (IS_FUNC_STAT(i))
10722 num_stats++;
10723 } else
10724 num_stats = BNX2X_NUM_STATS;
10725 }
10726 return num_stats;
10728 case ETH_SS_TEST:
10729 return BNX2X_NUM_TESTS;
10731 default:
10732 return -EINVAL;
10733 }
10734 }
10736 static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
10737 {
10738 struct bnx2x *bp = netdev_priv(dev);
10739 int i, j, k;
10741 switch (stringset) {
10742 case ETH_SS_STATS:
10743 if (is_multi(bp)) {
10744 k = 0;
10745 for_each_rx_queue(bp, i) {
10746 for (j = 0; j < BNX2X_NUM_Q_STATS; j++)
10747 sprintf(buf + (k + j)*ETH_GSTRING_LEN,
10748 bnx2x_q_stats_arr[j].string, i);
10749 k += BNX2X_NUM_Q_STATS;
10750 }
10751 if (IS_E1HMF_MODE_STAT(bp))
10752 break;
10753 for (j = 0; j < BNX2X_NUM_STATS; j++)
10754 strcpy(buf + (k + j)*ETH_GSTRING_LEN,
10755 bnx2x_stats_arr[j].string);
10756 } else {
10757 for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
10758 if (IS_E1HMF_MODE_STAT(bp) && IS_PORT_STAT(i))
10759 continue;
10760 strcpy(buf + j*ETH_GSTRING_LEN,
10761 bnx2x_stats_arr[i].string);
10762 j++;
10763 }
10764 }
10765 break;
10767 case ETH_SS_TEST:
10768 memcpy(buf, bnx2x_tests_str_arr, sizeof(bnx2x_tests_str_arr));
10769 break;
10770 }
10771 }
10773 static void bnx2x_get_ethtool_stats(struct net_device *dev,
10774 struct ethtool_stats *stats, u64 *buf)
10775 {
10776 struct bnx2x *bp = netdev_priv(dev);
10777 u32 *hw_stats, *offset;
10778 int i, j, k;
10780 if (is_multi(bp)) {
10781 k = 0;
10782 for_each_rx_queue(bp, i) {
10783 hw_stats = (u32 *)&bp->fp[i].eth_q_stats;
10784 for (j = 0; j < BNX2X_NUM_Q_STATS; j++) {
10785 if (bnx2x_q_stats_arr[j].size == 0) {
10786 /* skip this counter */
10787 buf[k + j] = 0;
10788 continue;
10789 }
10790 offset = (hw_stats +
10791 bnx2x_q_stats_arr[j].offset);
10792 if (bnx2x_q_stats_arr[j].size == 4) {
10793 /* 4-byte counter */
10794 buf[k + j] = (u64) *offset;
10795 continue;
10796 }
10797 /* 8-byte counter */
10798 buf[k + j] = HILO_U64(*offset, *(offset + 1));
10799 }
10800 k += BNX2X_NUM_Q_STATS;
10801 }
10802 if (IS_E1HMF_MODE_STAT(bp))
10803 return;
10804 hw_stats = (u32 *)&bp->eth_stats;
10805 for (j = 0; j < BNX2X_NUM_STATS; j++) {
10806 if (bnx2x_stats_arr[j].size == 0) {
10807 /* skip this counter */
10808 buf[k + j] = 0;
10809 continue;
10810 }
10811 offset = (hw_stats + bnx2x_stats_arr[j].offset);
10812 if (bnx2x_stats_arr[j].size == 4) {
10813 /* 4-byte counter */
10814 buf[k + j] = (u64) *offset;
10815 continue;
10816 }
10817 /* 8-byte counter */
10818 buf[k + j] = HILO_U64(*offset, *(offset + 1));
10819 }
10820 } else {
10821 hw_stats = (u32 *)&bp->eth_stats;
10822 for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
10823 if (IS_E1HMF_MODE_STAT(bp) && IS_PORT_STAT(i))
10824 continue;
10825 if (bnx2x_stats_arr[i].size == 0) {
10826 /* skip this counter */
10827 buf[j] = 0;
10828 j++;
10829 continue;
10830 }
10831 offset = (hw_stats + bnx2x_stats_arr[i].offset);
10832 if (bnx2x_stats_arr[i].size == 4) {
10833 /* 4-byte counter */
10834 buf[j] = (u64) *offset;
10835 j++;
10836 continue;
10837 }
10838 /* 8-byte counter */
10839 buf[j] = HILO_U64(*offset, *(offset + 1));
10840 j++;
10841 }
10842 }
10843 }
10845 static int bnx2x_phys_id(struct net_device *dev, u32 data)
10846 {
10847 struct bnx2x *bp = netdev_priv(dev);
10848 int port = BP_PORT(bp);
10849 int i;
10851 if (!netif_running(dev))
10852 return 0;
10854 if (!bp->port.pmf)
10855 return 0;
10857 if (data == 0)
10858 data = 2;
10860 for (i = 0; i < (data * 2); i++) {
10861 if ((i % 2) == 0)
10862 bnx2x_set_led(bp, port, LED_MODE_OPER, SPEED_1000,
10863 bp->link_params.hw_led_mode,
10864 bp->link_params.chip_id);
10865 else
10866 bnx2x_set_led(bp, port, LED_MODE_OFF, 0,
10867 bp->link_params.hw_led_mode,
10868 bp->link_params.chip_id);
10870 msleep_interruptible(500);
10871 if (signal_pending(current))
10872 break;
10873 }
10875 if (bp->link_vars.link_up)
10876 bnx2x_set_led(bp, port, LED_MODE_OPER,
10877 bp->link_vars.line_speed,
10878 bp->link_params.hw_led_mode,
10879 bp->link_params.chip_id);
10881 return 0;
10882 }
10884 static const struct ethtool_ops bnx2x_ethtool_ops = {
10885 .get_settings = bnx2x_get_settings,
10886 .set_settings = bnx2x_set_settings,
10887 .get_drvinfo = bnx2x_get_drvinfo,
10888 .get_regs_len = bnx2x_get_regs_len,
10889 .get_regs = bnx2x_get_regs,
10890 .get_wol = bnx2x_get_wol,
10891 .set_wol = bnx2x_set_wol,
10892 .get_msglevel = bnx2x_get_msglevel,
10893 .set_msglevel = bnx2x_set_msglevel,
10894 .nway_reset = bnx2x_nway_reset,
10895 .get_link = bnx2x_get_link,
10896 .get_eeprom_len = bnx2x_get_eeprom_len,
10897 .get_eeprom = bnx2x_get_eeprom,
10898 .set_eeprom = bnx2x_set_eeprom,
10899 .get_coalesce = bnx2x_get_coalesce,
10900 .set_coalesce = bnx2x_set_coalesce,
10901 .get_ringparam = bnx2x_get_ringparam,
10902 .set_ringparam = bnx2x_set_ringparam,
10903 .get_pauseparam = bnx2x_get_pauseparam,
10904 .set_pauseparam = bnx2x_set_pauseparam,
10905 .get_rx_csum = bnx2x_get_rx_csum,
10906 .set_rx_csum = bnx2x_set_rx_csum,
10907 .get_tx_csum = ethtool_op_get_tx_csum,
10908 .set_tx_csum = ethtool_op_set_tx_hw_csum,
10909 .set_flags = bnx2x_set_flags,
10910 .get_flags = ethtool_op_get_flags,
10911 .get_sg = ethtool_op_get_sg,
10912 .set_sg = ethtool_op_set_sg,
10913 .get_tso = ethtool_op_get_tso,
10914 .set_tso = bnx2x_set_tso,
10915 .self_test = bnx2x_self_test,
10916 .get_sset_count = bnx2x_get_sset_count,
10917 .get_strings = bnx2x_get_strings,
10918 .phys_id = bnx2x_phys_id,
10919 .get_ethtool_stats = bnx2x_get_ethtool_stats,
10922 /* end of ethtool_ops */
10924 /****************************************************************************
10925 * General service functions
10926 ****************************************************************************/
10928 static int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
10932 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);
10936 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
10937 ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
10938 PCI_PM_CTRL_PME_STATUS));
10940 if (pmcsr & PCI_PM_CTRL_STATE_MASK)
10941 /* delay required during transition out of D3hot */
10946 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
10950 pmcsr |= PCI_PM_CTRL_PME_ENABLE;
10952 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
10955 /* No more memory access after this point until
10956 * device is brought back to D0.
10966 static inline int bnx2x_has_rx_work(struct bnx2x_fastpath *fp)
10970 /* Tell compiler that status block fields can change */
10972 rx_cons_sb = le16_to_cpu(*fp->rx_cons_sb);
10973 if ((rx_cons_sb & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
10974 rx_cons_sb++;
10975 return (fp->rx_comp_cons != rx_cons_sb);
10979 * net_device service functions
10982 static int bnx2x_poll(struct napi_struct *napi, int budget)
10984 struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
10986 struct bnx2x *bp = fp->bp;
10989 #ifdef BNX2X_STOP_ON_ERROR
10990 if (unlikely(bp->panic))
10994 prefetch(fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)].skb);
10995 prefetch((char *)(fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)].skb) + 256);
10997 bnx2x_update_fpsb_idx(fp);
10999 if (bnx2x_has_rx_work(fp)) {
11000 work_done = bnx2x_rx_int(fp, budget);
11002 /* must not complete if we consumed full budget */
11003 if (work_done >= budget)
11007 /* bnx2x_has_rx_work() reads the status block, thus we need to
11008 * ensure that the status block indices have actually been read
11009 * (bnx2x_update_fpsb_idx) prior to this check (bnx2x_has_rx_work)
11010 * so that we won't write the "newer" value of the status block to IGU
11011 * (if there was a DMA right after bnx2x_has_rx_work and
11012 * if there is no rmb, the memory reading (bnx2x_update_fpsb_idx)
11013 * may be postponed to right before bnx2x_ack_sb). In this case
11014 * there will never be another interrupt until there is another update
11015 * of the status block, while there is still unhandled work.
11019 if (!bnx2x_has_rx_work(fp)) {
11020 #ifdef BNX2X_STOP_ON_ERROR
11023 napi_complete(napi);
11025 bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID,
11026 le16_to_cpu(fp->fp_u_idx), IGU_INT_NOP, 1);
11027 bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID,
11028 le16_to_cpu(fp->fp_c_idx), IGU_INT_ENABLE, 1);
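/* Illustrative sketch (editor's addition): the ordering contract described
 * in the comment above. The status-block index read must complete before
 * the re-check and the ack, or a concurrent DMA update can be missed and
 * the interrupt never re-fires. A C11 acquire load stands in for the
 * driver's rmb(); the names here are local stand-ins, not driver symbols.
 */
#if 0
#include <stdatomic.h>

static int rx_ring_idle(_Atomic unsigned short *sb_rx_idx,
			unsigned short sw_rx_cons)
{
	/* acquire load: nothing after this read may be reordered before it */
	unsigned short hw = atomic_load_explicit(sb_rx_idx,
						 memory_order_acquire);
	return hw == sw_rx_cons;	/* idle only if no new completions */
}
#endif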
11036 /* We split the first BD into header and data BDs
11037 * to ease the pain of our fellow microcode engineers;
11038 * we use one mapping for both BDs.
11039 * So far this has only been observed to happen
11040 * in Other Operating Systems(TM).
11042 static noinline u16 bnx2x_tx_split(struct bnx2x *bp,
11043 struct bnx2x_fastpath *fp,
11044 struct sw_tx_bd *tx_buf,
11045 struct eth_tx_start_bd **tx_bd, u16 hlen,
11046 u16 bd_prod, int nbd)
11048 struct eth_tx_start_bd *h_tx_bd = *tx_bd;
11049 struct eth_tx_bd *d_tx_bd;
11050 dma_addr_t mapping;
11051 int old_len = le16_to_cpu(h_tx_bd->nbytes);
11053 /* first fix first BD */
11054 h_tx_bd->nbd = cpu_to_le16(nbd);
11055 h_tx_bd->nbytes = cpu_to_le16(hlen);
11057 DP(NETIF_MSG_TX_QUEUED, "TSO split header size is %d "
11058 "(%x:%x) nbd %d\n", h_tx_bd->nbytes, h_tx_bd->addr_hi,
11059 h_tx_bd->addr_lo, h_tx_bd->nbd);
11061 /* now get a new data BD
11062 * (after the pbd) and fill it */
11063 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
11064 d_tx_bd = &fp->tx_desc_ring[bd_prod].reg_bd;
11066 mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi),
11067 le32_to_cpu(h_tx_bd->addr_lo)) + hlen;
11069 d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
11070 d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
11071 d_tx_bd->nbytes = cpu_to_le16(old_len - hlen);
11073 /* this marks the BD as one that has no individual mapping */
11074 tx_buf->flags |= BNX2X_TSO_SPLIT_BD;
11076 DP(NETIF_MSG_TX_QUEUED,
11077 "TSO split data size is %d (%x:%x)\n",
11078 d_tx_bd->nbytes, d_tx_bd->addr_hi, d_tx_bd->addr_lo);
11081 *tx_bd = (struct eth_tx_start_bd *)d_tx_bd;
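/* Illustrative sketch (not driver code): the single-mapping split performed
 * by bnx2x_tx_split() above. The header BD keeps the original DMA address
 * but only hlen bytes; the data BD reuses the same mapping at offset hlen,
 * so no second DMA mapping is created for the payload.
 */
#if 0
#include <stdint.h>

struct simple_bd {
	uint64_t addr;		/* DMA address   */
	uint16_t nbytes;	/* buffer length */
};

static void split_one_mapping(struct simple_bd *hdr, struct simple_bd *data,
			      uint16_t hlen)
{
	uint16_t old_len = hdr->nbytes;

	hdr->nbytes = hlen;		/* first BD: headers only       */
	data->addr = hdr->addr + hlen;	/* same mapping, payload offset */
	data->nbytes = old_len - hlen;	/* second BD: remaining payload */
}
#endif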
11086 static inline u16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
11089 csum = (u16) ~csum_fold(csum_sub(csum,
11090 csum_partial(t_header - fix, fix, 0)));
11093 csum = (u16) ~csum_fold(csum_add(csum,
11094 csum_partial(t_header, -fix, 0)));
11096 return swab16(csum);
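/* Illustrative sketch (editor's addition): the ones-complement arithmetic
 * behind bnx2x_csum_fix() above. Subtracting a partial sum is adding its
 * complement, with carries folded back in ("end-around carry"), which is
 * the role csum_fold()/csum_sub() play in the kernel.
 */
#if 0
#include <stdint.h>

static uint16_t fold32(uint32_t sum)
{
	sum = (sum & 0xffff) + (sum >> 16);	/* fold carries ...      */
	sum = (sum & 0xffff) + (sum >> 16);	/* ... until none remain */
	return (uint16_t)sum;
}

/* remove 'part' from a 16-bit ones-complement checksum accumulator */
static uint16_t csum_sub16(uint16_t csum, uint16_t part)
{
	return fold32((uint32_t)csum + (uint16_t)~part);
}
#endif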
11099 static inline u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
11103 if (skb->ip_summed != CHECKSUM_PARTIAL)
11107 if (skb->protocol == htons(ETH_P_IPV6)) {
11109 if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
11110 rc |= XMIT_CSUM_TCP;
11114 if (ip_hdr(skb)->protocol == IPPROTO_TCP)
11115 rc |= XMIT_CSUM_TCP;
11119 if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
11122 else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
11128 #if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
11129 /* check if packet requires linearization (packet is too fragmented)
11130 no need to check fragmentation if page size > 8K (there will be no
11131 violation of FW restrictions) */
11132 static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
11137 int first_bd_sz = 0;
11139 /* 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */
11140 if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - 3)) {
11142 if (xmit_type & XMIT_GSO) {
11143 unsigned short lso_mss = skb_shinfo(skb)->gso_size;
11144 /* Check if LSO packet needs to be copied:
11145 3 = 1 (for headers BD) + 2 (for PBD and last BD) */
11146 int wnd_size = MAX_FETCH_BD - 3;
11147 /* Number of windows to check */
11148 int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size;
11153 /* Headers length */
11154 hlen = (int)(skb_transport_header(skb) - skb->data) +
11157 /* Amount of data (w/o headers) in the linear part of the SKB */
11158 first_bd_sz = skb_headlen(skb) - hlen;
11160 wnd_sum = first_bd_sz;
11162 /* Calculate the first sum - it's special */
11163 for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++)
11165 skb_shinfo(skb)->frags[frag_idx].size;
11167 /* If there is data in the linear part of the skb - check it */
11168 if (first_bd_sz > 0) {
11169 if (unlikely(wnd_sum < lso_mss)) {
11174 wnd_sum -= first_bd_sz;
11177 /* Others are easier: run through the frag list and
11178 check all windows */
11179 for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) {
11181 skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1].size;
11183 if (unlikely(wnd_sum < lso_mss)) {
11188 skb_shinfo(skb)->frags[wnd_idx].size;
11191 /* a non-LSO packet that is too fragmented should always be linearized */
11198 if (unlikely(to_copy))
11199 DP(NETIF_MSG_TX_QUEUED,
11200 "Linearization IS REQUIRED for %s packet. "
11201 "num_frags %d hlen %d first_bd_sz %d\n",
11202 (xmit_type & XMIT_GSO) ? "LSO" : "non-LSO",
11203 skb_shinfo(skb)->nr_frags, hlen, first_bd_sz);
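/* Illustrative sketch (not driver code): the sliding-window test that
 * bnx2x_pkt_req_lin() implements above. For LSO, every window of wnd_size
 * consecutive frags must carry at least one MSS of payload; any window
 * that falls short forces linearization. Frag sizes are plain ints here,
 * a stand-in for the skb frag list.
 */
#if 0
static int window_too_small(const int *frag_sz, int nfrags,
			    int wnd_size, int lso_mss)
{
	int wnd_sum = 0, i;

	for (i = 0; i < nfrags; i++) {
		wnd_sum += frag_sz[i];
		if (i < wnd_size - 1)
			continue;		/* window not full yet */
		if (wnd_sum < lso_mss)
			return 1;		/* FW rule violated    */
		wnd_sum -= frag_sz[i - (wnd_size - 1)];	/* slide window */
	}
	return 0;
}
#endif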
11209 /* called with netif_tx_lock
11210 * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
11211 * netif_wake_queue()
11213 static netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
11215 struct bnx2x *bp = netdev_priv(dev);
11216 struct bnx2x_fastpath *fp, *fp_stat;
11217 struct netdev_queue *txq;
11218 struct sw_tx_bd *tx_buf;
11219 struct eth_tx_start_bd *tx_start_bd;
11220 struct eth_tx_bd *tx_data_bd, *total_pkt_bd = NULL;
11221 struct eth_tx_parse_bd *pbd = NULL;
11222 u16 pkt_prod, bd_prod;
11224 dma_addr_t mapping;
11225 u32 xmit_type = bnx2x_xmit_type(bp, skb);
11228 __le16 pkt_size = 0;
11230 #ifdef BNX2X_STOP_ON_ERROR
11231 if (unlikely(bp->panic))
11232 return NETDEV_TX_BUSY;
11235 fp_index = skb_get_queue_mapping(skb);
11236 txq = netdev_get_tx_queue(dev, fp_index);
11238 fp = &bp->fp[fp_index + bp->num_rx_queues];
11239 fp_stat = &bp->fp[fp_index];
11241 if (unlikely(bnx2x_tx_avail(fp) < (skb_shinfo(skb)->nr_frags + 3))) {
11242 fp_stat->eth_q_stats.driver_xoff++;
11243 netif_tx_stop_queue(txq);
11244 BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
11245 return NETDEV_TX_BUSY;
11248 DP(NETIF_MSG_TX_QUEUED, "SKB: summed %x protocol %x protocol(%x,%x)"
11249 " gso type %x xmit_type %x\n",
11250 skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
11251 ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type);
11253 #if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
11254 /* First, check if we need to linearize the skb (due to FW
11255 restrictions). No need to check fragmentation if page size > 8K
11256 (there will be no violation of FW restrictions) */
11257 if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) {
11258 /* Statistics of linearization */
11260 if (skb_linearize(skb) != 0) {
11261 DP(NETIF_MSG_TX_QUEUED, "SKB linearization failed - "
11262 "silently dropping this SKB\n");
11263 dev_kfree_skb_any(skb);
11264 return NETDEV_TX_OK;
11270 Please read carefully. First we use one BD which we mark as start,
11271 then we have a parsing info BD (used for TSO or xsum),
11272 and only then we have the rest of the TSO BDs.
11273 (don't forget to mark the last one as last,
11274 and to unmap only AFTER you write to the BD ...)
11275 And above all, all PBD sizes are in words - NOT DWORDS!
11278 pkt_prod = fp->tx_pkt_prod++;
11279 bd_prod = TX_BD(fp->tx_bd_prod);
11281 /* get a tx_buf and first BD */
11282 tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)];
11283 tx_start_bd = &fp->tx_desc_ring[bd_prod].start_bd;
11285 tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
11286 tx_start_bd->general_data = (UNICAST_ADDRESS <<
11287 ETH_TX_START_BD_ETH_ADDR_TYPE_SHIFT);
11289 tx_start_bd->general_data |= (1 << ETH_TX_START_BD_HDR_NBDS_SHIFT);
11291 /* remember the first BD of the packet */
11292 tx_buf->first_bd = fp->tx_bd_prod;
11296 DP(NETIF_MSG_TX_QUEUED,
11297 "sending pkt %u @%p next_idx %u bd %u @%p\n",
11298 pkt_prod, tx_buf, fp->tx_pkt_prod, bd_prod, tx_start_bd);
11301 if ((bp->vlgrp != NULL) && vlan_tx_tag_present(skb) &&
11302 (bp->flags & HW_VLAN_TX_FLAG)) {
11303 tx_start_bd->vlan = cpu_to_le16(vlan_tx_tag_get(skb));
11304 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_VLAN_TAG;
11307 tx_start_bd->vlan = cpu_to_le16(pkt_prod);
11309 /* turn on parsing and get a BD */
11310 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
11311 pbd = &fp->tx_desc_ring[bd_prod].parse_bd;
11313 memset(pbd, 0, sizeof(struct eth_tx_parse_bd));
11315 if (xmit_type & XMIT_CSUM) {
11316 hlen = (skb_network_header(skb) - skb->data) / 2;
11318 /* for now NS flag is not used in Linux */
11320 (hlen | ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
11321 ETH_TX_PARSE_BD_LLC_SNAP_EN_SHIFT));
11323 pbd->ip_hlen = (skb_transport_header(skb) -
11324 skb_network_header(skb)) / 2;
11326 hlen += pbd->ip_hlen + tcp_hdrlen(skb) / 2;
11328 pbd->total_hlen = cpu_to_le16(hlen);
11331 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_L4_CSUM;
11333 if (xmit_type & XMIT_CSUM_V4)
11334 tx_start_bd->bd_flags.as_bitfield |=
11335 ETH_TX_BD_FLAGS_IP_CSUM;
11337 tx_start_bd->bd_flags.as_bitfield |=
11338 ETH_TX_BD_FLAGS_IPV6;
11340 if (xmit_type & XMIT_CSUM_TCP) {
11341 pbd->tcp_pseudo_csum = swab16(tcp_hdr(skb)->check);
11344 s8 fix = SKB_CS_OFF(skb); /* signed! */
11346 pbd->global_data |= ETH_TX_PARSE_BD_UDP_CS_FLG;
11348 DP(NETIF_MSG_TX_QUEUED,
11349 "hlen %d fix %d csum before fix %x\n",
11350 le16_to_cpu(pbd->total_hlen), fix, SKB_CS(skb));
11352 /* HW bug: fixup the CSUM */
11353 pbd->tcp_pseudo_csum =
11354 bnx2x_csum_fix(skb_transport_header(skb),
11357 DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n",
11358 pbd->tcp_pseudo_csum);
11362 mapping = pci_map_single(bp->pdev, skb->data,
11363 skb_headlen(skb), PCI_DMA_TODEVICE);
11365 tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
11366 tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
11367 nbd = skb_shinfo(skb)->nr_frags + 2; /* start_bd + pbd + frags */
11368 tx_start_bd->nbd = cpu_to_le16(nbd);
11369 tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb));
11370 pkt_size = tx_start_bd->nbytes;
11372 DP(NETIF_MSG_TX_QUEUED, "first bd @%p addr (%x:%x) nbd %d"
11373 " nbytes %d flags %x vlan %x\n",
11374 tx_start_bd, tx_start_bd->addr_hi, tx_start_bd->addr_lo,
11375 le16_to_cpu(tx_start_bd->nbd), le16_to_cpu(tx_start_bd->nbytes),
11376 tx_start_bd->bd_flags.as_bitfield, le16_to_cpu(tx_start_bd->vlan));
11378 if (xmit_type & XMIT_GSO) {
11380 DP(NETIF_MSG_TX_QUEUED,
11381 "TSO packet len %d hlen %d total len %d tso size %d\n",
11382 skb->len, hlen, skb_headlen(skb),
11383 skb_shinfo(skb)->gso_size);
11385 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;
11387 if (unlikely(skb_headlen(skb) > hlen))
11388 bd_prod = bnx2x_tx_split(bp, fp, tx_buf, &tx_start_bd,
11389 hlen, bd_prod, ++nbd);
11391 pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
11392 pbd->tcp_send_seq = swab32(tcp_hdr(skb)->seq);
11393 pbd->tcp_flags = pbd_tcp_flags(skb);
11395 if (xmit_type & XMIT_GSO_V4) {
11396 pbd->ip_id = swab16(ip_hdr(skb)->id);
11397 pbd->tcp_pseudo_csum =
11398 swab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
11399 ip_hdr(skb)->daddr,
11400 0, IPPROTO_TCP, 0));
11403 pbd->tcp_pseudo_csum =
11404 swab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
11405 &ipv6_hdr(skb)->daddr,
11406 0, IPPROTO_TCP, 0));
11408 pbd->global_data |= ETH_TX_PARSE_BD_PSEUDO_CS_WITHOUT_LEN;
11410 tx_data_bd = (struct eth_tx_bd *)tx_start_bd;
11412 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
11413 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
11415 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
11416 tx_data_bd = &fp->tx_desc_ring[bd_prod].reg_bd;
11417 if (total_pkt_bd == NULL)
11418 total_pkt_bd = &fp->tx_desc_ring[bd_prod].reg_bd;
11420 mapping = pci_map_page(bp->pdev, frag->page, frag->page_offset,
11421 frag->size, PCI_DMA_TODEVICE);
11423 tx_data_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
11424 tx_data_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
11425 tx_data_bd->nbytes = cpu_to_le16(frag->size);
11426 le16_add_cpu(&pkt_size, frag->size);
11428 DP(NETIF_MSG_TX_QUEUED,
11429 "frag %d bd @%p addr (%x:%x) nbytes %d\n",
11430 i, tx_data_bd, tx_data_bd->addr_hi, tx_data_bd->addr_lo,
11431 le16_to_cpu(tx_data_bd->nbytes));
11434 DP(NETIF_MSG_TX_QUEUED, "last bd @%p\n", tx_data_bd);
11436 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
11438 /* now send a tx doorbell, counting the next BD
11439 * if the packet contains or ends with it
11441 if (TX_BD_POFF(bd_prod) < nbd)
11444 if (total_pkt_bd != NULL)
11445 total_pkt_bd->total_pkt_bytes = pkt_size;
11448 DP(NETIF_MSG_TX_QUEUED,
11449 "PBD @%p ip_data %x ip_hlen %u ip_id %u lso_mss %u"
11450 " tcp_flags %x xsum %x seq %u hlen %u\n",
11451 pbd, pbd->global_data, pbd->ip_hlen, pbd->ip_id,
11452 pbd->lso_mss, pbd->tcp_flags, pbd->tcp_pseudo_csum,
11453 pbd->tcp_send_seq, le16_to_cpu(pbd->total_hlen));
11455 DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d bd %u\n", nbd, bd_prod);
11458 * Make sure that the BD data is updated before updating the producer
11459 * since FW might read the BD right after the producer is updated.
11460 * This is only applicable for weak-ordered memory model archs such
11461 * as IA-64. The following barrier is also mandatory since the FW
11462 * assumes packets must have BDs.
11466 fp->tx_db.data.prod += nbd;
11468 DOORBELL(bp, fp->index - bp->num_rx_queues, fp->tx_db.raw);
11472 fp->tx_bd_prod += nbd;
11474 if (unlikely(bnx2x_tx_avail(fp) < MAX_SKB_FRAGS + 3)) {
11475 netif_tx_stop_queue(txq);
11476 /* We want bnx2x_tx_int to "see" the updated tx_bd_prod
11477 if we put Tx into XOFF state. */
11479 fp_stat->eth_q_stats.driver_xoff++;
11480 if (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3)
11481 netif_tx_wake_queue(txq);
11485 return NETDEV_TX_OK;
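/* Illustrative sketch (editor's addition): the publish pattern behind the
 * barrier-then-doorbell sequence above. Every BD store must be visible
 * before the producer store that the hardware polls; a C11 release store
 * expresses the same guarantee the driver's wmb() provides.
 */
#if 0
#include <stdatomic.h>
#include <stdint.h>

static void publish_bd(uint32_t *bd, uint32_t bd_val,
		       _Atomic uint16_t *prod, uint16_t new_prod)
{
	*bd = bd_val;				/* 1: fill the descriptor */
	atomic_store_explicit(prod, new_prod,	/* 2: then ring the bell; */
			      memory_order_release); /* 1 can't pass 2    */
}
#endif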
11488 /* called with rtnl_lock */
11489 static int bnx2x_open(struct net_device *dev)
11491 struct bnx2x *bp = netdev_priv(dev);
11493 netif_carrier_off(dev);
11495 bnx2x_set_power_state(bp, PCI_D0);
11497 return bnx2x_nic_load(bp, LOAD_OPEN);
11500 /* called with rtnl_lock */
11501 static int bnx2x_close(struct net_device *dev)
11503 struct bnx2x *bp = netdev_priv(dev);
11505 /* Unload the driver, release IRQs */
11506 bnx2x_nic_unload(bp, UNLOAD_CLOSE);
11507 if (atomic_read(&bp->pdev->enable_cnt) == 1)
11508 if (!CHIP_REV_IS_SLOW(bp))
11509 bnx2x_set_power_state(bp, PCI_D3hot);
11514 /* called with netif_tx_lock from dev_mcast.c */
11515 static void bnx2x_set_rx_mode(struct net_device *dev)
11517 struct bnx2x *bp = netdev_priv(dev);
11518 u32 rx_mode = BNX2X_RX_MODE_NORMAL;
11519 int port = BP_PORT(bp);
11521 if (bp->state != BNX2X_STATE_OPEN) {
11522 DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
11526 DP(NETIF_MSG_IFUP, "dev->flags = %x\n", dev->flags);
11528 if (dev->flags & IFF_PROMISC)
11529 rx_mode = BNX2X_RX_MODE_PROMISC;
11531 else if ((dev->flags & IFF_ALLMULTI) ||
11532 ((dev->mc_count > BNX2X_MAX_MULTICAST) && CHIP_IS_E1(bp)))
11533 rx_mode = BNX2X_RX_MODE_ALLMULTI;
11535 else { /* some multicasts */
11536 if (CHIP_IS_E1(bp)) {
11537 int i, old, offset;
11538 struct dev_mc_list *mclist;
11539 struct mac_configuration_cmd *config =
11540 bnx2x_sp(bp, mcast_config);
11542 for (i = 0, mclist = dev->mc_list;
11543 mclist && (i < dev->mc_count);
11544 i++, mclist = mclist->next) {
11546 config->config_table[i].
11547 cam_entry.msb_mac_addr =
11548 swab16(*(u16 *)&mclist->dmi_addr[0]);
11549 config->config_table[i].
11550 cam_entry.middle_mac_addr =
11551 swab16(*(u16 *)&mclist->dmi_addr[2]);
11552 config->config_table[i].
11553 cam_entry.lsb_mac_addr =
11554 swab16(*(u16 *)&mclist->dmi_addr[4]);
11555 config->config_table[i].cam_entry.flags =
11557 config->config_table[i].
11558 target_table_entry.flags = 0;
11559 config->config_table[i].target_table_entry.
11560 clients_bit_vector =
11561 cpu_to_le32(1 << BP_L_ID(bp));
11562 config->config_table[i].
11563 target_table_entry.vlan_id = 0;
11566 "setting MCAST[%d] (%04x:%04x:%04x)\n", i,
11567 config->config_table[i].
11568 cam_entry.msb_mac_addr,
11569 config->config_table[i].
11570 cam_entry.middle_mac_addr,
11571 config->config_table[i].
11572 cam_entry.lsb_mac_addr);
11574 old = config->hdr.length;
11576 for (; i < old; i++) {
11577 if (CAM_IS_INVALID(config->
11578 config_table[i])) {
11579 /* already invalidated */
11583 CAM_INVALIDATE(config->
11588 if (CHIP_REV_IS_SLOW(bp))
11589 offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
11591 offset = BNX2X_MAX_MULTICAST*(1 + port);
11593 config->hdr.length = i;
11594 config->hdr.offset = offset;
11595 config->hdr.client_id = bp->fp->cl_id;
11596 config->hdr.reserved1 = 0;
11598 bp->set_mac_pending++;
11601 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
11602 U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
11603 U64_LO(bnx2x_sp_mapping(bp, mcast_config)),
11606 /* Accept one or more multicasts */
11607 struct dev_mc_list *mclist;
11608 u32 mc_filter[MC_HASH_SIZE];
11609 u32 crc, bit, regidx;
11612 memset(mc_filter, 0, 4 * MC_HASH_SIZE);
11614 for (i = 0, mclist = dev->mc_list;
11615 mclist && (i < dev->mc_count);
11616 i++, mclist = mclist->next) {
11618 DP(NETIF_MSG_IFUP, "Adding mcast MAC: %pM\n",
11621 crc = crc32c_le(0, mclist->dmi_addr, ETH_ALEN);
11622 bit = (crc >> 24) & 0xff;
11625 mc_filter[regidx] |= (1 << bit);
11628 for (i = 0; i < MC_HASH_SIZE; i++)
11629 REG_WR(bp, MC_HASH_OFFSET(bp, i),
11634 bp->rx_mode = rx_mode;
11635 bnx2x_set_storm_rx_mode(bp);
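/* Illustrative sketch (not driver code): the multicast hash used in the
 * "some multicasts" branch above. The top byte of a crc32c over the 6-byte
 * MAC picks one bit out of 256: the upper 3 bits select one of the eight
 * 32-bit MC_HASH registers, the lower 5 bits the bit within it. The
 * crc32c_fn parameter stands in for the kernel's crc32c_le().
 */
#if 0
#include <stdint.h>

#define MC_HASH_SIZE 8

static void mc_hash_set(uint32_t mc_filter[MC_HASH_SIZE],
			const uint8_t mac[6],
			uint32_t (*crc32c_fn)(uint32_t, const void *,
					      unsigned int))
{
	uint32_t bit = (crc32c_fn(0, mac, 6) >> 24) & 0xff;
	uint32_t regidx = bit >> 5;		 /* which filter register */

	mc_filter[regidx] |= 1u << (bit & 0x1f); /* which bit inside it   */
}
#endif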
11638 /* called with rtnl_lock */
11639 static int bnx2x_change_mac_addr(struct net_device *dev, void *p)
11641 struct sockaddr *addr = p;
11642 struct bnx2x *bp = netdev_priv(dev);
11644 if (!is_valid_ether_addr((u8 *)(addr->sa_data)))
11647 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
11648 if (netif_running(dev)) {
11649 if (CHIP_IS_E1(bp))
11650 bnx2x_set_eth_mac_addr_e1(bp, 1);
11652 bnx2x_set_eth_mac_addr_e1h(bp, 1);
11658 /* called with rtnl_lock */
11659 static int bnx2x_mdio_read(struct net_device *netdev, int prtad,
11660 int devad, u16 addr)
11662 struct bnx2x *bp = netdev_priv(netdev);
11665 u32 phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
11667 DP(NETIF_MSG_LINK, "mdio_read: prtad 0x%x, devad 0x%x, addr 0x%x\n",
11668 prtad, devad, addr);
11670 if (prtad != bp->mdio.prtad) {
11671 DP(NETIF_MSG_LINK, "prtad mismatch (cmd:0x%x != bp:0x%x)\n",
11672 prtad, bp->mdio.prtad);
11676 /* The HW expects different devad if CL22 is used */
11677 devad = (devad == MDIO_DEVAD_NONE) ? DEFAULT_PHY_DEV_ADDR : devad;
11679 bnx2x_acquire_phy_lock(bp);
11680 rc = bnx2x_cl45_read(bp, BP_PORT(bp), phy_type, prtad,
11681 devad, addr, &value);
11682 bnx2x_release_phy_lock(bp);
11683 DP(NETIF_MSG_LINK, "mdio_read_val 0x%x rc = 0x%x\n", value, rc);
11690 /* called with rtnl_lock */
11691 static int bnx2x_mdio_write(struct net_device *netdev, int prtad, int devad,
11692 u16 addr, u16 value)
11694 struct bnx2x *bp = netdev_priv(netdev);
11695 u32 ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
11698 DP(NETIF_MSG_LINK, "mdio_write: prtad 0x%x, devad 0x%x, addr 0x%x,"
11699 " value 0x%x\n", prtad, devad, addr, value);
11701 if (prtad != bp->mdio.prtad) {
11702 DP(NETIF_MSG_LINK, "prtad mismatch (cmd:0x%x != bp:0x%x)\n",
11703 prtad, bp->mdio.prtad);
11707 /* The HW expects different devad if CL22 is used */
11708 devad = (devad == MDIO_DEVAD_NONE) ? DEFAULT_PHY_DEV_ADDR : devad;
11710 bnx2x_acquire_phy_lock(bp);
11711 rc = bnx2x_cl45_write(bp, BP_PORT(bp), ext_phy_type, prtad,
11712 devad, addr, value);
11713 bnx2x_release_phy_lock(bp);
11717 /* called with rtnl_lock */
11718 static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
11720 struct bnx2x *bp = netdev_priv(dev);
11721 struct mii_ioctl_data *mdio = if_mii(ifr);
11723 DP(NETIF_MSG_LINK, "ioctl: phy id 0x%x, reg 0x%x, val_in 0x%x\n",
11724 mdio->phy_id, mdio->reg_num, mdio->val_in);
11726 if (!netif_running(dev))
11729 return mdio_mii_ioctl(&bp->mdio, mdio, cmd);
11732 /* called with rtnl_lock */
11733 static int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
11735 struct bnx2x *bp = netdev_priv(dev);
11738 if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
11739 ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE))
11742 /* This does not race with packet allocation
11743 * because the actual alloc size is
11744 * only updated as part of load
11746 dev->mtu = new_mtu;
11748 if (netif_running(dev)) {
11749 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
11750 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
11756 static void bnx2x_tx_timeout(struct net_device *dev)
11758 struct bnx2x *bp = netdev_priv(dev);
11760 #ifdef BNX2X_STOP_ON_ERROR
11764 /* This allows the netif to be shut down gracefully before resetting */
11765 schedule_work(&bp->reset_task);
11769 /* called with rtnl_lock */
11770 static void bnx2x_vlan_rx_register(struct net_device *dev,
11771 struct vlan_group *vlgrp)
11773 struct bnx2x *bp = netdev_priv(dev);
11777 /* Set flags according to the required capabilities */
11778 bp->flags &= ~(HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG);
11780 if (dev->features & NETIF_F_HW_VLAN_TX)
11781 bp->flags |= HW_VLAN_TX_FLAG;
11783 if (dev->features & NETIF_F_HW_VLAN_RX)
11784 bp->flags |= HW_VLAN_RX_FLAG;
11786 if (netif_running(dev))
11787 bnx2x_set_client_config(bp);
11792 #if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
11793 static void poll_bnx2x(struct net_device *dev)
11795 struct bnx2x *bp = netdev_priv(dev);
11797 disable_irq(bp->pdev->irq);
11798 bnx2x_interrupt(bp->pdev->irq, dev);
11799 enable_irq(bp->pdev->irq);
11803 static const struct net_device_ops bnx2x_netdev_ops = {
11804 .ndo_open = bnx2x_open,
11805 .ndo_stop = bnx2x_close,
11806 .ndo_start_xmit = bnx2x_start_xmit,
11807 .ndo_set_multicast_list = bnx2x_set_rx_mode,
11808 .ndo_set_mac_address = bnx2x_change_mac_addr,
11809 .ndo_validate_addr = eth_validate_addr,
11810 .ndo_do_ioctl = bnx2x_ioctl,
11811 .ndo_change_mtu = bnx2x_change_mtu,
11812 .ndo_tx_timeout = bnx2x_tx_timeout,
11814 .ndo_vlan_rx_register = bnx2x_vlan_rx_register,
11816 #if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
11817 .ndo_poll_controller = poll_bnx2x,
11821 static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
11822 struct net_device *dev)
11827 SET_NETDEV_DEV(dev, &pdev->dev);
11828 bp = netdev_priv(dev);
11833 bp->func = PCI_FUNC(pdev->devfn);
11835 rc = pci_enable_device(pdev);
11837 printk(KERN_ERR PFX "Cannot enable PCI device, aborting\n");
11841 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
11842 printk(KERN_ERR PFX "Cannot find PCI device base address,"
11845 goto err_out_disable;
11848 if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
11849 printk(KERN_ERR PFX "Cannot find second PCI device"
11850 " base address, aborting\n");
11852 goto err_out_disable;
11855 if (atomic_read(&pdev->enable_cnt) == 1) {
11856 rc = pci_request_regions(pdev, DRV_MODULE_NAME);
11858 printk(KERN_ERR PFX "Cannot obtain PCI resources,"
11860 goto err_out_disable;
11863 pci_set_master(pdev);
11864 pci_save_state(pdev);
11867 bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
11868 if (bp->pm_cap == 0) {
11869 printk(KERN_ERR PFX "Cannot find power management"
11870 " capability, aborting\n");
11872 goto err_out_release;
11875 bp->pcie_cap = pci_find_capability(pdev, PCI_CAP_ID_EXP);
11876 if (bp->pcie_cap == 0) {
11877 printk(KERN_ERR PFX "Cannot find PCI Express capability,"
11880 goto err_out_release;
11883 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) == 0) {
11884 bp->flags |= USING_DAC_FLAG;
11885 if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)) != 0) {
11886 printk(KERN_ERR PFX "pci_set_consistent_dma_mask"
11887 " failed, aborting\n");
11889 goto err_out_release;
11892 } else if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) {
11893 printk(KERN_ERR PFX "System does not support DMA,"
11896 goto err_out_release;
11899 dev->mem_start = pci_resource_start(pdev, 0);
11900 dev->base_addr = dev->mem_start;
11901 dev->mem_end = pci_resource_end(pdev, 0);
11903 dev->irq = pdev->irq;
11905 bp->regview = pci_ioremap_bar(pdev, 0);
11906 if (!bp->regview) {
11907 printk(KERN_ERR PFX "Cannot map register space, aborting\n");
11909 goto err_out_release;
11912 bp->doorbells = ioremap_nocache(pci_resource_start(pdev, 2),
11913 min_t(u64, BNX2X_DB_SIZE,
11914 pci_resource_len(pdev, 2)));
11915 if (!bp->doorbells) {
11916 printk(KERN_ERR PFX "Cannot map doorbell space, aborting\n");
11918 goto err_out_unmap;
11921 bnx2x_set_power_state(bp, PCI_D0);
11923 /* clean indirect addresses */
11924 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
11925 PCICFG_VENDOR_ID_OFFSET);
11926 REG_WR(bp, PXP2_REG_PGL_ADDR_88_F0 + BP_PORT(bp)*16, 0);
11927 REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F0 + BP_PORT(bp)*16, 0);
11928 REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0 + BP_PORT(bp)*16, 0);
11929 REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0 + BP_PORT(bp)*16, 0);
11931 dev->watchdog_timeo = TX_TIMEOUT;
11933 dev->netdev_ops = &bnx2x_netdev_ops;
11934 dev->ethtool_ops = &bnx2x_ethtool_ops;
11935 dev->features |= NETIF_F_SG;
11936 dev->features |= NETIF_F_HW_CSUM;
11937 if (bp->flags & USING_DAC_FLAG)
11938 dev->features |= NETIF_F_HIGHDMA;
11939 dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
11940 dev->features |= NETIF_F_TSO6;
11942 dev->features |= (NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX);
11943 bp->flags |= (HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG);
11945 dev->vlan_features |= NETIF_F_SG;
11946 dev->vlan_features |= NETIF_F_HW_CSUM;
11947 if (bp->flags & USING_DAC_FLAG)
11948 dev->vlan_features |= NETIF_F_HIGHDMA;
11949 dev->vlan_features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
11950 dev->vlan_features |= NETIF_F_TSO6;
11953 /* get_port_hwinfo() will set prtad and mmds properly */
11954 bp->mdio.prtad = MDIO_PRTAD_NONE;
11956 bp->mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22;
11957 bp->mdio.dev = dev;
11958 bp->mdio.mdio_read = bnx2x_mdio_read;
11959 bp->mdio.mdio_write = bnx2x_mdio_write;
11965 iounmap(bp->regview);
11966 bp->regview = NULL;
11968 if (bp->doorbells) {
11969 iounmap(bp->doorbells);
11970 bp->doorbells = NULL;
11974 if (atomic_read(&pdev->enable_cnt) == 1)
11975 pci_release_regions(pdev);
11978 pci_disable_device(pdev);
11979 pci_set_drvdata(pdev, NULL);
11985 static void __devinit bnx2x_get_pcie_width_speed(struct bnx2x *bp,
11986 int *width, int *speed)
11988 u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);
11990 *width = (val & PCICFG_LINK_WIDTH) >> PCICFG_LINK_WIDTH_SHIFT;
11992 /* return value of 1=2.5GHz 2=5GHz */
11993 *speed = (val & PCICFG_LINK_SPEED) >> PCICFG_LINK_SPEED_SHIFT;
11996 static int __devinit bnx2x_check_firmware(struct bnx2x *bp)
11998 const struct firmware *firmware = bp->firmware;
11999 struct bnx2x_fw_file_hdr *fw_hdr;
12000 struct bnx2x_fw_file_section *sections;
12001 u32 offset, len, num_ops;
12006 if (firmware->size < sizeof(struct bnx2x_fw_file_hdr))
12009 fw_hdr = (struct bnx2x_fw_file_hdr *)firmware->data;
12010 sections = (struct bnx2x_fw_file_section *)fw_hdr;
12012 /* Make sure none of the offsets and sizes make us read beyond
12013 * the end of the firmware data */
12014 for (i = 0; i < sizeof(*fw_hdr) / sizeof(*sections); i++) {
12015 offset = be32_to_cpu(sections[i].offset);
12016 len = be32_to_cpu(sections[i].len);
12017 if (offset + len > firmware->size) {
12018 printk(KERN_ERR PFX "Section %d length is out of "
12024 /* Likewise for the init_ops offsets */
12025 offset = be32_to_cpu(fw_hdr->init_ops_offsets.offset);
12026 ops_offsets = (u16 *)(firmware->data + offset);
12027 num_ops = be32_to_cpu(fw_hdr->init_ops.len) / sizeof(struct raw_op);
12029 for (i = 0; i < be32_to_cpu(fw_hdr->init_ops_offsets.len) / 2; i++) {
12030 if (be16_to_cpu(ops_offsets[i]) > num_ops) {
12031 printk(KERN_ERR PFX "Section offset %d is out of "
12037 /* Check FW version */
12038 offset = be32_to_cpu(fw_hdr->fw_version.offset);
12039 fw_ver = firmware->data + offset;
12040 if ((fw_ver[0] != BCM_5710_FW_MAJOR_VERSION) ||
12041 (fw_ver[1] != BCM_5710_FW_MINOR_VERSION) ||
12042 (fw_ver[2] != BCM_5710_FW_REVISION_VERSION) ||
12043 (fw_ver[3] != BCM_5710_FW_ENGINEERING_VERSION)) {
12044 printk(KERN_ERR PFX "Bad FW version:%d.%d.%d.%d."
12045 " Should be %d.%d.%d.%d\n",
12046 fw_ver[0], fw_ver[1], fw_ver[2],
12047 fw_ver[3], BCM_5710_FW_MAJOR_VERSION,
12048 BCM_5710_FW_MINOR_VERSION,
12049 BCM_5710_FW_REVISION_VERSION,
12050 BCM_5710_FW_ENGINEERING_VERSION);
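/* Illustrative sketch (editor's addition): the shape of the section bounds
 * check performed by bnx2x_check_firmware() above. Note that a raw
 * 'offset + len > size' test can wrap for hostile headers; comparing
 * against the remaining space avoids the u32 overflow.
 */
#if 0
#include <stdint.h>
#include <stddef.h>

static int section_in_bounds(uint32_t offset, uint32_t len, size_t fw_size)
{
	/* same intent as offset + len <= fw_size, without wraparound */
	return offset <= fw_size && len <= fw_size - offset;
}
#endif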
12057 static inline void be32_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
12059 const __be32 *source = (const __be32 *)_source;
12060 u32 *target = (u32 *)_target;
12063 for (i = 0; i < n/4; i++)
12064 target[i] = be32_to_cpu(source[i]);
12068 Ops array is stored in the following format:
12069 {op(8bit), offset(24bit, big endian), data(32bit, big endian)}
12071 static inline void bnx2x_prep_ops(const u8 *_source, u8 *_target, u32 n)
12073 const __be32 *source = (const __be32 *)_source;
12074 struct raw_op *target = (struct raw_op *)_target;
12077 for (i = 0, j = 0; i < n/8; i++, j += 2) {
12078 tmp = be32_to_cpu(source[j]);
12079 target[i].op = (tmp >> 24) & 0xff;
12080 target[i].offset = tmp & 0xffffff;
12081 target[i].raw_data = be32_to_cpu(source[j+1]);
12085 static inline void be16_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
12087 const __be16 *source = (const __be16 *)_source;
12088 u16 *target = (u16 *)_target;
12091 for (i = 0; i < n/2; i++)
12092 target[i] = be16_to_cpu(source[i]);
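/* Illustrative sketch (not driver code): decoding one ops-array entry in
 * the format described above - an 8-bit opcode and a 24-bit offset packed
 * in one big-endian word, followed by a big-endian 32-bit data word.
 */
#if 0
#include <stdint.h>

struct decoded_op {
	uint8_t op;
	uint32_t offset;	/* 24 significant bits */
	uint32_t data;
};

static uint32_t load_be32(const uint8_t *p)
{
	return ((uint32_t)p[0] << 24) | ((uint32_t)p[1] << 16) |
	       ((uint32_t)p[2] << 8) | (uint32_t)p[3];
}

static struct decoded_op decode_op(const uint8_t *src)	/* src: 8 bytes */
{
	uint32_t w = load_be32(src);
	struct decoded_op o;

	o.op = (w >> 24) & 0xff;	/* top byte: opcode     */
	o.offset = w & 0xffffff;	/* low 24 bits: offset  */
	o.data = load_be32(src + 4);	/* second word: payload */
	return o;
}
#endif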
12095 #define BNX2X_ALLOC_AND_SET(arr, lbl, func) \
12096 do { \
12097 u32 len = be32_to_cpu(fw_hdr->arr.len); \
12098 bp->arr = kmalloc(len, GFP_KERNEL); \
12099 if (!bp->arr) { \
12100 printk(KERN_ERR PFX "Failed to allocate %d bytes " \
12101 "for "#arr"\n", len); \
12102 goto lbl; \
12103 } \
12104 func(bp->firmware->data + be32_to_cpu(fw_hdr->arr.offset), \
12105 (u8 *)bp->arr, len); \
12106 } while (0)
12108 static int __devinit bnx2x_init_firmware(struct bnx2x *bp, struct device *dev)
12110 char fw_file_name[40] = {0};
12111 struct bnx2x_fw_file_hdr *fw_hdr;
12114 /* Create a FW file name */
12115 if (CHIP_IS_E1(bp))
12116 offset = sprintf(fw_file_name, FW_FILE_PREFIX_E1);
12118 offset = sprintf(fw_file_name, FW_FILE_PREFIX_E1H);
12120 sprintf(fw_file_name + offset, "%d.%d.%d.%d.fw",
12121 BCM_5710_FW_MAJOR_VERSION,
12122 BCM_5710_FW_MINOR_VERSION,
12123 BCM_5710_FW_REVISION_VERSION,
12124 BCM_5710_FW_ENGINEERING_VERSION);
12126 printk(KERN_INFO PFX "Loading %s\n", fw_file_name);
12128 rc = request_firmware(&bp->firmware, fw_file_name, dev);
12130 printk(KERN_ERR PFX "Can't load firmware file %s\n",
12132 goto request_firmware_exit;
12135 rc = bnx2x_check_firmware(bp);
12137 printk(KERN_ERR PFX "Corrupt firmware file %s\n", fw_file_name);
12138 goto request_firmware_exit;
12141 fw_hdr = (struct bnx2x_fw_file_hdr *)bp->firmware->data;
12143 /* Initialize the pointers to the init arrays */
12145 BNX2X_ALLOC_AND_SET(init_data, request_firmware_exit, be32_to_cpu_n);
12148 BNX2X_ALLOC_AND_SET(init_ops, init_ops_alloc_err, bnx2x_prep_ops);
12151 BNX2X_ALLOC_AND_SET(init_ops_offsets, init_offsets_alloc_err,
12154 /* STORMs firmware */
12155 INIT_TSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
12156 be32_to_cpu(fw_hdr->tsem_int_table_data.offset);
12157 INIT_TSEM_PRAM_DATA(bp) = bp->firmware->data +
12158 be32_to_cpu(fw_hdr->tsem_pram_data.offset);
12159 INIT_USEM_INT_TABLE_DATA(bp) = bp->firmware->data +
12160 be32_to_cpu(fw_hdr->usem_int_table_data.offset);
12161 INIT_USEM_PRAM_DATA(bp) = bp->firmware->data +
12162 be32_to_cpu(fw_hdr->usem_pram_data.offset);
12163 INIT_XSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
12164 be32_to_cpu(fw_hdr->xsem_int_table_data.offset);
12165 INIT_XSEM_PRAM_DATA(bp) = bp->firmware->data +
12166 be32_to_cpu(fw_hdr->xsem_pram_data.offset);
12167 INIT_CSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
12168 be32_to_cpu(fw_hdr->csem_int_table_data.offset);
12169 INIT_CSEM_PRAM_DATA(bp) = bp->firmware->data +
12170 be32_to_cpu(fw_hdr->csem_pram_data.offset);
12174 init_offsets_alloc_err:
12175 kfree(bp->init_ops);
12176 init_ops_alloc_err:
12177 kfree(bp->init_data);
12178 request_firmware_exit:
12179 release_firmware(bp->firmware);
12185 static int __devinit bnx2x_init_one(struct pci_dev *pdev,
12186 const struct pci_device_id *ent)
12188 struct net_device *dev = NULL;
12190 int pcie_width, pcie_speed;
12193 /* dev zeroed in init_etherdev */
12194 dev = alloc_etherdev_mq(sizeof(*bp), MAX_CONTEXT);
12196 printk(KERN_ERR PFX "Cannot allocate net device\n");
12200 bp = netdev_priv(dev);
12201 bp->msglevel = debug;
12203 pci_set_drvdata(pdev, dev);
12205 rc = bnx2x_init_dev(pdev, dev);
12211 rc = bnx2x_init_bp(bp);
12213 goto init_one_exit;
12215 /* Set init arrays */
12216 rc = bnx2x_init_firmware(bp, &pdev->dev);
12218 printk(KERN_ERR PFX "Error loading firmware\n");
12219 goto init_one_exit;
12222 rc = register_netdev(dev);
12224 dev_err(&pdev->dev, "Cannot register net device\n");
12225 goto init_one_exit;
12228 bnx2x_get_pcie_width_speed(bp, &pcie_width, &pcie_speed);
12229 printk(KERN_INFO "%s: %s (%c%d) PCI-E x%d %s found at mem %lx,"
12230 " IRQ %d, ", dev->name, board_info[ent->driver_data].name,
12231 (CHIP_REV(bp) >> 12) + 'A', (CHIP_METAL(bp) >> 4),
12232 pcie_width, (pcie_speed == 2) ? "5GHz (Gen2)" : "2.5GHz",
12233 dev->base_addr, bp->pdev->irq);
12234 printk(KERN_CONT "node addr %pM\n", dev->dev_addr);
12240 iounmap(bp->regview);
12243 iounmap(bp->doorbells);
12247 if (atomic_read(&pdev->enable_cnt) == 1)
12248 pci_release_regions(pdev);
12250 pci_disable_device(pdev);
12251 pci_set_drvdata(pdev, NULL);
12256 static void __devexit bnx2x_remove_one(struct pci_dev *pdev)
12258 struct net_device *dev = pci_get_drvdata(pdev);
12262 printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
12265 bp = netdev_priv(dev);
12267 unregister_netdev(dev);
12269 kfree(bp->init_ops_offsets);
12270 kfree(bp->init_ops);
12271 kfree(bp->init_data);
12272 release_firmware(bp->firmware);
12275 iounmap(bp->regview);
12278 iounmap(bp->doorbells);
12282 if (atomic_read(&pdev->enable_cnt) == 1)
12283 pci_release_regions(pdev);
12285 pci_disable_device(pdev);
12286 pci_set_drvdata(pdev, NULL);
12289 static int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
12291 struct net_device *dev = pci_get_drvdata(pdev);
12295 printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
12298 bp = netdev_priv(dev);
12302 pci_save_state(pdev);
12304 if (!netif_running(dev)) {
12309 netif_device_detach(dev);
12311 bnx2x_nic_unload(bp, UNLOAD_CLOSE);
12313 bnx2x_set_power_state(bp, pci_choose_state(pdev, state));
12320 static int bnx2x_resume(struct pci_dev *pdev)
12322 struct net_device *dev = pci_get_drvdata(pdev);
12327 printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
12330 bp = netdev_priv(dev);
12334 pci_restore_state(pdev);
12336 if (!netif_running(dev)) {
12341 bnx2x_set_power_state(bp, PCI_D0);
12342 netif_device_attach(dev);
12344 rc = bnx2x_nic_load(bp, LOAD_OPEN);
12351 static int bnx2x_eeh_nic_unload(struct bnx2x *bp)
12355 bp->state = BNX2X_STATE_ERROR;
12357 bp->rx_mode = BNX2X_RX_MODE_NONE;
12359 bnx2x_netif_stop(bp, 0);
12361 del_timer_sync(&bp->timer);
12362 bp->stats_state = STATS_STATE_DISABLED;
12363 DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");
12366 bnx2x_free_irq(bp);
12368 if (CHIP_IS_E1(bp)) {
12369 struct mac_configuration_cmd *config =
12370 bnx2x_sp(bp, mcast_config);
12372 for (i = 0; i < config->hdr.length; i++)
12373 CAM_INVALIDATE(config->config_table[i]);
12376 /* Free SKBs, SGEs, TPA pool and driver internals */
12377 bnx2x_free_skbs(bp);
12378 for_each_rx_queue(bp, i)
12379 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
12380 for_each_rx_queue(bp, i)
12381 netif_napi_del(&bnx2x_fp(bp, i, napi));
12382 bnx2x_free_mem(bp);
12384 bp->state = BNX2X_STATE_CLOSED;
12386 netif_carrier_off(bp->dev);
12391 static void bnx2x_eeh_recover(struct bnx2x *bp)
12395 mutex_init(&bp->port.phy_mutex);
12397 bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
12398 bp->link_params.shmem_base = bp->common.shmem_base;
12399 BNX2X_DEV_INFO("shmem offset is 0x%x\n", bp->common.shmem_base);
12401 if (!bp->common.shmem_base ||
12402 (bp->common.shmem_base < 0xA0000) ||
12403 (bp->common.shmem_base >= 0xC0000)) {
12404 BNX2X_DEV_INFO("MCP not active\n");
12405 bp->flags |= NO_MCP_FLAG;
12409 val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
12410 if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
12411 != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
12412 BNX2X_ERR("BAD MCP validity signature\n");
12414 if (!BP_NOMCP(bp)) {
12415 bp->fw_seq = (SHMEM_RD(bp, func_mb[BP_FUNC(bp)].drv_mb_header)
12416 & DRV_MSG_SEQ_NUMBER_MASK);
12417 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
12422 * bnx2x_io_error_detected - called when PCI error is detected
12423 * @pdev: Pointer to PCI device
12424 * @state: The current pci connection state
12426 * This function is called after a PCI bus error affecting
12427 * this device has been detected.
12429 static pci_ers_result_t bnx2x_io_error_detected(struct pci_dev *pdev,
12430 pci_channel_state_t state)
12432 struct net_device *dev = pci_get_drvdata(pdev);
12433 struct bnx2x *bp = netdev_priv(dev);
12437 netif_device_detach(dev);
12439 if (state == pci_channel_io_perm_failure) {
12441 return PCI_ERS_RESULT_DISCONNECT;
12444 if (netif_running(dev))
12445 bnx2x_eeh_nic_unload(bp);
12447 pci_disable_device(pdev);
12451 /* Request a slot reset */
12452 return PCI_ERS_RESULT_NEED_RESET;
12456 * bnx2x_io_slot_reset - called after the PCI bus has been reset
12457 * @pdev: Pointer to PCI device
12459 * Restart the card from scratch, as if from a cold boot.
12461 static pci_ers_result_t bnx2x_io_slot_reset(struct pci_dev *pdev)
12463 struct net_device *dev = pci_get_drvdata(pdev);
12464 struct bnx2x *bp = netdev_priv(dev);
12468 if (pci_enable_device(pdev)) {
12469 dev_err(&pdev->dev,
12470 "Cannot re-enable PCI device after reset\n");
12472 return PCI_ERS_RESULT_DISCONNECT;
12475 pci_set_master(pdev);
12476 pci_restore_state(pdev);
12478 if (netif_running(dev))
12479 bnx2x_set_power_state(bp, PCI_D0);
12483 return PCI_ERS_RESULT_RECOVERED;
12487 * bnx2x_io_resume - called when traffic can start flowing again
12488 * @pdev: Pointer to PCI device
12490 * This callback is called when the error recovery driver tells us that
12491 * it's OK to resume normal operation.
12493 static void bnx2x_io_resume(struct pci_dev *pdev)
12495 struct net_device *dev = pci_get_drvdata(pdev);
12496 struct bnx2x *bp = netdev_priv(dev);
12500 bnx2x_eeh_recover(bp);
12502 if (netif_running(dev))
12503 bnx2x_nic_load(bp, LOAD_NORMAL);
12505 netif_device_attach(dev);
12510 static struct pci_error_handlers bnx2x_err_handler = {
12511 .error_detected = bnx2x_io_error_detected,
12512 .slot_reset = bnx2x_io_slot_reset,
12513 .resume = bnx2x_io_resume,
12516 static struct pci_driver bnx2x_pci_driver = {
12517 .name = DRV_MODULE_NAME,
12518 .id_table = bnx2x_pci_tbl,
12519 .probe = bnx2x_init_one,
12520 .remove = __devexit_p(bnx2x_remove_one),
12521 .suspend = bnx2x_suspend,
12522 .resume = bnx2x_resume,
12523 .err_handler = &bnx2x_err_handler,
12526 static int __init bnx2x_init(void)
12530 printk(KERN_INFO "%s", version);
12532 bnx2x_wq = create_singlethread_workqueue("bnx2x");
12533 if (bnx2x_wq == NULL) {
12534 printk(KERN_ERR PFX "Cannot create workqueue\n");
12538 ret = pci_register_driver(&bnx2x_pci_driver);
12540 printk(KERN_ERR PFX "Cannot register driver\n");
12541 destroy_workqueue(bnx2x_wq);
12546 static void __exit bnx2x_cleanup(void)
12548 pci_unregister_driver(&bnx2x_pci_driver);
12550 destroy_workqueue(bnx2x_wq);
12553 module_init(bnx2x_init);
12554 module_exit(bnx2x_cleanup);
12558 /* count denotes the number of new completions we have seen */
12559 static void bnx2x_cnic_sp_post(struct bnx2x *bp, int count)
12561 struct eth_spe *spe;
12563 #ifdef BNX2X_STOP_ON_ERROR
12564 if (unlikely(bp->panic))
12568 spin_lock_bh(&bp->spq_lock);
12569 bp->cnic_spq_pending -= count;
12571 for (; bp->cnic_spq_pending < bp->cnic_eth_dev.max_kwqe_pending;
12572 bp->cnic_spq_pending++) {
12574 if (!bp->cnic_kwq_pending)
12577 spe = bnx2x_sp_get_next(bp);
12578 *spe = *bp->cnic_kwq_cons;
12580 bp->cnic_kwq_pending--;
12582 DP(NETIF_MSG_TIMER, "pending on SPQ %d, on KWQ %d count %d\n",
12583 bp->cnic_spq_pending, bp->cnic_kwq_pending, count);
12585 if (bp->cnic_kwq_cons == bp->cnic_kwq_last)
12586 bp->cnic_kwq_cons = bp->cnic_kwq;
12588 bp->cnic_kwq_cons++;
12590 bnx2x_sp_prod_update(bp);
12591 spin_unlock_bh(&bp->spq_lock);
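/* Illustrative sketch (editor's addition): the consumer-side wrap pattern
 * bnx2x_cnic_sp_post() uses above - copy the element out, then either wrap
 * the consumer pointer back to the ring base at the last slot or advance
 * it by one. Types are local stand-ins for the driver's eth_spe ring.
 */
#if 0
struct ring_elem { unsigned long payload; };

static struct ring_elem ring_consume(struct ring_elem *base,
				     struct ring_elem *last,
				     struct ring_elem **cons)
{
	struct ring_elem e = **cons;	/* copy out under the ring lock */

	if (*cons == last)		/* wrap at the end of the ring  */
		*cons = base;
	else
		(*cons)++;
	return e;
}
#endif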
12594 static int bnx2x_cnic_sp_queue(struct net_device *dev,
12595 struct kwqe_16 *kwqes[], u32 count)
12597 struct bnx2x *bp = netdev_priv(dev);
12600 #ifdef BNX2X_STOP_ON_ERROR
12601 if (unlikely(bp->panic))
12605 spin_lock_bh(&bp->spq_lock);
12607 for (i = 0; i < count; i++) {
12608 struct eth_spe *spe = (struct eth_spe *)kwqes[i];
12610 if (bp->cnic_kwq_pending == MAX_SP_DESC_CNT)
12613 *bp->cnic_kwq_prod = *spe;
12615 bp->cnic_kwq_pending++;
12617 DP(NETIF_MSG_TIMER, "L5 SPQE %x %x %x:%x pos %d\n",
12618 spe->hdr.conn_and_cmd_data, spe->hdr.type,
12619 spe->data.mac_config_addr.hi,
12620 spe->data.mac_config_addr.lo,
12621 bp->cnic_kwq_pending);
12623 if (bp->cnic_kwq_prod == bp->cnic_kwq_last)
12624 bp->cnic_kwq_prod = bp->cnic_kwq;
12626 bp->cnic_kwq_prod++;
12629 spin_unlock_bh(&bp->spq_lock);
12631 if (bp->cnic_spq_pending < bp->cnic_eth_dev.max_kwqe_pending)
12632 bnx2x_cnic_sp_post(bp, 0);
12637 static int bnx2x_cnic_ctl_send(struct bnx2x *bp, struct cnic_ctl_info *ctl)
12639 struct cnic_ops *c_ops;
12642 mutex_lock(&bp->cnic_mutex);
12643 c_ops = bp->cnic_ops;
12645 rc = c_ops->cnic_ctl(bp->cnic_data, ctl);
12646 mutex_unlock(&bp->cnic_mutex);
12651 static int bnx2x_cnic_ctl_send_bh(struct bnx2x *bp, struct cnic_ctl_info *ctl)
12653 struct cnic_ops *c_ops;
12657 c_ops = rcu_dereference(bp->cnic_ops);
12659 rc = c_ops->cnic_ctl(bp->cnic_data, ctl);
12666 * for commands that have no data
12668 static int bnx2x_cnic_notify(struct bnx2x *bp, int cmd)
12670 struct cnic_ctl_info ctl = {0};
12674 return bnx2x_cnic_ctl_send(bp, &ctl);
12677 static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid)
12679 struct cnic_ctl_info ctl;
12681 /* first we tell CNIC and only then we count this as a completion */
12682 ctl.cmd = CNIC_CTL_COMPLETION_CMD;
12683 ctl.data.comp.cid = cid;
12685 bnx2x_cnic_ctl_send_bh(bp, &ctl);
12686 bnx2x_cnic_sp_post(bp, 1);
12689 static int bnx2x_drv_ctl(struct net_device *dev, struct drv_ctl_info *ctl)
12691 struct bnx2x *bp = netdev_priv(dev);
12694 switch (ctl->cmd) {
12695 case DRV_CTL_CTXTBL_WR_CMD: {
12696 u32 index = ctl->data.io.offset;
12697 dma_addr_t addr = ctl->data.io.dma_addr;
12699 bnx2x_ilt_wr(bp, index, addr);
12703 case DRV_CTL_COMPLETION_CMD: {
12704 int count = ctl->data.comp.comp_count;
12706 bnx2x_cnic_sp_post(bp, count);
12710 /* rtnl_lock is held. */
12711 case DRV_CTL_START_L2_CMD: {
12712 u32 cli = ctl->data.ring.client_id;
12714 bp->rx_mode_cl_mask |= (1 << cli);
12715 bnx2x_set_storm_rx_mode(bp);
12719 /* rtnl_lock is held. */
12720 case DRV_CTL_STOP_L2_CMD: {
12721 u32 cli = ctl->data.ring.client_id;
12723 bp->rx_mode_cl_mask &= ~(1 << cli);
12724 bnx2x_set_storm_rx_mode(bp);
12729 BNX2X_ERR("unknown command %x\n", ctl->cmd);
12736 static void bnx2x_setup_cnic_irq_info(struct bnx2x *bp)
12738 struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
12740 if (bp->flags & USING_MSIX_FLAG) {
12741 cp->drv_state |= CNIC_DRV_STATE_USING_MSIX;
12742 cp->irq_arr[0].irq_flags |= CNIC_IRQ_FL_MSIX;
12743 cp->irq_arr[0].vector = bp->msix_table[1].vector;
12745 cp->drv_state &= ~CNIC_DRV_STATE_USING_MSIX;
12746 cp->irq_arr[0].irq_flags &= ~CNIC_IRQ_FL_MSIX;
12748 cp->irq_arr[0].status_blk = bp->cnic_sb;
12749 cp->irq_arr[0].status_blk_num = CNIC_SB_ID(bp);
12750 cp->irq_arr[1].status_blk = bp->def_status_blk;
12751 cp->irq_arr[1].status_blk_num = DEF_SB_ID;
12756 static int bnx2x_register_cnic(struct net_device *dev, struct cnic_ops *ops,
12759 struct bnx2x *bp = netdev_priv(dev);
12760 struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
12765 if (atomic_read(&bp->intr_sem) != 0)
12768 bp->cnic_kwq = kzalloc(PAGE_SIZE, GFP_KERNEL);
12772 bp->cnic_kwq_cons = bp->cnic_kwq;
12773 bp->cnic_kwq_prod = bp->cnic_kwq;
12774 bp->cnic_kwq_last = bp->cnic_kwq + MAX_SP_DESC_CNT;
12776 bp->cnic_spq_pending = 0;
12777 bp->cnic_kwq_pending = 0;
12779 bp->cnic_data = data;
12782 cp->drv_state = CNIC_DRV_STATE_REGD;
12784 bnx2x_init_sb(bp, bp->cnic_sb, bp->cnic_sb_mapping, CNIC_SB_ID(bp));
12786 bnx2x_setup_cnic_irq_info(bp);
12787 bnx2x_set_iscsi_eth_mac_addr(bp, 1);
12788 bp->cnic_flags |= BNX2X_CNIC_FLAG_MAC_SET;
12789 rcu_assign_pointer(bp->cnic_ops, ops);
12794 static int bnx2x_unregister_cnic(struct net_device *dev)
12796 struct bnx2x *bp = netdev_priv(dev);
12797 struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
12799 mutex_lock(&bp->cnic_mutex);
12800 if (bp->cnic_flags & BNX2X_CNIC_FLAG_MAC_SET) {
12801 bp->cnic_flags &= ~BNX2X_CNIC_FLAG_MAC_SET;
12802 bnx2x_set_iscsi_eth_mac_addr(bp, 0);
12805 rcu_assign_pointer(bp->cnic_ops, NULL);
12806 mutex_unlock(&bp->cnic_mutex);
12808 kfree(bp->cnic_kwq);
12809 bp->cnic_kwq = NULL;
12814 struct cnic_eth_dev *bnx2x_cnic_probe(struct net_device *dev)
12816 struct bnx2x *bp = netdev_priv(dev);
12817 struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
12819 cp->drv_owner = THIS_MODULE;
12820 cp->chip_id = CHIP_ID(bp);
12821 cp->pdev = bp->pdev;
12822 cp->io_base = bp->regview;
12823 cp->io_base2 = bp->doorbells;
12824 cp->max_kwqe_pending = 8;
12825 cp->ctx_blk_size = CNIC_CTX_PER_ILT * sizeof(union cdu_context);
12826 cp->ctx_tbl_offset = FUNC_ILT_BASE(BP_FUNC(bp)) + 1;
12827 cp->ctx_tbl_len = CNIC_ILT_LINES;
12828 cp->starting_cid = BCM_CNIC_CID_START;
12829 cp->drv_submit_kwqes_16 = bnx2x_cnic_sp_queue;
12830 cp->drv_ctl = bnx2x_drv_ctl;
12831 cp->drv_register_cnic = bnx2x_register_cnic;
12832 cp->drv_unregister_cnic = bnx2x_unregister_cnic;
12836 EXPORT_SYMBOL(bnx2x_cnic_probe);
12838 #endif /* BCM_CNIC */