1 /* bnx2x_main.c: Broadcom Everest network driver.
3 * Copyright (c) 2007-2009 Broadcom Corporation
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation.
9 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
10 * Written by: Eliezer Tamir
11 * Based on code from Michael Chan's bnx2 driver
12 * UDP CSUM errata workaround by Arik Gendelman
13 * Slowpath and fastpath rework by Vladislav Zolotarov
14 * Statistics and Link management by Yitchak Gertner
15 *
16 */
18 #include <linux/module.h>
19 #include <linux/moduleparam.h>
20 #include <linux/kernel.h>
21 #include <linux/device.h> /* for dev_info() */
22 #include <linux/timer.h>
23 #include <linux/errno.h>
24 #include <linux/ioport.h>
25 #include <linux/slab.h>
26 #include <linux/vmalloc.h>
27 #include <linux/interrupt.h>
28 #include <linux/pci.h>
29 #include <linux/init.h>
30 #include <linux/netdevice.h>
31 #include <linux/etherdevice.h>
32 #include <linux/skbuff.h>
33 #include <linux/dma-mapping.h>
34 #include <linux/bitops.h>
35 #include <linux/irq.h>
36 #include <linux/delay.h>
37 #include <asm/byteorder.h>
38 #include <linux/time.h>
39 #include <linux/ethtool.h>
40 #include <linux/mii.h>
41 #include <linux/if_vlan.h>
44 #include <net/checksum.h>
45 #include <net/ip6_checksum.h>
46 #include <linux/workqueue.h>
47 #include <linux/crc32.h>
48 #include <linux/crc32c.h>
49 #include <linux/prefetch.h>
50 #include <linux/zlib.h>
55 #include "bnx2x_init.h"
56 #include "bnx2x_init_ops.h"
57 #include "bnx2x_dump.h"
59 #define DRV_MODULE_VERSION "1.52.1"
60 #define DRV_MODULE_RELDATE "2009/08/12"
61 #define BNX2X_BC_VER 0x040200
63 #include <linux/firmware.h>
64 #include "bnx2x_fw_file_hdr.h"
66 #define FW_FILE_PREFIX_E1 "bnx2x-e1-"
67 #define FW_FILE_PREFIX_E1H "bnx2x-e1h-"
69 /* Time in jiffies before concluding the transmitter is hung */
70 #define TX_TIMEOUT (5*HZ)
72 static char version[] __devinitdata =
73 "Broadcom NetXtreme II 5771x 10Gigabit Ethernet Driver "
74 DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
76 MODULE_AUTHOR("Eliezer Tamir");
77 MODULE_DESCRIPTION("Broadcom NetXtreme II BCM57710/57711/57711E Driver");
78 MODULE_LICENSE("GPL");
79 MODULE_VERSION(DRV_MODULE_VERSION);
81 static int multi_mode = 1;
82 module_param(multi_mode, int, 0);
83 MODULE_PARM_DESC(multi_mode, " Multi queue mode "
84 "(0 Disable; 1 Enable (default))");
86 static int num_rx_queues;
87 module_param(num_rx_queues, int, 0);
88 MODULE_PARM_DESC(num_rx_queues, " Number of Rx queues for multi_mode=1"
89 " (default is half number of CPUs)");
91 static int num_tx_queues;
92 module_param(num_tx_queues, int, 0);
93 MODULE_PARM_DESC(num_tx_queues, " Number of Tx queues for multi_mode=1"
94 " (default is half number of CPUs)");
96 static int disable_tpa;
97 module_param(disable_tpa, int, 0);
98 MODULE_PARM_DESC(disable_tpa, " Disable the TPA (LRO) feature");
100 static int int_mode;
101 module_param(int_mode, int, 0);
102 MODULE_PARM_DESC(int_mode, " Force interrupt mode (1 INT#x; 2 MSI)");
104 static int dropless_fc;
105 module_param(dropless_fc, int, 0);
106 MODULE_PARM_DESC(dropless_fc, " Pause on exhausted host ring");
108 static int poll;
109 module_param(poll, int, 0);
110 MODULE_PARM_DESC(poll, " Use polling (for debug)");
112 static int mrrs = -1;
113 module_param(mrrs, int, 0);
114 MODULE_PARM_DESC(mrrs, " Force Max Read Req Size (0..3) (for debug)");
116 static int debug;
117 module_param(debug, int, 0);
118 MODULE_PARM_DESC(debug, " Default debug msglevel");
120 static int load_count[3]; /* 0-common, 1-port0, 2-port1 */
122 static struct workqueue_struct *bnx2x_wq;
124 enum bnx2x_board_type {
125 BCM57710 = 0,
126 BCM57711,
127 BCM57711E,
128 };
130 /* indexed by board_type, above */
131 static struct {
132 char *name;
133 } board_info[] __devinitdata = {
134 { "Broadcom NetXtreme II BCM57710 XGb" },
135 { "Broadcom NetXtreme II BCM57711 XGb" },
136 { "Broadcom NetXtreme II BCM57711E XGb" }
140 static const struct pci_device_id bnx2x_pci_tbl[] = {
141 { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57710), BCM57710 },
142 { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711), BCM57711 },
143 { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711E), BCM57711E },
144 { 0 }
145 };
147 MODULE_DEVICE_TABLE(pci, bnx2x_pci_tbl);
149 /****************************************************************************
150 * General service functions
151 ****************************************************************************/
153 /* used only at init
154  * locking is done by mcp
155  */
156 void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val)
157 {
158 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
159 pci_write_config_dword(bp->pdev, PCICFG_GRC_DATA, val);
160 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
161 PCICFG_VENDOR_ID_OFFSET);
162 }
164 static u32 bnx2x_reg_rd_ind(struct bnx2x *bp, u32 addr)
165 {
166 u32 val;
168 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
169 pci_read_config_dword(bp->pdev, PCICFG_GRC_DATA, &val);
170 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
171 PCICFG_VENDOR_ID_OFFSET);
173 return val;
174 }
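/*
 * Editor's note: the two helpers above implement indirect GRC register
 * access through PCI config space: PCICFG_GRC_ADDRESS selects the
 * target register, PCICFG_GRC_DATA carries the value, and the window
 * is parked back at PCICFG_VENDOR_ID_OFFSET afterwards so a stray
 * config cycle cannot hit an arbitrary register. This path is used
 * while DMAE is not yet ready.
 */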
176 static const u32 dmae_reg_go_c[] = {
177 DMAE_REG_GO_C0, DMAE_REG_GO_C1, DMAE_REG_GO_C2, DMAE_REG_GO_C3,
178 DMAE_REG_GO_C4, DMAE_REG_GO_C5, DMAE_REG_GO_C6, DMAE_REG_GO_C7,
179 DMAE_REG_GO_C8, DMAE_REG_GO_C9, DMAE_REG_GO_C10, DMAE_REG_GO_C11,
180 DMAE_REG_GO_C12, DMAE_REG_GO_C13, DMAE_REG_GO_C14, DMAE_REG_GO_C15
183 /* copy command into DMAE command memory and set DMAE command go */
184 static void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae,
185 			    int idx)
186 {
187 u32 cmd_offset;
188 int i;
190 cmd_offset = (DMAE_REG_CMD_MEM + sizeof(struct dmae_command) * idx);
191 for (i = 0; i < (sizeof(struct dmae_command)/4); i++) {
192 REG_WR(bp, cmd_offset + i*4, *(((u32 *)dmae) + i));
194 DP(BNX2X_MSG_OFF, "DMAE cmd[%d].%d (0x%08x) : 0x%08x\n",
195 idx, i, cmd_offset + i*4, *(((u32 *)dmae) + i));
197 REG_WR(bp, dmae_reg_go_c[idx], 1);
198 }
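/*
 * Editor's note: posting a DMAE command is a plain copy in register
 * space - the dmae_command image is written dword by dword into the
 * command memory slot of channel 'idx', and writing 1 to the matching
 * DMAE_REG_GO_C* register kicks that channel.
 */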
200 void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
201 		      u32 len32)
202 {
203 struct dmae_command dmae;
204 u32 *wb_comp = bnx2x_sp(bp, wb_comp);
207 if (!bp->dmae_ready) {
208 u32 *data = bnx2x_sp(bp, wb_data[0]);
210 DP(BNX2X_MSG_OFF, "DMAE is not ready (dst_addr %08x len32 %d)"
211 " using indirect\n", dst_addr, len32);
212 bnx2x_init_ind_wr(bp, dst_addr, data, len32);
216 memset(&dmae, 0, sizeof(struct dmae_command));
218 dmae.opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
219 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
220 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
221 #ifdef __BIG_ENDIAN
222 DMAE_CMD_ENDIANITY_B_DW_SWAP |
223 #else
224 DMAE_CMD_ENDIANITY_DW_SWAP |
225 #endif
226 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
227 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
228 dmae.src_addr_lo = U64_LO(dma_addr);
229 dmae.src_addr_hi = U64_HI(dma_addr);
230 dmae.dst_addr_lo = dst_addr >> 2;
231 dmae.dst_addr_hi = 0;
233 dmae.comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
234 dmae.comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
235 dmae.comp_val = DMAE_COMP_VAL;
237 DP(BNX2X_MSG_OFF, "DMAE: opcode 0x%08x\n"
238 DP_LEVEL "src_addr [%x:%08x] len [%d *4] "
239 "dst_addr [%x:%08x (%08x)]\n"
240 DP_LEVEL "comp_addr [%x:%08x] comp_val 0x%08x\n",
241 dmae.opcode, dmae.src_addr_hi, dmae.src_addr_lo,
242 dmae.len, dmae.dst_addr_hi, dmae.dst_addr_lo, dst_addr,
243 dmae.comp_addr_hi, dmae.comp_addr_lo, dmae.comp_val);
244 DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
245 bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
246 bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);
248 mutex_lock(&bp->dmae_mutex);
252 bnx2x_post_dmae(bp, &dmae, INIT_DMAE_C(bp));
256 while (*wb_comp != DMAE_COMP_VAL) {
257 DP(BNX2X_MSG_OFF, "wb_comp 0x%08x\n", *wb_comp);
260 BNX2X_ERR("DMAE timeout!\n");
264 /* adjust delay for emulation/FPGA */
265 if (CHIP_REV_IS_SLOW(bp))
271 mutex_unlock(&bp->dmae_mutex);
272 }
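/*
 * Editor's note: completion is detected by polling the wb_comp word in
 * the slowpath area; the DMAE block rewrites it with DMAE_COMP_VAL (the
 * comp_addr/comp_val pair programmed above) once the copy is done. The
 * dmae_mutex serializes callers because this completion word is shared.
 */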
274 void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
275 {
276 struct dmae_command dmae;
277 u32 *wb_comp = bnx2x_sp(bp, wb_comp);
280 if (!bp->dmae_ready) {
281 u32 *data = bnx2x_sp(bp, wb_data[0]);
284 DP(BNX2X_MSG_OFF, "DMAE is not ready (src_addr %08x len32 %d)"
285 " using indirect\n", src_addr, len32);
286 for (i = 0; i < len32; i++)
287 data[i] = bnx2x_reg_rd_ind(bp, src_addr + i*4);
291 memset(&dmae, 0, sizeof(struct dmae_command));
293 dmae.opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
294 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
295 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
296 #ifdef __BIG_ENDIAN
297 DMAE_CMD_ENDIANITY_B_DW_SWAP |
298 #else
299 DMAE_CMD_ENDIANITY_DW_SWAP |
300 #endif
301 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
302 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
303 dmae.src_addr_lo = src_addr >> 2;
304 dmae.src_addr_hi = 0;
305 dmae.dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_data));
306 dmae.dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_data));
308 dmae.comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
309 dmae.comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
310 dmae.comp_val = DMAE_COMP_VAL;
312 DP(BNX2X_MSG_OFF, "DMAE: opcode 0x%08x\n"
313 DP_LEVEL "src_addr [%x:%08x] len [%d *4] "
314 "dst_addr [%x:%08x (%08x)]\n"
315 DP_LEVEL "comp_addr [%x:%08x] comp_val 0x%08x\n",
316 dmae.opcode, dmae.src_addr_hi, dmae.src_addr_lo,
317 dmae.len, dmae.dst_addr_hi, dmae.dst_addr_lo, src_addr,
318 dmae.comp_addr_hi, dmae.comp_addr_lo, dmae.comp_val);
320 mutex_lock(&bp->dmae_mutex);
322 memset(bnx2x_sp(bp, wb_data[0]), 0, sizeof(u32) * 4);
325 bnx2x_post_dmae(bp, &dmae, INIT_DMAE_C(bp));
329 while (*wb_comp != DMAE_COMP_VAL) {
332 BNX2X_ERR("DMAE timeout!\n");
336 /* adjust delay for emulation/FPGA */
337 if (CHIP_REV_IS_SLOW(bp))
342 DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
343 bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
344 bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);
346 mutex_unlock(&bp->dmae_mutex);
347 }
349 void bnx2x_write_dmae_phys_len(struct bnx2x *bp, dma_addr_t phys_addr,
354 while (len > DMAE_LEN32_WR_MAX) {
355 bnx2x_write_dmae(bp, phys_addr + offset,
356 addr + offset, DMAE_LEN32_WR_MAX);
357 offset += DMAE_LEN32_WR_MAX * 4;
358 len -= DMAE_LEN32_WR_MAX;
361 bnx2x_write_dmae(bp, phys_addr + offset, addr + offset, len);
362 }
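/*
 * Editor's note: a single DMAE transfer is capped at DMAE_LEN32_WR_MAX
 * dwords, so long buffers go out in maximal chunks with the remainder
 * in a final call; e.g. len = 2*DMAE_LEN32_WR_MAX + 10 becomes two full
 * writes plus a 10-dword tail, the byte offset advancing by 4 bytes per
 * dword.
 */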
364 /* used only for slowpath so not inlined */
365 static void bnx2x_wb_wr(struct bnx2x *bp, int reg, u32 val_hi, u32 val_lo)
369 wb_write[0] = val_hi;
370 wb_write[1] = val_lo;
371 REG_WR_DMAE(bp, reg, wb_write, 2);
375 static u64 bnx2x_wb_rd(struct bnx2x *bp, int reg)
379 REG_RD_DMAE(bp, reg, wb_data, 2);
381 return HILO_U64(wb_data[0], wb_data[1]);
385 static int bnx2x_mc_assert(struct bnx2x *bp)
389 u32 row0, row1, row2, row3;
392 last_idx = REG_RD8(bp, BAR_XSTRORM_INTMEM +
393 XSTORM_ASSERT_LIST_INDEX_OFFSET);
395 BNX2X_ERR("XSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
397 /* print the asserts */
398 for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
400 row0 = REG_RD(bp, BAR_XSTRORM_INTMEM +
401 XSTORM_ASSERT_LIST_OFFSET(i));
402 row1 = REG_RD(bp, BAR_XSTRORM_INTMEM +
403 XSTORM_ASSERT_LIST_OFFSET(i) + 4);
404 row2 = REG_RD(bp, BAR_XSTRORM_INTMEM +
405 XSTORM_ASSERT_LIST_OFFSET(i) + 8);
406 row3 = REG_RD(bp, BAR_XSTRORM_INTMEM +
407 XSTORM_ASSERT_LIST_OFFSET(i) + 12);
409 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
410 BNX2X_ERR("XSTORM_ASSERT_INDEX 0x%x = 0x%08x"
411 " 0x%08x 0x%08x 0x%08x\n",
412 i, row3, row2, row1, row0);
420 last_idx = REG_RD8(bp, BAR_TSTRORM_INTMEM +
421 TSTORM_ASSERT_LIST_INDEX_OFFSET);
423 BNX2X_ERR("TSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
425 /* print the asserts */
426 for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
428 row0 = REG_RD(bp, BAR_TSTRORM_INTMEM +
429 TSTORM_ASSERT_LIST_OFFSET(i));
430 row1 = REG_RD(bp, BAR_TSTRORM_INTMEM +
431 TSTORM_ASSERT_LIST_OFFSET(i) + 4);
432 row2 = REG_RD(bp, BAR_TSTRORM_INTMEM +
433 TSTORM_ASSERT_LIST_OFFSET(i) + 8);
434 row3 = REG_RD(bp, BAR_TSTRORM_INTMEM +
435 TSTORM_ASSERT_LIST_OFFSET(i) + 12);
437 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
438 BNX2X_ERR("TSTORM_ASSERT_INDEX 0x%x = 0x%08x"
439 " 0x%08x 0x%08x 0x%08x\n",
440 i, row3, row2, row1, row0);
448 last_idx = REG_RD8(bp, BAR_CSTRORM_INTMEM +
449 CSTORM_ASSERT_LIST_INDEX_OFFSET);
451 BNX2X_ERR("CSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
453 /* print the asserts */
454 for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
456 row0 = REG_RD(bp, BAR_CSTRORM_INTMEM +
457 CSTORM_ASSERT_LIST_OFFSET(i));
458 row1 = REG_RD(bp, BAR_CSTRORM_INTMEM +
459 CSTORM_ASSERT_LIST_OFFSET(i) + 4);
460 row2 = REG_RD(bp, BAR_CSTRORM_INTMEM +
461 CSTORM_ASSERT_LIST_OFFSET(i) + 8);
462 row3 = REG_RD(bp, BAR_CSTRORM_INTMEM +
463 CSTORM_ASSERT_LIST_OFFSET(i) + 12);
465 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
466 BNX2X_ERR("CSTORM_ASSERT_INDEX 0x%x = 0x%08x"
467 " 0x%08x 0x%08x 0x%08x\n",
468 i, row3, row2, row1, row0);
476 last_idx = REG_RD8(bp, BAR_USTRORM_INTMEM +
477 USTORM_ASSERT_LIST_INDEX_OFFSET);
479 BNX2X_ERR("USTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
481 /* print the asserts */
482 for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
484 row0 = REG_RD(bp, BAR_USTRORM_INTMEM +
485 USTORM_ASSERT_LIST_OFFSET(i));
486 row1 = REG_RD(bp, BAR_USTRORM_INTMEM +
487 USTORM_ASSERT_LIST_OFFSET(i) + 4);
488 row2 = REG_RD(bp, BAR_USTRORM_INTMEM +
489 USTORM_ASSERT_LIST_OFFSET(i) + 8);
490 row3 = REG_RD(bp, BAR_USTRORM_INTMEM +
491 USTORM_ASSERT_LIST_OFFSET(i) + 12);
493 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
494 BNX2X_ERR("USTORM_ASSERT_INDEX 0x%x = 0x%08x"
495 " 0x%08x 0x%08x 0x%08x\n",
496 i, row3, row2, row1, row0);
506 static void bnx2x_fw_dump(struct bnx2x *bp)
507 {
508 u32 mark, offset;
509 __be32 data[9];
510 int word;
512 mark = REG_RD(bp, MCP_REG_MCPR_SCRATCH + 0xf104);
513 mark = ((mark + 0x3) & ~0x3);
514 printk(KERN_ERR PFX "begin fw dump (mark 0x%x)\n", mark);
516 printk(KERN_ERR PFX);
517 for (offset = mark - 0x08000000; offset <= 0xF900; offset += 0x8*4) {
518 for (word = 0; word < 8; word++)
519 data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
520 			  offset + 4*word));
521 data[8] = 0x0;
522 printk(KERN_CONT "%s", (char *)data);
524 for (offset = 0xF108; offset <= mark - 0x08000000; offset += 0x8*4) {
525 for (word = 0; word < 8; word++)
526 data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
527 			  offset + 4*word));
528 data[8] = 0x0;
529 printk(KERN_CONT "%s", (char *)data);
531 printk(KERN_ERR PFX "end of fw dump\n");
534 static void bnx2x_panic_dump(struct bnx2x *bp)
539 bp->stats_state = STATS_STATE_DISABLED;
540 DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");
542 BNX2X_ERR("begin crash dump -----------------\n");
546 BNX2X_ERR("def_c_idx(%u) def_u_idx(%u) def_x_idx(%u)"
547 " def_t_idx(%u) def_att_idx(%u) attn_state(%u)"
548 " spq_prod_idx(%u)\n",
549 bp->def_c_idx, bp->def_u_idx, bp->def_x_idx, bp->def_t_idx,
550 bp->def_att_idx, bp->attn_state, bp->spq_prod_idx);
553 for_each_rx_queue(bp, i) {
554 struct bnx2x_fastpath *fp = &bp->fp[i];
556 BNX2X_ERR("fp%d: rx_bd_prod(%x) rx_bd_cons(%x)"
557 " *rx_bd_cons_sb(%x) rx_comp_prod(%x)"
558 " rx_comp_cons(%x) *rx_cons_sb(%x)\n",
559 i, fp->rx_bd_prod, fp->rx_bd_cons,
560 le16_to_cpu(*fp->rx_bd_cons_sb), fp->rx_comp_prod,
561 fp->rx_comp_cons, le16_to_cpu(*fp->rx_cons_sb));
562 BNX2X_ERR(" rx_sge_prod(%x) last_max_sge(%x)"
563 " fp_u_idx(%x) *sb_u_idx(%x)\n",
564 fp->rx_sge_prod, fp->last_max_sge,
565 le16_to_cpu(fp->fp_u_idx),
566 fp->status_blk->u_status_block.status_block_index);
570 for_each_tx_queue(bp, i) {
571 struct bnx2x_fastpath *fp = &bp->fp[i];
573 BNX2X_ERR("fp%d: tx_pkt_prod(%x) tx_pkt_cons(%x)"
574 " tx_bd_prod(%x) tx_bd_cons(%x) *tx_cons_sb(%x)\n",
575 i, fp->tx_pkt_prod, fp->tx_pkt_cons, fp->tx_bd_prod,
576 fp->tx_bd_cons, le16_to_cpu(*fp->tx_cons_sb));
577 BNX2X_ERR(" fp_c_idx(%x) *sb_c_idx(%x)"
578 " tx_db_prod(%x)\n", le16_to_cpu(fp->fp_c_idx),
579 fp->status_blk->c_status_block.status_block_index,
580 fp->tx_db.data.prod);
585 for_each_rx_queue(bp, i) {
586 struct bnx2x_fastpath *fp = &bp->fp[i];
588 start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10);
589 end = RX_BD(le16_to_cpu(*fp->rx_cons_sb) + 503);
590 for (j = start; j != end; j = RX_BD(j + 1)) {
591 u32 *rx_bd = (u32 *)&fp->rx_desc_ring[j];
592 struct sw_rx_bd *sw_bd = &fp->rx_buf_ring[j];
594 BNX2X_ERR("fp%d: rx_bd[%x]=[%x:%x] sw_bd=[%p]\n",
595 i, j, rx_bd[1], rx_bd[0], sw_bd->skb);
598 start = RX_SGE(fp->rx_sge_prod);
599 end = RX_SGE(fp->last_max_sge);
600 for (j = start; j != end; j = RX_SGE(j + 1)) {
601 u32 *rx_sge = (u32 *)&fp->rx_sge_ring[j];
602 struct sw_rx_page *sw_page = &fp->rx_page_ring[j];
604 BNX2X_ERR("fp%d: rx_sge[%x]=[%x:%x] sw_page=[%p]\n",
605 i, j, rx_sge[1], rx_sge[0], sw_page->page);
608 start = RCQ_BD(fp->rx_comp_cons - 10);
609 end = RCQ_BD(fp->rx_comp_cons + 503);
610 for (j = start; j != end; j = RCQ_BD(j + 1)) {
611 u32 *cqe = (u32 *)&fp->rx_comp_ring[j];
613 BNX2X_ERR("fp%d: cqe[%x]=[%x:%x:%x:%x]\n",
614 i, j, cqe[0], cqe[1], cqe[2], cqe[3]);
619 for_each_tx_queue(bp, i) {
620 struct bnx2x_fastpath *fp = &bp->fp[i];
622 start = TX_BD(le16_to_cpu(*fp->tx_cons_sb) - 10);
623 end = TX_BD(le16_to_cpu(*fp->tx_cons_sb) + 245);
624 for (j = start; j != end; j = TX_BD(j + 1)) {
625 struct sw_tx_bd *sw_bd = &fp->tx_buf_ring[j];
627 BNX2X_ERR("fp%d: packet[%x]=[%p,%x]\n",
628 i, j, sw_bd->skb, sw_bd->first_bd);
631 start = TX_BD(fp->tx_bd_cons - 10);
632 end = TX_BD(fp->tx_bd_cons + 254);
633 for (j = start; j != end; j = TX_BD(j + 1)) {
634 u32 *tx_bd = (u32 *)&fp->tx_desc_ring[j];
636 BNX2X_ERR("fp%d: tx_bd[%x]=[%x:%x:%x:%x]\n",
637 i, j, tx_bd[0], tx_bd[1], tx_bd[2], tx_bd[3]);
643 BNX2X_ERR("end crash dump -----------------\n");
646 static void bnx2x_int_enable(struct bnx2x *bp)
648 int port = BP_PORT(bp);
649 u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
650 u32 val = REG_RD(bp, addr);
651 int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
652 int msi = (bp->flags & USING_MSI_FLAG) ? 1 : 0;
655 val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
656 HC_CONFIG_0_REG_INT_LINE_EN_0);
657 val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
658 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
660 val &= ~HC_CONFIG_0_REG_INT_LINE_EN_0;
661 val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
662 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
663 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
665 val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
666 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
667 HC_CONFIG_0_REG_INT_LINE_EN_0 |
668 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
670 DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
673 REG_WR(bp, addr, val);
675 val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0;
678 DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x) mode %s\n",
679 val, port, addr, (msix ? "MSI-X" : (msi ? "MSI" : "INTx")));
681 REG_WR(bp, addr, val);
682 /*
683  * Ensure that HC_CONFIG is written before leading/trailing edge config
684  */
685 mmiowb();
686 barrier();
688 if (CHIP_IS_E1H(bp)) {
689 /* init leading/trailing edge */
691 val = (0xee0f | (1 << (BP_E1HVN(bp) + 4)));
693 /* enable nig and gpio3 attention */
698 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
699 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
702 /* Make sure that interrupts are indeed enabled from here on */
703 mmiowb();
704 }
706 static void bnx2x_int_disable(struct bnx2x *bp)
708 int port = BP_PORT(bp);
709 u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
710 u32 val = REG_RD(bp, addr);
712 val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
713 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
714 HC_CONFIG_0_REG_INT_LINE_EN_0 |
715 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
717 DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
720 /* flush all outstanding writes */
721 mmiowb();
723 REG_WR(bp, addr, val);
724 if (REG_RD(bp, addr) != val)
725 BNX2X_ERR("BUG! proper val not read from IGU!\n");
728 static void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw)
730 int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
733 /* disable interrupt handling */
734 atomic_inc(&bp->intr_sem);
735 smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */
738 /* prevent the HW from sending interrupts */
739 bnx2x_int_disable(bp);
741 /* make sure all ISRs are done */
742 if (msix) {
743 	synchronize_irq(bp->msix_table[0].vector);
744 	offset = 1;
748 for_each_queue(bp, i)
749 synchronize_irq(bp->msix_table[i + offset].vector);
750 } else
751 	synchronize_irq(bp->pdev->irq);
753 /* make sure sp_task is not running */
754 cancel_delayed_work(&bp->sp_task);
755 flush_workqueue(bnx2x_wq);
756 }
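/*
 * Editor's note: the teardown order above matters - intr_sem makes the
 * ISRs return early, bnx2x_int_disable() stops the HC from raising new
 * interrupts, synchronize_irq() waits out any handler still running,
 * and only then are the slowpath work item cancelled and the workqueue
 * flushed.
 */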
761 * General service functions
764 static inline void bnx2x_ack_sb(struct bnx2x *bp, u8 sb_id,
765 u8 storm, u16 index, u8 op, u8 update)
767 u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
768 COMMAND_REG_INT_ACK);
769 struct igu_ack_register igu_ack;
771 igu_ack.status_block_index = index;
772 igu_ack.sb_id_and_flags =
773 ((sb_id << IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT) |
774 (storm << IGU_ACK_REGISTER_STORM_ID_SHIFT) |
775 (update << IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT) |
776 (op << IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT));
778 DP(BNX2X_MSG_OFF, "write 0x%08x to HC addr 0x%x\n",
779 (*(u32 *)&igu_ack), hc_addr);
780 REG_WR(bp, hc_addr, (*(u32 *)&igu_ack));
782 /* Make sure that ACK is written */
783 mmiowb();
784 barrier();
785 }
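/*
 * Editor's note: an IGU ack is a single 32-bit write: the status block
 * index plus the sb_id/storm/update/op fields packed with the
 * IGU_ACK_REGISTER_* shifts, so acknowledging and optionally
 * re-enabling the interrupt line is one atomic register access.
 */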
787 static inline u16 bnx2x_update_fpsb_idx(struct bnx2x_fastpath *fp)
788 {
789 struct host_status_block *fpsb = fp->status_blk;
790 u16 rc = 0;
792 barrier(); /* status block is written to by the chip */
793 if (fp->fp_c_idx != fpsb->c_status_block.status_block_index) {
794 fp->fp_c_idx = fpsb->c_status_block.status_block_index;
795 rc |= 1;
796 }
797 if (fp->fp_u_idx != fpsb->u_status_block.status_block_index) {
798 fp->fp_u_idx = fpsb->u_status_block.status_block_index;
799 rc |= 2;
800 }
802 return rc;
803 }
804 static u16 bnx2x_ack_int(struct bnx2x *bp)
806 u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
807 COMMAND_REG_SIMD_MASK);
808 u32 result = REG_RD(bp, hc_addr);
810 DP(BNX2X_MSG_OFF, "read 0x%08x from HC addr 0x%x\n",
818 * fast path service functions
821 static inline int bnx2x_has_tx_work_unload(struct bnx2x_fastpath *fp)
823 /* Tell compiler that consumer and producer can change */
824 barrier();
825 return (fp->tx_pkt_prod != fp->tx_pkt_cons);
826 }
828 /* free skb in the packet ring at pos idx
829  * return idx of last bd freed
830  */
831 static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fastpath *fp,
834 struct sw_tx_bd *tx_buf = &fp->tx_buf_ring[idx];
835 struct eth_tx_start_bd *tx_start_bd;
836 struct eth_tx_bd *tx_data_bd;
837 struct sk_buff *skb = tx_buf->skb;
838 u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
841 DP(BNX2X_MSG_OFF, "pkt_idx %d buff @(%p)->skb %p\n",
845 DP(BNX2X_MSG_OFF, "free bd_idx %d\n", bd_idx);
846 tx_start_bd = &fp->tx_desc_ring[bd_idx].start_bd;
847 pci_unmap_single(bp->pdev, BD_UNMAP_ADDR(tx_start_bd),
848 BD_UNMAP_LEN(tx_start_bd), PCI_DMA_TODEVICE);
850 nbd = le16_to_cpu(tx_start_bd->nbd) - 1;
851 #ifdef BNX2X_STOP_ON_ERROR
852 if ((nbd - 1) > (MAX_SKB_FRAGS + 2)) {
853 BNX2X_ERR("BAD nbd!\n");
857 new_cons = nbd + tx_buf->first_bd;
859 /* Get the next bd */
860 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
862 /* Skip a parse bd... */
864 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
866 /* ...and the TSO split header bd since they have no mapping */
867 if (tx_buf->flags & BNX2X_TSO_SPLIT_BD) {
869 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
875 DP(BNX2X_MSG_OFF, "free frag bd_idx %d\n", bd_idx);
876 tx_data_bd = &fp->tx_desc_ring[bd_idx].reg_bd;
877 pci_unmap_page(bp->pdev, BD_UNMAP_ADDR(tx_data_bd),
878 BD_UNMAP_LEN(tx_data_bd), PCI_DMA_TODEVICE);
880 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
885 dev_kfree_skb_any(skb);
886 tx_buf->first_bd = 0;
887 tx_buf->skb = NULL;
889 return new_cons;
890 }
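/*
 * Editor's note: a transmitted packet occupies nbd chained BDs - the
 * start BD, an optional parse BD and TSO split header BD (which carry
 * no DMA mapping of their own), then one data BD per fragment - so only
 * the start BD and the frag BDs are unmapped while the cursor simply
 * skips over the rest.
 */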
892 static inline u16 bnx2x_tx_avail(struct bnx2x_fastpath *fp)
898 barrier(); /* Tell compiler that prod and cons can change */
899 prod = fp->tx_bd_prod;
900 cons = fp->tx_bd_cons;
902 /* NUM_TX_RINGS = number of "next-page" entries
903 It will be used as a threshold */
904 used = SUB_S16(prod, cons) + (s16)NUM_TX_RINGS;
906 #ifdef BNX2X_STOP_ON_ERROR
908 WARN_ON(used > fp->bp->tx_ring_size);
909 WARN_ON((fp->bp->tx_ring_size - used) > MAX_TX_AVAIL);
912 return (s16)(fp->bp->tx_ring_size) - used;
913 }
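/*
 * Editor's note: the "next page" BDs (one per TX ring page,
 * NUM_TX_RINGS in total) can never carry data, so they are counted as
 * permanently used; the value returned is how many BDs start_xmit() may
 * still consume.
 */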
915 static void bnx2x_tx_int(struct bnx2x_fastpath *fp)
917 struct bnx2x *bp = fp->bp;
918 struct netdev_queue *txq;
919 u16 hw_cons, sw_cons, bd_cons = fp->tx_bd_cons;
922 #ifdef BNX2X_STOP_ON_ERROR
923 if (unlikely(bp->panic))
927 txq = netdev_get_tx_queue(bp->dev, fp->index - bp->num_rx_queues);
928 hw_cons = le16_to_cpu(*fp->tx_cons_sb);
929 sw_cons = fp->tx_pkt_cons;
931 while (sw_cons != hw_cons) {
934 pkt_cons = TX_BD(sw_cons);
936 /* prefetch(bp->tx_buf_ring[pkt_cons].skb); */
938 DP(NETIF_MSG_TX_DONE, "hw_cons %u sw_cons %u pkt_cons %u\n",
939 hw_cons, sw_cons, pkt_cons);
941 /* if (NEXT_TX_IDX(sw_cons) != hw_cons) {
943 prefetch(fp->tx_buf_ring[NEXT_TX_IDX(sw_cons)].skb);
946 bd_cons = bnx2x_free_tx_pkt(bp, fp, pkt_cons);
951 fp->tx_pkt_cons = sw_cons;
952 fp->tx_bd_cons = bd_cons;
954 /* TBD need a thresh? */
955 if (unlikely(netif_tx_queue_stopped(txq))) {
957 /* Need to make the tx_bd_cons update visible to start_xmit()
958 * before checking for netif_tx_queue_stopped(). Without the
959 * memory barrier, there is a small possibility that
960 	 * start_xmit() will miss it and cause the queue to be stopped
961 	 * forever.
962 	 */
963 	smp_mb();
965 if ((netif_tx_queue_stopped(txq)) &&
966 (bp->state == BNX2X_STATE_OPEN) &&
967 (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3))
968 			netif_tx_wake_queue(txq);
969 	}
970 }
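/*
 * Editor's note: the queue is only woken after re-checking
 * netif_tx_queue_stopped() with tx_avail measured against
 * MAX_SKB_FRAGS + 3 - presumably the worst-case BD count of a single
 * packet, mirroring the stop condition in start_xmit() - so a
 * concurrent stop cannot be missed.
 */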
973 static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid);
976 static void bnx2x_sp_event(struct bnx2x_fastpath *fp,
977 union eth_rx_cqe *rr_cqe)
979 struct bnx2x *bp = fp->bp;
980 int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data);
981 int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data);
984 "fp %d cid %d got ramrod #%d state is %x type is %d\n",
985 fp->index, cid, command, bp->state,
986 rr_cqe->ramrod_cqe.ramrod_type);
991 switch (command | fp->state) {
992 case (RAMROD_CMD_ID_ETH_CLIENT_SETUP |
993 BNX2X_FP_STATE_OPENING):
994 DP(NETIF_MSG_IFUP, "got MULTI[%d] setup ramrod\n",
996 fp->state = BNX2X_FP_STATE_OPEN;
999 case (RAMROD_CMD_ID_ETH_HALT | BNX2X_FP_STATE_HALTING):
1000 DP(NETIF_MSG_IFDOWN, "got MULTI[%d] halt ramrod\n",
1002 fp->state = BNX2X_FP_STATE_HALTED;
1006 BNX2X_ERR("unexpected MC reply (%d) "
1007 "fp->state is %x\n", command, fp->state);
1010 mb(); /* force bnx2x_wait_ramrod() to see the change */
1014 switch (command | bp->state) {
1015 case (RAMROD_CMD_ID_ETH_PORT_SETUP | BNX2X_STATE_OPENING_WAIT4_PORT):
1016 DP(NETIF_MSG_IFUP, "got setup ramrod\n");
1017 bp->state = BNX2X_STATE_OPEN;
1020 case (RAMROD_CMD_ID_ETH_HALT | BNX2X_STATE_CLOSING_WAIT4_HALT):
1021 DP(NETIF_MSG_IFDOWN, "got halt ramrod\n");
1022 bp->state = BNX2X_STATE_CLOSING_WAIT4_DELETE;
1023 fp->state = BNX2X_FP_STATE_HALTED;
1026 case (RAMROD_CMD_ID_ETH_CFC_DEL | BNX2X_STATE_CLOSING_WAIT4_HALT):
1027 DP(NETIF_MSG_IFDOWN, "got delete ramrod for MULTI[%d]\n", cid);
1028 bnx2x_fp(bp, cid, state) = BNX2X_FP_STATE_CLOSED;
1032 case (RAMROD_CMD_ID_ETH_CFC_DEL | BNX2X_STATE_OPEN):
1033 DP(NETIF_MSG_IFDOWN, "got delete ramrod for CID %d\n", cid);
1034 bnx2x_cnic_cfc_comp(bp, cid);
1038 case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_OPEN):
1039 case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_DIAG):
1040 DP(NETIF_MSG_IFUP, "got set mac ramrod\n");
1041 bp->set_mac_pending--;
1045 case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_CLOSING_WAIT4_HALT):
1046 DP(NETIF_MSG_IFDOWN, "got (un)set mac ramrod\n");
1047 bp->set_mac_pending--;
1052 BNX2X_ERR("unexpected MC reply (%d) bp->state is %x\n",
1053 command, bp->state);
1056 mb(); /* force bnx2x_wait_ramrod() to see the change */
1059 static inline void bnx2x_free_rx_sge(struct bnx2x *bp,
1060 struct bnx2x_fastpath *fp, u16 index)
1062 struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
1063 struct page *page = sw_buf->page;
1064 struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
1066 /* Skip "next page" elements */
1070 pci_unmap_page(bp->pdev, pci_unmap_addr(sw_buf, mapping),
1071 SGE_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);
1072 __free_pages(page, PAGES_PER_SGE_SHIFT);
1074 sw_buf->page = NULL;
1079 static inline void bnx2x_free_rx_sge_range(struct bnx2x *bp,
1080 struct bnx2x_fastpath *fp, int last)
1084 for (i = 0; i < last; i++)
1085 bnx2x_free_rx_sge(bp, fp, i);
1088 static inline int bnx2x_alloc_rx_sge(struct bnx2x *bp,
1089 struct bnx2x_fastpath *fp, u16 index)
1091 struct page *page = alloc_pages(GFP_ATOMIC, PAGES_PER_SGE_SHIFT);
1092 struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
1093 struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
1096 if (unlikely(page == NULL))
1099 mapping = pci_map_page(bp->pdev, page, 0, SGE_PAGE_SIZE*PAGES_PER_SGE,
1100 PCI_DMA_FROMDEVICE);
1101 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
1102 __free_pages(page, PAGES_PER_SGE_SHIFT);
1106 sw_buf->page = page;
1107 pci_unmap_addr_set(sw_buf, mapping, mapping);
1109 sge->addr_hi = cpu_to_le32(U64_HI(mapping));
1110 sge->addr_lo = cpu_to_le32(U64_LO(mapping));
1115 static inline int bnx2x_alloc_rx_skb(struct bnx2x *bp,
1116 struct bnx2x_fastpath *fp, u16 index)
1118 struct sk_buff *skb;
1119 struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
1120 struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
1123 skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
1124 if (unlikely(skb == NULL))
1127 mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_size,
1128 PCI_DMA_FROMDEVICE);
1129 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
1135 pci_unmap_addr_set(rx_buf, mapping, mapping);
1137 rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
1138 rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
1143 /* note that we are not allocating a new skb,
1144 * we are just moving one from cons to prod
1145 * we are not creating a new mapping,
1146  * so there is no need to check for dma_mapping_error().
1147  */
1148 static void bnx2x_reuse_rx_skb(struct bnx2x_fastpath *fp,
1149 struct sk_buff *skb, u16 cons, u16 prod)
1151 struct bnx2x *bp = fp->bp;
1152 struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
1153 struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
1154 struct eth_rx_bd *cons_bd = &fp->rx_desc_ring[cons];
1155 struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
1157 pci_dma_sync_single_for_device(bp->pdev,
1158 pci_unmap_addr(cons_rx_buf, mapping),
1159 RX_COPY_THRESH, PCI_DMA_FROMDEVICE);
1161 prod_rx_buf->skb = cons_rx_buf->skb;
1162 pci_unmap_addr_set(prod_rx_buf, mapping,
1163 pci_unmap_addr(cons_rx_buf, mapping));
1164 *prod_bd = *cons_bd;
1165 }
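/*
 * Editor's note: "reuse" moves the skb and its existing DMA mapping
 * from the consumer slot to the producer slot and copies the hardware
 * BD along, so nothing is allocated on this path; only a
 * sync-for-device of the first RX_COPY_THRESH bytes is required.
 */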
1167 static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
1170 u16 last_max = fp->last_max_sge;
1172 if (SUB_S16(idx, last_max) > 0)
1173 fp->last_max_sge = idx;
1176 static void bnx2x_clear_sge_mask_next_elems(struct bnx2x_fastpath *fp)
1180 for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
1181 int idx = RX_SGE_CNT * i - 1;
1183 for (j = 0; j < 2; j++) {
1184 SGE_MASK_CLEAR_BIT(fp, idx);
1190 static void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
1191 struct eth_fast_path_rx_cqe *fp_cqe)
1193 struct bnx2x *bp = fp->bp;
1194 u16 sge_len = SGE_PAGE_ALIGN(le16_to_cpu(fp_cqe->pkt_len) -
1195 le16_to_cpu(fp_cqe->len_on_bd)) >>
1197 u16 last_max, last_elem, first_elem;
1204 /* First mark all used pages */
1205 for (i = 0; i < sge_len; i++)
1206 SGE_MASK_CLEAR_BIT(fp, RX_SGE(le16_to_cpu(fp_cqe->sgl[i])));
1208 DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
1209 sge_len - 1, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));
1211 /* Here we assume that the last SGE index is the biggest */
1212 prefetch((void *)(fp->sge_mask));
1213 bnx2x_update_last_max_sge(fp, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));
1215 last_max = RX_SGE(fp->last_max_sge);
1216 last_elem = last_max >> RX_SGE_MASK_ELEM_SHIFT;
1217 first_elem = RX_SGE(fp->rx_sge_prod) >> RX_SGE_MASK_ELEM_SHIFT;
1219 /* If ring is not full */
1220 if (last_elem + 1 != first_elem)
1223 /* Now update the prod */
1224 for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
1225 if (likely(fp->sge_mask[i]))
1228 fp->sge_mask[i] = RX_SGE_MASK_ELEM_ONE_MASK;
1229 delta += RX_SGE_MASK_ELEM_SZ;
1233 fp->rx_sge_prod += delta;
1234 /* clear page-end entries */
1235 bnx2x_clear_sge_mask_next_elems(fp);
1238 DP(NETIF_MSG_RX_STATUS,
1239 "fp->last_max_sge = %d fp->rx_sge_prod = %d\n",
1240 fp->last_max_sge, fp->rx_sge_prod);
1243 static inline void bnx2x_init_sge_ring_bit_mask(struct bnx2x_fastpath *fp)
1245 /* Set the mask to all 1-s: it's faster to compare to 0 than to 0xf-s */
1246 memset(fp->sge_mask, 0xff,
1247 (NUM_RX_SGE >> RX_SGE_MASK_ELEM_SHIFT)*sizeof(u64));
1249 /* Clear the two last indices in the page to 1:
1250 these are the indices that correspond to the "next" element,
1251 hence will never be indicated and should be removed from
1252 the calculations. */
1253 bnx2x_clear_sge_mask_next_elems(fp);
1256 static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
1257 struct sk_buff *skb, u16 cons, u16 prod)
1259 struct bnx2x *bp = fp->bp;
1260 struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
1261 struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
1262 struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
1265 /* move empty skb from pool to prod and map it */
1266 prod_rx_buf->skb = fp->tpa_pool[queue].skb;
1267 mapping = pci_map_single(bp->pdev, fp->tpa_pool[queue].skb->data,
1268 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
1269 pci_unmap_addr_set(prod_rx_buf, mapping, mapping);
1271 /* move partial skb from cons to pool (don't unmap yet) */
1272 fp->tpa_pool[queue] = *cons_rx_buf;
1274 /* mark bin state as start - print error if current state != stop */
1275 if (fp->tpa_state[queue] != BNX2X_TPA_STOP)
1276 BNX2X_ERR("start of bin not in stop [%d]\n", queue);
1278 fp->tpa_state[queue] = BNX2X_TPA_START;
1280 /* point prod_bd to new skb */
1281 prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
1282 prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
1284 #ifdef BNX2X_STOP_ON_ERROR
1285 fp->tpa_queue_used |= (1 << queue);
1286 #ifdef __powerpc64__
1287 DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n",
1288 #else
1289 DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
1290 #endif
1291    fp->tpa_queue_used);
1292 #endif
1295 static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
1296 struct sk_buff *skb,
1297 struct eth_fast_path_rx_cqe *fp_cqe,
1300 struct sw_rx_page *rx_pg, old_rx_pg;
1301 u16 len_on_bd = le16_to_cpu(fp_cqe->len_on_bd);
1302 u32 i, frag_len, frag_size, pages;
1306 frag_size = le16_to_cpu(fp_cqe->pkt_len) - len_on_bd;
1307 pages = SGE_PAGE_ALIGN(frag_size) >> SGE_PAGE_SHIFT;
1309 /* This is needed in order to enable forwarding support */
1311 skb_shinfo(skb)->gso_size = min((u32)SGE_PAGE_SIZE,
1312 max(frag_size, (u32)len_on_bd));
1314 #ifdef BNX2X_STOP_ON_ERROR
1315 if (pages >
1316     min((u32)8, (u32)MAX_SKB_FRAGS) * SGE_PAGE_SIZE * PAGES_PER_SGE) {
1317 	BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
1318 		  pages, cqe_idx);
1319 BNX2X_ERR("fp_cqe->pkt_len = %d fp_cqe->len_on_bd = %d\n",
1320 fp_cqe->pkt_len, len_on_bd);
1326 /* Run through the SGL and compose the fragmented skb */
1327 for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
1328 u16 sge_idx = RX_SGE(le16_to_cpu(fp_cqe->sgl[j]));
1330 /* FW gives the indices of the SGE as if the ring is an array
1331 (meaning that "next" element will consume 2 indices) */
1332 frag_len = min(frag_size, (u32)(SGE_PAGE_SIZE*PAGES_PER_SGE));
1333 rx_pg = &fp->rx_page_ring[sge_idx];
1336 /* If we fail to allocate a substitute page, we simply stop
1337 where we are and drop the whole packet */
1338 err = bnx2x_alloc_rx_sge(bp, fp, sge_idx);
1339 if (unlikely(err)) {
1340 fp->eth_q_stats.rx_skb_alloc_failed++;
1344 /* Unmap the page as we are going to pass it to the stack */
1345 pci_unmap_page(bp->pdev, pci_unmap_addr(&old_rx_pg, mapping),
1346 SGE_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);
1348 /* Add one frag and update the appropriate fields in the skb */
1349 skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);
1351 skb->data_len += frag_len;
1352 skb->truesize += frag_len;
1353 skb->len += frag_len;
1355 frag_size -= frag_len;
1361 static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
1362 u16 queue, int pad, int len, union eth_rx_cqe *cqe,
1365 struct sw_rx_bd *rx_buf = &fp->tpa_pool[queue];
1366 struct sk_buff *skb = rx_buf->skb;
1368 struct sk_buff *new_skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
1370 /* Unmap skb in the pool anyway, as we are going to change
1371    pool entry status to BNX2X_TPA_STOP even if new skb allocation
1372    fails. */
1373 pci_unmap_single(bp->pdev, pci_unmap_addr(rx_buf, mapping),
1374 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
1376 if (likely(new_skb)) {
1377 /* fix ip xsum and give it to the stack */
1378 /* (no need to map the new skb) */
1381 (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
1382 PARSING_FLAGS_VLAN);
1383 int is_not_hwaccel_vlan_cqe =
1384 (is_vlan_cqe && (!(bp->flags & HW_VLAN_RX_FLAG)));
1388 prefetch(((char *)(skb)) + 128);
1390 #ifdef BNX2X_STOP_ON_ERROR
1391 if (pad + len > bp->rx_buf_size) {
1392 BNX2X_ERR("skb_put is about to fail... "
1393 "pad %d len %d rx_buf_size %d\n",
1394 pad, len, bp->rx_buf_size);
1400 skb_reserve(skb, pad);
1403 skb->protocol = eth_type_trans(skb, bp->dev);
1404 skb->ip_summed = CHECKSUM_UNNECESSARY;
1409 iph = (struct iphdr *)skb->data;
1411 /* If there is no Rx VLAN offloading -
1412 take VLAN tag into an account */
1413 if (unlikely(is_not_hwaccel_vlan_cqe))
1414 iph = (struct iphdr *)((u8 *)iph + VLAN_HLEN);
1416 iph->check = 0;
1417 iph->check = ip_fast_csum((u8 *)iph, iph->ihl);
1420 if (!bnx2x_fill_frag_skb(bp, fp, skb,
1421 &cqe->fast_path_cqe, cqe_idx)) {
1423 if ((bp->vlgrp != NULL) && is_vlan_cqe &&
1424 (!is_not_hwaccel_vlan_cqe))
1425 vlan_hwaccel_receive_skb(skb, bp->vlgrp,
1426 le16_to_cpu(cqe->fast_path_cqe.
1430 netif_receive_skb(skb);
1432 DP(NETIF_MSG_RX_STATUS, "Failed to allocate new pages"
1433 " - dropping packet!\n");
1438 /* put new skb in bin */
1439 fp->tpa_pool[queue].skb = new_skb;
1442 /* else drop the packet and keep the buffer in the bin */
1443 DP(NETIF_MSG_RX_STATUS,
1444 "Failed to allocate new skb - dropping packet!\n");
1445 fp->eth_q_stats.rx_skb_alloc_failed++;
1448 fp->tpa_state[queue] = BNX2X_TPA_STOP;
1451 static inline void bnx2x_update_rx_prod(struct bnx2x *bp,
1452 struct bnx2x_fastpath *fp,
1453 u16 bd_prod, u16 rx_comp_prod,
1456 struct ustorm_eth_rx_producers rx_prods = {0};
1459 /* Update producers */
1460 rx_prods.bd_prod = bd_prod;
1461 rx_prods.cqe_prod = rx_comp_prod;
1462 rx_prods.sge_prod = rx_sge_prod;
1464 /*
1465  * Make sure that the BD and SGE data is updated before updating the
1466  * producers since FW might read the BD/SGE right after the producer
1467  * is updated.
1468  * This is only applicable for weak-ordered memory model archs such
1469  * as IA-64. The following barrier is also mandatory since FW
1470  * assumes BDs must have buffers.
1471  */
1472 wmb();
1474 for (i = 0; i < sizeof(struct ustorm_eth_rx_producers)/4; i++)
1475 REG_WR(bp, BAR_USTRORM_INTMEM +
1476 USTORM_RX_PRODS_OFFSET(BP_PORT(bp), fp->cl_id) + i*4,
1477 ((u32 *)&rx_prods)[i]);
1479 mmiowb(); /* keep prod updates ordered */
1481 DP(NETIF_MSG_RX_STATUS,
1482 "queue[%d]: wrote bd_prod %u cqe_prod %u sge_prod %u\n",
1483 fp->index, bd_prod, rx_comp_prod, rx_sge_prod);
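/*
 * Editor's note: the producer update is a struct
 * ustorm_eth_rx_producers image written dword by dword into USTORM
 * internal memory for this client; the wmb() above orders the BD/SGE
 * stores ahead of it and mmiowb() keeps the producer writes ordered on
 * their way to the chip.
 */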
1486 static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
1487 {
1488 struct bnx2x *bp = fp->bp;
1489 u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
1490 u16 hw_comp_cons, sw_comp_cons, sw_comp_prod;
1493 #ifdef BNX2X_STOP_ON_ERROR
1494 if (unlikely(bp->panic))
1498 /* CQ "next element" is of the size of the regular element,
1499 that's why it's ok here */
1500 hw_comp_cons = le16_to_cpu(*fp->rx_cons_sb);
1501 if ((hw_comp_cons & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
1504 bd_cons = fp->rx_bd_cons;
1505 bd_prod = fp->rx_bd_prod;
1506 bd_prod_fw = bd_prod;
1507 sw_comp_cons = fp->rx_comp_cons;
1508 sw_comp_prod = fp->rx_comp_prod;
1510 /* Memory barrier necessary as speculative reads of the rx
1511 * buffer can be ahead of the index in the status block
1515 DP(NETIF_MSG_RX_STATUS,
1516 "queue[%d]: hw_comp_cons %u sw_comp_cons %u\n",
1517 fp->index, hw_comp_cons, sw_comp_cons);
1519 while (sw_comp_cons != hw_comp_cons) {
1520 struct sw_rx_bd *rx_buf = NULL;
1521 struct sk_buff *skb;
1522 union eth_rx_cqe *cqe;
1526 comp_ring_cons = RCQ_BD(sw_comp_cons);
1527 bd_prod = RX_BD(bd_prod);
1528 bd_cons = RX_BD(bd_cons);
1530 /* Prefetch the page containing the BD descriptor
1531    at producer's index. It will be needed when a new skb is
1532    allocated */
1533 prefetch((void *)(PAGE_ALIGN((unsigned long)
1534 			(&fp->rx_desc_ring[bd_prod])) -
1535 		  PAGE_SIZE + 1));
1537 cqe = &fp->rx_comp_ring[comp_ring_cons];
1538 cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
1540 DP(NETIF_MSG_RX_STATUS, "CQE type %x err %x status %x"
1541 " queue %x vlan %x len %u\n", CQE_TYPE(cqe_fp_flags),
1542 cqe_fp_flags, cqe->fast_path_cqe.status_flags,
1543 le32_to_cpu(cqe->fast_path_cqe.rss_hash_result),
1544 le16_to_cpu(cqe->fast_path_cqe.vlan_tag),
1545 le16_to_cpu(cqe->fast_path_cqe.pkt_len));
1547 /* is this a slowpath msg? */
1548 if (unlikely(CQE_TYPE(cqe_fp_flags))) {
1549 bnx2x_sp_event(fp, cqe);
1552 /* this is an rx packet */
1554 rx_buf = &fp->rx_buf_ring[bd_cons];
1556 len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
1557 pad = cqe->fast_path_cqe.placement_offset;
1559 /* If CQE is marked both TPA_START and TPA_END
1560 it is a non-TPA CQE */
1561 if ((!fp->disable_tpa) &&
1562 (TPA_TYPE(cqe_fp_flags) !=
1563 (TPA_TYPE_START | TPA_TYPE_END))) {
1564 u16 queue = cqe->fast_path_cqe.queue_index;
1566 if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_START) {
1567 DP(NETIF_MSG_RX_STATUS,
1568 "calling tpa_start on queue %d\n",
1571 bnx2x_tpa_start(fp, queue, skb,
1576 if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_END) {
1577 DP(NETIF_MSG_RX_STATUS,
1578 "calling tpa_stop on queue %d\n",
1581 if (!BNX2X_RX_SUM_FIX(cqe))
1582 BNX2X_ERR("STOP on none TCP "
1585 /* This is the size of the linear data
1586    on this skb */
1587 len = le16_to_cpu(cqe->fast_path_cqe.
1588 		  len_on_bd);
1589 bnx2x_tpa_stop(bp, fp, queue, pad,
1590 len, cqe, comp_ring_cons);
1591 #ifdef BNX2X_STOP_ON_ERROR
1596 bnx2x_update_sge_prod(fp,
1597 &cqe->fast_path_cqe);
1602 pci_dma_sync_single_for_device(bp->pdev,
1603 pci_unmap_addr(rx_buf, mapping),
1604 pad + RX_COPY_THRESH,
1605 PCI_DMA_FROMDEVICE);
1607 prefetch(((char *)(skb)) + 128);
1609 /* is this an error packet? */
1610 if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
1611 DP(NETIF_MSG_RX_ERR,
1612 "ERROR flags %x rx packet %u\n",
1613 cqe_fp_flags, sw_comp_cons);
1614 fp->eth_q_stats.rx_err_discard_pkt++;
1618 /* Since we don't have a jumbo ring
1619  * copy small packets if mtu > 1500
1620  */
1621 if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
1622 (len <= RX_COPY_THRESH)) {
1623 struct sk_buff *new_skb;
1625 new_skb = netdev_alloc_skb(bp->dev,
1627 if (new_skb == NULL) {
1628 DP(NETIF_MSG_RX_ERR,
1629 "ERROR packet dropped "
1630 "because of alloc failure\n");
1631 fp->eth_q_stats.rx_skb_alloc_failed++;
1636 skb_copy_from_linear_data_offset(skb, pad,
1637 new_skb->data + pad, len);
1638 skb_reserve(new_skb, pad);
1639 skb_put(new_skb, len);
1641 bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
1646 if (likely(bnx2x_alloc_rx_skb(bp, fp, bd_prod) == 0)) {
1647 pci_unmap_single(bp->pdev,
1648 pci_unmap_addr(rx_buf, mapping),
1650 PCI_DMA_FROMDEVICE);
1651 skb_reserve(skb, pad);
1655 DP(NETIF_MSG_RX_ERR,
1656 "ERROR packet dropped because "
1657 "of alloc failure\n");
1658 fp->eth_q_stats.rx_skb_alloc_failed++;
1660 bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
1664 skb->protocol = eth_type_trans(skb, bp->dev);
1666 skb->ip_summed = CHECKSUM_NONE;
1668 if (likely(BNX2X_RX_CSUM_OK(cqe)))
1669 skb->ip_summed = CHECKSUM_UNNECESSARY;
1671 fp->eth_q_stats.hw_csum_err++;
1675 skb_record_rx_queue(skb, fp->index);
1678 if ((bp->vlgrp != NULL) && (bp->flags & HW_VLAN_RX_FLAG) &&
1679 (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
1680 PARSING_FLAGS_VLAN))
1681 vlan_hwaccel_receive_skb(skb, bp->vlgrp,
1682 le16_to_cpu(cqe->fast_path_cqe.vlan_tag));
1685 netif_receive_skb(skb);
1691 bd_cons = NEXT_RX_IDX(bd_cons);
1692 bd_prod = NEXT_RX_IDX(bd_prod);
1693 bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
1696 sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
1697 sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);
1699 if (rx_pkt == budget)
1703 fp->rx_bd_cons = bd_cons;
1704 fp->rx_bd_prod = bd_prod_fw;
1705 fp->rx_comp_cons = sw_comp_cons;
1706 fp->rx_comp_prod = sw_comp_prod;
1708 /* Update producers */
1709 bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
1712 fp->rx_pkt += rx_pkt;
1714 return rx_pkt;
1715 }
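/*
 * Editor's note: one pass of bnx2x_rx_int() walks the completion queue
 * until it catches up with the status-block copy of the hardware
 * producer or exhausts the NAPI budget, recycling or replenishing
 * buffers as it goes, and ends with a single producer update covering
 * the BD, CQE and SGE rings.
 */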
1718 static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
1720 struct bnx2x_fastpath *fp = fp_cookie;
1721 struct bnx2x *bp = fp->bp;
1723 /* Return here if interrupt is disabled */
1724 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
1725 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
1729 DP(BNX2X_MSG_FP, "got an MSI-X interrupt on IDX:SB [%d:%d]\n",
1730 fp->index, fp->sb_id);
1731 bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);
1733 #ifdef BNX2X_STOP_ON_ERROR
1734 if (unlikely(bp->panic))
1737 /* Handle Rx or Tx according to MSI-X vector */
1738 if (fp->is_rx_queue) {
1739 prefetch(fp->rx_cons_sb);
1740 prefetch(&fp->status_blk->u_status_block.status_block_index);
1742 napi_schedule(&bnx2x_fp(bp, fp->index, napi));
1745 prefetch(fp->tx_cons_sb);
1746 prefetch(&fp->status_blk->c_status_block.status_block_index);
1748 bnx2x_update_fpsb_idx(fp);
1752 /* Re-enable interrupts */
1753 bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID,
1754 le16_to_cpu(fp->fp_u_idx), IGU_INT_NOP, 1);
1755 bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID,
1756 le16_to_cpu(fp->fp_c_idx), IGU_INT_ENABLE, 1);
1762 static irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
1764 struct bnx2x *bp = netdev_priv(dev_instance);
1765 u16 status = bnx2x_ack_int(bp);
1769 /* Return here if interrupt is shared and it's not for us */
1770 if (unlikely(status == 0)) {
1771 DP(NETIF_MSG_INTR, "not our interrupt!\n");
1774 DP(NETIF_MSG_INTR, "got an interrupt status 0x%x\n", status);
1776 /* Return here if interrupt is disabled */
1777 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
1778 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
1782 #ifdef BNX2X_STOP_ON_ERROR
1783 if (unlikely(bp->panic))
1787 for (i = 0; i < BNX2X_NUM_QUEUES(bp); i++) {
1788 struct bnx2x_fastpath *fp = &bp->fp[i];
1790 mask = 0x2 << fp->sb_id;
1791 if (status & mask) {
1792 /* Handle Rx or Tx according to SB id */
1793 if (fp->is_rx_queue) {
1794 prefetch(fp->rx_cons_sb);
1795 prefetch(&fp->status_blk->u_status_block.
1796 status_block_index);
1798 napi_schedule(&bnx2x_fp(bp, fp->index, napi));
1801 prefetch(fp->tx_cons_sb);
1802 prefetch(&fp->status_blk->c_status_block.
1803 status_block_index);
1805 bnx2x_update_fpsb_idx(fp);
1809 /* Re-enable interrupts */
1810 bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID,
1811 le16_to_cpu(fp->fp_u_idx),
1813 bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID,
1814 le16_to_cpu(fp->fp_c_idx),
1822 mask = 0x2 << CNIC_SB_ID(bp);
1823 if (status & (mask | 0x1)) {
1824 struct cnic_ops *c_ops = NULL;
1827 c_ops = rcu_dereference(bp->cnic_ops);
1829 c_ops->cnic_handler(bp->cnic_data, NULL);
1836 if (unlikely(status & 0x1)) {
1837 queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
1845 DP(NETIF_MSG_INTR, "got an unknown interrupt! (status %u)\n",
1851 /* end of fast path */
1853 static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event);
1858 * General service functions
1861 static int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource)
1864 u32 resource_bit = (1 << resource);
1865 int func = BP_FUNC(bp);
1866 u32 hw_lock_control_reg;
1869 /* Validating that the resource is within range */
1870 if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1872 "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1873 resource, HW_LOCK_MAX_RESOURCE_VALUE);
1878 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1880 hw_lock_control_reg =
1881 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1884 /* Validating that the resource is not already taken */
1885 lock_status = REG_RD(bp, hw_lock_control_reg);
1886 if (lock_status & resource_bit) {
1887 DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n",
1888 lock_status, resource_bit);
1892 /* Try for 5 second every 5ms */
1893 for (cnt = 0; cnt < 1000; cnt++) {
1894 /* Try to acquire the lock */
1895 REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
1896 lock_status = REG_RD(bp, hw_lock_control_reg);
1897 if (lock_status & resource_bit)
1902 DP(NETIF_MSG_HW, "Timeout\n");
1906 static int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource)
1909 u32 resource_bit = (1 << resource);
1910 int func = BP_FUNC(bp);
1911 u32 hw_lock_control_reg;
1913 /* Validating that the resource is within range */
1914 if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1916 "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1917 resource, HW_LOCK_MAX_RESOURCE_VALUE);
1922 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1924 hw_lock_control_reg =
1925 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1928 /* Validating that the resource is currently taken */
1929 lock_status = REG_RD(bp, hw_lock_control_reg);
1930 if (!(lock_status & resource_bit)) {
1931 DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n",
1932 lock_status, resource_bit);
1936 REG_WR(bp, hw_lock_control_reg, resource_bit);
1937 return 0;
1938 }
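/*
 * Editor's note: the HW lock is a set/clear register pair - writing the
 * resource bit to hw_lock_control_reg + 4 attempts to take the lock,
 * reading the base register back confirms ownership, and writing the
 * bit to the base register releases it; acquisition retries every 5ms
 * for up to 5 seconds.
 */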
1940 /* HW Lock for shared dual port PHYs */
1941 static void bnx2x_acquire_phy_lock(struct bnx2x *bp)
1943 mutex_lock(&bp->port.phy_mutex);
1945 if (bp->port.need_hw_lock)
1946 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
1949 static void bnx2x_release_phy_lock(struct bnx2x *bp)
1951 if (bp->port.need_hw_lock)
1952 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
1954 mutex_unlock(&bp->port.phy_mutex);
1957 int bnx2x_get_gpio(struct bnx2x *bp, int gpio_num, u8 port)
1959 /* The GPIO should be swapped if swap register is set and active */
1960 int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
1961 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
1962 int gpio_shift = gpio_num +
1963 (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
1964 u32 gpio_mask = (1 << gpio_shift);
1968 if (gpio_num > MISC_REGISTERS_GPIO_3) {
1969 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
1973 /* read GPIO value */
1974 gpio_reg = REG_RD(bp, MISC_REG_GPIO);
1976 /* get the requested pin value */
1977 if ((gpio_reg & gpio_mask) == gpio_mask)
1982 DP(NETIF_MSG_LINK, "pin %d value 0x%x\n", gpio_num, value);
1987 int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
1989 /* The GPIO should be swapped if swap register is set and active */
1990 int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
1991 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
1992 int gpio_shift = gpio_num +
1993 (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
1994 u32 gpio_mask = (1 << gpio_shift);
1997 if (gpio_num > MISC_REGISTERS_GPIO_3) {
1998 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
2002 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
2003 /* read GPIO and mask except the float bits */
2004 gpio_reg = (REG_RD(bp, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT);
2007 case MISC_REGISTERS_GPIO_OUTPUT_LOW:
2008 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output low\n",
2009 gpio_num, gpio_shift);
2010 /* clear FLOAT and set CLR */
2011 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
2012 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS);
2015 case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
2016 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output high\n",
2017 gpio_num, gpio_shift);
2018 /* clear FLOAT and set SET */
2019 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
2020 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_SET_POS);
2023 case MISC_REGISTERS_GPIO_INPUT_HI_Z:
2024 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> input\n",
2025 gpio_num, gpio_shift);
2027 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
2034 REG_WR(bp, MISC_REG_GPIO, gpio_reg);
2035 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
2040 int bnx2x_set_gpio_int(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
2042 /* The GPIO should be swapped if swap register is set and active */
2043 int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
2044 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
2045 int gpio_shift = gpio_num +
2046 (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
2047 u32 gpio_mask = (1 << gpio_shift);
2050 if (gpio_num > MISC_REGISTERS_GPIO_3) {
2051 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
2055 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
2057 gpio_reg = REG_RD(bp, MISC_REG_GPIO_INT);
2060 case MISC_REGISTERS_GPIO_INT_OUTPUT_CLR:
2061 DP(NETIF_MSG_LINK, "Clear GPIO INT %d (shift %d) -> "
2062 "output low\n", gpio_num, gpio_shift);
2063 /* clear SET and set CLR */
2064 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
2065 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
2068 case MISC_REGISTERS_GPIO_INT_OUTPUT_SET:
2069 DP(NETIF_MSG_LINK, "Set GPIO INT %d (shift %d) -> "
2070 "output high\n", gpio_num, gpio_shift);
2071 /* clear CLR and set SET */
2072 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
2073 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
2080 REG_WR(bp, MISC_REG_GPIO_INT, gpio_reg);
2081 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
2086 static int bnx2x_set_spio(struct bnx2x *bp, int spio_num, u32 mode)
2088 u32 spio_mask = (1 << spio_num);
2091 if ((spio_num < MISC_REGISTERS_SPIO_4) ||
2092 (spio_num > MISC_REGISTERS_SPIO_7)) {
2093 BNX2X_ERR("Invalid SPIO %d\n", spio_num);
2097 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
2098 /* read SPIO and mask except the float bits */
2099 spio_reg = (REG_RD(bp, MISC_REG_SPIO) & MISC_REGISTERS_SPIO_FLOAT);
2102 case MISC_REGISTERS_SPIO_OUTPUT_LOW:
2103 DP(NETIF_MSG_LINK, "Set SPIO %d -> output low\n", spio_num);
2104 /* clear FLOAT and set CLR */
2105 spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
2106 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_CLR_POS);
2109 case MISC_REGISTERS_SPIO_OUTPUT_HIGH:
2110 DP(NETIF_MSG_LINK, "Set SPIO %d -> output high\n", spio_num);
2111 /* clear FLOAT and set SET */
2112 spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
2113 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_SET_POS);
2116 case MISC_REGISTERS_SPIO_INPUT_HI_Z:
2117 DP(NETIF_MSG_LINK, "Set SPIO %d -> input\n", spio_num);
2119 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
2126 REG_WR(bp, MISC_REG_SPIO, spio_reg);
2127 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
2132 static void bnx2x_calc_fc_adv(struct bnx2x *bp)
2134 switch (bp->link_vars.ieee_fc &
2135 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK) {
2136 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE:
2137 bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
2138 			  ADVERTISED_Pause);
2139 break;
2141 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH:
2142 bp->port.advertising |= (ADVERTISED_Asym_Pause |
2143 			 ADVERTISED_Pause);
2144 break;
2146 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC:
2147 bp->port.advertising |= ADVERTISED_Asym_Pause;
2148 break;
2150 default:
2151 bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
2152 			  ADVERTISED_Pause);
2153 break;
2154 }
2155 }
2157 static void bnx2x_link_report(struct bnx2x *bp)
2159 if (bp->flags & MF_FUNC_DIS) {
2160 netif_carrier_off(bp->dev);
2161 printk(KERN_ERR PFX "%s NIC Link is Down\n", bp->dev->name);
2165 if (bp->link_vars.link_up) {
2166 if (bp->state == BNX2X_STATE_OPEN)
2167 netif_carrier_on(bp->dev);
2168 printk(KERN_INFO PFX "%s NIC Link is Up, ", bp->dev->name);
2170 printk("%d Mbps ", bp->link_vars.line_speed);
2172 if (bp->link_vars.duplex == DUPLEX_FULL)
2173 printk("full duplex");
2175 printk("half duplex");
2177 if (bp->link_vars.flow_ctrl != BNX2X_FLOW_CTRL_NONE) {
2178 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) {
2179 printk(", receive ");
2180 if (bp->link_vars.flow_ctrl &
2182 printk("& transmit ");
2184 printk(", transmit ");
2186 printk("flow control ON");
2190 } else { /* link_down */
2191 netif_carrier_off(bp->dev);
2192 printk(KERN_ERR PFX "%s NIC Link is Down\n", bp->dev->name);
2196 static u8 bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode)
2198 if (!BP_NOMCP(bp)) {
2201 /* Initialize link parameters structure variables */
2202 /* It is recommended to turn off RX FC for jumbo frames
2203 for better performance */
2204 if (bp->dev->mtu > 5000)
2205 bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_TX;
2207 bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;
2209 bnx2x_acquire_phy_lock(bp);
2211 if (load_mode == LOAD_DIAG)
2212 bp->link_params.loopback_mode = LOOPBACK_XGXS_10;
2214 rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars);
2216 bnx2x_release_phy_lock(bp);
2218 bnx2x_calc_fc_adv(bp);
2220 if (CHIP_REV_IS_SLOW(bp) && bp->link_vars.link_up) {
2221 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2222 bnx2x_link_report(bp);
2227 BNX2X_ERR("Bootcode is missing - can not initialize link\n");
2231 static void bnx2x_link_set(struct bnx2x *bp)
2233 if (!BP_NOMCP(bp)) {
2234 bnx2x_acquire_phy_lock(bp);
2235 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
2236 bnx2x_release_phy_lock(bp);
2238 bnx2x_calc_fc_adv(bp);
2240 BNX2X_ERR("Bootcode is missing - can not set link\n");
2243 static void bnx2x__link_reset(struct bnx2x *bp)
2245 if (!BP_NOMCP(bp)) {
2246 bnx2x_acquire_phy_lock(bp);
2247 bnx2x_link_reset(&bp->link_params, &bp->link_vars, 1);
2248 bnx2x_release_phy_lock(bp);
2250 BNX2X_ERR("Bootcode is missing - can not reset link\n");
2253 static u8 bnx2x_link_test(struct bnx2x *bp)
2257 bnx2x_acquire_phy_lock(bp);
2258 rc = bnx2x_test_link(&bp->link_params, &bp->link_vars);
2259 bnx2x_release_phy_lock(bp);

static void bnx2x_init_port_minmax(struct bnx2x *bp)
{
	u32 r_param = bp->link_vars.line_speed / 8;
	u32 fair_periodic_timeout_usec;
	u32 t_fair;

	memset(&(bp->cmng.rs_vars), 0,
	       sizeof(struct rate_shaping_vars_per_port));
	memset(&(bp->cmng.fair_vars), 0, sizeof(struct fairness_vars_per_port));

	/* 100 usec in SDM ticks = 25 since each tick is 4 usec */
	bp->cmng.rs_vars.rs_periodic_timeout = RS_PERIODIC_TIMEOUT_USEC / 4;

	/* this is the threshold below which no timer arming will occur.
	   The 1.25 coefficient makes the threshold a little bigger than
	   the real time, to compensate for timer inaccuracy */
	bp->cmng.rs_vars.rs_threshold =
				(RS_PERIODIC_TIMEOUT_USEC * r_param * 5) / 4;

	/* resolution of fairness timer */
	fair_periodic_timeout_usec = QM_ARB_BYTES / r_param;
	/* for 10G it is 1000usec. for 1G it is 10000usec. */
	t_fair = T_FAIR_COEF / bp->link_vars.line_speed;

	/* this is the threshold below which we won't arm the timer anymore */
	bp->cmng.fair_vars.fair_threshold = QM_ARB_BYTES;

	/* we multiply by 1e3/8 to get bytes/msec. We don't want the credits
	   to pass a credit of the t_fair*FAIR_MEM (algorithm resolution) */
	bp->cmng.fair_vars.upper_bound = r_param * t_fair * FAIR_MEM;
	/* since each tick is 4 usec */
	bp->cmng.fair_vars.fairness_timeout = fair_periodic_timeout_usec / 4;
}
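
/* A worked example of the arithmetic above (line_speed is in Mbps, as the
 * link-up printout in bnx2x_link_report() shows): at 10G, line_speed = 10000,
 * so r_param = 10000/8 = 1250 bytes per usec, t_fair = T_FAIR_COEF/10000
 * (1000 usec per the comment above), and upper_bound works out to
 * 1250 * t_fair * FAIR_MEM bytes.
 */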

/* Calculates the sum of vn_min_rates.
   It's needed for further normalizing of the min_rates.
   Returns:
     sum of vn_min_rates.
       or
     0 - if all the min_rates are 0.
   In the latter case the fairness algorithm should be deactivated.
   If not all min_rates are zero then those that are zeroes will be set to 1.
 */
static void bnx2x_calc_vn_weight_sum(struct bnx2x *bp)
{
	int all_zero = 1;
	int port = BP_PORT(bp);
	int vn;

	bp->vn_weight_sum = 0;
	for (vn = VN_0; vn < E1HVN_MAX; vn++) {
		int func = 2*vn + port;
		u32 vn_cfg = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
		u32 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
				   FUNC_MF_CFG_MIN_BW_SHIFT) * 100;

		/* Skip hidden vns */
		if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE)
			continue;

		/* If min rate is zero - set it to 1 */
		if (!vn_min_rate)
			vn_min_rate = DEF_MIN_RATE;
		else
			all_zero = 0;

		bp->vn_weight_sum += vn_min_rate;
	}

	/* ... only if all min rates are zeros - disable fairness */
	if (all_zero) {
		bp->cmng.flags.cmng_enables &=
					~CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
		DP(NETIF_MSG_IFUP, "All MIN values are zeroes"
		   " fairness will be disabled\n");
	} else
		bp->cmng.flags.cmng_enables |=
					CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
}

static void bnx2x_init_vn_minmax(struct bnx2x *bp, int func)
{
	struct rate_shaping_vars_per_vn m_rs_vn;
	struct fairness_vars_per_vn m_fair_vn;
	u32 vn_cfg = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
	u16 vn_min_rate, vn_max_rate;
	int i;

	/* If function is hidden - set min and max to zeroes */
	if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) {
		vn_min_rate = 0;
		vn_max_rate = 0;

	} else {
		vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
				FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
		/* If min rate is zero - set it to 1 */
		if (!vn_min_rate)
			vn_min_rate = DEF_MIN_RATE;
		vn_max_rate = ((vn_cfg & FUNC_MF_CFG_MAX_BW_MASK) >>
				FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
	}
	DP(NETIF_MSG_IFUP,
	   "func %d: vn_min_rate %d  vn_max_rate %d  vn_weight_sum %d\n",
	   func, vn_min_rate, vn_max_rate, bp->vn_weight_sum);

	memset(&m_rs_vn, 0, sizeof(struct rate_shaping_vars_per_vn));
	memset(&m_fair_vn, 0, sizeof(struct fairness_vars_per_vn));

	/* global vn counter - maximal Mbps for this vn */
	m_rs_vn.vn_counter.rate = vn_max_rate;

	/* quota - number of bytes transmitted in this period */
	m_rs_vn.vn_counter.quota =
				(vn_max_rate * RS_PERIODIC_TIMEOUT_USEC) / 8;

	if (bp->vn_weight_sum) {
		/* credit for each period of the fairness algorithm:
		   number of bytes in T_FAIR (the vn share the port rate).
		   vn_weight_sum should not be larger than 10000, thus
		   T_FAIR_COEF / (8 * vn_weight_sum) will always be greater
		   than zero */
		m_fair_vn.vn_credit_delta =
			max((u32)(vn_min_rate * (T_FAIR_COEF /
						 (8 * bp->vn_weight_sum))),
			    (u32)(bp->cmng.fair_vars.fair_threshold * 2));
		DP(NETIF_MSG_IFUP, "m_fair_vn.vn_credit_delta=%d\n",
		   m_fair_vn.vn_credit_delta);
	}

	/* Store it to internal memory */
	for (i = 0; i < sizeof(struct rate_shaping_vars_per_vn)/4; i++)
		REG_WR(bp, BAR_XSTRORM_INTMEM +
		       XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func) + i * 4,
		       ((u32 *)(&m_rs_vn))[i]);

	for (i = 0; i < sizeof(struct fairness_vars_per_vn)/4; i++)
		REG_WR(bp, BAR_XSTRORM_INTMEM +
		       XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func) + i * 4,
		       ((u32 *)(&m_fair_vn))[i]);
}
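
/* Both per-vn blocks above are pushed into XSTORM internal memory one 32-bit
 * word at a time via REG_WR(); the firmware reads them back in the same
 * rate_shaping_vars_per_vn / fairness_vars_per_vn layout.
 */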

/* This function is called upon link interrupt */
static void bnx2x_link_attn(struct bnx2x *bp)
{
	/* Make sure that we are synced with the current statistics */
	bnx2x_stats_handle(bp, STATS_EVENT_STOP);

	bnx2x_link_update(&bp->link_params, &bp->link_vars);

	if (bp->link_vars.link_up) {

		/* dropless flow control */
		if (CHIP_IS_E1H(bp) && bp->dropless_fc) {
			int port = BP_PORT(bp);
			u32 pause_enabled = 0;

			if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
				pause_enabled = 1;

			REG_WR(bp, BAR_USTRORM_INTMEM +
			       USTORM_ETH_PAUSE_ENABLED_OFFSET(port),
			       pause_enabled);
		}

		if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
			struct host_port_stats *pstats;

			pstats = bnx2x_sp(bp, port_stats);
			/* reset old bmac stats */
			memset(&(pstats->mac_stx[0]), 0,
			       sizeof(struct mac_stx));
		}
		if (bp->state == BNX2X_STATE_OPEN)
			bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
	}

	/* indicate link status */
	bnx2x_link_report(bp);

	if (IS_E1HMF(bp)) {
		int port = BP_PORT(bp);
		int func;
		int vn;

		/* Set the attention towards other drivers on the same port */
		for (vn = VN_0; vn < E1HVN_MAX; vn++) {
			if (vn == BP_E1HVN(bp))
				continue;

			func = ((vn << 1) | port);
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
			       (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
		}

		if (bp->link_vars.link_up) {
			int i;

			/* Init rate shaping and fairness contexts */
			bnx2x_init_port_minmax(bp);

			for (vn = VN_0; vn < E1HVN_MAX; vn++)
				bnx2x_init_vn_minmax(bp, 2*vn + port);

			/* Store it to internal memory */
			for (i = 0;
			     i < sizeof(struct cmng_struct_per_port) / 4; i++)
				REG_WR(bp, BAR_XSTRORM_INTMEM +
				  XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i*4,
				       ((u32 *)(&bp->cmng))[i]);
		}
	}
}

static void bnx2x__link_status_update(struct bnx2x *bp)
{
	if ((bp->state != BNX2X_STATE_OPEN) || (bp->flags & MF_FUNC_DIS))
		return;

	bnx2x_link_status_update(&bp->link_params, &bp->link_vars);

	if (bp->link_vars.link_up)
		bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
	else
		bnx2x_stats_handle(bp, STATS_EVENT_STOP);

	bnx2x_calc_vn_weight_sum(bp);

	/* indicate link status */
	bnx2x_link_report(bp);
}

static void bnx2x_pmf_update(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 val;

	bp->port.pmf = 1;
	DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);

	/* enable nig attention */
	val = (0xff0f | (1 << (BP_E1HVN(bp) + 4)));
	REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
	REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);

	bnx2x_stats_handle(bp, STATS_EVENT_PMF);
}

/*
 * General service functions
 */

/* send the MCP a request, block until there is a reply */
u32 bnx2x_fw_command(struct bnx2x *bp, u32 command)
{
	int func = BP_FUNC(bp);
	u32 seq = ++bp->fw_seq;
	u32 rc = 0;
	u32 cnt = 1;
	u8 delay = CHIP_REV_IS_SLOW(bp) ? 100 : 10;

	mutex_lock(&bp->fw_mb_mutex);
	SHMEM_WR(bp, func_mb[func].drv_mb_header, (command | seq));
	DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB\n", (command | seq));

	do {
		/* let the FW do its magic ... */
		msleep(delay);

		rc = SHMEM_RD(bp, func_mb[func].fw_mb_header);

		/* Give the FW up to 5 seconds (500*10ms) */
	} while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 500));

	DP(BNX2X_MSG_MCP, "[after %d ms] read (%x) seq is (%x) from FW MB\n",
	   cnt*delay, rc, seq);

	/* is this a reply to our command? */
	if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK))
		rc &= FW_MSG_CODE_MASK;
	else {
		/* FW BUG! */
		BNX2X_ERR("FW failed to respond!\n");
		bnx2x_fw_dump(bp);
		rc = 0;
	}
	mutex_unlock(&bp->fw_mb_mutex);

	return rc;
}
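
/* The return value is the FW_MSG_CODE_* reply (0 on timeout); for example,
 * bnx2x_dcc_event() below reports DRV_MSG_CODE_DCC_OK or
 * DRV_MSG_CODE_DCC_FAILURE back to the MCP through this helper.
 */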

static void bnx2x_set_storm_rx_mode(struct bnx2x *bp);
static void bnx2x_set_eth_mac_addr_e1h(struct bnx2x *bp, int set);
static void bnx2x_set_rx_mode(struct net_device *dev);

static void bnx2x_e1h_disable(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int i;

	bp->rx_mode = BNX2X_RX_MODE_NONE;
	bnx2x_set_storm_rx_mode(bp);

	netif_tx_disable(bp->dev);
	bp->dev->trans_start = jiffies;	/* prevent tx timeout */

	REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);

	bnx2x_set_eth_mac_addr_e1h(bp, 0);

	for (i = 0; i < MC_HASH_SIZE; i++)
		REG_WR(bp, MC_HASH_OFFSET(bp, i), 0);

	netif_carrier_off(bp->dev);
}

static void bnx2x_e1h_enable(struct bnx2x *bp)
{
	int port = BP_PORT(bp);

	REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);

	bnx2x_set_eth_mac_addr_e1h(bp, 1);

	/* Tx queues should only be re-enabled */
	netif_tx_wake_all_queues(bp->dev);

	/* Initialize the receive filter. */
	bnx2x_set_rx_mode(bp->dev);
}

static void bnx2x_update_min_max(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int vn, i;

	/* Init rate shaping and fairness contexts */
	bnx2x_init_port_minmax(bp);

	bnx2x_calc_vn_weight_sum(bp);

	for (vn = VN_0; vn < E1HVN_MAX; vn++)
		bnx2x_init_vn_minmax(bp, 2*vn + port);

	if (bp->port.pmf) {
		int func;

		/* Set the attention towards other drivers on the same port */
		for (vn = VN_0; vn < E1HVN_MAX; vn++) {
			if (vn == BP_E1HVN(bp))
				continue;

			func = ((vn << 1) | port);
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
			       (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
		}

		/* Store it to internal memory */
		for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++)
			REG_WR(bp, BAR_XSTRORM_INTMEM +
			       XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i*4,
			       ((u32 *)(&bp->cmng))[i]);
	}
}

static void bnx2x_dcc_event(struct bnx2x *bp, u32 dcc_event)
{
	DP(BNX2X_MSG_MCP, "dcc_event 0x%x\n", dcc_event);

	if (dcc_event & DRV_STATUS_DCC_DISABLE_ENABLE_PF) {

		/*
		 * This is the only place besides the function initialization
		 * where the bp->flags can change, so it is done without any
		 * locks
		 */
		if (bp->mf_config & FUNC_MF_CFG_FUNC_DISABLED) {
			DP(NETIF_MSG_IFDOWN, "mf_cfg function disabled\n");
			bp->flags |= MF_FUNC_DIS;

			bnx2x_e1h_disable(bp);
		} else {
			DP(NETIF_MSG_IFUP, "mf_cfg function enabled\n");
			bp->flags &= ~MF_FUNC_DIS;

			bnx2x_e1h_enable(bp);
		}
		dcc_event &= ~DRV_STATUS_DCC_DISABLE_ENABLE_PF;
	}
	if (dcc_event & DRV_STATUS_DCC_BANDWIDTH_ALLOCATION) {

		bnx2x_update_min_max(bp);
		dcc_event &= ~DRV_STATUS_DCC_BANDWIDTH_ALLOCATION;
	}

	/* Report results to MCP */
	if (dcc_event)
		bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_FAILURE);
	else
		bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_OK);
}
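
/* Each handled DCC sub-event clears its bit in dcc_event above, so any bit
 * still set when we get here means an unsupported request, reported to the
 * MCP as DCC_FAILURE.
 */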

/* must be called under the spq lock */
static inline struct eth_spe *bnx2x_sp_get_next(struct bnx2x *bp)
{
	struct eth_spe *next_spe = bp->spq_prod_bd;

	if (bp->spq_prod_bd == bp->spq_last_bd) {
		bp->spq_prod_bd = bp->spq;
		bp->spq_prod_idx = 0;
		DP(NETIF_MSG_TIMER, "end of spq\n");
	} else {
		bp->spq_prod_bd++;
		bp->spq_prod_idx++;
	}

	return next_spe;
}
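
/* The SPQ is a ring: once the producer hits spq_last_bd it wraps back to the
 * first BD at bp->spq and the producer index restarts from 0.
 */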

/* must be called under the spq lock */
static inline void bnx2x_sp_prod_update(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);

	/* Make sure that BD data is updated before writing the producer */
	wmb();

	REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func),
	       bp->spq_prod_idx);
	mmiowb();
}

/* the slow path queue is odd since completions arrive on the fastpath ring */
static int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
			 u32 data_hi, u32 data_lo, int common)
{
	struct eth_spe *spe;

	DP(BNX2X_MSG_SP/*NETIF_MSG_TIMER*/,
	   "SPQE (%x:%x)  command %d  hw_cid %x  data (%x:%x)  left %x\n",
	   (u32)U64_HI(bp->spq_mapping), (u32)(U64_LO(bp->spq_mapping) +
	   (void *)bp->spq_prod_bd - (void *)bp->spq), command,
	   HW_CID(bp, cid), data_hi, data_lo, bp->spq_left);

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return -EIO;
#endif

	spin_lock_bh(&bp->spq_lock);

	if (!bp->spq_left) {
		BNX2X_ERR("BUG! SPQ ring full!\n");
		spin_unlock_bh(&bp->spq_lock);
		bnx2x_panic();
		return -EBUSY;
	}

	spe = bnx2x_sp_get_next(bp);

	/* CID needs port number to be encoded in it */
	spe->hdr.conn_and_cmd_data =
			cpu_to_le32(((command << SPE_HDR_CMD_ID_SHIFT) |
				     HW_CID(bp, cid)));
	spe->hdr.type = cpu_to_le16(ETH_CONNECTION_TYPE);
	if (common)
		spe->hdr.type |=
			cpu_to_le16((1 << SPE_HDR_COMMON_RAMROD_SHIFT));

	spe->data.mac_config_addr.hi = cpu_to_le32(data_hi);
	spe->data.mac_config_addr.lo = cpu_to_le32(data_lo);

	bp->spq_left--;

	bnx2x_sp_prod_update(bp);
	spin_unlock_bh(&bp->spq_lock);
	return 0;
}

/* acquire split MCP access lock register */
static int bnx2x_acquire_alr(struct bnx2x *bp)
{
	u32 i, j, val;
	int rc = 0;

	might_sleep();
	i = 100;
	for (j = 0; j < i*10; j++) {
		val = (1UL << 31);
		REG_WR(bp, GRCBASE_MCP + 0x9c, val);
		val = REG_RD(bp, GRCBASE_MCP + 0x9c);
		if (val & (1L << 31))
			break;

		msleep(5);
	}
	if (!(val & (1L << 31))) {
		BNX2X_ERR("Cannot acquire MCP access lock register\n");
		rc = -EBUSY;
	}

	return rc;
}

/* release split MCP access lock register */
static void bnx2x_release_alr(struct bnx2x *bp)
{
	u32 val = 0;

	REG_WR(bp, GRCBASE_MCP + 0x9c, val);
}

static inline u16 bnx2x_update_dsb_idx(struct bnx2x *bp)
{
	struct host_def_status_block *def_sb = bp->def_status_blk;
	u16 rc = 0;

	barrier(); /* status block is written to by the chip */
	if (bp->def_att_idx != def_sb->atten_status_block.attn_bits_index) {
		bp->def_att_idx = def_sb->atten_status_block.attn_bits_index;
		rc |= 1;
	}
	if (bp->def_c_idx != def_sb->c_def_status_block.status_block_index) {
		bp->def_c_idx = def_sb->c_def_status_block.status_block_index;
		rc |= 2;
	}
	if (bp->def_u_idx != def_sb->u_def_status_block.status_block_index) {
		bp->def_u_idx = def_sb->u_def_status_block.status_block_index;
		rc |= 4;
	}
	if (bp->def_x_idx != def_sb->x_def_status_block.status_block_index) {
		bp->def_x_idx = def_sb->x_def_status_block.status_block_index;
		rc |= 8;
	}
	if (bp->def_t_idx != def_sb->t_def_status_block.status_block_index) {
		bp->def_t_idx = def_sb->t_def_status_block.status_block_index;
		rc |= 16;
	}

	return rc;
}
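
/* The returned value is a bitmask of which default status block indices
 * changed (the attention index is bit 0); bnx2x_sp_task() below runs the
 * attention handler only when that bit is set.
 */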

/*
 * slow path service functions
 */

static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
{
	int port = BP_PORT(bp);
	u32 hc_addr = (HC_REG_COMMAND_REG + port*32 +
		       COMMAND_REG_ATTN_BITS_SET);
	u32 aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
			      MISC_REG_AEU_MASK_ATTN_FUNC_0;
	u32 nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 :
				       NIG_REG_MASK_INTERRUPT_PORT0;
	u32 aeu_mask;
	u32 nig_mask = 0;

	if (bp->attn_state & asserted)
		BNX2X_ERR("IGU ERROR\n");

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
	aeu_mask = REG_RD(bp, aeu_addr);

	DP(NETIF_MSG_HW, "aeu_mask %x  newly asserted %x\n",
	   aeu_mask, asserted);
	aeu_mask &= ~(asserted & 0xff);
	DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);

	REG_WR(bp, aeu_addr, aeu_mask);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);

	DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
	bp->attn_state |= asserted;
	DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);

	if (asserted & ATTN_HARD_WIRED_MASK) {
		if (asserted & ATTN_NIG_FOR_FUNC) {

			bnx2x_acquire_phy_lock(bp);

			/* save nig interrupt mask */
			nig_mask = REG_RD(bp, nig_int_mask_addr);
			REG_WR(bp, nig_int_mask_addr, 0);

			bnx2x_link_attn(bp);

			/* handle unicore attn? */
		}
		if (asserted & ATTN_SW_TIMER_4_FUNC)
			DP(NETIF_MSG_HW, "ATTN_SW_TIMER_4_FUNC!\n");

		if (asserted & GPIO_2_FUNC)
			DP(NETIF_MSG_HW, "GPIO_2_FUNC!\n");

		if (asserted & GPIO_3_FUNC)
			DP(NETIF_MSG_HW, "GPIO_3_FUNC!\n");

		if (asserted & GPIO_4_FUNC)
			DP(NETIF_MSG_HW, "GPIO_4_FUNC!\n");

		if (port == 0) {
			if (asserted & ATTN_GENERAL_ATTN_1) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_1!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_1, 0x0);
			}
			if (asserted & ATTN_GENERAL_ATTN_2) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_2!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_2, 0x0);
			}
			if (asserted & ATTN_GENERAL_ATTN_3) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_3!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_3, 0x0);
			}
		} else {
			if (asserted & ATTN_GENERAL_ATTN_4) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_4!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_4, 0x0);
			}
			if (asserted & ATTN_GENERAL_ATTN_5) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_5!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_5, 0x0);
			}
			if (asserted & ATTN_GENERAL_ATTN_6) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_6!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_6, 0x0);
			}
		}

	} /* if hardwired */

	DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
	   asserted, hc_addr);
	REG_WR(bp, hc_addr, asserted);

	/* now set back the mask */
	if (asserted & ATTN_NIG_FOR_FUNC) {
		REG_WR(bp, nig_int_mask_addr, nig_mask);
		bnx2x_release_phy_lock(bp);
	}
}
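
/* The AEU mask bits cleared above on assertion are set back by
 * bnx2x_attn_int_deasserted() once the attention has been processed.
 */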

static inline void bnx2x_fan_failure(struct bnx2x *bp)
{
	int port = BP_PORT(bp);

	/* mark the failure */
	bp->link_params.ext_phy_config &= ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
	bp->link_params.ext_phy_config |= PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE;
	SHMEM_WR(bp, dev_info.port_hw_config[port].external_phy_config,
		 bp->link_params.ext_phy_config);

	/* log the failure */
	printk(KERN_ERR PFX "Fan Failure on Network Controller %s has caused"
	       " the driver to shut down the card to prevent permanent"
	       " damage. Please contact Dell Support for assistance\n",
	       bp->dev->name);
}

static inline void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
{
	int port = BP_PORT(bp);
	int reg_offset;
	u32 val, swap_val, swap_override;

	reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
			     MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);

	if (attn & AEU_INPUTS_ATTN_BITS_SPIO5) {

		val = REG_RD(bp, reg_offset);
		val &= ~AEU_INPUTS_ATTN_BITS_SPIO5;
		REG_WR(bp, reg_offset, val);

		BNX2X_ERR("SPIO5 hw attention\n");

		/* Fan failure attention */
		switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
			/* Low power mode is controlled by GPIO 2 */
			bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
				       MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
			/* The PHY reset is controlled by GPIO 1 */
			bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
				       MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
			/* The PHY reset is controlled by GPIO 1 */
			/* fake the port number to cancel the swap done in
			   set_gpio() */
			swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
			swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
			port = (swap_val && swap_override) ^ 1;
			bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
				       MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
			break;

		default:
			break;
		}
		bnx2x_fan_failure(bp);
	}

	if (attn & (AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 |
		    AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1)) {
		bnx2x_acquire_phy_lock(bp);
		bnx2x_handle_module_detect_int(&bp->link_params);
		bnx2x_release_phy_lock(bp);
	}

	if (attn & HW_INTERRUT_ASSERT_SET_0) {

		val = REG_RD(bp, reg_offset);
		val &= ~(attn & HW_INTERRUT_ASSERT_SET_0);
		REG_WR(bp, reg_offset, val);

		BNX2X_ERR("FATAL HW block attention set0 0x%x\n",
			  (u32)(attn & HW_INTERRUT_ASSERT_SET_0));
		bnx2x_panic();
	}
}

static inline void bnx2x_attn_int_deasserted1(struct bnx2x *bp, u32 attn)
{
	u32 val;

	if (attn & AEU_INPUTS_ATTN_BITS_DOORBELLQ_HW_INTERRUPT) {

		val = REG_RD(bp, DORQ_REG_DORQ_INT_STS_CLR);
		BNX2X_ERR("DB hw attention 0x%x\n", val);
		/* DORQ discard attention */
		if (val & 0x2)
			BNX2X_ERR("FATAL error from DORQ\n");
	}

	if (attn & HW_INTERRUT_ASSERT_SET_1) {

		int port = BP_PORT(bp);
		int reg_offset;

		reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 :
				     MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1);

		val = REG_RD(bp, reg_offset);
		val &= ~(attn & HW_INTERRUT_ASSERT_SET_1);
		REG_WR(bp, reg_offset, val);

		BNX2X_ERR("FATAL HW block attention set1 0x%x\n",
			  (u32)(attn & HW_INTERRUT_ASSERT_SET_1));
		bnx2x_panic();
	}
}

static inline void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn)
{
	u32 val;

	if (attn & AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) {

		val = REG_RD(bp, CFC_REG_CFC_INT_STS_CLR);
		BNX2X_ERR("CFC hw attention 0x%x\n", val);
		/* CFC error attention */
		if (val & 0x2)
			BNX2X_ERR("FATAL error from CFC\n");
	}

	if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) {

		val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_0);
		BNX2X_ERR("PXP hw attention 0x%x\n", val);
		/* RQ_USDMDP_FIFO_OVERFLOW */
		if (val & 0x18000)
			BNX2X_ERR("FATAL error from PXP\n");
	}

	if (attn & HW_INTERRUT_ASSERT_SET_2) {

		int port = BP_PORT(bp);
		int reg_offset;

		reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 :
				     MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2);

		val = REG_RD(bp, reg_offset);
		val &= ~(attn & HW_INTERRUT_ASSERT_SET_2);
		REG_WR(bp, reg_offset, val);

		BNX2X_ERR("FATAL HW block attention set2 0x%x\n",
			  (u32)(attn & HW_INTERRUT_ASSERT_SET_2));
		bnx2x_panic();
	}
}

static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
{
	u32 val;

	if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) {

		if (attn & BNX2X_PMF_LINK_ASSERT) {
			int func = BP_FUNC(bp);

			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
			bp->mf_config = SHMEM_RD(bp,
					   mf_cfg.func_mf_config[func].config);
			val = SHMEM_RD(bp, func_mb[func].drv_status);
			if (val & DRV_STATUS_DCC_EVENT_MASK)
				bnx2x_dcc_event(bp,
					    (val & DRV_STATUS_DCC_EVENT_MASK));
			bnx2x__link_status_update(bp);
			if ((bp->port.pmf == 0) && (val & DRV_STATUS_PMF))
				bnx2x_pmf_update(bp);

		} else if (attn & BNX2X_MC_ASSERT_BITS) {

			BNX2X_ERR("MC assert!\n");
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_10, 0);
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_9, 0);
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_8, 0);
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_7, 0);
			bnx2x_panic();

		} else if (attn & BNX2X_MCP_ASSERT) {

			BNX2X_ERR("MCP assert!\n");
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_11, 0);
			bnx2x_fw_dump(bp);

		} else
			BNX2X_ERR("Unknown HW assert! (attn 0x%x)\n", attn);
	}

	if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) {
		BNX2X_ERR("LATCHED attention 0x%08x (masked)\n", attn);
		if (attn & BNX2X_GRC_TIMEOUT) {
			val = CHIP_IS_E1H(bp) ?
				REG_RD(bp, MISC_REG_GRC_TIMEOUT_ATTN) : 0;
			BNX2X_ERR("GRC time-out 0x%08x\n", val);
		}
		if (attn & BNX2X_GRC_RSV) {
			val = CHIP_IS_E1H(bp) ?
				REG_RD(bp, MISC_REG_GRC_RSV_ATTN) : 0;
			BNX2X_ERR("GRC reserved 0x%08x\n", val);
		}
		REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff);
	}
}

static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
{
	struct attn_route attn;
	struct attn_route group_mask;
	int port = BP_PORT(bp);
	int index;
	u32 reg_addr;
	u32 val;
	u32 aeu_mask;

	/* need to take HW lock because MCP or other port might also
	   try to handle this event */
	bnx2x_acquire_alr(bp);

	attn.sig[0] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
	attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
	attn.sig[2] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
	attn.sig[3] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
	DP(NETIF_MSG_HW, "attn: %08x %08x %08x %08x\n",
	   attn.sig[0], attn.sig[1], attn.sig[2], attn.sig[3]);

	for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
		if (deasserted & (1 << index)) {
			group_mask = bp->attn_group[index];

			DP(NETIF_MSG_HW, "group[%d]: %08x %08x %08x %08x\n",
			   index, group_mask.sig[0], group_mask.sig[1],
			   group_mask.sig[2], group_mask.sig[3]);

			bnx2x_attn_int_deasserted3(bp,
					attn.sig[3] & group_mask.sig[3]);
			bnx2x_attn_int_deasserted1(bp,
					attn.sig[1] & group_mask.sig[1]);
			bnx2x_attn_int_deasserted2(bp,
					attn.sig[2] & group_mask.sig[2]);
			bnx2x_attn_int_deasserted0(bp,
					attn.sig[0] & group_mask.sig[0]);

			if ((attn.sig[0] & group_mask.sig[0] &
						HW_PRTY_ASSERT_SET_0) ||
			    (attn.sig[1] & group_mask.sig[1] &
						HW_PRTY_ASSERT_SET_1) ||
			    (attn.sig[2] & group_mask.sig[2] &
						HW_PRTY_ASSERT_SET_2))
				BNX2X_ERR("FATAL HW block parity attention\n");
		}
	}

	bnx2x_release_alr(bp);

	reg_addr = (HC_REG_COMMAND_REG + port*32 + COMMAND_REG_ATTN_BITS_CLR);

	val = ~deasserted;
	DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
	   val, reg_addr);
	REG_WR(bp, reg_addr, val);

	if (~bp->attn_state & deasserted)
		BNX2X_ERR("IGU ERROR\n");

	reg_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
			  MISC_REG_AEU_MASK_ATTN_FUNC_0;

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
	aeu_mask = REG_RD(bp, reg_addr);

	DP(NETIF_MSG_HW, "aeu_mask %x  newly deasserted %x\n",
	   aeu_mask, deasserted);
	aeu_mask |= (deasserted & 0xff);
	DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);

	REG_WR(bp, reg_addr, aeu_mask);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);

	DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
	bp->attn_state &= ~deasserted;
	DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
}

static void bnx2x_attn_int(struct bnx2x *bp)
{
	/* read local copy of bits */
	u32 attn_bits = le32_to_cpu(bp->def_status_blk->atten_status_block.
								attn_bits);
	u32 attn_ack = le32_to_cpu(bp->def_status_blk->atten_status_block.
								attn_bits_ack);
	u32 attn_state = bp->attn_state;

	/* look for changed bits */
	u32 asserted   =  attn_bits & ~attn_ack & ~attn_state;
	u32 deasserted = ~attn_bits &  attn_ack &  attn_state;

	DP(NETIF_MSG_HW,
	   "attn_bits %x  attn_ack %x  asserted %x  deasserted %x\n",
	   attn_bits, attn_ack, asserted, deasserted);

	if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state))
		BNX2X_ERR("BAD attention state\n");

	/* handle bits that were raised */
	if (asserted)
		bnx2x_attn_int_asserted(bp, asserted);

	if (deasserted)
		bnx2x_attn_int_deasserted(bp, deasserted);
}

static void bnx2x_sp_task(struct work_struct *work)
{
	struct bnx2x *bp = container_of(work, struct bnx2x, sp_task.work);
	u16 status;

	/* Return here if interrupt is disabled */
	if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
		DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
		return;
	}

	status = bnx2x_update_dsb_idx(bp);
/*	if (status == 0)				     */
/*		BNX2X_ERR("spurious slowpath interrupt!\n"); */

	DP(NETIF_MSG_INTR, "got a slowpath interrupt (updated %x)\n", status);

	/* HW attentions */
	if (status & 0x1)
		bnx2x_attn_int(bp);

	bnx2x_ack_sb(bp, DEF_SB_ID, ATTENTION_ID, le16_to_cpu(bp->def_att_idx),
		     IGU_INT_NOP, 1);
	bnx2x_ack_sb(bp, DEF_SB_ID, USTORM_ID, le16_to_cpu(bp->def_u_idx),
		     IGU_INT_NOP, 1);
	bnx2x_ack_sb(bp, DEF_SB_ID, CSTORM_ID, le16_to_cpu(bp->def_c_idx),
		     IGU_INT_NOP, 1);
	bnx2x_ack_sb(bp, DEF_SB_ID, XSTORM_ID, le16_to_cpu(bp->def_x_idx),
		     IGU_INT_NOP, 1);
	bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, le16_to_cpu(bp->def_t_idx),
		     IGU_INT_ENABLE, 1);
}

static irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
{
	struct net_device *dev = dev_instance;
	struct bnx2x *bp = netdev_priv(dev);

	/* Return here if interrupt is disabled */
	if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
		DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
		return IRQ_HANDLED;
	}

	bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, 0, IGU_INT_DISABLE, 0);

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return IRQ_HANDLED;
#endif

#ifdef BCM_CNIC
	{
		struct cnic_ops *c_ops;

		rcu_read_lock();
		c_ops = rcu_dereference(bp->cnic_ops);
		if (c_ops)
			c_ops->cnic_handler(bp->cnic_data, NULL);
		rcu_read_unlock();
	}
#endif
	queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);

	return IRQ_HANDLED;
}

/* end of slow path */

/* Statistics */

/****************************************************************************
* Macros
****************************************************************************/

/* sum[hi:lo] += add[hi:lo] */
#define ADD_64(s_hi, a_hi, s_lo, a_lo) \
	do { \
		s_lo += a_lo; \
		s_hi += a_hi + ((s_lo < a_lo) ? 1 : 0); \
	} while (0)

/* difference = minuend - subtrahend */
#define DIFF_64(d_hi, m_hi, s_hi, d_lo, m_lo, s_lo) \
	do { \
		if (m_lo < s_lo) { \
			/* underflow */ \
			d_hi = m_hi - s_hi; \
			if (d_hi > 0) { \
				/* we can 'loan' 1 */ \
				d_hi--; \
				d_lo = m_lo + (UINT_MAX - s_lo) + 1; \
			} else { \
				/* m_hi <= s_hi */ \
				d_hi = 0; \
				d_lo = 0; \
			} \
		} else { \
			/* m_lo >= s_lo */ \
			if (m_hi < s_hi) { \
				d_hi = 0; \
				d_lo = 0; \
			} else { \
				/* m_hi >= s_hi */ \
				d_hi = m_hi - s_hi; \
				d_lo = m_lo - s_lo; \
			} \
		} \
	} while (0)
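
/* Example: ADD_64(hi, 0, lo, 1) increments the split 64-bit value {hi,lo};
 * the carry out of the low word is detected by the post-add wraparound test
 * (s_lo < a_lo). DIFF_64 clamps the result to zero instead of underflowing
 * when the subtrahend is larger than the minuend.
 */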

#define UPDATE_STAT64(s, t) \
	do { \
		DIFF_64(diff.hi, new->s##_hi, pstats->mac_stx[0].t##_hi, \
			diff.lo, new->s##_lo, pstats->mac_stx[0].t##_lo); \
		pstats->mac_stx[0].t##_hi = new->s##_hi; \
		pstats->mac_stx[0].t##_lo = new->s##_lo; \
		ADD_64(pstats->mac_stx[1].t##_hi, diff.hi, \
		       pstats->mac_stx[1].t##_lo, diff.lo); \
	} while (0)

#define UPDATE_STAT64_NIG(s, t) \
	do { \
		DIFF_64(diff.hi, new->s##_hi, old->s##_hi, \
			diff.lo, new->s##_lo, old->s##_lo); \
		ADD_64(estats->t##_hi, diff.hi, \
		       estats->t##_lo, diff.lo); \
	} while (0)

/* sum[hi:lo] += add */
#define ADD_EXTEND_64(s_hi, s_lo, a) \
	do { \
		s_lo += a; \
		s_hi += (s_lo < a) ? 1 : 0; \
	} while (0)

#define UPDATE_EXTEND_STAT(s) \
	do { \
		ADD_EXTEND_64(pstats->mac_stx[1].s##_hi, \
			      pstats->mac_stx[1].s##_lo, \
			      new->s); \
	} while (0)

#define UPDATE_EXTEND_TSTAT(s, t) \
	do { \
		diff = le32_to_cpu(tclient->s) - le32_to_cpu(old_tclient->s); \
		old_tclient->s = tclient->s; \
		ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
	} while (0)

#define UPDATE_EXTEND_USTAT(s, t) \
	do { \
		diff = le32_to_cpu(uclient->s) - le32_to_cpu(old_uclient->s); \
		old_uclient->s = uclient->s; \
		ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
	} while (0)

#define UPDATE_EXTEND_XSTAT(s, t) \
	do { \
		diff = le32_to_cpu(xclient->s) - le32_to_cpu(old_xclient->s); \
		old_xclient->s = xclient->s; \
		ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
	} while (0)

/* minuend -= subtrahend */
#define SUB_64(m_hi, s_hi, m_lo, s_lo) \
	do { \
		DIFF_64(m_hi, m_hi, s_hi, m_lo, m_lo, s_lo); \
	} while (0)

/* minuend[hi:lo] -= subtrahend */
#define SUB_EXTEND_64(m_hi, m_lo, s) \
	do { \
		SUB_64(m_hi, 0, m_lo, s); \
	} while (0)

#define SUB_EXTEND_USTAT(s, t) \
	do { \
		diff = le32_to_cpu(uclient->s) - le32_to_cpu(old_uclient->s); \
		SUB_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
	} while (0)

/*
 * General service functions
 */

static inline long bnx2x_hilo(u32 *hiref)
{
	u32 lo = *(hiref + 1);
#if (BITS_PER_LONG == 64)
	u32 hi = *hiref;

	return HILO_U64(hi, lo);
#else
	return lo;
#endif
}
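
/* hiref points at the _hi word of a {hi,lo} counter pair laid out hi-first,
 * matching the s##_hi/s##_lo fields the macros above operate on; on 32-bit
 * builds only the low word fits in a long, so just that is returned.
 */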

/*
 * Init service functions
 */

static void bnx2x_storm_stats_post(struct bnx2x *bp)
{
	if (!bp->stats_pending) {
		struct eth_query_ramrod_data ramrod_data = {0};
		int i, rc;

		ramrod_data.drv_counter = bp->stats_counter++;
		ramrod_data.collect_port = bp->port.pmf ? 1 : 0;
		for_each_queue(bp, i)
			ramrod_data.ctr_id_vector |= (1 << bp->fp[i].cl_id);

		rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_STAT_QUERY, 0,
				   ((u32 *)&ramrod_data)[1],
				   ((u32 *)&ramrod_data)[0], 0);
		if (rc == 0) {
			/* stats ramrod has its own slot on the spq */
			bp->spq_left++;
			bp->stats_pending = 1;
		}
	}
}
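
/* bnx2x_hw_stats_post() below relies on a "loader" DMAE command: when
 * several commands were queued (bp->executer_idx != 0), the command posted
 * here copies the next dmae_command image into the DMAE command memory and
 * completes to the next GO register (dmae_reg_go_c[loader_idx + 1]), which
 * kicks off the rest of the chain without further CPU involvement.
 */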

static void bnx2x_hw_stats_post(struct bnx2x *bp)
{
	struct dmae_command *dmae = &bp->stats_dmae;
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	*stats_comp = DMAE_COMP_VAL;
	if (CHIP_REV_IS_SLOW(bp))
		return;

	/* loader */
	if (bp->executer_idx) {
		int loader_idx = PMF_DMAE_C(bp);

		memset(dmae, 0, sizeof(struct dmae_command));

		dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
				DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
				DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
				DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
				DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
				(BP_PORT(bp) ? DMAE_CMD_PORT_1 :
					       DMAE_CMD_PORT_0) |
				(BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
		dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, dmae[0]));
		dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, dmae[0]));
		dmae->dst_addr_lo = (DMAE_REG_CMD_MEM +
				     sizeof(struct dmae_command) *
				     (loader_idx + 1)) >> 2;
		dmae->dst_addr_hi = 0;
		dmae->len = sizeof(struct dmae_command) >> 2;
		if (CHIP_IS_E1(bp))
			dmae->len--;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx + 1] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;

		*stats_comp = 0;
		bnx2x_post_dmae(bp, dmae, loader_idx);

	} else if (bp->func_stx) {
		*stats_comp = 0;
		bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
	}
}

static int bnx2x_stats_comp(struct bnx2x *bp)
{
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);
	int cnt = 10;

	might_sleep();
	while (*stats_comp != DMAE_COMP_VAL) {
		if (!cnt) {
			BNX2X_ERR("timeout waiting for stats finished\n");
			break;
		}
		cnt--;
		msleep(1);
	}
	return 1;
}

/*
 * Statistics service functions
 */

static void bnx2x_stats_pmf_update(struct bnx2x *bp)
{
	struct dmae_command *dmae;
	u32 opcode;
	int loader_idx = PMF_DMAE_C(bp);
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	/* sanity */
	if (!IS_E1HMF(bp) || !bp->port.pmf || !bp->port.port_stx) {
		BNX2X_ERR("BUG!\n");
		return;
	}

	bp->executer_idx = 0;

	opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
		  DMAE_CMD_C_ENABLE |
		  DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
		  DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
		  DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
		  (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
		  (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));

	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
	dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
	dmae->src_addr_lo = bp->port.port_stx >> 2;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
	dmae->len = DMAE_LEN32_RD_MAX;
	dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
	dmae->comp_addr_hi = 0;
	dmae->comp_val = 1;

	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
	dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
	dmae->src_addr_lo = (bp->port.port_stx >> 2) + DMAE_LEN32_RD_MAX;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats) +
				   DMAE_LEN32_RD_MAX * 4);
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats) +
				   DMAE_LEN32_RD_MAX * 4);
	dmae->len = (sizeof(struct host_port_stats) >> 2) - DMAE_LEN32_RD_MAX;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	*stats_comp = 0;
	bnx2x_hw_stats_post(bp);
	bnx2x_stats_comp(bp);
}

static void bnx2x_port_stats_init(struct bnx2x *bp)
{
	struct dmae_command *dmae;
	int port = BP_PORT(bp);
	int vn = BP_E1HVN(bp);
	u32 opcode;
	int loader_idx = PMF_DMAE_C(bp);
	u32 mac_addr;
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	/* sanity */
	if (!bp->link_vars.link_up || !bp->port.pmf) {
		BNX2X_ERR("BUG!\n");
		return;
	}

	bp->executer_idx = 0;

	/* MCP */
	opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
		  DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
		  DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
		  DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
		  DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
		  (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
		  (vn << DMAE_CMD_E1HVN_SHIFT));

	if (bp->port.port_stx) {

		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
		dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
		dmae->dst_addr_lo = bp->port.port_stx >> 2;
		dmae->dst_addr_hi = 0;
		dmae->len = sizeof(struct host_port_stats) >> 2;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;
	}

	if (bp->func_stx) {

		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
		dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
		dmae->dst_addr_lo = bp->func_stx >> 2;
		dmae->dst_addr_hi = 0;
		dmae->len = sizeof(struct host_func_stats) >> 2;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;
	}

	/* MAC */
	opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
		  DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
		  DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
		  DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
		  DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
		  (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
		  (vn << DMAE_CMD_E1HVN_SHIFT));

	if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {

		mac_addr = (port ? NIG_REG_INGRESS_BMAC1_MEM :
				   NIG_REG_INGRESS_BMAC0_MEM);

		/* BIGMAC_REGISTER_TX_STAT_GTPKT ..
		   BIGMAC_REGISTER_TX_STAT_GTBYT */
		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = (mac_addr +
				     BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
		dmae->src_addr_hi = 0;
		dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
		dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
		dmae->len = (8 + BIGMAC_REGISTER_TX_STAT_GTBYT -
			     BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;

		/* BIGMAC_REGISTER_RX_STAT_GR64 ..
		   BIGMAC_REGISTER_RX_STAT_GRIPJ */
		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = (mac_addr +
				     BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
		dmae->src_addr_hi = 0;
		dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
				offsetof(struct bmac_stats, rx_stat_gr64_lo));
		dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
				offsetof(struct bmac_stats, rx_stat_gr64_lo));
		dmae->len = (8 + BIGMAC_REGISTER_RX_STAT_GRIPJ -
			     BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;

	} else if (bp->link_vars.mac_type == MAC_TYPE_EMAC) {

		mac_addr = (port ? GRCBASE_EMAC1 : GRCBASE_EMAC0);

		/* EMAC_REG_EMAC_RX_STAT_AC (EMAC_REG_EMAC_RX_STAT_AC_COUNT)*/
		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = (mac_addr +
				     EMAC_REG_EMAC_RX_STAT_AC) >> 2;
		dmae->src_addr_hi = 0;
		dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
		dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
		dmae->len = EMAC_REG_EMAC_RX_STAT_AC_COUNT;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;

		/* EMAC_REG_EMAC_RX_STAT_AC_28 */
		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = (mac_addr +
				     EMAC_REG_EMAC_RX_STAT_AC_28) >> 2;
		dmae->src_addr_hi = 0;
		dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
		     offsetof(struct emac_stats, rx_stat_falsecarriererrors));
		dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
		     offsetof(struct emac_stats, rx_stat_falsecarriererrors));
		dmae->len = 1;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;

		/* EMAC_REG_EMAC_TX_STAT_AC (EMAC_REG_EMAC_TX_STAT_AC_COUNT)*/
		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = (mac_addr +
				     EMAC_REG_EMAC_TX_STAT_AC) >> 2;
		dmae->src_addr_hi = 0;
		dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
			offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
		dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
			offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
		dmae->len = EMAC_REG_EMAC_TX_STAT_AC_COUNT;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;
	}

	/* NIG */
	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
	dmae->opcode = opcode;
	dmae->src_addr_lo = (port ? NIG_REG_STAT1_BRB_DISCARD :
				    NIG_REG_STAT0_BRB_DISCARD) >> 2;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats));
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats));
	dmae->len = (sizeof(struct nig_stats) - 4*sizeof(u32)) >> 2;
	dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
	dmae->comp_addr_hi = 0;
	dmae->comp_val = 1;

	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
	dmae->opcode = opcode;
	dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT0 :
				    NIG_REG_STAT0_EGRESS_MAC_PKT0) >> 2;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
			offsetof(struct nig_stats, egress_mac_pkt0_lo));
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
			offsetof(struct nig_stats, egress_mac_pkt0_lo));
	dmae->len = (2*sizeof(u32)) >> 2;
	dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
	dmae->comp_addr_hi = 0;
	dmae->comp_val = 1;

	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
	dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
			DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
			DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
			DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
			DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
			(port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
			(vn << DMAE_CMD_E1HVN_SHIFT));
	dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT1 :
				    NIG_REG_STAT0_EGRESS_MAC_PKT1) >> 2;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
			offsetof(struct nig_stats, egress_mac_pkt1_lo));
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
			offsetof(struct nig_stats, egress_mac_pkt1_lo));
	dmae->len = (2*sizeof(u32)) >> 2;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	*stats_comp = 0;
}

static void bnx2x_func_stats_init(struct bnx2x *bp)
{
	struct dmae_command *dmae = &bp->stats_dmae;
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	/* sanity */
	if (!bp->func_stx) {
		BNX2X_ERR("BUG!\n");
		return;
	}

	bp->executer_idx = 0;
	memset(dmae, 0, sizeof(struct dmae_command));

	dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
			DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
			DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
			DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
			DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
			(BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
			(BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
	dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
	dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
	dmae->dst_addr_lo = bp->func_stx >> 2;
	dmae->dst_addr_hi = 0;
	dmae->len = sizeof(struct host_func_stats) >> 2;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	*stats_comp = 0;
}

static void bnx2x_stats_start(struct bnx2x *bp)
{
	if (bp->port.pmf)
		bnx2x_port_stats_init(bp);

	else if (bp->func_stx)
		bnx2x_func_stats_init(bp);

	bnx2x_hw_stats_post(bp);
	bnx2x_storm_stats_post(bp);
}

static void bnx2x_stats_pmf_start(struct bnx2x *bp)
{
	bnx2x_stats_comp(bp);
	bnx2x_stats_pmf_update(bp);
	bnx2x_stats_start(bp);
}

static void bnx2x_stats_restart(struct bnx2x *bp)
{
	bnx2x_stats_comp(bp);
	bnx2x_stats_start(bp);
}

static void bnx2x_bmac_stats_update(struct bnx2x *bp)
{
	struct bmac_stats *new = bnx2x_sp(bp, mac_stats.bmac_stats);
	struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
	struct bnx2x_eth_stats *estats = &bp->eth_stats;
	struct {
		u32 lo;
		u32 hi;
	} diff;

	UPDATE_STAT64(rx_stat_grerb, rx_stat_ifhcinbadoctets);
	UPDATE_STAT64(rx_stat_grfcs, rx_stat_dot3statsfcserrors);
	UPDATE_STAT64(rx_stat_grund, rx_stat_etherstatsundersizepkts);
	UPDATE_STAT64(rx_stat_grovr, rx_stat_dot3statsframestoolong);
	UPDATE_STAT64(rx_stat_grfrg, rx_stat_etherstatsfragments);
	UPDATE_STAT64(rx_stat_grjbr, rx_stat_etherstatsjabbers);
	UPDATE_STAT64(rx_stat_grxcf, rx_stat_maccontrolframesreceived);
	UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffstateentered);
	UPDATE_STAT64(rx_stat_grxpf, rx_stat_bmac_xpf);
	UPDATE_STAT64(tx_stat_gtxpf, tx_stat_outxoffsent);
	UPDATE_STAT64(tx_stat_gtxpf, tx_stat_flowcontroldone);
	UPDATE_STAT64(tx_stat_gt64, tx_stat_etherstatspkts64octets);
	UPDATE_STAT64(tx_stat_gt127,
				tx_stat_etherstatspkts65octetsto127octets);
	UPDATE_STAT64(tx_stat_gt255,
				tx_stat_etherstatspkts128octetsto255octets);
	UPDATE_STAT64(tx_stat_gt511,
				tx_stat_etherstatspkts256octetsto511octets);
	UPDATE_STAT64(tx_stat_gt1023,
				tx_stat_etherstatspkts512octetsto1023octets);
	UPDATE_STAT64(tx_stat_gt1518,
				tx_stat_etherstatspkts1024octetsto1522octets);
	UPDATE_STAT64(tx_stat_gt2047, tx_stat_bmac_2047);
	UPDATE_STAT64(tx_stat_gt4095, tx_stat_bmac_4095);
	UPDATE_STAT64(tx_stat_gt9216, tx_stat_bmac_9216);
	UPDATE_STAT64(tx_stat_gt16383, tx_stat_bmac_16383);
	UPDATE_STAT64(tx_stat_gterr,
				tx_stat_dot3statsinternalmactransmiterrors);
	UPDATE_STAT64(tx_stat_gtufl, tx_stat_bmac_ufl);

	estats->pause_frames_received_hi =
				pstats->mac_stx[1].rx_stat_bmac_xpf_hi;
	estats->pause_frames_received_lo =
				pstats->mac_stx[1].rx_stat_bmac_xpf_lo;

	estats->pause_frames_sent_hi =
				pstats->mac_stx[1].tx_stat_outxoffsent_hi;
	estats->pause_frames_sent_lo =
				pstats->mac_stx[1].tx_stat_outxoffsent_lo;
}

static void bnx2x_emac_stats_update(struct bnx2x *bp)
{
	struct emac_stats *new = bnx2x_sp(bp, mac_stats.emac_stats);
	struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
	struct bnx2x_eth_stats *estats = &bp->eth_stats;

	UPDATE_EXTEND_STAT(rx_stat_ifhcinbadoctets);
	UPDATE_EXTEND_STAT(tx_stat_ifhcoutbadoctets);
	UPDATE_EXTEND_STAT(rx_stat_dot3statsfcserrors);
	UPDATE_EXTEND_STAT(rx_stat_dot3statsalignmenterrors);
	UPDATE_EXTEND_STAT(rx_stat_dot3statscarriersenseerrors);
	UPDATE_EXTEND_STAT(rx_stat_falsecarriererrors);
	UPDATE_EXTEND_STAT(rx_stat_etherstatsundersizepkts);
	UPDATE_EXTEND_STAT(rx_stat_dot3statsframestoolong);
	UPDATE_EXTEND_STAT(rx_stat_etherstatsfragments);
	UPDATE_EXTEND_STAT(rx_stat_etherstatsjabbers);
	UPDATE_EXTEND_STAT(rx_stat_maccontrolframesreceived);
	UPDATE_EXTEND_STAT(rx_stat_xoffstateentered);
	UPDATE_EXTEND_STAT(rx_stat_xonpauseframesreceived);
	UPDATE_EXTEND_STAT(rx_stat_xoffpauseframesreceived);
	UPDATE_EXTEND_STAT(tx_stat_outxonsent);
	UPDATE_EXTEND_STAT(tx_stat_outxoffsent);
	UPDATE_EXTEND_STAT(tx_stat_flowcontroldone);
	UPDATE_EXTEND_STAT(tx_stat_etherstatscollisions);
	UPDATE_EXTEND_STAT(tx_stat_dot3statssinglecollisionframes);
	UPDATE_EXTEND_STAT(tx_stat_dot3statsmultiplecollisionframes);
	UPDATE_EXTEND_STAT(tx_stat_dot3statsdeferredtransmissions);
	UPDATE_EXTEND_STAT(tx_stat_dot3statsexcessivecollisions);
	UPDATE_EXTEND_STAT(tx_stat_dot3statslatecollisions);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts64octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts65octetsto127octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts128octetsto255octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts256octetsto511octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts512octetsto1023octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts1024octetsto1522octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspktsover1522octets);
	UPDATE_EXTEND_STAT(tx_stat_dot3statsinternalmactransmiterrors);

	estats->pause_frames_received_hi =
			pstats->mac_stx[1].rx_stat_xonpauseframesreceived_hi;
	estats->pause_frames_received_lo =
			pstats->mac_stx[1].rx_stat_xonpauseframesreceived_lo;
	ADD_64(estats->pause_frames_received_hi,
	       pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_hi,
	       estats->pause_frames_received_lo,
	       pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_lo);

	estats->pause_frames_sent_hi =
			pstats->mac_stx[1].tx_stat_outxonsent_hi;
	estats->pause_frames_sent_lo =
			pstats->mac_stx[1].tx_stat_outxonsent_lo;
	ADD_64(estats->pause_frames_sent_hi,
	       pstats->mac_stx[1].tx_stat_outxoffsent_hi,
	       estats->pause_frames_sent_lo,
	       pstats->mac_stx[1].tx_stat_outxoffsent_lo);
}

static int bnx2x_hw_stats_update(struct bnx2x *bp)
{
	struct nig_stats *new = bnx2x_sp(bp, nig_stats);
	struct nig_stats *old = &(bp->port.old_nig_stats);
	struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
	struct bnx2x_eth_stats *estats = &bp->eth_stats;
	struct {
		u32 lo;
		u32 hi;
	} diff;
	u32 nig_timer_max;

	if (bp->link_vars.mac_type == MAC_TYPE_BMAC)
		bnx2x_bmac_stats_update(bp);

	else if (bp->link_vars.mac_type == MAC_TYPE_EMAC)
		bnx2x_emac_stats_update(bp);

	else { /* unreached */
		BNX2X_ERR("stats updated by DMAE but no MAC active\n");
		return -1;
	}

	ADD_EXTEND_64(pstats->brb_drop_hi, pstats->brb_drop_lo,
		      new->brb_discard - old->brb_discard);
	ADD_EXTEND_64(estats->brb_truncate_hi, estats->brb_truncate_lo,
		      new->brb_truncate - old->brb_truncate);

	UPDATE_STAT64_NIG(egress_mac_pkt0,
					etherstatspkts1024octetsto1522octets);
	UPDATE_STAT64_NIG(egress_mac_pkt1, etherstatspktsover1522octets);

	memcpy(old, new, sizeof(struct nig_stats));

	memcpy(&(estats->rx_stat_ifhcinbadoctets_hi), &(pstats->mac_stx[1]),
	       sizeof(struct mac_stx));
	estats->brb_drop_hi = pstats->brb_drop_hi;
	estats->brb_drop_lo = pstats->brb_drop_lo;

	pstats->host_port_stats_start = ++pstats->host_port_stats_end;
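	/* start == end marks a consistent snapshot; a reader of the shared
	 * stats block that sees start != end presumably caught it mid-update
	 * (a seqcount-style convention - an interpretation, not spelled out
	 * here).
	 */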

	nig_timer_max = SHMEM_RD(bp, port_mb[BP_PORT(bp)].stat_nig_timer);
	if (nig_timer_max != estats->nig_timer_max) {
		estats->nig_timer_max = nig_timer_max;
		BNX2X_ERR("NIG timer max (%u)\n", estats->nig_timer_max);
	}

	return 0;
}

static int bnx2x_storm_stats_update(struct bnx2x *bp)
{
	struct eth_stats_query *stats = bnx2x_sp(bp, fw_stats);
	struct tstorm_per_port_stats *tport =
					&stats->tstorm_common.port_statistics;
	struct host_func_stats *fstats = bnx2x_sp(bp, func_stats);
	struct bnx2x_eth_stats *estats = &bp->eth_stats;
	int i;

	memcpy(&(fstats->total_bytes_received_hi),
	       &(bnx2x_sp(bp, func_stats_base)->total_bytes_received_hi),
	       sizeof(struct host_func_stats) - 2*sizeof(u32));
	estats->error_bytes_received_hi = 0;
	estats->error_bytes_received_lo = 0;
	estats->etherstatsoverrsizepkts_hi = 0;
	estats->etherstatsoverrsizepkts_lo = 0;
	estats->no_buff_discard_hi = 0;
	estats->no_buff_discard_lo = 0;

	for_each_rx_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];
		int cl_id = fp->cl_id;
		struct tstorm_per_client_stats *tclient =
				&stats->tstorm_common.client_statistics[cl_id];
		struct tstorm_per_client_stats *old_tclient = &fp->old_tclient;
		struct ustorm_per_client_stats *uclient =
				&stats->ustorm_common.client_statistics[cl_id];
		struct ustorm_per_client_stats *old_uclient = &fp->old_uclient;
		struct xstorm_per_client_stats *xclient =
				&stats->xstorm_common.client_statistics[cl_id];
		struct xstorm_per_client_stats *old_xclient = &fp->old_xclient;
		struct bnx2x_eth_q_stats *qstats = &fp->eth_q_stats;
		u32 diff;

		/* are storm stats valid? */
		if ((u16)(le16_to_cpu(xclient->stats_counter) + 1) !=
							bp->stats_counter) {
			DP(BNX2X_MSG_STATS, "[%d] stats not updated by xstorm"
			   "  xstorm counter (%d) != stats_counter (%d)\n",
			   i, xclient->stats_counter, bp->stats_counter);
			return -1;
		}
		if ((u16)(le16_to_cpu(tclient->stats_counter) + 1) !=
							bp->stats_counter) {
			DP(BNX2X_MSG_STATS, "[%d] stats not updated by tstorm"
			   "  tstorm counter (%d) != stats_counter (%d)\n",
			   i, tclient->stats_counter, bp->stats_counter);
			return -2;
		}
		if ((u16)(le16_to_cpu(uclient->stats_counter) + 1) !=
							bp->stats_counter) {
			DP(BNX2X_MSG_STATS, "[%d] stats not updated by ustorm"
			   "  ustorm counter (%d) != stats_counter (%d)\n",
			   i, uclient->stats_counter, bp->stats_counter);
			return -4;
		}

		qstats->total_bytes_received_hi =
			le32_to_cpu(tclient->rcv_broadcast_bytes.hi);
		qstats->total_bytes_received_lo =
			le32_to_cpu(tclient->rcv_broadcast_bytes.lo);

		ADD_64(qstats->total_bytes_received_hi,
		       le32_to_cpu(tclient->rcv_multicast_bytes.hi),
		       qstats->total_bytes_received_lo,
		       le32_to_cpu(tclient->rcv_multicast_bytes.lo));

		ADD_64(qstats->total_bytes_received_hi,
		       le32_to_cpu(tclient->rcv_unicast_bytes.hi),
		       qstats->total_bytes_received_lo,
		       le32_to_cpu(tclient->rcv_unicast_bytes.lo));

		qstats->valid_bytes_received_hi =
					qstats->total_bytes_received_hi;
		qstats->valid_bytes_received_lo =
					qstats->total_bytes_received_lo;

		qstats->error_bytes_received_hi =
				le32_to_cpu(tclient->rcv_error_bytes.hi);
		qstats->error_bytes_received_lo =
				le32_to_cpu(tclient->rcv_error_bytes.lo);

		ADD_64(qstats->total_bytes_received_hi,
		       qstats->error_bytes_received_hi,
		       qstats->total_bytes_received_lo,
		       qstats->error_bytes_received_lo);

		UPDATE_EXTEND_TSTAT(rcv_unicast_pkts,
					total_unicast_packets_received);
		UPDATE_EXTEND_TSTAT(rcv_multicast_pkts,
					total_multicast_packets_received);
		UPDATE_EXTEND_TSTAT(rcv_broadcast_pkts,
					total_broadcast_packets_received);
		UPDATE_EXTEND_TSTAT(packets_too_big_discard,
					etherstatsoverrsizepkts);
		UPDATE_EXTEND_TSTAT(no_buff_discard, no_buff_discard);

		SUB_EXTEND_USTAT(ucast_no_buff_pkts,
					total_unicast_packets_received);
		SUB_EXTEND_USTAT(mcast_no_buff_pkts,
					total_multicast_packets_received);
		SUB_EXTEND_USTAT(bcast_no_buff_pkts,
					total_broadcast_packets_received);
		UPDATE_EXTEND_USTAT(ucast_no_buff_pkts, no_buff_discard);
		UPDATE_EXTEND_USTAT(mcast_no_buff_pkts, no_buff_discard);
		UPDATE_EXTEND_USTAT(bcast_no_buff_pkts, no_buff_discard);
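		/* Packets dropped for lack of host buffers were still counted
		 * as received by the tstorm counters above; the SUB/UPDATE
		 * pairs move them out of the received totals and into
		 * no_buff_discard instead.
		 */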

		qstats->total_bytes_transmitted_hi =
				le32_to_cpu(xclient->unicast_bytes_sent.hi);
		qstats->total_bytes_transmitted_lo =
				le32_to_cpu(xclient->unicast_bytes_sent.lo);

		ADD_64(qstats->total_bytes_transmitted_hi,
		       le32_to_cpu(xclient->multicast_bytes_sent.hi),
		       qstats->total_bytes_transmitted_lo,
		       le32_to_cpu(xclient->multicast_bytes_sent.lo));

		ADD_64(qstats->total_bytes_transmitted_hi,
		       le32_to_cpu(xclient->broadcast_bytes_sent.hi),
		       qstats->total_bytes_transmitted_lo,
		       le32_to_cpu(xclient->broadcast_bytes_sent.lo));

		UPDATE_EXTEND_XSTAT(unicast_pkts_sent,
					total_unicast_packets_transmitted);
		UPDATE_EXTEND_XSTAT(multicast_pkts_sent,
					total_multicast_packets_transmitted);
		UPDATE_EXTEND_XSTAT(broadcast_pkts_sent,
					total_broadcast_packets_transmitted);

		old_tclient->checksum_discard = tclient->checksum_discard;
		old_tclient->ttl0_discard = tclient->ttl0_discard;

		ADD_64(fstats->total_bytes_received_hi,
		       qstats->total_bytes_received_hi,
		       fstats->total_bytes_received_lo,
		       qstats->total_bytes_received_lo);
		ADD_64(fstats->total_bytes_transmitted_hi,
		       qstats->total_bytes_transmitted_hi,
		       fstats->total_bytes_transmitted_lo,
		       qstats->total_bytes_transmitted_lo);
		ADD_64(fstats->total_unicast_packets_received_hi,
		       qstats->total_unicast_packets_received_hi,
		       fstats->total_unicast_packets_received_lo,
		       qstats->total_unicast_packets_received_lo);
		ADD_64(fstats->total_multicast_packets_received_hi,
		       qstats->total_multicast_packets_received_hi,
		       fstats->total_multicast_packets_received_lo,
		       qstats->total_multicast_packets_received_lo);
		ADD_64(fstats->total_broadcast_packets_received_hi,
		       qstats->total_broadcast_packets_received_hi,
		       fstats->total_broadcast_packets_received_lo,
		       qstats->total_broadcast_packets_received_lo);
		ADD_64(fstats->total_unicast_packets_transmitted_hi,
		       qstats->total_unicast_packets_transmitted_hi,
		       fstats->total_unicast_packets_transmitted_lo,
		       qstats->total_unicast_packets_transmitted_lo);
		ADD_64(fstats->total_multicast_packets_transmitted_hi,
		       qstats->total_multicast_packets_transmitted_hi,
		       fstats->total_multicast_packets_transmitted_lo,
		       qstats->total_multicast_packets_transmitted_lo);
		ADD_64(fstats->total_broadcast_packets_transmitted_hi,
		       qstats->total_broadcast_packets_transmitted_hi,
		       fstats->total_broadcast_packets_transmitted_lo,
		       qstats->total_broadcast_packets_transmitted_lo);
		ADD_64(fstats->valid_bytes_received_hi,
		       qstats->valid_bytes_received_hi,
		       fstats->valid_bytes_received_lo,
		       qstats->valid_bytes_received_lo);

		ADD_64(estats->error_bytes_received_hi,
		       qstats->error_bytes_received_hi,
		       estats->error_bytes_received_lo,
		       qstats->error_bytes_received_lo);
		ADD_64(estats->etherstatsoverrsizepkts_hi,
		       qstats->etherstatsoverrsizepkts_hi,
		       estats->etherstatsoverrsizepkts_lo,
		       qstats->etherstatsoverrsizepkts_lo);
		ADD_64(estats->no_buff_discard_hi, qstats->no_buff_discard_hi,
		       estats->no_buff_discard_lo, qstats->no_buff_discard_lo);
	}

	ADD_64(fstats->total_bytes_received_hi,
	       estats->rx_stat_ifhcinbadoctets_hi,
	       fstats->total_bytes_received_lo,
	       estats->rx_stat_ifhcinbadoctets_lo);

	memcpy(estats, &(fstats->total_bytes_received_hi),
	       sizeof(struct host_func_stats) - 2*sizeof(u32));

	ADD_64(estats->etherstatsoverrsizepkts_hi,
	       estats->rx_stat_dot3statsframestoolong_hi,
	       estats->etherstatsoverrsizepkts_lo,
	       estats->rx_stat_dot3statsframestoolong_lo);
	ADD_64(estats->error_bytes_received_hi,
	       estats->rx_stat_ifhcinbadoctets_hi,
	       estats->error_bytes_received_lo,
	       estats->rx_stat_ifhcinbadoctets_lo);

	if (bp->port.pmf) {
		estats->mac_filter_discard =
				le32_to_cpu(tport->mac_filter_discard);
		estats->xxoverflow_discard =
				le32_to_cpu(tport->xxoverflow_discard);
		estats->brb_truncate_discard =
				le32_to_cpu(tport->brb_truncate_discard);
		estats->mac_discard = le32_to_cpu(tport->mac_discard);
	}

	fstats->host_func_stats_start = ++fstats->host_func_stats_end;

	bp->stats_pending = 0;

	return 0;
}
4215 static void bnx2x_net_stats_update(struct bnx2x *bp)
4217 struct bnx2x_eth_stats *estats = &bp->eth_stats;
4218 struct net_device_stats *nstats = &bp->dev->stats;
4221 nstats->rx_packets =
4222 bnx2x_hilo(&estats->total_unicast_packets_received_hi) +
4223 bnx2x_hilo(&estats->total_multicast_packets_received_hi) +
4224 bnx2x_hilo(&estats->total_broadcast_packets_received_hi);
4226 nstats->tx_packets =
4227 bnx2x_hilo(&estats->total_unicast_packets_transmitted_hi) +
4228 bnx2x_hilo(&estats->total_multicast_packets_transmitted_hi) +
4229 bnx2x_hilo(&estats->total_broadcast_packets_transmitted_hi);
4231 nstats->rx_bytes = bnx2x_hilo(&estats->total_bytes_received_hi);
4233 nstats->tx_bytes = bnx2x_hilo(&estats->total_bytes_transmitted_hi);
4235 nstats->rx_dropped = estats->mac_discard;
4236 for_each_rx_queue(bp, i)
4237 nstats->rx_dropped +=
4238 le32_to_cpu(bp->fp[i].old_tclient.checksum_discard);
4240 nstats->tx_dropped = 0;
4242 nstats->multicast =
4243 bnx2x_hilo(&estats->total_multicast_packets_received_hi);
4245 nstats->collisions =
4246 bnx2x_hilo(&estats->tx_stat_etherstatscollisions_hi);
4248 nstats->rx_length_errors =
4249 bnx2x_hilo(&estats->rx_stat_etherstatsundersizepkts_hi) +
4250 bnx2x_hilo(&estats->etherstatsoverrsizepkts_hi);
4251 nstats->rx_over_errors = bnx2x_hilo(&estats->brb_drop_hi) +
4252 bnx2x_hilo(&estats->brb_truncate_hi);
4253 nstats->rx_crc_errors =
4254 bnx2x_hilo(&estats->rx_stat_dot3statsfcserrors_hi);
4255 nstats->rx_frame_errors =
4256 bnx2x_hilo(&estats->rx_stat_dot3statsalignmenterrors_hi);
4257 nstats->rx_fifo_errors = bnx2x_hilo(&estats->no_buff_discard_hi);
4258 nstats->rx_missed_errors = estats->xxoverflow_discard;
4260 nstats->rx_errors = nstats->rx_length_errors +
4261 nstats->rx_over_errors +
4262 nstats->rx_crc_errors +
4263 nstats->rx_frame_errors +
4264 nstats->rx_fifo_errors +
4265 nstats->rx_missed_errors;
4267 nstats->tx_aborted_errors =
4268 bnx2x_hilo(&estats->tx_stat_dot3statslatecollisions_hi) +
4269 bnx2x_hilo(&estats->tx_stat_dot3statsexcessivecollisions_hi);
4270 nstats->tx_carrier_errors =
4271 bnx2x_hilo(&estats->rx_stat_dot3statscarriersenseerrors_hi);
4272 nstats->tx_fifo_errors = 0;
4273 nstats->tx_heartbeat_errors = 0;
4274 nstats->tx_window_errors = 0;
4276 nstats->tx_errors = nstats->tx_aborted_errors +
4277 nstats->tx_carrier_errors +
4278 bnx2x_hilo(&estats->tx_stat_dot3statsinternalmactransmiterrors_hi);
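/*
 * Note: bnx2x_hilo() folds a _hi/_lo counter pair into one value for
 * struct net_device_stats.  A plausible sketch, assuming the helper is
 * handed a pointer to the _hi word with the _lo word stored right after
 * it:
 *
 *	static inline long bnx2x_hilo(u32 *hiref)
 *	{
 *		u32 lo = *(hiref + 1);
 *	#if (BITS_PER_LONG == 64)
 *		return HILO_U64(*hiref, lo);
 *	#else
 *		return lo;
 *	#endif
 *	}
 *
 * On 32-bit hosts only the low 32 bits can be reported.
 */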
4281 static void bnx2x_drv_stats_update(struct bnx2x *bp)
4283 struct bnx2x_eth_stats *estats = &bp->eth_stats;
4284 int i;
4286 estats->driver_xoff = 0;
4287 estats->rx_err_discard_pkt = 0;
4288 estats->rx_skb_alloc_failed = 0;
4289 estats->hw_csum_err = 0;
4290 for_each_rx_queue(bp, i) {
4291 struct bnx2x_eth_q_stats *qstats = &bp->fp[i].eth_q_stats;
4293 estats->driver_xoff += qstats->driver_xoff;
4294 estats->rx_err_discard_pkt += qstats->rx_err_discard_pkt;
4295 estats->rx_skb_alloc_failed += qstats->rx_skb_alloc_failed;
4296 estats->hw_csum_err += qstats->hw_csum_err;
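/*
 * Driver-level counters (Xoff events, Rx error discards, skb allocation
 * and checksum failures) are kept per fastpath queue; they are zeroed and
 * re-summed on every update so the totals always reflect the current set
 * of queues.
 */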
4300 static void bnx2x_stats_update(struct bnx2x *bp)
4302 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
4304 if (*stats_comp != DMAE_COMP_VAL)
4305 return;
4307 if (bp->port.pmf)
4308 bnx2x_hw_stats_update(bp);
4310 if (bnx2x_storm_stats_update(bp) && (bp->stats_pending++ == 3)) {
4311 BNX2X_ERR("storm stats were not updated for 3 times\n");
4312 bnx2x_panic();
4313 return;
4314 }
4316 bnx2x_net_stats_update(bp);
4317 bnx2x_drv_stats_update(bp);
4319 if (bp->msglevel & NETIF_MSG_TIMER) {
4320 struct bnx2x_fastpath *fp0_rx = bp->fp;
4321 struct bnx2x_fastpath *fp0_tx = &(bp->fp[bp->num_rx_queues]);
4322 struct tstorm_per_client_stats *old_tclient =
4323 &bp->fp->old_tclient;
4324 struct bnx2x_eth_q_stats *qstats = &bp->fp->eth_q_stats;
4325 struct bnx2x_eth_stats *estats = &bp->eth_stats;
4326 struct net_device_stats *nstats = &bp->dev->stats;
4327 int i;
4329 printk(KERN_DEBUG "%s:\n", bp->dev->name);
4330 printk(KERN_DEBUG " tx avail (%4x) tx hc idx (%x)"
4331 " tx pkt (%lx)\n",
4332 bnx2x_tx_avail(fp0_tx),
4333 le16_to_cpu(*fp0_tx->tx_cons_sb), nstats->tx_packets);
4334 printk(KERN_DEBUG " rx usage (%4x) rx hc idx (%x)"
4335 " rx pkt (%lx)\n",
4336 (u16)(le16_to_cpu(*fp0_rx->rx_cons_sb) -
4337 fp0_rx->rx_comp_cons),
4338 le16_to_cpu(*fp0_rx->rx_cons_sb), nstats->rx_packets);
4339 printk(KERN_DEBUG " %s (Xoff events %u) brb drops %u "
4340 "brb truncate %u\n",
4341 (netif_queue_stopped(bp->dev) ? "Xoff" : "Xon"),
4342 qstats->driver_xoff,
4343 estats->brb_drop_lo, estats->brb_truncate_lo);
4344 printk(KERN_DEBUG "tstats: checksum_discard %u "
4345 "packets_too_big_discard %lu no_buff_discard %lu "
4346 "mac_discard %u mac_filter_discard %u "
4347 "xxoverflow_discard %u brb_truncate_discard %u "
4348 "ttl0_discard %u\n",
4349 le32_to_cpu(old_tclient->checksum_discard),
4350 bnx2x_hilo(&qstats->etherstatsoverrsizepkts_hi),
4351 bnx2x_hilo(&qstats->no_buff_discard_hi),
4352 estats->mac_discard, estats->mac_filter_discard,
4353 estats->xxoverflow_discard, estats->brb_truncate_discard,
4354 le32_to_cpu(old_tclient->ttl0_discard));
4356 for_each_queue(bp, i) {
4357 printk(KERN_DEBUG "[%d]: %lu\t%lu\t%lu\n", i,
4358 bnx2x_fp(bp, i, tx_pkt),
4359 bnx2x_fp(bp, i, rx_pkt),
4360 bnx2x_fp(bp, i, rx_calls));
4364 bnx2x_hw_stats_post(bp);
4365 bnx2x_storm_stats_post(bp);
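/*
 * bnx2x_stats_update() is one turn of the periodic statistics crank:
 * consume the completed DMAE transfers (MAC/NIG hardware counters) and
 * the storm firmware snapshots, fold them into eth_stats and the netdev
 * stats, then immediately re-post both queries so fresh data is ready by
 * the next timer tick.  If the storm counters fail to advance for several
 * consecutive ticks the firmware is assumed stuck and the driver bails
 * out through the error path above.
 */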
4368 static void bnx2x_port_stats_stop(struct bnx2x *bp)
4370 struct dmae_command *dmae;
4371 u32 opcode;
4372 int loader_idx = PMF_DMAE_C(bp);
4373 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
4375 bp->executer_idx = 0;
4377 opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
4378 DMAE_CMD_C_ENABLE |
4379 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
4380 #ifdef __BIG_ENDIAN
4381 DMAE_CMD_ENDIANITY_B_DW_SWAP |
4382 #else
4383 DMAE_CMD_ENDIANITY_DW_SWAP |
4384 #endif
4385 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
4386 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
4388 if (bp->port.port_stx) {
4390 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4391 if (bp->func_stx)
4392 dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
4393 else
4394 dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
4395 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
4396 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
4397 dmae->dst_addr_lo = bp->port.port_stx >> 2;
4398 dmae->dst_addr_hi = 0;
4399 dmae->len = sizeof(struct host_port_stats) >> 2;
4400 if (bp->func_stx) {
4401 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
4402 dmae->comp_addr_hi = 0;
4403 dmae->comp_val = 1;
4404 } else {
4405 dmae->comp_addr_lo =
4406 U64_LO(bnx2x_sp_mapping(bp, stats_comp));
4407 dmae->comp_addr_hi =
4408 U64_HI(bnx2x_sp_mapping(bp, stats_comp));
4409 dmae->comp_val = DMAE_COMP_VAL;
4411 *stats_comp = 0;
4412 }
4413 }
4415 if (bp->func_stx) {
4417 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4418 dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
4419 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
4420 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
4421 dmae->dst_addr_lo = bp->func_stx >> 2;
4422 dmae->dst_addr_hi = 0;
4423 dmae->len = sizeof(struct host_func_stats) >> 2;
4424 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
4425 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
4426 dmae->comp_val = DMAE_COMP_VAL;
4428 *stats_comp = 0;
4429 }
4432 static void bnx2x_stats_stop(struct bnx2x *bp)
4434 int update = 0;
4436 bnx2x_stats_comp(bp);
4438 if (bp->port.pmf)
4439 update = (bnx2x_hw_stats_update(bp) == 0);
4441 update |= (bnx2x_storm_stats_update(bp) == 0);
4443 if (update) {
4444 bnx2x_net_stats_update(bp);
4446 if (bp->port.pmf)
4447 bnx2x_port_stats_stop(bp);
4449 bnx2x_hw_stats_post(bp);
4450 bnx2x_stats_comp(bp);
4451 }
4454 static void bnx2x_stats_do_nothing(struct bnx2x *bp)
4458 static const struct {
4459 void (*action)(struct bnx2x *bp);
4460 enum bnx2x_stats_state next_state;
4461 } bnx2x_stats_stm[STATS_STATE_MAX][STATS_EVENT_MAX] = {
4462 /* state event */
4463 {
4464 /* DISABLED PMF */ {bnx2x_stats_pmf_update, STATS_STATE_DISABLED},
4465 /* LINK_UP */ {bnx2x_stats_start, STATS_STATE_ENABLED},
4466 /* UPDATE */ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED},
4467 /* STOP */ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED}
4468 },
4469 {
4470 /* ENABLED PMF */ {bnx2x_stats_pmf_start, STATS_STATE_ENABLED},
4471 /* LINK_UP */ {bnx2x_stats_restart, STATS_STATE_ENABLED},
4472 /* UPDATE */ {bnx2x_stats_update, STATS_STATE_ENABLED},
4473 /* STOP */ {bnx2x_stats_stop, STATS_STATE_DISABLED}
4474 }
4475 };
4477 static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event)
4479 enum bnx2x_stats_state state = bp->stats_state;
4481 bnx2x_stats_stm[state][event].action(bp);
4482 bp->stats_state = bnx2x_stats_stm[state][event].next_state;
4484 /* Make sure the state has been "changed" */
4485 smp_wmb();
4487 if ((event != STATS_EVENT_UPDATE) || (bp->msglevel & NETIF_MSG_TIMER))
4488 DP(BNX2X_MSG_STATS, "state %d -> event %d -> state %d\n",
4489 state, event, bp->stats_state);
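/*
 * Example walk through bnx2x_stats_stm[][]: in STATS_STATE_DISABLED a
 * LINK_UP event runs bnx2x_stats_start() and moves to STATS_STATE_ENABLED;
 * once enabled, every timer-driven UPDATE event runs bnx2x_stats_update()
 * and stays in ENABLED, while a STOP event runs bnx2x_stats_stop() and
 * drops back to DISABLED.  Events that make no sense in a given state map
 * to bnx2x_stats_do_nothing().
 */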
4492 static void bnx2x_port_stats_base_init(struct bnx2x *bp)
4494 struct dmae_command *dmae;
4495 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
4498 if (!bp->port.pmf || !bp->port.port_stx) {
4499 BNX2X_ERR("BUG!\n");
4500 return;
4501 }
4503 bp->executer_idx = 0;
4505 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4506 dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
4507 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
4508 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
4509 #ifdef __BIG_ENDIAN
4510 DMAE_CMD_ENDIANITY_B_DW_SWAP |
4511 #else
4512 DMAE_CMD_ENDIANITY_DW_SWAP |
4513 #endif
4514 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
4515 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
4516 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
4517 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
4518 dmae->dst_addr_lo = bp->port.port_stx >> 2;
4519 dmae->dst_addr_hi = 0;
4520 dmae->len = sizeof(struct host_port_stats) >> 2;
4521 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
4522 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
4523 dmae->comp_val = DMAE_COMP_VAL;
4525 *stats_comp = 0;
4526 bnx2x_hw_stats_post(bp);
4527 bnx2x_stats_comp(bp);
4530 static void bnx2x_func_stats_base_init(struct bnx2x *bp)
4532 int vn, vn_max = IS_E1HMF(bp) ? E1HVN_MAX : E1VN_MAX;
4533 int port = BP_PORT(bp);
4534 int func;
4535 u32 func_stx;
4538 if (!bp->port.pmf || !bp->func_stx) {
4539 BNX2X_ERR("BUG!\n");
4540 return;
4541 }
4543 /* save our func_stx */
4544 func_stx = bp->func_stx;
4546 for (vn = VN_0; vn < vn_max; vn++) {
4547 func = 2*vn + port;
4549 bp->func_stx = SHMEM_RD(bp, func_mb[func].fw_mb_param);
4550 bnx2x_func_stats_init(bp);
4551 bnx2x_hw_stats_post(bp);
4552 bnx2x_stats_comp(bp);
4555 /* restore our func_stx */
4556 bp->func_stx = func_stx;
4559 static void bnx2x_func_stats_base_update(struct bnx2x *bp)
4561 struct dmae_command *dmae = &bp->stats_dmae;
4562 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
4565 if (!bp->func_stx) {
4566 BNX2X_ERR("BUG!\n");
4567 return;
4568 }
4570 bp->executer_idx = 0;
4571 memset(dmae, 0, sizeof(struct dmae_command));
4573 dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
4574 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
4575 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
4576 #ifdef __BIG_ENDIAN
4577 DMAE_CMD_ENDIANITY_B_DW_SWAP |
4578 #else
4579 DMAE_CMD_ENDIANITY_DW_SWAP |
4580 #endif
4581 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
4582 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
4583 dmae->src_addr_lo = bp->func_stx >> 2;
4584 dmae->src_addr_hi = 0;
4585 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats_base));
4586 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats_base));
4587 dmae->len = sizeof(struct host_func_stats) >> 2;
4588 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
4589 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
4590 dmae->comp_val = DMAE_COMP_VAL;
4592 *stats_comp = 0;
4593 bnx2x_hw_stats_post(bp);
4594 bnx2x_stats_comp(bp);
4597 static void bnx2x_stats_init(struct bnx2x *bp)
4599 int port = BP_PORT(bp);
4600 int func = BP_FUNC(bp);
4601 int i;
4603 bp->stats_pending = 0;
4604 bp->executer_idx = 0;
4605 bp->stats_counter = 0;
4607 /* port and func stats for management */
4608 if (!BP_NOMCP(bp)) {
4609 bp->port.port_stx = SHMEM_RD(bp, port_mb[port].port_stx);
4610 bp->func_stx = SHMEM_RD(bp, func_mb[func].fw_mb_param);
4612 } else {
4613 bp->port.port_stx = 0;
4614 bp->func_stx = 0;
4615 }
4616 DP(BNX2X_MSG_STATS, "port_stx 0x%x func_stx 0x%x\n",
4617 bp->port.port_stx, bp->func_stx);
4620 memset(&(bp->port.old_nig_stats), 0, sizeof(struct nig_stats));
4621 bp->port.old_nig_stats.brb_discard =
4622 REG_RD(bp, NIG_REG_STAT0_BRB_DISCARD + port*0x38);
4623 bp->port.old_nig_stats.brb_truncate =
4624 REG_RD(bp, NIG_REG_STAT0_BRB_TRUNCATE + port*0x38);
4625 REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT0 + port*0x50,
4626 &(bp->port.old_nig_stats.egress_mac_pkt0_lo), 2);
4627 REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT1 + port*0x50,
4628 &(bp->port.old_nig_stats.egress_mac_pkt1_lo), 2);
4630 /* function stats */
4631 for_each_queue(bp, i) {
4632 struct bnx2x_fastpath *fp = &bp->fp[i];
4634 memset(&fp->old_tclient, 0,
4635 sizeof(struct tstorm_per_client_stats));
4636 memset(&fp->old_uclient, 0,
4637 sizeof(struct ustorm_per_client_stats));
4638 memset(&fp->old_xclient, 0,
4639 sizeof(struct xstorm_per_client_stats));
4640 memset(&fp->eth_q_stats, 0, sizeof(struct bnx2x_eth_q_stats));
4643 memset(&bp->dev->stats, 0, sizeof(struct net_device_stats));
4644 memset(&bp->eth_stats, 0, sizeof(struct bnx2x_eth_stats));
4646 bp->stats_state = STATS_STATE_DISABLED;
4648 if (bp->port.pmf) {
4649 if (bp->port.port_stx)
4650 bnx2x_port_stats_base_init(bp);
4652 if (bp->func_stx)
4653 bnx2x_func_stats_base_init(bp);
4655 } else if (bp->func_stx)
4656 bnx2x_func_stats_base_update(bp);
4659 static void bnx2x_timer(unsigned long data)
4661 struct bnx2x *bp = (struct bnx2x *) data;
4663 if (!netif_running(bp->dev))
4664 return;
4666 if (atomic_read(&bp->intr_sem) != 0)
4667 goto timer_restart;
4669 if (poll) {
4670 struct bnx2x_fastpath *fp = &bp->fp[0];
4671 int rc;
4673 bnx2x_tx_int(fp);
4674 rc = bnx2x_rx_int(fp, 1000);
4675 }
4677 if (!BP_NOMCP(bp)) {
4678 int func = BP_FUNC(bp);
4679 u32 drv_pulse;
4680 u32 mcp_pulse;
4682 ++bp->fw_drv_pulse_wr_seq;
4683 bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK;
4684 /* TBD - add SYSTEM_TIME */
4685 drv_pulse = bp->fw_drv_pulse_wr_seq;
4686 SHMEM_WR(bp, func_mb[func].drv_pulse_mb, drv_pulse);
4688 mcp_pulse = (SHMEM_RD(bp, func_mb[func].mcp_pulse_mb) &
4689 MCP_PULSE_SEQ_MASK);
4690 /* The delta between driver pulse and mcp response
4691 * should be 1 (before mcp response) or 0 (after mcp response)
4692 */
4693 if ((drv_pulse != mcp_pulse) &&
4694 (drv_pulse != ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK))) {
4695 /* someone lost a heartbeat... */
4696 BNX2X_ERR("drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
4697 drv_pulse, mcp_pulse);
4701 if (bp->state == BNX2X_STATE_OPEN)
4702 bnx2x_stats_handle(bp, STATS_EVENT_UPDATE);
4704 timer_restart:
4705 mod_timer(&bp->timer, jiffies + bp->current_interval);
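/*
 * Heartbeat arithmetic: the driver pulse is a free-running sequence
 * number (masked with DRV_PULSE_SEQ_MASK) written to shmem on every tick,
 * which the MCP echoes back.  If the driver just wrote 0x23, a healthy
 * echo is either 0x23 (already reflected) or 0x22 (reflection pending,
 * i.e. drv_pulse == mcp_pulse + 1); anything else means a missed beat and
 * is logged above.
 */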
4708 /* end of Statistics */
4713 * nic init service functions
4716 static void bnx2x_zero_sb(struct bnx2x *bp, int sb_id)
4718 int port = BP_PORT(bp);
4721 bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
4722 CSTORM_SB_HOST_STATUS_BLOCK_U_OFFSET(port, sb_id), 0,
4723 CSTORM_SB_STATUS_BLOCK_U_SIZE / 4);
4724 bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
4725 CSTORM_SB_HOST_STATUS_BLOCK_C_OFFSET(port, sb_id), 0,
4726 CSTORM_SB_STATUS_BLOCK_C_SIZE / 4);
4729 static void bnx2x_init_sb(struct bnx2x *bp, struct host_status_block *sb,
4730 dma_addr_t mapping, int sb_id)
4732 int port = BP_PORT(bp);
4733 int func = BP_FUNC(bp);
4734 int index;
4735 u64 section;
4738 section = ((u64)mapping) + offsetof(struct host_status_block,
4739 u_status_block);
4740 sb->u_status_block.status_block_id = sb_id;
4742 REG_WR(bp, BAR_CSTRORM_INTMEM +
4743 CSTORM_SB_HOST_SB_ADDR_U_OFFSET(port, sb_id), U64_LO(section));
4744 REG_WR(bp, BAR_CSTRORM_INTMEM +
4745 ((CSTORM_SB_HOST_SB_ADDR_U_OFFSET(port, sb_id)) + 4),
4746 U64_HI(section));
4747 REG_WR8(bp, BAR_CSTRORM_INTMEM + FP_USB_FUNC_OFF +
4748 CSTORM_SB_HOST_STATUS_BLOCK_U_OFFSET(port, sb_id), func);
4750 for (index = 0; index < HC_USTORM_SB_NUM_INDICES; index++)
4751 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4752 CSTORM_SB_HC_DISABLE_U_OFFSET(port, sb_id, index), 1);
4755 section = ((u64)mapping) + offsetof(struct host_status_block,
4756 c_status_block);
4757 sb->c_status_block.status_block_id = sb_id;
4759 REG_WR(bp, BAR_CSTRORM_INTMEM +
4760 CSTORM_SB_HOST_SB_ADDR_C_OFFSET(port, sb_id), U64_LO(section));
4761 REG_WR(bp, BAR_CSTRORM_INTMEM +
4762 ((CSTORM_SB_HOST_SB_ADDR_C_OFFSET(port, sb_id)) + 4),
4763 U64_HI(section));
4764 REG_WR8(bp, BAR_CSTRORM_INTMEM + FP_CSB_FUNC_OFF +
4765 CSTORM_SB_HOST_STATUS_BLOCK_C_OFFSET(port, sb_id), func);
4767 for (index = 0; index < HC_CSTORM_SB_NUM_INDICES; index++)
4768 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4769 CSTORM_SB_HC_DISABLE_C_OFFSET(port, sb_id, index), 1);
4771 bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
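/*
 * Each non-default status block has two halves, both hosted in CSTORM
 * internal memory: a USTORM-maintained part (Rx indices) and a
 * CSTORM-maintained part (Tx indices).  bnx2x_init_sb() points both
 * halves at the host DMA buffer, tags them with the owning function, and
 * then writes 1 to every per-index HC "disable" word so no coalescing
 * happens until bnx2x_update_coalesce() programs real timeouts.
 */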
4774 static void bnx2x_zero_def_sb(struct bnx2x *bp)
4776 int func = BP_FUNC(bp);
4778 bnx2x_init_fill(bp, TSEM_REG_FAST_MEMORY +
4779 TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4780 sizeof(struct tstorm_def_status_block)/4);
4781 bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
4782 CSTORM_DEF_SB_HOST_STATUS_BLOCK_U_OFFSET(func), 0,
4783 sizeof(struct cstorm_def_status_block_u)/4);
4784 bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
4785 CSTORM_DEF_SB_HOST_STATUS_BLOCK_C_OFFSET(func), 0,
4786 sizeof(struct cstorm_def_status_block_c)/4);
4787 bnx2x_init_fill(bp, XSEM_REG_FAST_MEMORY +
4788 XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4789 sizeof(struct xstorm_def_status_block)/4);
4792 static void bnx2x_init_def_sb(struct bnx2x *bp,
4793 struct host_def_status_block *def_sb,
4794 dma_addr_t mapping, int sb_id)
4796 int port = BP_PORT(bp);
4797 int func = BP_FUNC(bp);
4798 int index, val, reg_offset;
4799 u64 section;
4802 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4803 atten_status_block);
4804 def_sb->atten_status_block.status_block_id = sb_id;
4806 bp->attn_state = 0;
4808 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
4809 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
4811 for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
4812 bp->attn_group[index].sig[0] = REG_RD(bp,
4813 reg_offset + 0x10*index);
4814 bp->attn_group[index].sig[1] = REG_RD(bp,
4815 reg_offset + 0x4 + 0x10*index);
4816 bp->attn_group[index].sig[2] = REG_RD(bp,
4817 reg_offset + 0x8 + 0x10*index);
4818 bp->attn_group[index].sig[3] = REG_RD(bp,
4819 reg_offset + 0xc + 0x10*index);
4822 reg_offset = (port ? HC_REG_ATTN_MSG1_ADDR_L :
4823 HC_REG_ATTN_MSG0_ADDR_L);
4825 REG_WR(bp, reg_offset, U64_LO(section));
4826 REG_WR(bp, reg_offset + 4, U64_HI(section));
4828 reg_offset = (port ? HC_REG_ATTN_NUM_P1 : HC_REG_ATTN_NUM_P0);
4830 val = REG_RD(bp, reg_offset);
4831 val |= sb_id;
4832 REG_WR(bp, reg_offset, val);
4835 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4836 u_def_status_block);
4837 def_sb->u_def_status_block.status_block_id = sb_id;
4839 REG_WR(bp, BAR_CSTRORM_INTMEM +
4840 CSTORM_DEF_SB_HOST_SB_ADDR_U_OFFSET(func), U64_LO(section));
4841 REG_WR(bp, BAR_CSTRORM_INTMEM +
4842 ((CSTORM_DEF_SB_HOST_SB_ADDR_U_OFFSET(func)) + 4),
4843 U64_HI(section));
4844 REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_USB_FUNC_OFF +
4845 CSTORM_DEF_SB_HOST_STATUS_BLOCK_U_OFFSET(func), func);
4847 for (index = 0; index < HC_USTORM_DEF_SB_NUM_INDICES; index++)
4848 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4849 CSTORM_DEF_SB_HC_DISABLE_U_OFFSET(func, index), 1);
4852 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4853 c_def_status_block);
4854 def_sb->c_def_status_block.status_block_id = sb_id;
4856 REG_WR(bp, BAR_CSTRORM_INTMEM +
4857 CSTORM_DEF_SB_HOST_SB_ADDR_C_OFFSET(func), U64_LO(section));
4858 REG_WR(bp, BAR_CSTRORM_INTMEM +
4859 ((CSTORM_DEF_SB_HOST_SB_ADDR_C_OFFSET(func)) + 4),
4860 U64_HI(section));
4861 REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_CSB_FUNC_OFF +
4862 CSTORM_DEF_SB_HOST_STATUS_BLOCK_C_OFFSET(func), func);
4864 for (index = 0; index < HC_CSTORM_DEF_SB_NUM_INDICES; index++)
4865 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4866 CSTORM_DEF_SB_HC_DISABLE_C_OFFSET(func, index), 1);
4869 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4870 t_def_status_block);
4871 def_sb->t_def_status_block.status_block_id = sb_id;
4873 REG_WR(bp, BAR_TSTRORM_INTMEM +
4874 TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
4875 REG_WR(bp, BAR_TSTRORM_INTMEM +
4876 ((TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
4877 U64_HI(section));
4878 REG_WR8(bp, BAR_TSTRORM_INTMEM + DEF_TSB_FUNC_OFF +
4879 TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4881 for (index = 0; index < HC_TSTORM_DEF_SB_NUM_INDICES; index++)
4882 REG_WR16(bp, BAR_TSTRORM_INTMEM +
4883 TSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
4886 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4887 x_def_status_block);
4888 def_sb->x_def_status_block.status_block_id = sb_id;
4890 REG_WR(bp, BAR_XSTRORM_INTMEM +
4891 XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
4892 REG_WR(bp, BAR_XSTRORM_INTMEM +
4893 ((XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
4894 U64_HI(section));
4895 REG_WR8(bp, BAR_XSTRORM_INTMEM + DEF_XSB_FUNC_OFF +
4896 XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4898 for (index = 0; index < HC_XSTORM_DEF_SB_NUM_INDICES; index++)
4899 REG_WR16(bp, BAR_XSTRORM_INTMEM +
4900 XSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
4902 bp->stats_pending = 0;
4903 bp->set_mac_pending = 0;
4905 bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
4908 static void bnx2x_update_coalesce(struct bnx2x *bp)
4910 int port = BP_PORT(bp);
4913 for_each_queue(bp, i) {
4914 int sb_id = bp->fp[i].sb_id;
4916 /* HC_INDEX_U_ETH_RX_CQ_CONS */
4917 REG_WR8(bp, BAR_CSTRORM_INTMEM +
4918 CSTORM_SB_HC_TIMEOUT_U_OFFSET(port, sb_id,
4919 U_SB_ETH_RX_CQ_INDEX),
4920 bp->rx_ticks/12);
4921 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4922 CSTORM_SB_HC_DISABLE_U_OFFSET(port, sb_id,
4923 U_SB_ETH_RX_CQ_INDEX),
4924 (bp->rx_ticks/12) ? 0 : 1);
4926 /* HC_INDEX_C_ETH_TX_CQ_CONS */
4927 REG_WR8(bp, BAR_CSTRORM_INTMEM +
4928 CSTORM_SB_HC_TIMEOUT_C_OFFSET(port, sb_id,
4929 C_SB_ETH_TX_CQ_INDEX),
4930 bp->tx_ticks/12);
4931 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4932 CSTORM_SB_HC_DISABLE_C_OFFSET(port, sb_id,
4933 C_SB_ETH_TX_CQ_INDEX),
4934 (bp->tx_ticks/12) ? 0 : 1);
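/*
 * Coalescing example: rx_ticks/tx_ticks are in microseconds while the HC
 * timeout field evidently counts in 12-usec units, hence the /12 above.
 * rx_ticks = 25 programs a timeout of 2 (~24us); any value below 12
 * yields 0, in which case the second write flags the index as disabled
 * (the "(ticks/12) ? 0 : 1" expression) and no coalescing is applied.
 */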
4938 static inline void bnx2x_free_tpa_pool(struct bnx2x *bp,
4939 struct bnx2x_fastpath *fp, int last)
4943 for (i = 0; i < last; i++) {
4944 struct sw_rx_bd *rx_buf = &(fp->tpa_pool[i]);
4945 struct sk_buff *skb = rx_buf->skb;
4947 if (skb == NULL) {
4948 DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i);
4949 continue;
4950 }
4952 if (fp->tpa_state[i] == BNX2X_TPA_START)
4953 pci_unmap_single(bp->pdev,
4954 pci_unmap_addr(rx_buf, mapping),
4955 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
4957 dev_kfree_skb(skb);
4958 rx_buf->skb = NULL;
4959 }
4962 static void bnx2x_init_rx_rings(struct bnx2x *bp)
4964 int func = BP_FUNC(bp);
4965 int max_agg_queues = CHIP_IS_E1(bp) ? ETH_MAX_AGGREGATION_QUEUES_E1 :
4966 ETH_MAX_AGGREGATION_QUEUES_E1H;
4967 u16 ring_prod, cqe_ring_prod;
4970 bp->rx_buf_size = bp->dev->mtu + ETH_OVREHEAD + BNX2X_RX_ALIGN;
4971 DP(NETIF_MSG_IFUP,
4972 "mtu %d rx_buf_size %d\n", bp->dev->mtu, bp->rx_buf_size);
4974 if (bp->flags & TPA_ENABLE_FLAG) {
4976 for_each_rx_queue(bp, j) {
4977 struct bnx2x_fastpath *fp = &bp->fp[j];
4979 for (i = 0; i < max_agg_queues; i++) {
4980 fp->tpa_pool[i].skb =
4981 netdev_alloc_skb(bp->dev, bp->rx_buf_size);
4982 if (!fp->tpa_pool[i].skb) {
4983 BNX2X_ERR("Failed to allocate TPA "
4984 "skb pool for queue[%d] - "
4985 "disabling TPA on this "
4986 "queue!\n", j);
4987 bnx2x_free_tpa_pool(bp, fp, i);
4988 fp->disable_tpa = 1;
4989 break;
4990 }
4991 pci_unmap_addr_set((struct sw_rx_bd *)
4992 &bp->fp->tpa_pool[i],
4993 mapping, 0);
4994 fp->tpa_state[i] = BNX2X_TPA_STOP;
4999 for_each_rx_queue(bp, j) {
5000 struct bnx2x_fastpath *fp = &bp->fp[j];
5003 fp->rx_cons_sb = BNX2X_RX_SB_INDEX;
5004 fp->rx_bd_cons_sb = BNX2X_RX_SB_BD_INDEX;
5006 /* Mark queue as Rx */
5007 fp->is_rx_queue = 1;
5009 /* "next page" elements initialization */
5011 for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
5012 struct eth_rx_sge *sge;
5014 sge = &fp->rx_sge_ring[RX_SGE_CNT * i - 2];
5015 sge->addr_hi =
5016 cpu_to_le32(U64_HI(fp->rx_sge_mapping +
5017 BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
5018 sge->addr_lo =
5019 cpu_to_le32(U64_LO(fp->rx_sge_mapping +
5020 BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
5023 bnx2x_init_sge_ring_bit_mask(fp);
5026 for (i = 1; i <= NUM_RX_RINGS; i++) {
5027 struct eth_rx_bd *rx_bd;
5029 rx_bd = &fp->rx_desc_ring[RX_DESC_CNT * i - 2];
5030 rx_bd->addr_hi =
5031 cpu_to_le32(U64_HI(fp->rx_desc_mapping +
5032 BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
5033 rx_bd->addr_lo =
5034 cpu_to_le32(U64_LO(fp->rx_desc_mapping +
5035 BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
5039 for (i = 1; i <= NUM_RCQ_RINGS; i++) {
5040 struct eth_rx_cqe_next_page *nextpg;
5042 nextpg = (struct eth_rx_cqe_next_page *)
5043 &fp->rx_comp_ring[RCQ_DESC_CNT * i - 1];
5044 nextpg->addr_hi =
5045 cpu_to_le32(U64_HI(fp->rx_comp_mapping +
5046 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
5047 nextpg->addr_lo =
5048 cpu_to_le32(U64_LO(fp->rx_comp_mapping +
5049 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
5052 /* Allocate SGEs and initialize the ring elements */
5053 for (i = 0, ring_prod = 0;
5054 i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {
5056 if (bnx2x_alloc_rx_sge(bp, fp, ring_prod) < 0) {
5057 BNX2X_ERR("was only able to allocate "
5058 "%d rx sges\n", i);
5059 BNX2X_ERR("disabling TPA for queue[%d]\n", j);
5060 /* Cleanup already allocated elements */
5061 bnx2x_free_rx_sge_range(bp, fp, ring_prod);
5062 bnx2x_free_tpa_pool(bp, fp, max_agg_queues);
5063 fp->disable_tpa = 1;
5067 ring_prod = NEXT_SGE_IDX(ring_prod);
5069 fp->rx_sge_prod = ring_prod;
5071 /* Allocate BDs and initialize BD ring */
5072 fp->rx_comp_cons = 0;
5073 cqe_ring_prod = ring_prod = 0;
5074 for (i = 0; i < bp->rx_ring_size; i++) {
5075 if (bnx2x_alloc_rx_skb(bp, fp, ring_prod) < 0) {
5076 BNX2X_ERR("was only able to allocate "
5077 "%d rx skbs on queue[%d]\n", i, j);
5078 fp->eth_q_stats.rx_skb_alloc_failed++;
5081 ring_prod = NEXT_RX_IDX(ring_prod);
5082 cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
5083 WARN_ON(ring_prod <= i);
5086 fp->rx_bd_prod = ring_prod;
5087 /* must not have more available CQEs than BDs */
5088 fp->rx_comp_prod = min((u16)(NUM_RCQ_RINGS*RCQ_DESC_CNT),
5089 cqe_ring_prod);
5090 fp->rx_pkt = fp->rx_calls = 0;
5092 /* Warning!
5093 * this will generate an interrupt (to the TSTORM)
5094 * must only be done after chip is initialized
5095 */
5096 bnx2x_update_rx_prod(bp, fp, ring_prod, fp->rx_comp_prod,
5097 fp->rx_sge_prod);
5101 REG_WR(bp, BAR_USTRORM_INTMEM +
5102 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
5103 U64_LO(fp->rx_comp_mapping));
5104 REG_WR(bp, BAR_USTRORM_INTMEM +
5105 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
5106 U64_HI(fp->rx_comp_mapping));
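/*
 * Ring layout note: the SGE, Rx BD and RCQ rings are each an array of
 * BCM_PAGE_SIZE pages chained into a circle - the final entries of every
 * page carry the U64_HI/U64_LO DMA address of the following page instead
 * of a real descriptor, which is what the "next page" loops above set up.
 * The NEXT_*_IDX() producer macros skip over those reserved slots.
 */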
5110 static void bnx2x_init_tx_ring(struct bnx2x *bp)
5114 for_each_tx_queue(bp, j) {
5115 struct bnx2x_fastpath *fp = &bp->fp[j];
5117 for (i = 1; i <= NUM_TX_RINGS; i++) {
5118 struct eth_tx_next_bd *tx_next_bd =
5119 &fp->tx_desc_ring[TX_DESC_CNT * i - 1].next_bd;
5121 tx_next_bd->addr_hi =
5122 cpu_to_le32(U64_HI(fp->tx_desc_mapping +
5123 BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
5124 tx_next_bd->addr_lo =
5125 cpu_to_le32(U64_LO(fp->tx_desc_mapping +
5126 BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
5129 fp->tx_db.data.header.header = DOORBELL_HDR_DB_TYPE;
5130 fp->tx_db.data.zero_fill1 = 0;
5131 fp->tx_db.data.prod = 0;
5133 fp->tx_pkt_prod = 0;
5134 fp->tx_pkt_cons = 0;
5137 fp->tx_cons_sb = BNX2X_TX_SB_INDEX;
5141 /* clean tx statistics */
5142 for_each_rx_queue(bp, i)
5143 bnx2x_fp(bp, i, tx_pkt) = 0;
5146 static void bnx2x_init_sp_ring(struct bnx2x *bp)
5148 int func = BP_FUNC(bp);
5150 spin_lock_init(&bp->spq_lock);
5152 bp->spq_left = MAX_SPQ_PENDING;
5153 bp->spq_prod_idx = 0;
5154 bp->dsb_sp_prod = BNX2X_SP_DSB_INDEX;
5155 bp->spq_prod_bd = bp->spq;
5156 bp->spq_last_bd = bp->spq_prod_bd + MAX_SP_DESC_CNT;
5158 REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func),
5159 U64_LO(bp->spq_mapping));
5160 REG_WR(bp,
5161 XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func) + 4,
5162 U64_HI(bp->spq_mapping));
5164 REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PROD_OFFSET(func),
5165 bp->spq_prod_idx);
5168 static void bnx2x_init_context(struct bnx2x *bp)
5172 for_each_rx_queue(bp, i) {
5173 struct eth_context *context = bnx2x_sp(bp, context[i].eth);
5174 struct bnx2x_fastpath *fp = &bp->fp[i];
5175 u8 cl_id = fp->cl_id;
5177 context->ustorm_st_context.common.sb_index_numbers =
5178 BNX2X_RX_SB_INDEX_NUM;
5179 context->ustorm_st_context.common.clientId = cl_id;
5180 context->ustorm_st_context.common.status_block_id = fp->sb_id;
5181 context->ustorm_st_context.common.flags =
5182 (USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_MC_ALIGNMENT |
5183 USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_STATISTICS);
5184 context->ustorm_st_context.common.statistics_counter_id =
5185 cl_id;
5186 context->ustorm_st_context.common.mc_alignment_log_size =
5187 BNX2X_RX_ALIGN_SHIFT;
5188 context->ustorm_st_context.common.bd_buff_size =
5189 bp->rx_buf_size;
5190 context->ustorm_st_context.common.bd_page_base_hi =
5191 U64_HI(fp->rx_desc_mapping);
5192 context->ustorm_st_context.common.bd_page_base_lo =
5193 U64_LO(fp->rx_desc_mapping);
5194 if (!fp->disable_tpa) {
5195 context->ustorm_st_context.common.flags |=
5196 USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_TPA;
5197 context->ustorm_st_context.common.sge_buff_size =
5198 (u16)min((u32)SGE_PAGE_SIZE*PAGES_PER_SGE,
5199 (u32)0xffff);
5200 context->ustorm_st_context.common.sge_page_base_hi =
5201 U64_HI(fp->rx_sge_mapping);
5202 context->ustorm_st_context.common.sge_page_base_lo =
5203 U64_LO(fp->rx_sge_mapping);
5205 context->ustorm_st_context.common.max_sges_for_packet =
5206 SGE_PAGE_ALIGN(bp->dev->mtu) >> SGE_PAGE_SHIFT;
5207 context->ustorm_st_context.common.max_sges_for_packet =
5208 ((context->ustorm_st_context.common.
5209 max_sges_for_packet + PAGES_PER_SGE - 1) &
5210 (~(PAGES_PER_SGE - 1))) >> PAGES_PER_SGE_SHIFT;
5213 context->ustorm_ag_context.cdu_usage =
5214 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
5215 CDU_REGION_NUMBER_UCM_AG,
5216 ETH_CONNECTION_TYPE);
5218 context->xstorm_ag_context.cdu_reserved =
5219 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
5220 CDU_REGION_NUMBER_XCM_AG,
5221 ETH_CONNECTION_TYPE);
5224 for_each_tx_queue(bp, i) {
5225 struct bnx2x_fastpath *fp = &bp->fp[i];
5226 struct eth_context *context =
5227 bnx2x_sp(bp, context[i - bp->num_rx_queues].eth);
5229 context->cstorm_st_context.sb_index_number =
5230 C_SB_ETH_TX_CQ_INDEX;
5231 context->cstorm_st_context.status_block_id = fp->sb_id;
5233 context->xstorm_st_context.tx_bd_page_base_hi =
5234 U64_HI(fp->tx_desc_mapping);
5235 context->xstorm_st_context.tx_bd_page_base_lo =
5236 U64_LO(fp->tx_desc_mapping);
5237 context->xstorm_st_context.statistics_data = (fp->cl_id |
5238 XSTORM_ETH_ST_CONTEXT_STATISTICS_ENABLE);
5242 static void bnx2x_init_ind_table(struct bnx2x *bp)
5244 int func = BP_FUNC(bp);
5245 int i;
5247 if (bp->multi_mode == ETH_RSS_MODE_DISABLED)
5248 return;
5250 DP(NETIF_MSG_IFUP,
5251 "Initializing indirection table multi_mode %d\n", bp->multi_mode);
5252 for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++)
5253 REG_WR8(bp, BAR_TSTRORM_INTMEM +
5254 TSTORM_INDIRECTION_TABLE_OFFSET(func) + i,
5255 bp->fp->cl_id + (i % bp->num_rx_queues));
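/*
 * RSS indirection example: with four Rx queues and a leading client id of
 * 0, the loop above fills the TSTORM indirection table with the repeating
 * pattern 0,1,2,3,0,1,2,3,... so hash buckets are spread round-robin
 * across the Rx clients.
 */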
5258 static void bnx2x_set_client_config(struct bnx2x *bp)
5260 struct tstorm_eth_client_config tstorm_client = {0};
5261 int port = BP_PORT(bp);
5262 int i;
5264 tstorm_client.mtu = bp->dev->mtu;
5265 tstorm_client.config_flags =
5266 (TSTORM_ETH_CLIENT_CONFIG_STATSITICS_ENABLE |
5267 TSTORM_ETH_CLIENT_CONFIG_E1HOV_REM_ENABLE);
5269 if (bp->rx_mode && bp->vlgrp && (bp->flags & HW_VLAN_RX_FLAG)) {
5270 tstorm_client.config_flags |=
5271 TSTORM_ETH_CLIENT_CONFIG_VLAN_REM_ENABLE;
5272 DP(NETIF_MSG_IFUP, "vlan removal enabled\n");
5276 for_each_queue(bp, i) {
5277 tstorm_client.statistics_counter_id = bp->fp[i].cl_id;
5279 REG_WR(bp, BAR_TSTRORM_INTMEM +
5280 TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id),
5281 ((u32 *)&tstorm_client)[0]);
5282 REG_WR(bp, BAR_TSTRORM_INTMEM +
5283 TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id) + 4,
5284 ((u32 *)&tstorm_client)[1]);
5287 DP(BNX2X_MSG_OFF, "tstorm_client: 0x%08x 0x%08x\n",
5288 ((u32 *)&tstorm_client)[0], ((u32 *)&tstorm_client)[1]);
5291 static void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
5293 struct tstorm_eth_mac_filter_config tstorm_mac_filter = {0};
5294 int mode = bp->rx_mode;
5295 int mask = bp->rx_mode_cl_mask;
5296 int func = BP_FUNC(bp);
5297 int port = BP_PORT(bp);
5298 int i;
5299 /* All but management unicast packets should pass to the host as well */
5300 u32 llh_mask =
5301 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_BRCST |
5302 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_MLCST |
5303 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_VLAN |
5304 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_NO_VLAN;
5306 DP(NETIF_MSG_IFUP, "rx mode %d mask 0x%x\n", mode, mask);
5308 switch (mode) {
5309 case BNX2X_RX_MODE_NONE: /* no Rx */
5310 tstorm_mac_filter.ucast_drop_all = mask;
5311 tstorm_mac_filter.mcast_drop_all = mask;
5312 tstorm_mac_filter.bcast_drop_all = mask;
5313 break;
5315 case BNX2X_RX_MODE_NORMAL:
5316 tstorm_mac_filter.bcast_accept_all = mask;
5317 break;
5319 case BNX2X_RX_MODE_ALLMULTI:
5320 tstorm_mac_filter.mcast_accept_all = mask;
5321 tstorm_mac_filter.bcast_accept_all = mask;
5322 break;
5324 case BNX2X_RX_MODE_PROMISC:
5325 tstorm_mac_filter.ucast_accept_all = mask;
5326 tstorm_mac_filter.mcast_accept_all = mask;
5327 tstorm_mac_filter.bcast_accept_all = mask;
5328 /* pass management unicast packets as well */
5329 llh_mask |= NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_UNCST;
5330 break;
5332 default:
5333 BNX2X_ERR("BAD rx mode (%d)\n", mode);
5334 break;
5335 }
5337 REG_WR(bp,
5338 (port ? NIG_REG_LLH1_BRB1_DRV_MASK : NIG_REG_LLH0_BRB1_DRV_MASK),
5339 llh_mask);
5341 for (i = 0; i < sizeof(struct tstorm_eth_mac_filter_config)/4; i++) {
5342 REG_WR(bp, BAR_TSTRORM_INTMEM +
5343 TSTORM_MAC_FILTER_CONFIG_OFFSET(func) + i * 4,
5344 ((u32 *)&tstorm_mac_filter)[i]);
5346 /* DP(NETIF_MSG_IFUP, "tstorm_mac_filter[%d]: 0x%08x\n", i,
5347 ((u32 *)&tstorm_mac_filter)[i]); */
5350 if (mode != BNX2X_RX_MODE_NONE)
5351 bnx2x_set_client_config(bp);
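/*
 * The rx-mode cases above map straight onto per-client drop-all/accept-all
 * bits in the TSTORM MAC filter config: NONE drops everything for the
 * client mask, NORMAL leaves filtering to the MAC/multicast CAM and only
 * force-accepts broadcast, ALLMULTI additionally accepts all multicast,
 * and PROMISC accepts everything - and also opens the NIG LLH mask so
 * management unicast frames reach the host.
 */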
5354 static void bnx2x_init_internal_common(struct bnx2x *bp)
5356 int i;
5358 /* Zero this manually as its initialization is
5359 currently missing in the initTool */
5360 for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++)
5361 REG_WR(bp, BAR_USTRORM_INTMEM +
5362 USTORM_AGG_DATA_OFFSET + i * 4, 0);
5365 static void bnx2x_init_internal_port(struct bnx2x *bp)
5367 int port = BP_PORT(bp);
5369 REG_WR(bp,
5370 BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_U_OFFSET(port), BNX2X_BTR);
5371 REG_WR(bp,
5372 BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_C_OFFSET(port), BNX2X_BTR);
5373 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
5374 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
5377 static void bnx2x_init_internal_func(struct bnx2x *bp)
5379 struct tstorm_eth_function_common_config tstorm_config = {0};
5380 struct stats_indication_flags stats_flags = {0};
5381 int port = BP_PORT(bp);
5382 int func = BP_FUNC(bp);
5383 int i, j;
5384 u32 offset;
5385 u16 max_agg_size;
5387 if (is_multi(bp)) {
5388 tstorm_config.config_flags = MULTI_FLAGS(bp);
5389 tstorm_config.rss_result_mask = MULTI_MASK;
5390 }
5392 /* Enable TPA if needed */
5393 if (bp->flags & TPA_ENABLE_FLAG)
5394 tstorm_config.config_flags |=
5395 TSTORM_ETH_FUNCTION_COMMON_CONFIG_ENABLE_TPA;
5397 if (IS_E1HMF(bp))
5398 tstorm_config.config_flags |=
5399 TSTORM_ETH_FUNCTION_COMMON_CONFIG_E1HOV_IN_CAM;
5401 tstorm_config.leading_client_id = BP_L_ID(bp);
5403 REG_WR(bp, BAR_TSTRORM_INTMEM +
5404 TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(func),
5405 (*(u32 *)&tstorm_config));
5407 bp->rx_mode = BNX2X_RX_MODE_NONE; /* no rx until link is up */
5408 bp->rx_mode_cl_mask = (1 << BP_L_ID(bp));
5409 bnx2x_set_storm_rx_mode(bp);
5411 for_each_queue(bp, i) {
5412 u8 cl_id = bp->fp[i].cl_id;
5414 /* reset xstorm per client statistics */
5415 offset = BAR_XSTRORM_INTMEM +
5416 XSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
5417 for (j = 0;
5418 j < sizeof(struct xstorm_per_client_stats) / 4; j++)
5419 REG_WR(bp, offset + j*4, 0);
5421 /* reset tstorm per client statistics */
5422 offset = BAR_TSTRORM_INTMEM +
5423 TSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
5424 for (j = 0;
5425 j < sizeof(struct tstorm_per_client_stats) / 4; j++)
5426 REG_WR(bp, offset + j*4, 0);
5428 /* reset ustorm per client statistics */
5429 offset = BAR_USTRORM_INTMEM +
5430 USTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
5431 for (j = 0;
5432 j < sizeof(struct ustorm_per_client_stats) / 4; j++)
5433 REG_WR(bp, offset + j*4, 0);
5436 /* Init statistics related context */
5437 stats_flags.collect_eth = 1;
5439 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func),
5440 ((u32 *)&stats_flags)[0]);
5441 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func) + 4,
5442 ((u32 *)&stats_flags)[1]);
5444 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func),
5445 ((u32 *)&stats_flags)[0]);
5446 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func) + 4,
5447 ((u32 *)&stats_flags)[1]);
5449 REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(func),
5450 ((u32 *)&stats_flags)[0]);
5451 REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(func) + 4,
5452 ((u32 *)&stats_flags)[1]);
5454 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func),
5455 ((u32 *)&stats_flags)[0]);
5456 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func) + 4,
5457 ((u32 *)&stats_flags)[1]);
5459 REG_WR(bp, BAR_XSTRORM_INTMEM +
5460 XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
5461 U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
5462 REG_WR(bp, BAR_XSTRORM_INTMEM +
5463 XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
5464 U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
5466 REG_WR(bp, BAR_TSTRORM_INTMEM +
5467 TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
5468 U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
5469 REG_WR(bp, BAR_TSTRORM_INTMEM +
5470 TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
5471 U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
5473 REG_WR(bp, BAR_USTRORM_INTMEM +
5474 USTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
5475 U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
5476 REG_WR(bp, BAR_USTRORM_INTMEM +
5477 USTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
5478 U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
5480 if (CHIP_IS_E1H(bp)) {
5481 REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNCTION_MODE_OFFSET,
5482 IS_E1HMF(bp));
5483 REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNCTION_MODE_OFFSET,
5484 IS_E1HMF(bp));
5485 REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNCTION_MODE_OFFSET,
5486 IS_E1HMF(bp));
5487 REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNCTION_MODE_OFFSET,
5488 IS_E1HMF(bp));
5490 REG_WR16(bp, BAR_XSTRORM_INTMEM + XSTORM_E1HOV_OFFSET(func),
5491 bp->e1hov);
5492 }
5494 /* Init CQ ring mapping and aggregation size, the FW limit is 8 frags */
5495 max_agg_size =
5496 min((u32)(min((u32)8, (u32)MAX_SKB_FRAGS) *
5497 SGE_PAGE_SIZE * PAGES_PER_SGE),
5498 (u32)0xffff);
5499 for_each_rx_queue(bp, i) {
5500 struct bnx2x_fastpath *fp = &bp->fp[i];
5502 REG_WR(bp, BAR_USTRORM_INTMEM +
5503 USTORM_CQE_PAGE_BASE_OFFSET(port, fp->cl_id),
5504 U64_LO(fp->rx_comp_mapping));
5505 REG_WR(bp, BAR_USTRORM_INTMEM +
5506 USTORM_CQE_PAGE_BASE_OFFSET(port, fp->cl_id) + 4,
5507 U64_HI(fp->rx_comp_mapping));
5510 REG_WR(bp, BAR_USTRORM_INTMEM +
5511 USTORM_CQE_PAGE_NEXT_OFFSET(port, fp->cl_id),
5512 U64_LO(fp->rx_comp_mapping + BCM_PAGE_SIZE));
5513 REG_WR(bp, BAR_USTRORM_INTMEM +
5514 USTORM_CQE_PAGE_NEXT_OFFSET(port, fp->cl_id) + 4,
5515 U64_HI(fp->rx_comp_mapping + BCM_PAGE_SIZE));
5517 REG_WR16(bp, BAR_USTRORM_INTMEM +
5518 USTORM_MAX_AGG_SIZE_OFFSET(port, fp->cl_id),
5519 max_agg_size);
5522 /* dropless flow control */
5523 if (CHIP_IS_E1H(bp)) {
5524 struct ustorm_eth_rx_pause_data_e1h rx_pause = {0};
5526 rx_pause.bd_thr_low = 250;
5527 rx_pause.cqe_thr_low = 250;
5529 rx_pause.sge_thr_low = 0;
5530 rx_pause.bd_thr_high = 350;
5531 rx_pause.cqe_thr_high = 350;
5532 rx_pause.sge_thr_high = 0;
5534 for_each_rx_queue(bp, i) {
5535 struct bnx2x_fastpath *fp = &bp->fp[i];
5537 if (!fp->disable_tpa) {
5538 rx_pause.sge_thr_low = 150;
5539 rx_pause.sge_thr_high = 250;
5540 }
5543 offset = BAR_USTRORM_INTMEM +
5544 USTORM_ETH_RING_PAUSE_DATA_OFFSET(port,
5545 fp->cl_id);
5546 for (j = 0;
5547 j < sizeof(struct ustorm_eth_rx_pause_data_e1h)/4;
5548 j++)
5549 REG_WR(bp, offset + j*4,
5550 ((u32 *)&rx_pause)[j]);
5554 memset(&(bp->cmng), 0, sizeof(struct cmng_struct_per_port));
5556 /* Init rate shaping and fairness contexts */
5557 if (IS_E1HMF(bp)) {
5558 int vn;
5560 /* During init there is no active link
5561 Until link is up, set link rate to 10Gbps */
5562 bp->link_vars.line_speed = SPEED_10000;
5563 bnx2x_init_port_minmax(bp);
5565 if (!BP_NOMCP(bp))
5566 bp->mf_config =
5567 SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
5568 bnx2x_calc_vn_weight_sum(bp);
5570 for (vn = VN_0; vn < E1HVN_MAX; vn++)
5571 bnx2x_init_vn_minmax(bp, 2*vn + port);
5573 /* Enable rate shaping and fairness */
5574 bp->cmng.flags.cmng_enables |=
5575 CMNG_FLAGS_PER_PORT_RATE_SHAPING_VN;
5577 } else {
5578 /* rate shaping and fairness are disabled */
5579 DP(NETIF_MSG_IFUP,
5580 "single function mode minmax will be disabled\n");
5581 }
5584 /* Store it to internal memory */
5585 if (bp->port.pmf)
5586 for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++)
5587 REG_WR(bp, BAR_XSTRORM_INTMEM +
5588 XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i * 4,
5589 ((u32 *)(&bp->cmng))[i]);
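/*
 * Fairness note: in E1H multi-function mode each vnic gets min/max
 * bandwidth parameters derived from the MF configuration.  Since there is
 * no link at init time, a 10G line rate is assumed (SPEED_10000 above)
 * when computing the per-vn min/max values, and the resulting cmng
 * context is copied dword by dword into XSTORM per-port memory.
 */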
5592 static void bnx2x_init_internal(struct bnx2x *bp, u32 load_code)
5594 switch (load_code) {
5595 case FW_MSG_CODE_DRV_LOAD_COMMON:
5596 bnx2x_init_internal_common(bp);
5597 /* no break */
5599 case FW_MSG_CODE_DRV_LOAD_PORT:
5600 bnx2x_init_internal_port(bp);
5601 /* no break */
5603 case FW_MSG_CODE_DRV_LOAD_FUNCTION:
5604 bnx2x_init_internal_func(bp);
5605 break;
5607 default:
5608 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
5609 break;
5610 }
5613 static void bnx2x_nic_init(struct bnx2x *bp, u32 load_code)
5615 int i;
5617 for_each_queue(bp, i) {
5618 struct bnx2x_fastpath *fp = &bp->fp[i];
5621 fp->state = BNX2X_FP_STATE_CLOSED;
5623 fp->cl_id = BP_L_ID(bp) + i;
5624 #ifdef BCM_CNIC
5625 fp->sb_id = fp->cl_id + 1;
5626 #else
5627 fp->sb_id = fp->cl_id;
5628 #endif
5629 /* Suitable Rx and Tx SBs are served by the same client */
5630 if (i >= bp->num_rx_queues)
5631 fp->cl_id -= bp->num_rx_queues;
5632 DP(NETIF_MSG_IFUP,
5633 "queue[%d]: bnx2x_init_sb(%p,%p) cl_id %d sb %d\n",
5634 i, bp, fp->status_blk, fp->cl_id, fp->sb_id);
5635 bnx2x_init_sb(bp, fp->status_blk, fp->status_blk_mapping,
5636 fp->sb_id);
5637 bnx2x_update_fpsb_idx(fp);
5640 /* ensure status block indices were read */
5641 rmb();
5644 bnx2x_init_def_sb(bp, bp->def_status_blk, bp->def_status_blk_mapping,
5645 DEF_SB_ID);
5646 bnx2x_update_dsb_idx(bp);
5647 bnx2x_update_coalesce(bp);
5648 bnx2x_init_rx_rings(bp);
5649 bnx2x_init_tx_ring(bp);
5650 bnx2x_init_sp_ring(bp);
5651 bnx2x_init_context(bp);
5652 bnx2x_init_internal(bp, load_code);
5653 bnx2x_init_ind_table(bp);
5654 bnx2x_stats_init(bp);
5656 /* At this point, we are ready for interrupts */
5657 atomic_set(&bp->intr_sem, 0);
5659 /* flush all before enabling interrupts */
5660 mb();
5661 mmiowb();
5663 bnx2x_int_enable(bp);
5665 /* Check for SPIO5 */
5666 bnx2x_attn_int_deasserted0(bp,
5667 REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + BP_PORT(bp)*4) &
5668 AEU_INPUTS_ATTN_BITS_SPIO5);
5671 /* end of nic init */
5674 * gzip service functions
5677 static int bnx2x_gunzip_init(struct bnx2x *bp)
5679 bp->gunzip_buf = pci_alloc_consistent(bp->pdev, FW_BUF_SIZE,
5680 &bp->gunzip_mapping);
5681 if (bp->gunzip_buf == NULL)
5682 goto gunzip_nomem1;
5684 bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL);
5685 if (bp->strm == NULL)
5686 goto gunzip_nomem2;
5688 bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(),
5689 GFP_KERNEL);
5690 if (bp->strm->workspace == NULL)
5691 goto gunzip_nomem3;
5693 return 0;
5695 gunzip_nomem3:
5696 kfree(bp->strm);
5697 bp->strm = NULL;
5699 gunzip_nomem2:
5700 pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
5701 bp->gunzip_mapping);
5702 bp->gunzip_buf = NULL;
5704 gunzip_nomem1:
5705 printk(KERN_ERR PFX "%s: Cannot allocate firmware buffer for"
5706 " decompression\n", bp->dev->name);
5707 return -ENOMEM;
5710 static void bnx2x_gunzip_end(struct bnx2x *bp)
5712 kfree(bp->strm->workspace);
5714 kfree(bp->strm);
5715 bp->strm = NULL;
5717 if (bp->gunzip_buf) {
5718 pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
5719 bp->gunzip_mapping);
5720 bp->gunzip_buf = NULL;
5724 static int bnx2x_gunzip(struct bnx2x *bp, const u8 *zbuf, int len)
5726 int n, rc;
5728 /* check gzip header */
5729 if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED)) {
5730 BNX2X_ERR("Bad gzip header\n");
5731 return -EINVAL;
5732 }
5734 n = 10;
5736 #define FNAME 0x8
5738 if (zbuf[3] & FNAME)
5739 while ((zbuf[n++] != 0) && (n < len));
5741 bp->strm->next_in = (typeof(bp->strm->next_in))zbuf + n;
5742 bp->strm->avail_in = len - n;
5743 bp->strm->next_out = bp->gunzip_buf;
5744 bp->strm->avail_out = FW_BUF_SIZE;
5746 rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
5747 if (rc != Z_OK)
5748 return rc;
5750 rc = zlib_inflate(bp->strm, Z_FINISH);
5751 if ((rc != Z_OK) && (rc != Z_STREAM_END))
5752 printk(KERN_ERR PFX "%s: Firmware decompression error: %s\n",
5753 bp->dev->name, bp->strm->msg);
5755 bp->gunzip_outlen = (FW_BUF_SIZE - bp->strm->avail_out);
5756 if (bp->gunzip_outlen & 0x3)
5757 printk(KERN_ERR PFX "%s: Firmware decompression error:"
5758 " gunzip_outlen (%d) not aligned\n",
5759 bp->dev->name, bp->gunzip_outlen);
5760 bp->gunzip_outlen >>= 2;
5762 zlib_inflateEnd(bp->strm);
5764 if (rc == Z_STREAM_END)
5765 return 0;
5767 return rc;
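/*
 * bnx2x_gunzip() leaves the inflated image in bp->gunzip_buf and converts
 * gunzip_outlen from bytes to dwords (>>= 2) after the alignment check,
 * since the init code writes the firmware out in 32-bit words; e.g. a
 * 0x1000-byte image ends up as gunzip_outlen = 0x400 dwords.
 */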
5770 /* nic load/unload */
5773 * General service functions
5776 /* send a NIG loopback debug packet */
5777 static void bnx2x_lb_pckt(struct bnx2x *bp)
5779 u32 wb_write[3];
5781 /* Ethernet source and destination addresses */
5782 wb_write[0] = 0x55555555;
5783 wb_write[1] = 0x55555555;
5784 wb_write[2] = 0x20; /* SOP */
5785 REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
5787 /* NON-IP protocol */
5788 wb_write[0] = 0x09000000;
5789 wb_write[1] = 0x55555555;
5790 wb_write[2] = 0x10; /* EOP, eop_bvalid = 0 */
5791 REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
5794 /* some of the internal memories
5795 * are not directly readable from the driver
5796 * to test them we send debug packets
5797 */
5798 static int bnx2x_int_mem_test(struct bnx2x *bp)
5800 int factor;
5801 int count, i;
5802 u32 val = 0;
5804 if (CHIP_REV_IS_FPGA(bp))
5805 factor = 120;
5806 else if (CHIP_REV_IS_EMUL(bp))
5807 factor = 200;
5808 else
5809 factor = 1;
5811 DP(NETIF_MSG_HW, "start part1\n");
5813 /* Disable inputs of parser neighbor blocks */
5814 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
5815 REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
5816 REG_WR(bp, CFC_REG_DEBUG0, 0x1);
5817 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
5819 /* Write 0 to parser credits for CFC search request */
5820 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
5822 /* send Ethernet packet */
5823 bnx2x_lb_pckt(bp);
5825 /* TODO: do we need to reset the NIG statistics here? */
5826 /* Wait until NIG register shows 1 packet of size 0x10 */
5827 count = 1000 * factor;
5828 while (count) {
5830 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5831 val = *bnx2x_sp(bp, wb_data[0]);
5832 if (val == 0x10)
5833 break;
5835 msleep(10);
5836 count--;
5837 }
5838 if (val != 0x10) {
5839 BNX2X_ERR("NIG timeout val = 0x%x\n", val);
5840 return -1;
5841 }
5843 /* Wait until PRS register shows 1 packet */
5844 count = 1000 * factor;
5845 while (count) {
5846 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5847 if (val == 1)
5848 break;
5850 msleep(10);
5851 count--;
5852 }
5853 if (val != 0x1) {
5854 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
5855 return -2;
5856 }
5858 /* Reset and init BRB, PRS */
5859 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
5860 msleep(50);
5861 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
5862 msleep(50);
5863 bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
5864 bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
5866 DP(NETIF_MSG_HW, "part2\n");
5868 /* Disable inputs of parser neighbor blocks */
5869 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
5870 REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
5871 REG_WR(bp, CFC_REG_DEBUG0, 0x1);
5872 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
5874 /* Write 0 to parser credits for CFC search request */
5875 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
5877 /* send 10 Ethernet packets */
5878 for (i = 0; i < 10; i++)
5879 bnx2x_lb_pckt(bp);
5881 /* Wait until NIG register shows 10 + 1
5882 packets of size 11*0x10 = 0xb0 */
5883 count = 1000 * factor;
5884 while (count) {
5886 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5887 val = *bnx2x_sp(bp, wb_data[0]);
5888 if (val == 0xb0)
5889 break;
5891 msleep(10);
5892 count--;
5893 }
5894 if (val != 0xb0) {
5895 BNX2X_ERR("NIG timeout val = 0x%x\n", val);
5896 return -3;
5897 }
5899 /* Wait until PRS register shows 2 packets */
5900 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5901 if (val != 2)
5902 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
5904 /* Write 1 to parser credits for CFC search request */
5905 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x1);
5907 /* Wait until PRS register shows 3 packets */
5908 msleep(10 * factor);
5909 /* Wait until NIG register shows 1 packet of size 0x10 */
5910 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5911 if (val != 3)
5912 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
5914 /* clear NIG EOP FIFO */
5915 for (i = 0; i < 11; i++)
5916 REG_RD(bp, NIG_REG_INGRESS_EOP_LB_FIFO);
5917 val = REG_RD(bp, NIG_REG_INGRESS_EOP_LB_EMPTY);
5918 if (val != 1) {
5919 BNX2X_ERR("clear of NIG failed\n");
5920 return -4;
5921 }
5923 /* Reset and init BRB, PRS, NIG */
5924 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
5925 msleep(50);
5926 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
5927 msleep(50);
5928 bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
5929 bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
5932 REG_WR(bp, PRS_REG_NIC_MODE, 1);
5935 /* Enable inputs of parser neighbor blocks */
5936 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x7fffffff);
5937 REG_WR(bp, TCM_REG_PRS_IFEN, 0x1);
5938 REG_WR(bp, CFC_REG_DEBUG0, 0x0);
5939 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x1);
5941 DP(NETIF_MSG_HW, "done\n");
5943 return 0;
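/*
 * Self-test summary: with the parser's neighbour blocks gated off and its
 * CFC search credit forced to 0, loopback packets must accumulate in
 * deterministic NIG/PRS counters - first a single packet, then ten more
 * whose drain is provoked by restoring the CFC credit.  Any counter or
 * EOP-FIFO mismatch fails the test; BRB/PRS (and finally NIG) are reset
 * and re-initialized on the way out.
 */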
5946 static void enable_blocks_attention(struct bnx2x *bp)
5948 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
5949 REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0);
5950 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
5951 REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
5952 REG_WR(bp, QM_REG_QM_INT_MASK, 0);
5953 REG_WR(bp, TM_REG_TM_INT_MASK, 0);
5954 REG_WR(bp, XSDM_REG_XSDM_INT_MASK_0, 0);
5955 REG_WR(bp, XSDM_REG_XSDM_INT_MASK_1, 0);
5956 REG_WR(bp, XCM_REG_XCM_INT_MASK, 0);
5957 /* REG_WR(bp, XSEM_REG_XSEM_INT_MASK_0, 0); */
5958 /* REG_WR(bp, XSEM_REG_XSEM_INT_MASK_1, 0); */
5959 REG_WR(bp, USDM_REG_USDM_INT_MASK_0, 0);
5960 REG_WR(bp, USDM_REG_USDM_INT_MASK_1, 0);
5961 REG_WR(bp, UCM_REG_UCM_INT_MASK, 0);
5962 /* REG_WR(bp, USEM_REG_USEM_INT_MASK_0, 0); */
5963 /* REG_WR(bp, USEM_REG_USEM_INT_MASK_1, 0); */
5964 REG_WR(bp, GRCBASE_UPB + PB_REG_PB_INT_MASK, 0);
5965 REG_WR(bp, CSDM_REG_CSDM_INT_MASK_0, 0);
5966 REG_WR(bp, CSDM_REG_CSDM_INT_MASK_1, 0);
5967 REG_WR(bp, CCM_REG_CCM_INT_MASK, 0);
5968 /* REG_WR(bp, CSEM_REG_CSEM_INT_MASK_0, 0); */
5969 /* REG_WR(bp, CSEM_REG_CSEM_INT_MASK_1, 0); */
5970 if (CHIP_REV_IS_FPGA(bp))
5971 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x580000);
5973 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x480000);
5974 REG_WR(bp, TSDM_REG_TSDM_INT_MASK_0, 0);
5975 REG_WR(bp, TSDM_REG_TSDM_INT_MASK_1, 0);
5976 REG_WR(bp, TCM_REG_TCM_INT_MASK, 0);
5977 /* REG_WR(bp, TSEM_REG_TSEM_INT_MASK_0, 0); */
5978 /* REG_WR(bp, TSEM_REG_TSEM_INT_MASK_1, 0); */
5979 REG_WR(bp, CDU_REG_CDU_INT_MASK, 0);
5980 REG_WR(bp, DMAE_REG_DMAE_INT_MASK, 0);
5981 /* REG_WR(bp, MISC_REG_MISC_INT_MASK, 0); */
5982 REG_WR(bp, PBF_REG_PBF_INT_MASK, 0X18); /* bit 3,4 masked */
5986 static void bnx2x_reset_common(struct bnx2x *bp)
5989 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
5990 0xd3ffff7f);
5991 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, 0x1403);
5994 static void bnx2x_init_pxp(struct bnx2x *bp)
5996 u16 devctl;
5997 int r_order, w_order;
5999 pci_read_config_word(bp->pdev,
6000 bp->pcie_cap + PCI_EXP_DEVCTL, &devctl);
6001 DP(NETIF_MSG_HW, "read 0x%x from devctl\n", devctl);
6002 w_order = ((devctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5);
6003 if (bp->mrrs == -1)
6004 r_order = ((devctl & PCI_EXP_DEVCTL_READRQ) >> 12);
6005 else {
6006 DP(NETIF_MSG_HW, "force read order to %d\n", bp->mrrs);
6007 r_order = bp->mrrs;
6008 }
6010 bnx2x_init_pxp_arb(bp, r_order, w_order);
6013 static void bnx2x_setup_fan_failure_detection(struct bnx2x *bp)
6015 int is_required, port;
6016 u32 val;
6018 is_required = 0;
6019 val = SHMEM_RD(bp, dev_info.shared_hw_config.config2) &
6020 SHARED_HW_CFG_FAN_FAILURE_MASK;
6022 if (val == SHARED_HW_CFG_FAN_FAILURE_ENABLED)
6023 is_required = 1;
6025 /*
6026 * The fan failure mechanism is usually related to the PHY type since
6027 * the power consumption of the board is affected by the PHY. Currently,
6028 * a fan is required for most designs with SFX7101, BCM8727 and BCM8481.
6029 */
6030 else if (val == SHARED_HW_CFG_FAN_FAILURE_PHY_TYPE)
6031 for (port = PORT_0; port < PORT_MAX; port++) {
6032 u32 phy_type =
6033 SHMEM_RD(bp, dev_info.port_hw_config[port].
6034 external_phy_config) &
6035 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
6036 is_required |=
6037 ((phy_type ==
6038 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101) ||
6039 (phy_type ==
6040 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727) ||
6041 (phy_type ==
6042 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481));
6043 }
6045 DP(NETIF_MSG_HW, "fan detection setting: %d\n", is_required);
6047 if (is_required == 0)
6048 return;
6050 /* Fan failure is indicated by SPIO 5 */
6051 bnx2x_set_spio(bp, MISC_REGISTERS_SPIO_5,
6052 MISC_REGISTERS_SPIO_INPUT_HI_Z);
6054 /* set to active low mode */
6055 val = REG_RD(bp, MISC_REG_SPIO_INT);
6056 val |= ((1 << MISC_REGISTERS_SPIO_5) <<
6057 MISC_REGISTERS_SPIO_INT_OLD_SET_POS);
6058 REG_WR(bp, MISC_REG_SPIO_INT, val);
6060 /* enable interrupt to signal the IGU */
6061 val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN);
6062 val |= (1 << MISC_REGISTERS_SPIO_5);
6063 REG_WR(bp, MISC_REG_SPIO_EVENT_EN, val);
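/*
 * Fan failure, where the board design requires it, is signalled through
 * SPIO 5: the pin is left floating (input, hi-Z), armed as active-low,
 * and routed to the IGU as an attention - which is why bnx2x_nic_init()
 * checks AEU_INPUTS_ATTN_BITS_SPIO5 right after enabling interrupts.
 */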
6066 static int bnx2x_init_common(struct bnx2x *bp)
6068 u32 val, i;
6073 DP(BNX2X_MSG_MCP, "starting common init func %d\n", BP_FUNC(bp));
6075 bnx2x_reset_common(bp);
6076 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0xffffffff);
6077 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, 0xfffc);
6079 bnx2x_init_block(bp, MISC_BLOCK, COMMON_STAGE);
6080 if (CHIP_IS_E1H(bp))
6081 REG_WR(bp, MISC_REG_E1HMF_MODE, IS_E1HMF(bp));
6083 REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x100);
6084 msleep(30);
6085 REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x0);
6087 bnx2x_init_block(bp, PXP_BLOCK, COMMON_STAGE);
6088 if (CHIP_IS_E1(bp)) {
6089 /* enable HW interrupt from PXP on USDM overflow
6090 bit 16 on INT_MASK_0 */
6091 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
6092 }
6094 bnx2x_init_block(bp, PXP2_BLOCK, COMMON_STAGE);
6095 bnx2x_init_pxp(bp);
6097 #ifdef __BIG_ENDIAN
6098 REG_WR(bp, PXP2_REG_RQ_QM_ENDIAN_M, 1);
6099 REG_WR(bp, PXP2_REG_RQ_TM_ENDIAN_M, 1);
6100 REG_WR(bp, PXP2_REG_RQ_SRC_ENDIAN_M, 1);
6101 REG_WR(bp, PXP2_REG_RQ_CDU_ENDIAN_M, 1);
6102 REG_WR(bp, PXP2_REG_RQ_DBG_ENDIAN_M, 1);
6103 /* make sure this value is 0 */
6104 REG_WR(bp, PXP2_REG_RQ_HC_ENDIAN_M, 0);
6106 /* REG_WR(bp, PXP2_REG_RD_PBF_SWAP_MODE, 1); */
6107 REG_WR(bp, PXP2_REG_RD_QM_SWAP_MODE, 1);
6108 REG_WR(bp, PXP2_REG_RD_TM_SWAP_MODE, 1);
6109 REG_WR(bp, PXP2_REG_RD_SRC_SWAP_MODE, 1);
6110 REG_WR(bp, PXP2_REG_RD_CDURD_SWAP_MODE, 1);
6111 #endif
6113 REG_WR(bp, PXP2_REG_RQ_CDU_P_SIZE, 2);
6115 REG_WR(bp, PXP2_REG_RQ_TM_P_SIZE, 5);
6116 REG_WR(bp, PXP2_REG_RQ_QM_P_SIZE, 5);
6117 REG_WR(bp, PXP2_REG_RQ_SRC_P_SIZE, 5);
6120 if (CHIP_REV_IS_FPGA(bp) && CHIP_IS_E1H(bp))
6121 REG_WR(bp, PXP2_REG_PGL_TAGS_LIMIT, 0x1);
6123 /* let the HW do its magic ... */
6124 msleep(100);
6125 /* finish PXP init */
6126 val = REG_RD(bp, PXP2_REG_RQ_CFG_DONE);
6127 if (val != 1) {
6128 BNX2X_ERR("PXP2 CFG failed\n");
6129 return -EBUSY;
6130 }
6131 val = REG_RD(bp, PXP2_REG_RD_INIT_DONE);
6132 if (val != 1) {
6133 BNX2X_ERR("PXP2 RD_INIT failed\n");
6134 return -EBUSY;
6135 }
6137 REG_WR(bp, PXP2_REG_RQ_DISABLE_INPUTS, 0);
6138 REG_WR(bp, PXP2_REG_RD_DISABLE_INPUTS, 0);
6140 bnx2x_init_block(bp, DMAE_BLOCK, COMMON_STAGE);
6142 /* clean the DMAE memory */
6143 bp->dmae_ready = 1;
6144 bnx2x_init_fill(bp, TSEM_REG_PRAM, 0, 8);
6146 bnx2x_init_block(bp, TCM_BLOCK, COMMON_STAGE);
6147 bnx2x_init_block(bp, UCM_BLOCK, COMMON_STAGE);
6148 bnx2x_init_block(bp, CCM_BLOCK, COMMON_STAGE);
6149 bnx2x_init_block(bp, XCM_BLOCK, COMMON_STAGE);
6151 bnx2x_read_dmae(bp, XSEM_REG_PASSIVE_BUFFER, 3);
6152 bnx2x_read_dmae(bp, CSEM_REG_PASSIVE_BUFFER, 3);
6153 bnx2x_read_dmae(bp, TSEM_REG_PASSIVE_BUFFER, 3);
6154 bnx2x_read_dmae(bp, USEM_REG_PASSIVE_BUFFER, 3);
6156 bnx2x_init_block(bp, QM_BLOCK, COMMON_STAGE);
6161 for (i = 0; i < 64; i++) {
6162 REG_WR(bp, QM_REG_BASEADDR + i*4, 1024 * 4 * (i%16));
6163 bnx2x_init_ind_wr(bp, QM_REG_PTRTBL + i*8, wb_write, 2);
6165 if (CHIP_IS_E1H(bp)) {
6166 REG_WR(bp, QM_REG_BASEADDR_EXT_A + i*4, 1024*4*(i%16));
6167 bnx2x_init_ind_wr(bp, QM_REG_PTRTBL_EXT_A + i*8,
6168 wb_write, 2);
6169 }
6170 }
6172 /* soft reset pulse */
6173 REG_WR(bp, QM_REG_SOFT_RESET, 1);
6174 REG_WR(bp, QM_REG_SOFT_RESET, 0);
6177 bnx2x_init_block(bp, TIMERS_BLOCK, COMMON_STAGE);
6180 bnx2x_init_block(bp, DQ_BLOCK, COMMON_STAGE);
6181 REG_WR(bp, DORQ_REG_DPM_CID_OFST, BCM_PAGE_SHIFT);
6182 if (!CHIP_REV_IS_SLOW(bp)) {
6183 /* enable hw interrupt from doorbell Q */
6184 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
6185 }
6187 bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
6188 bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
6189 REG_WR(bp, PRS_REG_A_PRSU_20, 0xf);
6192 REG_WR(bp, PRS_REG_NIC_MODE, 1);
6194 if (CHIP_IS_E1H(bp))
6195 REG_WR(bp, PRS_REG_E1HOV_MODE, IS_E1HMF(bp));
6197 bnx2x_init_block(bp, TSDM_BLOCK, COMMON_STAGE);
6198 bnx2x_init_block(bp, CSDM_BLOCK, COMMON_STAGE);
6199 bnx2x_init_block(bp, USDM_BLOCK, COMMON_STAGE);
6200 bnx2x_init_block(bp, XSDM_BLOCK, COMMON_STAGE);
6202 bnx2x_init_fill(bp, TSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
6203 bnx2x_init_fill(bp, USEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
6204 bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
6205 bnx2x_init_fill(bp, XSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
6207 bnx2x_init_block(bp, TSEM_BLOCK, COMMON_STAGE);
6208 bnx2x_init_block(bp, USEM_BLOCK, COMMON_STAGE);
6209 bnx2x_init_block(bp, CSEM_BLOCK, COMMON_STAGE);
6210 bnx2x_init_block(bp, XSEM_BLOCK, COMMON_STAGE);
6212 /* sync semi rtc */
6213 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
6214 0x80000000);
6215 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
6216 0x80000000);
6218 bnx2x_init_block(bp, UPB_BLOCK, COMMON_STAGE);
6219 bnx2x_init_block(bp, XPB_BLOCK, COMMON_STAGE);
6220 bnx2x_init_block(bp, PBF_BLOCK, COMMON_STAGE);
6222 REG_WR(bp, SRC_REG_SOFT_RST, 1);
6223 for (i = SRC_REG_KEYRSS0_0; i <= SRC_REG_KEYRSS1_9; i += 4) {
6224 REG_WR(bp, i, 0xc0cac01a);
6225 /* TODO: replace with something meaningful */
6226 }
6227 bnx2x_init_block(bp, SRCH_BLOCK, COMMON_STAGE);
6229 REG_WR(bp, SRC_REG_KEYSEARCH_0, 0x63285672);
6230 REG_WR(bp, SRC_REG_KEYSEARCH_1, 0x24b8f2cc);
6231 REG_WR(bp, SRC_REG_KEYSEARCH_2, 0x223aef9b);
6232 REG_WR(bp, SRC_REG_KEYSEARCH_3, 0x26001e3a);
6233 REG_WR(bp, SRC_REG_KEYSEARCH_4, 0x7ae91116);
6234 REG_WR(bp, SRC_REG_KEYSEARCH_5, 0x5ce5230b);
6235 REG_WR(bp, SRC_REG_KEYSEARCH_6, 0x298d8adf);
6236 REG_WR(bp, SRC_REG_KEYSEARCH_7, 0x6eb0ff09);
6237 REG_WR(bp, SRC_REG_KEYSEARCH_8, 0x1830f82f);
6238 REG_WR(bp, SRC_REG_KEYSEARCH_9, 0x01e46be7);
6240 REG_WR(bp, SRC_REG_SOFT_RST, 0);
6242 if (sizeof(union cdu_context) != 1024)
6243 /* we currently assume that a context is 1024 bytes */
6244 printk(KERN_ALERT PFX "please adjust the size of"
6245 " cdu_context(%ld)\n", (long)sizeof(union cdu_context));
6247 bnx2x_init_block(bp, CDU_BLOCK, COMMON_STAGE);
6248 val = (4 << 24) + (0 << 12) + 1024;
6249 REG_WR(bp, CDU_REG_CDU_GLOBAL_PARAMS, val);
6251 bnx2x_init_block(bp, CFC_BLOCK, COMMON_STAGE);
6252 REG_WR(bp, CFC_REG_INIT_REG, 0x7FF);
6253 /* enable context validation interrupt from CFC */
6254 REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
6256 /* set the thresholds to prevent CFC/CDU race */
6257 REG_WR(bp, CFC_REG_DEBUG0, 0x20020000);
6259 bnx2x_init_block(bp, HC_BLOCK, COMMON_STAGE);
6260 bnx2x_init_block(bp, MISC_AEU_BLOCK, COMMON_STAGE);
6262 bnx2x_init_block(bp, PXPCS_BLOCK, COMMON_STAGE);
6263 /* Reset PCIE errors for debug */
6264 REG_WR(bp, 0x2814, 0xffffffff);
6265 REG_WR(bp, 0x3820, 0xffffffff);
6267 bnx2x_init_block(bp, EMAC0_BLOCK, COMMON_STAGE);
6268 bnx2x_init_block(bp, EMAC1_BLOCK, COMMON_STAGE);
6269 bnx2x_init_block(bp, DBU_BLOCK, COMMON_STAGE);
6270 bnx2x_init_block(bp, DBG_BLOCK, COMMON_STAGE);
6272 bnx2x_init_block(bp, NIG_BLOCK, COMMON_STAGE);
6273 if (CHIP_IS_E1H(bp)) {
6274 REG_WR(bp, NIG_REG_LLH_MF_MODE, IS_E1HMF(bp));
6275 REG_WR(bp, NIG_REG_LLH_E1HOV_MODE, IS_E1HMF(bp));
6278 if (CHIP_REV_IS_SLOW(bp))
6279 msleep(200);
6281 /* finish CFC init */
6282 val = reg_poll(bp, CFC_REG_LL_INIT_DONE, 1, 100, 10);
6283 if (val != 1) {
6284 BNX2X_ERR("CFC LL_INIT failed\n");
6285 return -EBUSY;
6286 }
6287 val = reg_poll(bp, CFC_REG_AC_INIT_DONE, 1, 100, 10);
6288 if (val != 1) {
6289 BNX2X_ERR("CFC AC_INIT failed\n");
6290 return -EBUSY;
6291 }
6292 val = reg_poll(bp, CFC_REG_CAM_INIT_DONE, 1, 100, 10);
6293 if (val != 1) {
6294 BNX2X_ERR("CFC CAM_INIT failed\n");
6295 return -EBUSY;
6296 }
6297 REG_WR(bp, CFC_REG_DEBUG0, 0);
6299 /* read NIG statistic
6300 to see if this is our first up since powerup */
6301 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
6302 val = *bnx2x_sp(bp, wb_data[0]);
6304 /* do internal memory self test */
6305 if ((CHIP_IS_E1(bp)) && (val == 0) && bnx2x_int_mem_test(bp)) {
6306 BNX2X_ERR("internal mem self test failed\n");
6310 switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
6311 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
6312 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
6313 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
6314 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
6315 bp->port.need_hw_lock = 1;
6322 bnx2x_setup_fan_failure_detection(bp);
6324 /* clear PXP2 attentions */
6325 REG_RD(bp, PXP2_REG_PXP2_INT_STS_CLR_0);
6327 enable_blocks_attention(bp);
6329 if (!BP_NOMCP(bp)) {
6330 bnx2x_acquire_phy_lock(bp);
6331 bnx2x_common_init_phy(bp, bp->common.shmem_base);
6332 bnx2x_release_phy_lock(bp);
6334 BNX2X_ERR("Bootcode is missing - can not initialize link\n");
6339 static int bnx2x_init_port(struct bnx2x *bp)
6341 int port = BP_PORT(bp);
6342 int init_stage = port ? PORT1_STAGE : PORT0_STAGE;
6346 DP(BNX2X_MSG_MCP, "starting port init port %x\n", port);
6348 REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
6350 bnx2x_init_block(bp, PXP_BLOCK, init_stage);
6351 bnx2x_init_block(bp, PXP2_BLOCK, init_stage);
6353 bnx2x_init_block(bp, TCM_BLOCK, init_stage);
6354 bnx2x_init_block(bp, UCM_BLOCK, init_stage);
6355 bnx2x_init_block(bp, CCM_BLOCK, init_stage);
6356 bnx2x_init_block(bp, XCM_BLOCK, init_stage);
6359 REG_WR(bp, QM_REG_CONNNUM_0 + port*4, 1024/16 - 1);
6361 bnx2x_init_block(bp, TIMERS_BLOCK, init_stage);
6362 REG_WR(bp, TM_REG_LIN0_SCAN_TIME + port*4, 20);
6363 REG_WR(bp, TM_REG_LIN0_MAX_ACTIVE_CID + port*4, 31);
6365 bnx2x_init_block(bp, DQ_BLOCK, init_stage);
6367 bnx2x_init_block(bp, BRB1_BLOCK, init_stage);
6368 if (CHIP_REV_IS_SLOW(bp) && !CHIP_IS_E1H(bp)) {
6369 /* no pause for emulation and FPGA */
6374 low = ((bp->flags & ONE_PORT_FLAG) ? 160 : 246);
6375 else if (bp->dev->mtu > 4096) {
6376 if (bp->flags & ONE_PORT_FLAG)
6380 /* (24*1024 + val*4)/256 */
6381 low = 96 + (val/64) + ((val % 64) ? 1 : 0);
6384 low = ((bp->flags & ONE_PORT_FLAG) ? 80 : 160);
6385 high = low + 56; /* 14*1024/256 */
6387 REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_0 + port*4, low);
6388 REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_0 + port*4, high);
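/* Illustrative note (added, not in the original source): the pause
 * thresholds above appear to be expressed in 256-byte BRB blocks:
 * in the default branch low = 160 blocks (40KB) and
 * high = low + 56 = 216 blocks (54KB), the extra 56 blocks matching
 * the 14*1024/256 in the comment above.
 */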
6391 bnx2x_init_block(bp, PRS_BLOCK, init_stage);
6393 bnx2x_init_block(bp, TSDM_BLOCK, init_stage);
6394 bnx2x_init_block(bp, CSDM_BLOCK, init_stage);
6395 bnx2x_init_block(bp, USDM_BLOCK, init_stage);
6396 bnx2x_init_block(bp, XSDM_BLOCK, init_stage);
6398 bnx2x_init_block(bp, TSEM_BLOCK, init_stage);
6399 bnx2x_init_block(bp, USEM_BLOCK, init_stage);
6400 bnx2x_init_block(bp, CSEM_BLOCK, init_stage);
6401 bnx2x_init_block(bp, XSEM_BLOCK, init_stage);
6403 bnx2x_init_block(bp, UPB_BLOCK, init_stage);
6404 bnx2x_init_block(bp, XPB_BLOCK, init_stage);
6406 bnx2x_init_block(bp, PBF_BLOCK, init_stage);
6408 /* configure PBF to work without PAUSE mtu 9000 */
6409 REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 0);
6411 /* update threshold */
6412 REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, (9040/16));
6413 /* update init credit */
6414 REG_WR(bp, PBF_REG_P0_INIT_CRD + port*4, (9040/16) + 553 - 22);
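/* Illustrative note (added): the PBF values above are in 16-byte
 * units: the arbiter threshold 9040/16 = 565 covers one 9000-byte
 * jumbo frame plus overhead, and the initial credit works out to
 * 565 + 553 - 22 = 1096 units.
 */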
6417 REG_WR(bp, PBF_REG_INIT_P0 + port*4, 1);
6419 REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0);
6422 bnx2x_init_block(bp, SRCH_BLOCK, init_stage);
6424 bnx2x_init_block(bp, CDU_BLOCK, init_stage);
6425 bnx2x_init_block(bp, CFC_BLOCK, init_stage);
6427 if (CHIP_IS_E1(bp)) {
6428 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
6429 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
6431 bnx2x_init_block(bp, HC_BLOCK, init_stage);
6433 bnx2x_init_block(bp, MISC_AEU_BLOCK, init_stage);
6434 /* init aeu_mask_attn_func_0/1:
6435 * - SF mode: bits 3-7 are masked. only bits 0-2 are in use
6436 * - MF mode: bit 3 is masked. bits 0-2 are in use as in SF
6437 * bits 4-7 are used for "per vn group attention" */
6438 REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4,
6439 (IS_E1HMF(bp) ? 0xF7 : 0x7));
6441 bnx2x_init_block(bp, PXPCS_BLOCK, init_stage);
6442 bnx2x_init_block(bp, EMAC0_BLOCK, init_stage);
6443 bnx2x_init_block(bp, EMAC1_BLOCK, init_stage);
6444 bnx2x_init_block(bp, DBU_BLOCK, init_stage);
6445 bnx2x_init_block(bp, DBG_BLOCK, init_stage);
6447 bnx2x_init_block(bp, NIG_BLOCK, init_stage);
6449 REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1);
6451 if (CHIP_IS_E1H(bp)) {
6452 /* 0x2 disable e1hov, 0x1 enable */
6453 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK_MF + port*4,
6454 (IS_E1HMF(bp) ? 0x1 : 0x2));
6457 REG_WR(bp, NIG_REG_LLFC_ENABLE_0 + port*4, 0);
6458 REG_WR(bp, NIG_REG_LLFC_OUT_EN_0 + port*4, 0);
6459 REG_WR(bp, NIG_REG_PAUSE_ENABLE_0 + port*4, 1);
6463 bnx2x_init_block(bp, MCP_BLOCK, init_stage);
6464 bnx2x_init_block(bp, DMAE_BLOCK, init_stage);
6466 switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
6467 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
6469 u32 swap_val, swap_override, aeu_gpio_mask, offset;
6471 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_3,
6472 MISC_REGISTERS_GPIO_INPUT_HI_Z, port);
6474 /* The GPIO should be swapped if the swap register is set and active */
6476 swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
6477 swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
6479 /* Select function upon port-swap configuration */
6481 offset = MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0;
6482 aeu_gpio_mask = (swap_val && swap_override) ?
6483 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1 :
6484 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0;
6486 offset = MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0;
6487 aeu_gpio_mask = (swap_val && swap_override) ?
6488 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 :
6489 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1;
6491 val = REG_RD(bp, offset);
6492 /* add GPIO3 to group */
6493 val |= aeu_gpio_mask;
6494 REG_WR(bp, offset, val);
6498 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
6499 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
6500 /* add SPIO 5 to group 0 */
6502 u32 reg_addr = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
6503 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
6504 val = REG_RD(bp, reg_addr);
6505 val |= AEU_INPUTS_ATTN_BITS_SPIO5;
6506 REG_WR(bp, reg_addr, val);
6514 bnx2x__link_reset(bp);
6519 #define ILT_PER_FUNC (768/2)
6520 #define FUNC_ILT_BASE(func) (func * ILT_PER_FUNC)
6521 /* the phys address is shifted right 12 bits and a 1=valid bit
6522 is added at the 53rd bit;
6523 then, since this is a wide register(TM),
6524 we split it into two 32 bit writes
6526 #define ONCHIP_ADDR1(x) ((u32)(((u64)x >> 12) & 0xFFFFFFFF))
6527 #define ONCHIP_ADDR2(x) ((u32)((1 << 20) | ((u64)x >> 44)))
6528 #define PXP_ONE_ILT(x) (((x) << 10) | x)
6529 #define PXP_ILT_RANGE(f, l) (((l) << 10) | f)
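/* Illustrative example (added, not part of the driver): for a
 * page-aligned DMA address addr = 0x0000001234567000ULL:
 *   ONCHIP_ADDR1(addr) = (addr >> 12) & 0xFFFFFFFF = 0x01234567
 *   ONCHIP_ADDR2(addr) = (1 << 20) | (addr >> 44) = 0x00100000
 * i.e. the low word carries address bits 12-43 and the high word
 * carries bits 44-63 plus the valid bit at bit 20 (the 53rd bit of
 * the combined 64-bit value). Likewise PXP_ONE_ILT(5) =
 * (5 << 10) | 5 = 0x1405 encodes the single-line range [5, 5].
 */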
6532 #define CNIC_ILT_LINES 127
6533 #define CNIC_CTX_PER_ILT 16
6535 #define CNIC_ILT_LINES 0
6538 static void bnx2x_ilt_wr(struct bnx2x *bp, u32 index, dma_addr_t addr)
6542 if (CHIP_IS_E1H(bp))
6543 reg = PXP2_REG_RQ_ONCHIP_AT_B0 + index*8;
6545 reg = PXP2_REG_RQ_ONCHIP_AT + index*8;
6547 bnx2x_wb_wr(bp, reg, ONCHIP_ADDR1(addr), ONCHIP_ADDR2(addr));
6550 static int bnx2x_init_func(struct bnx2x *bp)
6552 int port = BP_PORT(bp);
6553 int func = BP_FUNC(bp);
6557 DP(BNX2X_MSG_MCP, "starting func init func %x\n", func);
6559 /* set MSI reconfigure capability */
6560 addr = (port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0);
6561 val = REG_RD(bp, addr);
6562 val |= HC_CONFIG_0_REG_MSI_ATTN_EN_0;
6563 REG_WR(bp, addr, val);
6565 i = FUNC_ILT_BASE(func);
6567 bnx2x_ilt_wr(bp, i, bnx2x_sp_mapping(bp, context));
6568 if (CHIP_IS_E1H(bp)) {
6569 REG_WR(bp, PXP2_REG_RQ_CDU_FIRST_ILT, i);
6570 REG_WR(bp, PXP2_REG_RQ_CDU_LAST_ILT, i + CNIC_ILT_LINES);
6572 REG_WR(bp, PXP2_REG_PSWRQ_CDU0_L2P + func*4,
6573 PXP_ILT_RANGE(i, i + CNIC_ILT_LINES));
6576 i += 1 + CNIC_ILT_LINES;
6577 bnx2x_ilt_wr(bp, i, bp->timers_mapping);
6579 REG_WR(bp, PXP2_REG_PSWRQ_TM0_L2P + func*4, PXP_ONE_ILT(i));
6581 REG_WR(bp, PXP2_REG_RQ_TM_FIRST_ILT, i);
6582 REG_WR(bp, PXP2_REG_RQ_TM_LAST_ILT, i);
6586 bnx2x_ilt_wr(bp, i, bp->qm_mapping);
6588 REG_WR(bp, PXP2_REG_PSWRQ_QM0_L2P + func*4, PXP_ONE_ILT(i));
6590 REG_WR(bp, PXP2_REG_RQ_QM_FIRST_ILT, i);
6591 REG_WR(bp, PXP2_REG_RQ_QM_LAST_ILT, i);
6595 bnx2x_ilt_wr(bp, i, bp->t1_mapping);
6597 REG_WR(bp, PXP2_REG_PSWRQ_SRC0_L2P + func*4, PXP_ONE_ILT(i));
6599 REG_WR(bp, PXP2_REG_RQ_SRC_FIRST_ILT, i);
6600 REG_WR(bp, PXP2_REG_RQ_SRC_LAST_ILT, i);
6603 /* tell the searcher where the T2 table is */
6604 REG_WR(bp, SRC_REG_COUNTFREE0 + port*4, 16*1024/64);
6606 bnx2x_wb_wr(bp, SRC_REG_FIRSTFREE0 + port*16,
6607 U64_LO(bp->t2_mapping), U64_HI(bp->t2_mapping));
6609 bnx2x_wb_wr(bp, SRC_REG_LASTFREE0 + port*16,
6610 U64_LO((u64)bp->t2_mapping + 16*1024 - 64),
6611 U64_HI((u64)bp->t2_mapping + 16*1024 - 64));
6613 REG_WR(bp, SRC_REG_NUMBER_HASH_BITS0 + port*4, 10);
6616 if (CHIP_IS_E1H(bp)) {
6617 bnx2x_init_block(bp, MISC_BLOCK, FUNC0_STAGE + func);
6618 bnx2x_init_block(bp, TCM_BLOCK, FUNC0_STAGE + func);
6619 bnx2x_init_block(bp, UCM_BLOCK, FUNC0_STAGE + func);
6620 bnx2x_init_block(bp, CCM_BLOCK, FUNC0_STAGE + func);
6621 bnx2x_init_block(bp, XCM_BLOCK, FUNC0_STAGE + func);
6622 bnx2x_init_block(bp, TSEM_BLOCK, FUNC0_STAGE + func);
6623 bnx2x_init_block(bp, USEM_BLOCK, FUNC0_STAGE + func);
6624 bnx2x_init_block(bp, CSEM_BLOCK, FUNC0_STAGE + func);
6625 bnx2x_init_block(bp, XSEM_BLOCK, FUNC0_STAGE + func);
6627 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
6628 REG_WR(bp, NIG_REG_LLH0_FUNC_VLAN_ID + port*8, bp->e1hov);
6631 /* HC init per function */
6632 if (CHIP_IS_E1H(bp)) {
6633 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
6635 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
6636 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
6638 bnx2x_init_block(bp, HC_BLOCK, FUNC0_STAGE + func);
6640 /* Reset PCIE errors for debug */
6641 REG_WR(bp, 0x2114, 0xffffffff);
6642 REG_WR(bp, 0x2120, 0xffffffff);
6647 static int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
6651 DP(BNX2X_MSG_MCP, "function %d load_code %x\n",
6652 BP_FUNC(bp), load_code);
6655 mutex_init(&bp->dmae_mutex);
6656 rc = bnx2x_gunzip_init(bp);
6660 switch (load_code) {
6661 case FW_MSG_CODE_DRV_LOAD_COMMON:
6662 rc = bnx2x_init_common(bp);
6667 case FW_MSG_CODE_DRV_LOAD_PORT:
6669 rc = bnx2x_init_port(bp);
6674 case FW_MSG_CODE_DRV_LOAD_FUNCTION:
6676 rc = bnx2x_init_func(bp);
6682 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
6686 if (!BP_NOMCP(bp)) {
6687 int func = BP_FUNC(bp);
6689 bp->fw_drv_pulse_wr_seq =
6690 (SHMEM_RD(bp, func_mb[func].drv_pulse_mb) &
6691 DRV_PULSE_SEQ_MASK);
6692 DP(BNX2X_MSG_MCP, "drv_pulse 0x%x\n", bp->fw_drv_pulse_wr_seq);
6695 /* this needs to be done before gunzip end */
6696 bnx2x_zero_def_sb(bp);
6697 for_each_queue(bp, i)
6698 bnx2x_zero_sb(bp, BP_L_ID(bp) + i);
6700 bnx2x_zero_sb(bp, BP_L_ID(bp) + i);
6704 bnx2x_gunzip_end(bp);
6709 static void bnx2x_free_mem(struct bnx2x *bp)
6712 #define BNX2X_PCI_FREE(x, y, size) \
6715 pci_free_consistent(bp->pdev, size, x, y); \
6721 #define BNX2X_FREE(x) \
6733 for_each_queue(bp, i) {
6736 BNX2X_PCI_FREE(bnx2x_fp(bp, i, status_blk),
6737 bnx2x_fp(bp, i, status_blk_mapping),
6738 sizeof(struct host_status_block));
6741 for_each_rx_queue(bp, i) {
6743 /* fastpath rx rings: rx_buf rx_desc rx_comp */
6744 BNX2X_FREE(bnx2x_fp(bp, i, rx_buf_ring));
6745 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_desc_ring),
6746 bnx2x_fp(bp, i, rx_desc_mapping),
6747 sizeof(struct eth_rx_bd) * NUM_RX_BD);
6749 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_comp_ring),
6750 bnx2x_fp(bp, i, rx_comp_mapping),
6751 sizeof(struct eth_fast_path_rx_cqe) *
6755 BNX2X_FREE(bnx2x_fp(bp, i, rx_page_ring));
6756 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_sge_ring),
6757 bnx2x_fp(bp, i, rx_sge_mapping),
6758 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
6761 for_each_tx_queue(bp, i) {
6763 /* fastpath tx rings: tx_buf tx_desc */
6764 BNX2X_FREE(bnx2x_fp(bp, i, tx_buf_ring));
6765 BNX2X_PCI_FREE(bnx2x_fp(bp, i, tx_desc_ring),
6766 bnx2x_fp(bp, i, tx_desc_mapping),
6767 sizeof(union eth_tx_bd_types) * NUM_TX_BD);
6769 /* end of fastpath */
6771 BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping,
6772 sizeof(struct host_def_status_block));
6774 BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping,
6775 sizeof(struct bnx2x_slowpath));
6778 BNX2X_PCI_FREE(bp->t1, bp->t1_mapping, 64*1024);
6779 BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, 16*1024);
6780 BNX2X_PCI_FREE(bp->timers, bp->timers_mapping, 8*1024);
6781 BNX2X_PCI_FREE(bp->qm, bp->qm_mapping, 128*1024);
6782 BNX2X_PCI_FREE(bp->cnic_sb, bp->cnic_sb_mapping,
6783 sizeof(struct host_status_block));
6785 BNX2X_PCI_FREE(bp->spq, bp->spq_mapping, BCM_PAGE_SIZE);
6787 #undef BNX2X_PCI_FREE
6791 static int bnx2x_alloc_mem(struct bnx2x *bp)
6794 #define BNX2X_PCI_ALLOC(x, y, size) \
6796 x = pci_alloc_consistent(bp->pdev, size, y); \
6798 goto alloc_mem_err; \
6799 memset(x, 0, size); \
6802 #define BNX2X_ALLOC(x, size) \
6804 x = vmalloc(size); \
6806 goto alloc_mem_err; \
6807 memset(x, 0, size); \
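/* Descriptive note (added): on any allocation failure both helper
 * macros above jump to the alloc_mem_err label, where bnx2x_free_mem()
 * unwinds whatever was allocated so far (the free macros tolerate
 * NULL pointers) before the function returns -ENOMEM.
 */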
6814 for_each_queue(bp, i) {
6815 bnx2x_fp(bp, i, bp) = bp;
6818 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, status_blk),
6819 &bnx2x_fp(bp, i, status_blk_mapping),
6820 sizeof(struct host_status_block));
6823 for_each_rx_queue(bp, i) {
6825 /* fastpath rx rings: rx_buf rx_desc rx_comp */
6826 BNX2X_ALLOC(bnx2x_fp(bp, i, rx_buf_ring),
6827 sizeof(struct sw_rx_bd) * NUM_RX_BD);
6828 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_desc_ring),
6829 &bnx2x_fp(bp, i, rx_desc_mapping),
6830 sizeof(struct eth_rx_bd) * NUM_RX_BD);
6832 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_comp_ring),
6833 &bnx2x_fp(bp, i, rx_comp_mapping),
6834 sizeof(struct eth_fast_path_rx_cqe) *
6838 BNX2X_ALLOC(bnx2x_fp(bp, i, rx_page_ring),
6839 sizeof(struct sw_rx_page) * NUM_RX_SGE);
6840 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_sge_ring),
6841 &bnx2x_fp(bp, i, rx_sge_mapping),
6842 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
6845 for_each_tx_queue(bp, i) {
6847 /* fastpath tx rings: tx_buf tx_desc */
6848 BNX2X_ALLOC(bnx2x_fp(bp, i, tx_buf_ring),
6849 sizeof(struct sw_tx_bd) * NUM_TX_BD);
6850 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, tx_desc_ring),
6851 &bnx2x_fp(bp, i, tx_desc_mapping),
6852 sizeof(union eth_tx_bd_types) * NUM_TX_BD);
6854 /* end of fastpath */
6856 BNX2X_PCI_ALLOC(bp->def_status_blk, &bp->def_status_blk_mapping,
6857 sizeof(struct host_def_status_block));
6859 BNX2X_PCI_ALLOC(bp->slowpath, &bp->slowpath_mapping,
6860 sizeof(struct bnx2x_slowpath));
6863 BNX2X_PCI_ALLOC(bp->t1, &bp->t1_mapping, 64*1024);
6865 /* allocate searcher T2 table;
6866 we allocate 1/4 of alloc num for T2
6867 (which is not entered into the ILT) */
6868 BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, 16*1024);
6870 /* Initialize T2 (for 1024 connections) */
6871 for (i = 0; i < 16*1024; i += 64)
6872 *(u64 *)((char *)bp->t2 + i + 56) = bp->t2_mapping + i + 64;
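/* Descriptive note (added): the loop above strings the 256 64-byte
 * T2 entries into a singly linked free list: the last 8 bytes of
 * each entry (offset 56) hold the bus address of the next entry,
 * while SRC_REG_FIRSTFREE0/LASTFREE0 in bnx2x_init_func() hand the
 * searcher the addresses of the first and last entries.
 */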
6874 /* Timer block array (8*MAX_CONN), phys uncached; for now 1024 conns */
6875 BNX2X_PCI_ALLOC(bp->timers, &bp->timers_mapping, 8*1024);
6877 /* QM queues (128*MAX_CONN) */
6878 BNX2X_PCI_ALLOC(bp->qm, &bp->qm_mapping, 128*1024);
6880 BNX2X_PCI_ALLOC(bp->cnic_sb, &bp->cnic_sb_mapping,
6881 sizeof(struct host_status_block));
6884 /* Slow path ring */
6885 BNX2X_PCI_ALLOC(bp->spq, &bp->spq_mapping, BCM_PAGE_SIZE);
6893 #undef BNX2X_PCI_ALLOC
6897 static void bnx2x_free_tx_skbs(struct bnx2x *bp)
6901 for_each_tx_queue(bp, i) {
6902 struct bnx2x_fastpath *fp = &bp->fp[i];
6904 u16 bd_cons = fp->tx_bd_cons;
6905 u16 sw_prod = fp->tx_pkt_prod;
6906 u16 sw_cons = fp->tx_pkt_cons;
6908 while (sw_cons != sw_prod) {
6909 bd_cons = bnx2x_free_tx_pkt(bp, fp, TX_BD(sw_cons));
6915 static void bnx2x_free_rx_skbs(struct bnx2x *bp)
6919 for_each_rx_queue(bp, j) {
6920 struct bnx2x_fastpath *fp = &bp->fp[j];
6922 for (i = 0; i < NUM_RX_BD; i++) {
6923 struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
6924 struct sk_buff *skb = rx_buf->skb;
6929 pci_unmap_single(bp->pdev,
6930 pci_unmap_addr(rx_buf, mapping),
6931 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
6936 if (!fp->disable_tpa)
6937 bnx2x_free_tpa_pool(bp, fp, CHIP_IS_E1(bp) ?
6938 ETH_MAX_AGGREGATION_QUEUES_E1 :
6939 ETH_MAX_AGGREGATION_QUEUES_E1H);
6943 static void bnx2x_free_skbs(struct bnx2x *bp)
6945 bnx2x_free_tx_skbs(bp);
6946 bnx2x_free_rx_skbs(bp);
6949 static void bnx2x_free_msix_irqs(struct bnx2x *bp)
6953 free_irq(bp->msix_table[0].vector, bp->dev);
6954 DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
6955 bp->msix_table[0].vector);
6960 for_each_queue(bp, i) {
6961 DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq "
6962 "state %x\n", i, bp->msix_table[i + offset].vector,
6963 bnx2x_fp(bp, i, state));
6965 free_irq(bp->msix_table[i + offset].vector, &bp->fp[i]);
6969 static void bnx2x_free_irq(struct bnx2x *bp)
6971 if (bp->flags & USING_MSIX_FLAG) {
6972 bnx2x_free_msix_irqs(bp);
6973 pci_disable_msix(bp->pdev);
6974 bp->flags &= ~USING_MSIX_FLAG;
6976 } else if (bp->flags & USING_MSI_FLAG) {
6977 free_irq(bp->pdev->irq, bp->dev);
6978 pci_disable_msi(bp->pdev);
6979 bp->flags &= ~USING_MSI_FLAG;
6982 free_irq(bp->pdev->irq, bp->dev);
6985 static int bnx2x_enable_msix(struct bnx2x *bp)
6987 int i, rc, offset = 1;
6990 bp->msix_table[0].entry = igu_vec;
6991 DP(NETIF_MSG_IFUP, "msix_table[0].entry = %d (slowpath)\n", igu_vec);
6994 igu_vec = BP_L_ID(bp) + offset;
6995 bp->msix_table[1].entry = igu_vec;
6996 DP(NETIF_MSG_IFUP, "msix_table[1].entry = %d (CNIC)\n", igu_vec);
6999 for_each_queue(bp, i) {
7000 igu_vec = BP_L_ID(bp) + offset + i;
7001 bp->msix_table[i + offset].entry = igu_vec;
7002 DP(NETIF_MSG_IFUP, "msix_table[%d].entry = %d "
7003 "(fastpath #%u)\n", i + offset, igu_vec, i);
7006 rc = pci_enable_msix(bp->pdev, &bp->msix_table[0],
7007 BNX2X_NUM_QUEUES(bp) + offset);
7009 DP(NETIF_MSG_IFUP, "MSI-X is not attainable rc %d\n", rc);
7013 bp->flags |= USING_MSIX_FLAG;
7018 static int bnx2x_req_msix_irqs(struct bnx2x *bp)
7020 int i, rc, offset = 1;
7022 rc = request_irq(bp->msix_table[0].vector, bnx2x_msix_sp_int, 0,
7023 bp->dev->name, bp->dev);
7025 BNX2X_ERR("request sp irq failed\n");
7032 for_each_queue(bp, i) {
7033 struct bnx2x_fastpath *fp = &bp->fp[i];
7035 if (i < bp->num_rx_queues)
7036 sprintf(fp->name, "%s-rx-%d", bp->dev->name, i);
7038 sprintf(fp->name, "%s-tx-%d",
7039 bp->dev->name, i - bp->num_rx_queues);
7041 rc = request_irq(bp->msix_table[i + offset].vector,
7042 bnx2x_msix_fp_int, 0, fp->name, fp);
7044 BNX2X_ERR("request fp #%d irq failed rc %d\n", i, rc);
7045 bnx2x_free_msix_irqs(bp);
7049 fp->state = BNX2X_FP_STATE_IRQ;
7052 i = BNX2X_NUM_QUEUES(bp);
7053 printk(KERN_INFO PFX "%s: using MSI-X IRQs: sp %d fp[%d] %d"
7055 bp->dev->name, bp->msix_table[0].vector,
7056 0, bp->msix_table[offset].vector,
7057 i - 1, bp->msix_table[offset + i - 1].vector);
7062 static int bnx2x_enable_msi(struct bnx2x *bp)
7066 rc = pci_enable_msi(bp->pdev);
7068 DP(NETIF_MSG_IFUP, "MSI is not attainable\n");
7071 bp->flags |= USING_MSI_FLAG;
7076 static int bnx2x_req_irq(struct bnx2x *bp)
7078 unsigned long flags;
7081 if (bp->flags & USING_MSI_FLAG)
7084 flags = IRQF_SHARED;
7086 rc = request_irq(bp->pdev->irq, bnx2x_interrupt, flags,
7087 bp->dev->name, bp->dev);
7089 bnx2x_fp(bp, 0, state) = BNX2X_FP_STATE_IRQ;
7094 static void bnx2x_napi_enable(struct bnx2x *bp)
7098 for_each_rx_queue(bp, i)
7099 napi_enable(&bnx2x_fp(bp, i, napi));
7102 static void bnx2x_napi_disable(struct bnx2x *bp)
7106 for_each_rx_queue(bp, i)
7107 napi_disable(&bnx2x_fp(bp, i, napi));
7110 static void bnx2x_netif_start(struct bnx2x *bp)
7114 intr_sem = atomic_dec_and_test(&bp->intr_sem);
7115 smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */
7118 if (netif_running(bp->dev)) {
7119 bnx2x_napi_enable(bp);
7120 bnx2x_int_enable(bp);
7121 if (bp->state == BNX2X_STATE_OPEN)
7122 netif_tx_wake_all_queues(bp->dev);
7127 static void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
7129 bnx2x_int_disable_sync(bp, disable_hw);
7130 bnx2x_napi_disable(bp);
7131 netif_tx_disable(bp->dev);
7132 bp->dev->trans_start = jiffies; /* prevent tx timeout */
7136 * Init service functions
7140 * Sets a MAC in a CAM for a few L2 Clients for E1 chip
7142 * @param bp driver descriptor
7143 * @param set set or clear an entry (1 or 0)
7144 * @param mac pointer to a buffer containing a MAC
7145 * @param cl_bit_vec bit vector of clients to register a MAC for
7146 * @param cam_offset offset in a CAM to use
7147 * @param with_bcast set broadcast MAC as well
7149 static void bnx2x_set_mac_addr_e1_gen(struct bnx2x *bp, int set, u8 *mac,
7150 u32 cl_bit_vec, u8 cam_offset,
7153 struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
7154 int port = BP_PORT(bp);
7157 * unicasts 0-31:port0 32-63:port1
7158 * multicast 64-127:port0 128-191:port1
7160 config->hdr.length = 1 + (with_bcast ? 1 : 0);
7161 config->hdr.offset = cam_offset;
7162 config->hdr.client_id = 0xff;
7163 config->hdr.reserved1 = 0;
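/* Illustrative example (added): on a little-endian host, for
 * mac = 00:11:22:33:44:55 the three swab16() reads below yield
 * msb = 0x0011, middle = 0x2233 and lsb = 0x4455, so the CAM entry
 * holds the address in straight big-endian word order.
 */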
7166 config->config_table[0].cam_entry.msb_mac_addr =
7167 swab16(*(u16 *)&mac[0]);
7168 config->config_table[0].cam_entry.middle_mac_addr =
7169 swab16(*(u16 *)&mac[2]);
7170 config->config_table[0].cam_entry.lsb_mac_addr =
7171 swab16(*(u16 *)&mac[4]);
7172 config->config_table[0].cam_entry.flags = cpu_to_le16(port);
7174 config->config_table[0].target_table_entry.flags = 0;
7176 CAM_INVALIDATE(config->config_table[0]);
7177 config->config_table[0].target_table_entry.clients_bit_vector =
7178 cpu_to_le32(cl_bit_vec);
7179 config->config_table[0].target_table_entry.vlan_id = 0;
7181 DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x)\n",
7182 (set ? "setting" : "clearing"),
7183 config->config_table[0].cam_entry.msb_mac_addr,
7184 config->config_table[0].cam_entry.middle_mac_addr,
7185 config->config_table[0].cam_entry.lsb_mac_addr);
7189 config->config_table[1].cam_entry.msb_mac_addr =
7190 cpu_to_le16(0xffff);
7191 config->config_table[1].cam_entry.middle_mac_addr =
7192 cpu_to_le16(0xffff);
7193 config->config_table[1].cam_entry.lsb_mac_addr =
7194 cpu_to_le16(0xffff);
7195 config->config_table[1].cam_entry.flags = cpu_to_le16(port);
7197 config->config_table[1].target_table_entry.flags =
7198 TSTORM_CAM_TARGET_TABLE_ENTRY_BROADCAST;
7200 CAM_INVALIDATE(config->config_table[1]);
7201 config->config_table[1].target_table_entry.clients_bit_vector =
7202 cpu_to_le32(cl_bit_vec);
7203 config->config_table[1].target_table_entry.vlan_id = 0;
7206 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
7207 U64_HI(bnx2x_sp_mapping(bp, mac_config)),
7208 U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
7212 * Sets a MAC in a CAM for a few L2 Clients for E1H chip
7214 * @param bp driver descriptor
7215 * @param set set or clear an entry (1 or 0)
7216 * @param mac pointer to a buffer containing a MAC
7217 * @param cl_bit_vec bit vector of clients to register a MAC for
7218 * @param cam_offset offset in a CAM to use
7220 static void bnx2x_set_mac_addr_e1h_gen(struct bnx2x *bp, int set, u8 *mac,
7221 u32 cl_bit_vec, u8 cam_offset)
7223 struct mac_configuration_cmd_e1h *config =
7224 (struct mac_configuration_cmd_e1h *)bnx2x_sp(bp, mac_config);
7226 config->hdr.length = 1;
7227 config->hdr.offset = cam_offset;
7228 config->hdr.client_id = 0xff;
7229 config->hdr.reserved1 = 0;
7232 config->config_table[0].msb_mac_addr =
7233 swab16(*(u16 *)&mac[0]);
7234 config->config_table[0].middle_mac_addr =
7235 swab16(*(u16 *)&mac[2]);
7236 config->config_table[0].lsb_mac_addr =
7237 swab16(*(u16 *)&mac[4]);
7238 config->config_table[0].clients_bit_vector =
7239 cpu_to_le32(cl_bit_vec);
7240 config->config_table[0].vlan_id = 0;
7241 config->config_table[0].e1hov_id = cpu_to_le16(bp->e1hov);
7243 config->config_table[0].flags = BP_PORT(bp);
7245 config->config_table[0].flags =
7246 MAC_CONFIGURATION_ENTRY_E1H_ACTION_TYPE;
7248 DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x) E1HOV %d CLID mask %d\n",
7249 (set ? "setting" : "clearing"),
7250 config->config_table[0].msb_mac_addr,
7251 config->config_table[0].middle_mac_addr,
7252 config->config_table[0].lsb_mac_addr, bp->e1hov, cl_bit_vec);
7254 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
7255 U64_HI(bnx2x_sp_mapping(bp, mac_config)),
7256 U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
7259 static int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
7260 int *state_p, int poll)
7262 /* can take a while if any port is running */
7265 DP(NETIF_MSG_IFUP, "%s for state to become %x on IDX [%d]\n",
7266 poll ? "polling" : "waiting", state, idx);
7271 bnx2x_rx_int(bp->fp, 10);
7272 /* if index is different from 0
7273 * the reply for some commands will
7274 * be on the non-default queue
7277 bnx2x_rx_int(&bp->fp[idx], 10);
7280 mb(); /* state is changed by bnx2x_sp_event() */
7281 if (*state_p == state) {
7282 #ifdef BNX2X_STOP_ON_ERROR
7283 DP(NETIF_MSG_IFUP, "exit (cnt %d)\n", 5000 - cnt);
7295 BNX2X_ERR("timeout %s for state %x on IDX [%d]\n",
7296 poll ? "polling" : "waiting", state, idx);
7297 #ifdef BNX2X_STOP_ON_ERROR
7304 static void bnx2x_set_eth_mac_addr_e1h(struct bnx2x *bp, int set)
7306 bp->set_mac_pending++;
7309 bnx2x_set_mac_addr_e1h_gen(bp, set, bp->dev->dev_addr,
7310 (1 << bp->fp->cl_id), BP_FUNC(bp));
7312 /* Wait for a completion */
7313 bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending, set ? 0 : 1);
7316 static void bnx2x_set_eth_mac_addr_e1(struct bnx2x *bp, int set)
7318 bp->set_mac_pending++;
7321 bnx2x_set_mac_addr_e1_gen(bp, set, bp->dev->dev_addr,
7322 (1 << bp->fp->cl_id), (BP_PORT(bp) ? 32 : 0),
7325 /* Wait for a completion */
7326 bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending, set ? 0 : 1);
7331 * Set iSCSI MAC(s) at the next entries in the CAM after the ETH
7332 * MAC(s). This function will wait until the ramrod completion
7335 * @param bp driver handle
7336 * @param set set or clear the CAM entry
7338 * @return 0 if success, -ENODEV if ramrod doesn't return.
7340 static int bnx2x_set_iscsi_eth_mac_addr(struct bnx2x *bp, int set)
7342 u32 cl_bit_vec = (1 << BCM_ISCSI_ETH_CL_ID);
7344 bp->set_mac_pending++;
7347 /* Send a SET_MAC ramrod */
7349 bnx2x_set_mac_addr_e1_gen(bp, set, bp->iscsi_mac,
7350 cl_bit_vec, (BP_PORT(bp) ? 32 : 0) + 2,
7353 /* CAM allocation for E1H
7354 * unicasts: by func number
7355 * multicast: 20+FUNC*20, 20 each
7357 bnx2x_set_mac_addr_e1h_gen(bp, set, bp->iscsi_mac,
7358 cl_bit_vec, E1H_FUNC_MAX + BP_FUNC(bp));
7360 /* Wait for a completion when setting */
7361 bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending, set ? 0 : 1);
7367 static int bnx2x_setup_leading(struct bnx2x *bp)
7371 /* reset IGU state */
7372 bnx2x_ack_sb(bp, bp->fp[0].sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
7375 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_SETUP, 0, 0, 0, 0);
7377 /* Wait for completion */
7378 rc = bnx2x_wait_ramrod(bp, BNX2X_STATE_OPEN, 0, &(bp->state), 0);
7383 static int bnx2x_setup_multi(struct bnx2x *bp, int index)
7385 struct bnx2x_fastpath *fp = &bp->fp[index];
7387 /* reset IGU state */
7388 bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
7391 fp->state = BNX2X_FP_STATE_OPENING;
7392 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CLIENT_SETUP, index, 0,
7395 /* Wait for completion */
7396 return bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_OPEN, index,
7400 static int bnx2x_poll(struct napi_struct *napi, int budget);
7402 static void bnx2x_set_int_mode_msix(struct bnx2x *bp, int *num_rx_queues_out,
7403 int *num_tx_queues_out)
7405 int _num_rx_queues = 0, _num_tx_queues = 0;
7407 switch (bp->multi_mode) {
7408 case ETH_RSS_MODE_DISABLED:
7413 case ETH_RSS_MODE_REGULAR:
7415 _num_rx_queues = min_t(u32, num_rx_queues,
7416 BNX2X_MAX_QUEUES(bp));
7418 _num_rx_queues = min_t(u32, num_online_cpus(),
7419 BNX2X_MAX_QUEUES(bp));
7422 _num_tx_queues = min_t(u32, num_tx_queues,
7423 BNX2X_MAX_QUEUES(bp));
7425 _num_tx_queues = min_t(u32, num_online_cpus(),
7426 BNX2X_MAX_QUEUES(bp));
7428 /* There must not be more Tx queues than Rx queues */
7429 if (_num_tx_queues > _num_rx_queues) {
7430 BNX2X_ERR("number of tx queues (%d) > "
7431 "number of rx queues (%d)"
7432 " defaulting to %d\n",
7433 _num_tx_queues, _num_rx_queues,
7435 _num_tx_queues = _num_rx_queues;
7446 *num_rx_queues_out = _num_rx_queues;
7447 *num_tx_queues_out = _num_tx_queues;
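/* Illustrative example (added): with multi_mode=1 and both the
 * num_rx_queues and num_tx_queues module parameters left at 0,
 * an 8-CPU system gets rx = tx = min(8, BNX2X_MAX_QUEUES(bp)).
 */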
7450 static int bnx2x_set_int_mode(struct bnx2x *bp)
7457 bp->num_rx_queues = 1;
7458 bp->num_tx_queues = 1;
7459 DP(NETIF_MSG_IFUP, "set number of queues to 1\n");
7464 /* Set interrupt mode according to bp->multi_mode value */
7465 bnx2x_set_int_mode_msix(bp, &bp->num_rx_queues,
7466 &bp->num_tx_queues);
7468 DP(NETIF_MSG_IFUP, "set number of queues to: rx %d tx %d\n",
7469 bp->num_rx_queues, bp->num_tx_queues);
7471 /* if we can't use MSI-X we only need one fp,
7472 * so try to enable MSI-X with the requested number of fp's
7473 * and fall back to MSI or legacy INTx with one fp
7475 rc = bnx2x_enable_msix(bp);
7477 /* failed to enable MSI-X */
7479 BNX2X_ERR("Multi requested but failed to "
7480 "enable MSI-X (rx %d tx %d), "
7481 "set number of queues to 1\n",
7482 bp->num_rx_queues, bp->num_tx_queues);
7483 bp->num_rx_queues = 1;
7484 bp->num_tx_queues = 1;
7488 bp->dev->real_num_tx_queues = bp->num_tx_queues;
7493 static int bnx2x_cnic_notify(struct bnx2x *bp, int cmd);
7494 static void bnx2x_setup_cnic_irq_info(struct bnx2x *bp);
7497 /* must be called with rtnl_lock */
7498 static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
7503 #ifdef BNX2X_STOP_ON_ERROR
7504 if (unlikely(bp->panic))
7508 bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;
7510 rc = bnx2x_set_int_mode(bp);
7512 if (bnx2x_alloc_mem(bp))
7515 for_each_rx_queue(bp, i)
7516 bnx2x_fp(bp, i, disable_tpa) =
7517 ((bp->flags & TPA_ENABLE_FLAG) == 0);
7519 for_each_rx_queue(bp, i)
7520 netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
7523 bnx2x_napi_enable(bp);
7525 if (bp->flags & USING_MSIX_FLAG) {
7526 rc = bnx2x_req_msix_irqs(bp);
7528 pci_disable_msix(bp->pdev);
7532 /* Fall to INTx if failed to enable MSI-X due to lack of
7533 memory (in bnx2x_set_int_mode()) */
7534 if ((rc != -ENOMEM) && (int_mode != INT_MODE_INTx))
7535 bnx2x_enable_msi(bp);
7537 rc = bnx2x_req_irq(bp);
7539 BNX2X_ERR("IRQ request failed rc %d, aborting\n", rc);
7540 if (bp->flags & USING_MSI_FLAG)
7541 pci_disable_msi(bp->pdev);
7544 if (bp->flags & USING_MSI_FLAG) {
7545 bp->dev->irq = bp->pdev->irq;
7546 printk(KERN_INFO PFX "%s: using MSI IRQ %d\n",
7547 bp->dev->name, bp->pdev->irq);
7551 /* Send LOAD_REQUEST command to MCP.
7552 Returns the type of LOAD command:
7553 if it is the first port to be initialized,
7554 common blocks should be initialized; otherwise they are not.
7556 if (!BP_NOMCP(bp)) {
7557 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ);
7559 BNX2X_ERR("MCP response failure, aborting\n");
7563 if (load_code == FW_MSG_CODE_DRV_LOAD_REFUSED) {
7564 rc = -EBUSY; /* other port in diagnostic mode */
7569 int port = BP_PORT(bp);
7571 DP(NETIF_MSG_IFUP, "NO MCP - load counts %d, %d, %d\n",
7572 load_count[0], load_count[1], load_count[2]);
7574 load_count[1 + port]++;
7575 DP(NETIF_MSG_IFUP, "NO MCP - new load counts %d, %d, %d\n",
7576 load_count[0], load_count[1], load_count[2]);
7577 if (load_count[0] == 1)
7578 load_code = FW_MSG_CODE_DRV_LOAD_COMMON;
7579 else if (load_count[1 + port] == 1)
7580 load_code = FW_MSG_CODE_DRV_LOAD_PORT;
7582 load_code = FW_MSG_CODE_DRV_LOAD_FUNCTION;
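/* Descriptive note (added): in the no-MCP case load_count[0] counts
 * all loaded functions and load_count[1 + port] counts those on this
 * port, so the first function to load anywhere performs the COMMON
 * init, the first one on its port performs the PORT init, and every
 * other function only does FUNCTION init.
 */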
7585 if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
7586 (load_code == FW_MSG_CODE_DRV_LOAD_PORT))
7590 DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
7593 rc = bnx2x_init_hw(bp, load_code);
7595 BNX2X_ERR("HW init failed, aborting\n");
7599 /* Setup NIC internals and enable interrupts */
7600 bnx2x_nic_init(bp, load_code);
7602 if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) &&
7603 (bp->common.shmem2_base))
7604 SHMEM2_WR(bp, dcc_support,
7605 (SHMEM_DCC_SUPPORT_DISABLE_ENABLE_PF_TLV |
7606 SHMEM_DCC_SUPPORT_BANDWIDTH_ALLOCATION_TLV));
7608 /* Send LOAD_DONE command to MCP */
7609 if (!BP_NOMCP(bp)) {
7610 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE);
7612 BNX2X_ERR("MCP response failure, aborting\n");
7618 bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;
7620 rc = bnx2x_setup_leading(bp);
7622 BNX2X_ERR("Setup leading failed!\n");
7623 #ifndef BNX2X_STOP_ON_ERROR
7631 if (CHIP_IS_E1H(bp))
7632 if (bp->mf_config & FUNC_MF_CFG_FUNC_DISABLED) {
7633 DP(NETIF_MSG_IFUP, "mf_cfg function disabled\n");
7634 bp->flags |= MF_FUNC_DIS;
7637 if (bp->state == BNX2X_STATE_OPEN) {
7639 /* Enable Timer scan */
7640 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + BP_PORT(bp)*4, 1);
7642 for_each_nondefault_queue(bp, i) {
7643 rc = bnx2x_setup_multi(bp, i);
7653 bnx2x_set_eth_mac_addr_e1(bp, 1);
7655 bnx2x_set_eth_mac_addr_e1h(bp, 1);
7657 /* Set iSCSI L2 MAC */
7658 mutex_lock(&bp->cnic_mutex);
7659 if (bp->cnic_eth_dev.drv_state & CNIC_DRV_STATE_REGD) {
7660 bnx2x_set_iscsi_eth_mac_addr(bp, 1);
7661 bp->cnic_flags |= BNX2X_CNIC_FLAG_MAC_SET;
7663 mutex_unlock(&bp->cnic_mutex);
7668 bnx2x_initial_phy_init(bp, load_mode);
7670 /* Start fast path */
7671 switch (load_mode) {
7673 if (bp->state == BNX2X_STATE_OPEN) {
7674 /* Tx queues should only be re-enabled */
7675 netif_tx_wake_all_queues(bp->dev);
7677 /* Initialize the receive filter. */
7678 bnx2x_set_rx_mode(bp->dev);
7682 netif_tx_start_all_queues(bp->dev);
7683 if (bp->state != BNX2X_STATE_OPEN)
7684 netif_tx_disable(bp->dev);
7685 /* Initialize the receive filter. */
7686 bnx2x_set_rx_mode(bp->dev);
7690 /* Initialize the receive filter. */
7691 bnx2x_set_rx_mode(bp->dev);
7692 bp->state = BNX2X_STATE_DIAG;
7700 bnx2x__link_status_update(bp);
7702 /* start the timer */
7703 mod_timer(&bp->timer, jiffies + bp->current_interval);
7706 bnx2x_setup_cnic_irq_info(bp);
7707 if (bp->state == BNX2X_STATE_OPEN)
7708 bnx2x_cnic_notify(bp, CNIC_CTL_START_CMD);
7715 /* Disable Timer scan */
7716 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + BP_PORT(bp)*4, 0);
7719 bnx2x_int_disable_sync(bp, 1);
7720 if (!BP_NOMCP(bp)) {
7721 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP);
7722 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
7725 /* Free SKBs, SGEs, TPA pool and driver internals */
7726 bnx2x_free_skbs(bp);
7727 for_each_rx_queue(bp, i)
7728 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
7733 bnx2x_napi_disable(bp);
7734 for_each_rx_queue(bp, i)
7735 netif_napi_del(&bnx2x_fp(bp, i, napi));
7741 static int bnx2x_stop_multi(struct bnx2x *bp, int index)
7743 struct bnx2x_fastpath *fp = &bp->fp[index];
7746 /* halt the connection */
7747 fp->state = BNX2X_FP_STATE_HALTING;
7748 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, index, 0, fp->cl_id, 0);
7750 /* Wait for completion */
7751 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, index,
7753 if (rc) /* timeout */
7756 /* delete cfc entry */
7757 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CFC_DEL, index, 0, 0, 1);
7759 /* Wait for completion */
7760 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_CLOSED, index,
7765 static int bnx2x_stop_leading(struct bnx2x *bp)
7767 __le16 dsb_sp_prod_idx;
7768 /* if the other port is handling traffic,
7769 this can take a lot of time */
7775 /* Send HALT ramrod */
7776 bp->fp[0].state = BNX2X_FP_STATE_HALTING;
7777 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, 0, 0, bp->fp->cl_id, 0);
7779 /* Wait for completion */
7780 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, 0,
7781 &(bp->fp[0].state), 1);
7782 if (rc) /* timeout */
7785 dsb_sp_prod_idx = *bp->dsb_sp_prod;
7787 /* Send PORT_DELETE ramrod */
7788 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_DEL, 0, 0, 0, 1);
7790 /* Wait for completion to arrive on default status block;
7791 we are going to reset the chip anyway,
7792 so there is not much to do if this times out
7794 while (dsb_sp_prod_idx == *bp->dsb_sp_prod) {
7796 DP(NETIF_MSG_IFDOWN, "timeout waiting for port del "
7797 "dsb_sp_prod 0x%x != dsb_sp_prod_idx 0x%x\n",
7798 *bp->dsb_sp_prod, dsb_sp_prod_idx);
7799 #ifdef BNX2X_STOP_ON_ERROR
7807 rmb(); /* Refresh the dsb_sp_prod */
7809 bp->state = BNX2X_STATE_CLOSING_WAIT4_UNLOAD;
7810 bp->fp[0].state = BNX2X_FP_STATE_CLOSED;
7815 static void bnx2x_reset_func(struct bnx2x *bp)
7817 int port = BP_PORT(bp);
7818 int func = BP_FUNC(bp);
7822 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
7823 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
7826 /* Disable Timer scan */
7827 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 0);
7829 * Wait for at least 10ms and up to 2 seconds for the timers scan to complete
7832 for (i = 0; i < 200; i++) {
7834 if (!REG_RD(bp, TM_REG_LIN0_SCAN_ON + port*4))
7839 base = FUNC_ILT_BASE(func);
7840 for (i = base; i < base + ILT_PER_FUNC; i++)
7841 bnx2x_ilt_wr(bp, i, 0);
7844 static void bnx2x_reset_port(struct bnx2x *bp)
7846 int port = BP_PORT(bp);
7849 REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
7851 /* Do not rcv packets to BRB */
7852 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK + port*4, 0x0);
7853 /* Do not direct rcv packets that are not for MCP to the BRB */
7854 REG_WR(bp, (port ? NIG_REG_LLH1_BRB1_NOT_MCP :
7855 NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
7858 REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, 0);
7861 /* Check for BRB port occupancy */
7862 val = REG_RD(bp, BRB1_REG_PORT_NUM_OCC_BLOCKS_0 + port*4);
7864 DP(NETIF_MSG_IFDOWN,
7865 "BRB1 is not empty %d blocks are occupied\n", val);
7867 /* TODO: Close Doorbell port? */
7870 static void bnx2x_reset_chip(struct bnx2x *bp, u32 reset_code)
7872 DP(BNX2X_MSG_MCP, "function %d reset_code %x\n",
7873 BP_FUNC(bp), reset_code);
7875 switch (reset_code) {
7876 case FW_MSG_CODE_DRV_UNLOAD_COMMON:
7877 bnx2x_reset_port(bp);
7878 bnx2x_reset_func(bp);
7879 bnx2x_reset_common(bp);
7882 case FW_MSG_CODE_DRV_UNLOAD_PORT:
7883 bnx2x_reset_port(bp);
7884 bnx2x_reset_func(bp);
7887 case FW_MSG_CODE_DRV_UNLOAD_FUNCTION:
7888 bnx2x_reset_func(bp);
7892 BNX2X_ERR("Unknown reset_code (0x%x) from MCP\n", reset_code);
7897 /* must be called with rtnl_lock */
7898 static int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
7900 int port = BP_PORT(bp);
7905 bnx2x_cnic_notify(bp, CNIC_CTL_STOP_CMD);
7907 bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
7909 /* Set "drop all" */
7910 bp->rx_mode = BNX2X_RX_MODE_NONE;
7911 bnx2x_set_storm_rx_mode(bp);
7913 /* Disable HW interrupts, NAPI and Tx */
7914 bnx2x_netif_stop(bp, 1);
7916 del_timer_sync(&bp->timer);
7917 SHMEM_WR(bp, func_mb[BP_FUNC(bp)].drv_pulse_mb,
7918 (DRV_PULSE_ALWAYS_ALIVE | bp->fw_drv_pulse_wr_seq));
7919 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
7924 /* Wait until tx fastpath tasks complete */
7925 for_each_tx_queue(bp, i) {
7926 struct bnx2x_fastpath *fp = &bp->fp[i];
7929 while (bnx2x_has_tx_work_unload(fp)) {
7933 BNX2X_ERR("timeout waiting for queue[%d]\n",
7935 #ifdef BNX2X_STOP_ON_ERROR
7946 /* Give HW time to discard old tx messages */
7949 if (CHIP_IS_E1(bp)) {
7950 struct mac_configuration_cmd *config =
7951 bnx2x_sp(bp, mcast_config);
7953 bnx2x_set_eth_mac_addr_e1(bp, 0);
7955 for (i = 0; i < config->hdr.length; i++)
7956 CAM_INVALIDATE(config->config_table[i]);
7958 config->hdr.length = i;
7959 if (CHIP_REV_IS_SLOW(bp))
7960 config->hdr.offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
7962 config->hdr.offset = BNX2X_MAX_MULTICAST*(1 + port);
7963 config->hdr.client_id = bp->fp->cl_id;
7964 config->hdr.reserved1 = 0;
7966 bp->set_mac_pending++;
7969 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
7970 U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
7971 U64_LO(bnx2x_sp_mapping(bp, mcast_config)), 0);
7974 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
7976 bnx2x_set_eth_mac_addr_e1h(bp, 0);
7978 for (i = 0; i < MC_HASH_SIZE; i++)
7979 REG_WR(bp, MC_HASH_OFFSET(bp, i), 0);
7981 REG_WR(bp, MISC_REG_E1HMF_MODE, 0);
7984 /* Clear iSCSI L2 MAC */
7985 mutex_lock(&bp->cnic_mutex);
7986 if (bp->cnic_flags & BNX2X_CNIC_FLAG_MAC_SET) {
7987 bnx2x_set_iscsi_eth_mac_addr(bp, 0);
7988 bp->cnic_flags &= ~BNX2X_CNIC_FLAG_MAC_SET;
7990 mutex_unlock(&bp->cnic_mutex);
7993 if (unload_mode == UNLOAD_NORMAL)
7994 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
7996 else if (bp->flags & NO_WOL_FLAG)
7997 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP;
8000 u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
8001 u8 *mac_addr = bp->dev->dev_addr;
8003 /* The MAC address is written to entries 1-4 to
8004 preserve entry 0, which is used by the PMF */
8005 u8 entry = (BP_E1HVN(bp) + 1)*8;
8007 val = (mac_addr[0] << 8) | mac_addr[1];
8008 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry, val);
8010 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
8011 (mac_addr[4] << 8) | mac_addr[5];
8012 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry + 4, val);
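/* Illustrative example (added): for mac = 00:11:22:33:44:55 the two
 * writes above program 0x00000011 and 0x22334455; with
 * entry = (BP_E1HVN(bp) + 1) * 8, vn 0 lands on MAC match pair 1,
 * keeping pair 0 for the PMF as the comment above explains.
 */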
8014 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN;
8017 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
8019 /* Close multi and leading connections;
8020 completions for ramrods are collected in a synchronous way */
8021 for_each_nondefault_queue(bp, i)
8022 if (bnx2x_stop_multi(bp, i))
8025 rc = bnx2x_stop_leading(bp);
8027 BNX2X_ERR("Stop leading failed!\n");
8028 #ifdef BNX2X_STOP_ON_ERROR
8037 reset_code = bnx2x_fw_command(bp, reset_code);
8039 DP(NETIF_MSG_IFDOWN, "NO MCP - load counts %d, %d, %d\n",
8040 load_count[0], load_count[1], load_count[2]);
8042 load_count[1 + port]--;
8043 DP(NETIF_MSG_IFDOWN, "NO MCP - new load counts %d, %d, %d\n",
8044 load_count[0], load_count[1], load_count[2]);
8045 if (load_count[0] == 0)
8046 reset_code = FW_MSG_CODE_DRV_UNLOAD_COMMON;
8047 else if (load_count[1 + port] == 0)
8048 reset_code = FW_MSG_CODE_DRV_UNLOAD_PORT;
8050 reset_code = FW_MSG_CODE_DRV_UNLOAD_FUNCTION;
8053 if ((reset_code == FW_MSG_CODE_DRV_UNLOAD_COMMON) ||
8054 (reset_code == FW_MSG_CODE_DRV_UNLOAD_PORT))
8055 bnx2x__link_reset(bp);
8057 /* Reset the chip */
8058 bnx2x_reset_chip(bp, reset_code);
8060 /* Report UNLOAD_DONE to MCP */
8062 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
8066 /* Free SKBs, SGEs, TPA pool and driver internals */
8067 bnx2x_free_skbs(bp);
8068 for_each_rx_queue(bp, i)
8069 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
8070 for_each_rx_queue(bp, i)
8071 netif_napi_del(&bnx2x_fp(bp, i, napi));
8074 bp->state = BNX2X_STATE_CLOSED;
8076 netif_carrier_off(bp->dev);
8081 static void bnx2x_reset_task(struct work_struct *work)
8083 struct bnx2x *bp = container_of(work, struct bnx2x, reset_task);
8085 #ifdef BNX2X_STOP_ON_ERROR
8086 BNX2X_ERR("reset task called but STOP_ON_ERROR defined"
8087 " so reset not done to allow debug dump,\n"
8088 " you will need to reboot when done\n");
8094 if (!netif_running(bp->dev))
8095 goto reset_task_exit;
8097 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
8098 bnx2x_nic_load(bp, LOAD_NORMAL);
8104 /* end of nic load/unload */
8109 * Init service functions
8112 static inline u32 bnx2x_get_pretend_reg(struct bnx2x *bp, int func)
8115 case 0: return PXP2_REG_PGL_PRETEND_FUNC_F0;
8116 case 1: return PXP2_REG_PGL_PRETEND_FUNC_F1;
8117 case 2: return PXP2_REG_PGL_PRETEND_FUNC_F2;
8118 case 3: return PXP2_REG_PGL_PRETEND_FUNC_F3;
8119 case 4: return PXP2_REG_PGL_PRETEND_FUNC_F4;
8120 case 5: return PXP2_REG_PGL_PRETEND_FUNC_F5;
8121 case 6: return PXP2_REG_PGL_PRETEND_FUNC_F6;
8122 case 7: return PXP2_REG_PGL_PRETEND_FUNC_F7;
8124 BNX2X_ERR("Unsupported function index: %d\n", func);
8129 static void bnx2x_undi_int_disable_e1h(struct bnx2x *bp, int orig_func)
8131 u32 reg = bnx2x_get_pretend_reg(bp, orig_func), new_val;
8133 /* Flush all outstanding writes */
8136 /* Pretend to be function 0 */
8138 /* Flush the GRC transaction (in the chip) */
8139 new_val = REG_RD(bp, reg);
8141 BNX2X_ERR("Hmmm... Pretend register wasn't updated: (0,%d)!\n",
8146 /* From now we are in the "like-E1" mode */
8147 bnx2x_int_disable(bp);
8149 /* Flush all outstanding writes */
8152 /* Restore the original function settings */
8153 REG_WR(bp, reg, orig_func);
8154 new_val = REG_RD(bp, reg);
8155 if (new_val != orig_func) {
8156 BNX2X_ERR("Hmmm... Pretend register wasn't updated: (%d,%d)!\n",
8157 orig_func, new_val);
8162 static inline void bnx2x_undi_int_disable(struct bnx2x *bp, int func)
8164 if (CHIP_IS_E1H(bp))
8165 bnx2x_undi_int_disable_e1h(bp, func);
8167 bnx2x_int_disable(bp);
8170 static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
8174 /* Check if there is any driver already loaded */
8175 val = REG_RD(bp, MISC_REG_UNPREPARED);
8177 /* Check if it is the UNDI driver
8178 * UNDI driver initializes CID offset for normal bell to 0x7
8180 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
8181 val = REG_RD(bp, DORQ_REG_NORM_CID_OFST);
8183 u32 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
8185 int func = BP_FUNC(bp);
8189 /* clear the UNDI indication */
8190 REG_WR(bp, DORQ_REG_NORM_CID_OFST, 0);
8192 BNX2X_DEV_INFO("UNDI is active! reset device\n");
8194 /* try unload UNDI on port 0 */
8197 (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
8198 DRV_MSG_SEQ_NUMBER_MASK);
8199 reset_code = bnx2x_fw_command(bp, reset_code);
8201 /* if UNDI is loaded on the other port */
8202 if (reset_code != FW_MSG_CODE_DRV_UNLOAD_COMMON) {
8204 /* send "DONE" for previous unload */
8205 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
8207 /* unload UNDI on port 1 */
8210 (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
8211 DRV_MSG_SEQ_NUMBER_MASK);
8212 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
8214 bnx2x_fw_command(bp, reset_code);
8217 /* now it's safe to release the lock */
8218 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
8220 bnx2x_undi_int_disable(bp, func);
8222 /* close input traffic and wait for it */
8223 /* Do not rcv packets to BRB */
8225 (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_DRV_MASK :
8226 NIG_REG_LLH0_BRB1_DRV_MASK), 0x0);
8227 /* Do not direct rcv packets that are not for MCP to the BRB */
8230 (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_NOT_MCP :
8231 NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
8234 (BP_PORT(bp) ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
8235 MISC_REG_AEU_MASK_ATTN_FUNC_0), 0);
8238 /* save NIG port swap info */
8239 swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
8240 swap_en = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
8243 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
8246 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
8248 /* take the NIG out of reset and restore swap values */
8250 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
8251 MISC_REGISTERS_RESET_REG_1_RST_NIG);
8252 REG_WR(bp, NIG_REG_PORT_SWAP, swap_val);
8253 REG_WR(bp, NIG_REG_STRAP_OVERRIDE, swap_en);
8255 /* send unload done to the MCP */
8256 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
8258 /* restore our func and fw_seq */
8261 (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
8262 DRV_MSG_SEQ_NUMBER_MASK);
8265 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
8269 static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
8271 u32 val, val2, val3, val4, id;
8274 /* Get the chip revision id and number. */
8275 /* chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3 */
8276 val = REG_RD(bp, MISC_REG_CHIP_NUM);
8277 id = ((val & 0xffff) << 16);
8278 val = REG_RD(bp, MISC_REG_CHIP_REV);
8279 id |= ((val & 0xf) << 12);
8280 val = REG_RD(bp, MISC_REG_CHIP_METAL);
8281 id |= ((val & 0xff) << 4);
8282 val = REG_RD(bp, MISC_REG_BOND_ID);
8284 bp->common.chip_id = id;
8285 bp->link_params.chip_id = bp->common.chip_id;
8286 BNX2X_DEV_INFO("chip ID is 0x%x\n", id);
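/* Illustrative example (added): a BCM57710 A0 part with chip num
 * 0x164e, rev 0, metal 0 and bond_id 0 would report
 * chip_id = 0x164e0000 under the bit layout in the comment above.
 */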
8288 val = (REG_RD(bp, 0x2874) & 0x55);
8289 if ((bp->common.chip_id & 0x1) ||
8290 (CHIP_IS_E1(bp) && val) || (CHIP_IS_E1H(bp) && (val == 0x55))) {
8291 bp->flags |= ONE_PORT_FLAG;
8292 BNX2X_DEV_INFO("single port device\n");
8295 val = REG_RD(bp, MCP_REG_MCPR_NVM_CFG4);
8296 bp->common.flash_size = (NVRAM_1MB_SIZE <<
8297 (val & MCPR_NVM_CFG4_FLASH_SIZE));
8298 BNX2X_DEV_INFO("flash_size 0x%x (%d)\n",
8299 bp->common.flash_size, bp->common.flash_size);
8301 bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
8302 bp->common.shmem2_base = REG_RD(bp, MISC_REG_GENERIC_CR_0);
8303 bp->link_params.shmem_base = bp->common.shmem_base;
8304 BNX2X_DEV_INFO("shmem offset 0x%x shmem2 offset 0x%x\n",
8305 bp->common.shmem_base, bp->common.shmem2_base);
8307 if (!bp->common.shmem_base ||
8308 (bp->common.shmem_base < 0xA0000) ||
8309 (bp->common.shmem_base >= 0xC0000)) {
8310 BNX2X_DEV_INFO("MCP not active\n");
8311 bp->flags |= NO_MCP_FLAG;
8315 val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
8316 if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
8317 != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
8318 BNX2X_ERR("BAD MCP validity signature\n");
8320 bp->common.hw_config = SHMEM_RD(bp, dev_info.shared_hw_config.config);
8321 BNX2X_DEV_INFO("hw_config 0x%08x\n", bp->common.hw_config);
8323 bp->link_params.hw_led_mode = ((bp->common.hw_config &
8324 SHARED_HW_CFG_LED_MODE_MASK) >>
8325 SHARED_HW_CFG_LED_MODE_SHIFT);
8327 bp->link_params.feature_config_flags = 0;
8328 val = SHMEM_RD(bp, dev_info.shared_feature_config.config);
8329 if (val & SHARED_FEAT_CFG_OVERRIDE_PREEMPHASIS_CFG_ENABLED)
8330 bp->link_params.feature_config_flags |=
8331 FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
8333 bp->link_params.feature_config_flags &=
8334 ~FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
8336 val = SHMEM_RD(bp, dev_info.bc_rev) >> 8;
8337 bp->common.bc_ver = val;
8338 BNX2X_DEV_INFO("bc_ver %X\n", val);
8339 if (val < BNX2X_BC_VER) {
8340 /* for now only warn;
8341 * later we might need to enforce this */
8342 BNX2X_ERR("This driver needs bc_ver %X but found %X,"
8343 " please upgrade BC\n", BNX2X_BC_VER, val);
8345 bp->link_params.feature_config_flags |=
8346 (val >= REQ_BC_VER_4_VRFY_OPT_MDL) ?
8347 FEATURE_CONFIG_BC_SUPPORTS_OPT_MDL_VRFY : 0;
8349 if (BP_E1HVN(bp) == 0) {
8350 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_PMC, &pmc);
8351 bp->flags |= (pmc & PCI_PM_CAP_PME_D3cold) ? 0 : NO_WOL_FLAG;
8353 /* no WOL capability for E1HVN != 0 */
8354 bp->flags |= NO_WOL_FLAG;
8356 BNX2X_DEV_INFO("%sWoL capable\n",
8357 (bp->flags & NO_WOL_FLAG) ? "not " : "");
8359 val = SHMEM_RD(bp, dev_info.shared_hw_config.part_num);
8360 val2 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[4]);
8361 val3 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[8]);
8362 val4 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[12]);
8364 printk(KERN_INFO PFX "part number %X-%X-%X-%X\n",
8365 val, val2, val3, val4);
8368 static void __devinit bnx2x_link_settings_supported(struct bnx2x *bp,
8371 int port = BP_PORT(bp);
8374 switch (switch_cfg) {
8376 BNX2X_DEV_INFO("switch_cfg 0x%x (1G)\n", switch_cfg);
8379 SERDES_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
8380 switch (ext_phy_type) {
8381 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT:
8382 BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
8385 bp->port.supported |= (SUPPORTED_10baseT_Half |
8386 SUPPORTED_10baseT_Full |
8387 SUPPORTED_100baseT_Half |
8388 SUPPORTED_100baseT_Full |
8389 SUPPORTED_1000baseT_Full |
8390 SUPPORTED_2500baseX_Full |
8395 SUPPORTED_Asym_Pause);
8398 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_BCM5482:
8399 BNX2X_DEV_INFO("ext_phy_type 0x%x (5482)\n",
8402 bp->port.supported |= (SUPPORTED_10baseT_Half |
8403 SUPPORTED_10baseT_Full |
8404 SUPPORTED_100baseT_Half |
8405 SUPPORTED_100baseT_Full |
8406 SUPPORTED_1000baseT_Full |
8411 SUPPORTED_Asym_Pause);
8415 BNX2X_ERR("NVRAM config error. "
8416 "BAD SerDes ext_phy_config 0x%x\n",
8417 bp->link_params.ext_phy_config);
8421 bp->port.phy_addr = REG_RD(bp, NIG_REG_SERDES0_CTRL_PHY_ADDR +
8423 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
8426 case SWITCH_CFG_10G:
8427 BNX2X_DEV_INFO("switch_cfg 0x%x (10G)\n", switch_cfg);
8430 XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
8431 switch (ext_phy_type) {
8432 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
8433 BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
8436 bp->port.supported |= (SUPPORTED_10baseT_Half |
8437 SUPPORTED_10baseT_Full |
8438 SUPPORTED_100baseT_Half |
8439 SUPPORTED_100baseT_Full |
8440 SUPPORTED_1000baseT_Full |
8441 SUPPORTED_2500baseX_Full |
8442 SUPPORTED_10000baseT_Full |
8447 SUPPORTED_Asym_Pause);
8450 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
8451 BNX2X_DEV_INFO("ext_phy_type 0x%x (8072)\n",
8454 bp->port.supported |= (SUPPORTED_10000baseT_Full |
8455 SUPPORTED_1000baseT_Full |
8459 SUPPORTED_Asym_Pause);
8462 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
8463 BNX2X_DEV_INFO("ext_phy_type 0x%x (8073)\n",
8466 bp->port.supported |= (SUPPORTED_10000baseT_Full |
8467 SUPPORTED_2500baseX_Full |
8468 SUPPORTED_1000baseT_Full |
8472 SUPPORTED_Asym_Pause);
8475 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
8476 BNX2X_DEV_INFO("ext_phy_type 0x%x (8705)\n",
8479 bp->port.supported |= (SUPPORTED_10000baseT_Full |
8482 SUPPORTED_Asym_Pause);
8485 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
8486 BNX2X_DEV_INFO("ext_phy_type 0x%x (8706)\n",
8489 bp->port.supported |= (SUPPORTED_10000baseT_Full |
8490 SUPPORTED_1000baseT_Full |
8493 SUPPORTED_Asym_Pause);
8496 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
8497 BNX2X_DEV_INFO("ext_phy_type 0x%x (8726)\n",
8500 bp->port.supported |= (SUPPORTED_10000baseT_Full |
8501 SUPPORTED_1000baseT_Full |
8505 SUPPORTED_Asym_Pause);
8508 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
8509 BNX2X_DEV_INFO("ext_phy_type 0x%x (8727)\n",
8512 bp->port.supported |= (SUPPORTED_10000baseT_Full |
8513 SUPPORTED_1000baseT_Full |
8517 SUPPORTED_Asym_Pause);
8520 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
8521 BNX2X_DEV_INFO("ext_phy_type 0x%x (SFX7101)\n",
8524 bp->port.supported |= (SUPPORTED_10000baseT_Full |
8528 SUPPORTED_Asym_Pause);
8531 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481:
8532 BNX2X_DEV_INFO("ext_phy_type 0x%x (BCM8481)\n",
8535 bp->port.supported |= (SUPPORTED_10baseT_Half |
8536 SUPPORTED_10baseT_Full |
8537 SUPPORTED_100baseT_Half |
8538 SUPPORTED_100baseT_Full |
8539 SUPPORTED_1000baseT_Full |
8540 SUPPORTED_10000baseT_Full |
8544 SUPPORTED_Asym_Pause);
8547 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
8548 BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
8549 bp->link_params.ext_phy_config);
8553 BNX2X_ERR("NVRAM config error. "
8554 "BAD XGXS ext_phy_config 0x%x\n",
8555 bp->link_params.ext_phy_config);
8559 bp->port.phy_addr = REG_RD(bp, NIG_REG_XGXS0_CTRL_PHY_ADDR +
8561 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
8566 BNX2X_ERR("BAD switch_cfg link_config 0x%x\n",
8567 bp->port.link_config);
8570 bp->link_params.phy_addr = bp->port.phy_addr;
8572 /* mask what we support according to speed_cap_mask */
8573 if (!(bp->link_params.speed_cap_mask &
8574 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF))
8575 bp->port.supported &= ~SUPPORTED_10baseT_Half;
8577 if (!(bp->link_params.speed_cap_mask &
8578 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL))
8579 bp->port.supported &= ~SUPPORTED_10baseT_Full;
8581 if (!(bp->link_params.speed_cap_mask &
8582 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF))
8583 bp->port.supported &= ~SUPPORTED_100baseT_Half;
8585 if (!(bp->link_params.speed_cap_mask &
8586 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL))
8587 bp->port.supported &= ~SUPPORTED_100baseT_Full;
8589 if (!(bp->link_params.speed_cap_mask &
8590 PORT_HW_CFG_SPEED_CAPABILITY_D0_1G))
8591 bp->port.supported &= ~(SUPPORTED_1000baseT_Half |
8592 SUPPORTED_1000baseT_Full);
8594 if (!(bp->link_params.speed_cap_mask &
8595 PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G))
8596 bp->port.supported &= ~SUPPORTED_2500baseX_Full;
8598 if (!(bp->link_params.speed_cap_mask &
8599 PORT_HW_CFG_SPEED_CAPABILITY_D0_10G))
8600 bp->port.supported &= ~SUPPORTED_10000baseT_Full;
8602 BNX2X_DEV_INFO("supported 0x%x\n", bp->port.supported);
8605 static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
8606 {
8607 bp->link_params.req_duplex = DUPLEX_FULL;
8609 switch (bp->port.link_config & PORT_FEATURE_LINK_SPEED_MASK) {
8610 case PORT_FEATURE_LINK_SPEED_AUTO:
8611 if (bp->port.supported & SUPPORTED_Autoneg) {
8612 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
8613 bp->port.advertising = bp->port.supported;
8614 } else {
8615 u32 ext_phy_type =
8616 XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
8618 if ((ext_phy_type ==
8619 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705) ||
8620 (ext_phy_type ==
8621 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706)) {
8622 /* force 10G, no AN */
8623 bp->link_params.req_line_speed = SPEED_10000;
8624 bp->port.advertising =
8625 (ADVERTISED_10000baseT_Full |
8626 ADVERTISED_FIBRE);
8627 break;
8628 }
8629 BNX2X_ERR("NVRAM config error. "
8630 "Invalid link_config 0x%x"
8631 " Autoneg not supported\n",
8632 bp->port.link_config);
8633 return;
8634 }
8635 break;
8637 case PORT_FEATURE_LINK_SPEED_10M_FULL:
8638 if (bp->port.supported & SUPPORTED_10baseT_Full) {
8639 bp->link_params.req_line_speed = SPEED_10;
8640 bp->port.advertising = (ADVERTISED_10baseT_Full |
8641 ADVERTISED_TP);
8642 } else {
8643 BNX2X_ERR("NVRAM config error. "
8644 "Invalid link_config 0x%x"
8645 " speed_cap_mask 0x%x\n",
8646 bp->port.link_config,
8647 bp->link_params.speed_cap_mask);
8648 return;
8649 }
8650 break;
8652 case PORT_FEATURE_LINK_SPEED_10M_HALF:
8653 if (bp->port.supported & SUPPORTED_10baseT_Half) {
8654 bp->link_params.req_line_speed = SPEED_10;
8655 bp->link_params.req_duplex = DUPLEX_HALF;
8656 bp->port.advertising = (ADVERTISED_10baseT_Half |
8657 ADVERTISED_TP);
8658 } else {
8659 BNX2X_ERR("NVRAM config error. "
8660 "Invalid link_config 0x%x"
8661 " speed_cap_mask 0x%x\n",
8662 bp->port.link_config,
8663 bp->link_params.speed_cap_mask);
8664 return;
8665 }
8666 break;
8668 case PORT_FEATURE_LINK_SPEED_100M_FULL:
8669 if (bp->port.supported & SUPPORTED_100baseT_Full) {
8670 bp->link_params.req_line_speed = SPEED_100;
8671 bp->port.advertising = (ADVERTISED_100baseT_Full |
8672 ADVERTISED_TP);
8673 } else {
8674 BNX2X_ERR("NVRAM config error. "
8675 "Invalid link_config 0x%x"
8676 " speed_cap_mask 0x%x\n",
8677 bp->port.link_config,
8678 bp->link_params.speed_cap_mask);
8679 return;
8680 }
8681 break;
8683 case PORT_FEATURE_LINK_SPEED_100M_HALF:
8684 if (bp->port.supported & SUPPORTED_100baseT_Half) {
8685 bp->link_params.req_line_speed = SPEED_100;
8686 bp->link_params.req_duplex = DUPLEX_HALF;
8687 bp->port.advertising = (ADVERTISED_100baseT_Half |
8688 ADVERTISED_TP);
8689 } else {
8690 BNX2X_ERR("NVRAM config error. "
8691 "Invalid link_config 0x%x"
8692 " speed_cap_mask 0x%x\n",
8693 bp->port.link_config,
8694 bp->link_params.speed_cap_mask);
8695 return;
8696 }
8697 break;
8699 case PORT_FEATURE_LINK_SPEED_1G:
8700 if (bp->port.supported & SUPPORTED_1000baseT_Full) {
8701 bp->link_params.req_line_speed = SPEED_1000;
8702 bp->port.advertising = (ADVERTISED_1000baseT_Full |
8703 ADVERTISED_TP);
8704 } else {
8705 BNX2X_ERR("NVRAM config error. "
8706 "Invalid link_config 0x%x"
8707 " speed_cap_mask 0x%x\n",
8708 bp->port.link_config,
8709 bp->link_params.speed_cap_mask);
8710 return;
8711 }
8712 break;
8714 case PORT_FEATURE_LINK_SPEED_2_5G:
8715 if (bp->port.supported & SUPPORTED_2500baseX_Full) {
8716 bp->link_params.req_line_speed = SPEED_2500;
8717 bp->port.advertising = (ADVERTISED_2500baseX_Full |
8718 ADVERTISED_TP);
8719 } else {
8720 BNX2X_ERR("NVRAM config error. "
8721 "Invalid link_config 0x%x"
8722 " speed_cap_mask 0x%x\n",
8723 bp->port.link_config,
8724 bp->link_params.speed_cap_mask);
8725 return;
8726 }
8727 break;
8729 case PORT_FEATURE_LINK_SPEED_10G_CX4:
8730 case PORT_FEATURE_LINK_SPEED_10G_KX4:
8731 case PORT_FEATURE_LINK_SPEED_10G_KR:
8732 if (bp->port.supported & SUPPORTED_10000baseT_Full) {
8733 bp->link_params.req_line_speed = SPEED_10000;
8734 bp->port.advertising = (ADVERTISED_10000baseT_Full |
8735 ADVERTISED_FIBRE);
8736 } else {
8737 BNX2X_ERR("NVRAM config error. "
8738 "Invalid link_config 0x%x"
8739 " speed_cap_mask 0x%x\n",
8740 bp->port.link_config,
8741 bp->link_params.speed_cap_mask);
8742 return;
8743 }
8744 break;
8746 default:
8747 BNX2X_ERR("NVRAM config error. "
8748 "BAD link speed link_config 0x%x\n",
8749 bp->port.link_config);
8750 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
8751 bp->port.advertising = bp->port.supported;
8752 break;
8753 }
8755 bp->link_params.req_flow_ctrl = (bp->port.link_config &
8756 PORT_FEATURE_FLOW_CONTROL_MASK);
8757 if ((bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO) &&
8758 !(bp->port.supported & SUPPORTED_Autoneg))
8759 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_NONE;
8761 BNX2X_DEV_INFO("req_line_speed %d req_duplex %d req_flow_ctrl 0x%x"
8762 " advertising 0x%x\n",
8763 bp->link_params.req_line_speed,
8764 bp->link_params.req_duplex,
8765 bp->link_params.req_flow_ctrl, bp->port.advertising);
8766 }
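/*
 * Note: flow control requested as AUTO is only meaningful when the port
 * can autonegotiate, so the code above falls back to
 * BNX2X_FLOW_CTRL_NONE when SUPPORTED_Autoneg is not set.
 */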
8768 static void __devinit bnx2x_set_mac_buf(u8 *mac_buf, u32 mac_lo, u16 mac_hi)
8769 {
8770 mac_hi = cpu_to_be16(mac_hi);
8771 mac_lo = cpu_to_be32(mac_lo);
8772 memcpy(mac_buf, &mac_hi, sizeof(mac_hi));
8773 memcpy(mac_buf + sizeof(mac_hi), &mac_lo, sizeof(mac_lo));
8774 }
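/*
 * The MAC address lives in shmem as a 16-bit "upper" and a 32-bit "lower"
 * word; converting both halves to big-endian and concatenating them yields
 * the canonical 6-byte wire-order address. For example (values
 * illustrative only): mac_hi 0x0010, mac_lo 0x18012345 becomes
 * 00:10:18:01:23:45.
 */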
8776 static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp)
8777 {
8778 int port = BP_PORT(bp);
8779 u32 val, val2;
8780 u32 config;
8781 u16 i;
8782 u32 ext_phy_type;
8784 bp->link_params.bp = bp;
8785 bp->link_params.port = port;
8787 bp->link_params.lane_config =
8788 SHMEM_RD(bp, dev_info.port_hw_config[port].lane_config);
8789 bp->link_params.ext_phy_config =
8790 SHMEM_RD(bp,
8791 dev_info.port_hw_config[port].external_phy_config);
8792 /* BCM8727_NOC => BCM8727 no over current */
8793 if (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config) ==
8794 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727_NOC) {
8795 bp->link_params.ext_phy_config &=
8796 ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
8797 bp->link_params.ext_phy_config |=
8798 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727;
8799 bp->link_params.feature_config_flags |=
8800 FEATURE_CONFIG_BCM8727_NOC;
8801 }
8803 bp->link_params.speed_cap_mask =
8804 SHMEM_RD(bp,
8805 dev_info.port_hw_config[port].speed_capability_mask);
8807 bp->port.link_config =
8808 SHMEM_RD(bp, dev_info.port_feature_config[port].link_config);
8810 /* Get the 4 lanes xgxs config rx and tx */
8811 for (i = 0; i < 2; i++) {
8812 val = SHMEM_RD(bp,
8813 dev_info.port_hw_config[port].xgxs_config_rx[i<<1]);
8814 bp->link_params.xgxs_config_rx[i << 1] = ((val>>16) & 0xffff);
8815 bp->link_params.xgxs_config_rx[(i << 1) + 1] = (val & 0xffff);
8817 val = SHMEM_RD(bp,
8818 dev_info.port_hw_config[port].xgxs_config_tx[i<<1]);
8819 bp->link_params.xgxs_config_tx[i << 1] = ((val>>16) & 0xffff);
8820 bp->link_params.xgxs_config_tx[(i << 1) + 1] = (val & 0xffff);
8821 }
8823 /* If the device is capable of WoL, set the default state according
8824 * to the HW
8825 */
8826 config = SHMEM_RD(bp, dev_info.port_feature_config[port].config);
8827 bp->wol = (!(bp->flags & NO_WOL_FLAG) &&
8828 (config & PORT_FEATURE_WOL_ENABLED));
8830 BNX2X_DEV_INFO("lane_config 0x%08x ext_phy_config 0x%08x"
8831 " speed_cap_mask 0x%08x link_config 0x%08x\n",
8832 bp->link_params.lane_config,
8833 bp->link_params.ext_phy_config,
8834 bp->link_params.speed_cap_mask, bp->port.link_config);
8836 bp->link_params.switch_cfg |= (bp->port.link_config &
8837 PORT_FEATURE_CONNECTED_SWITCH_MASK);
8838 bnx2x_link_settings_supported(bp, bp->link_params.switch_cfg);
8840 bnx2x_link_settings_requested(bp);
8843 * If connected directly, work with the internal PHY, otherwise, work
8844 * with the external PHY
8846 ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
8847 if (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT)
8848 bp->mdio.prtad = bp->link_params.phy_addr;
8850 else if ((ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE) &&
8851 (ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN))
8852 bp->mdio.prtad =
8853 XGXS_EXT_PHY_ADDR(bp->link_params.ext_phy_config);
8855 val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper);
8856 val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower);
8857 bnx2x_set_mac_buf(bp->dev->dev_addr, val, val2);
8858 memcpy(bp->link_params.mac_addr, bp->dev->dev_addr, ETH_ALEN);
8859 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
8861 #ifdef BCM_CNIC
8862 val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].iscsi_mac_upper);
8863 val = SHMEM_RD(bp, dev_info.port_hw_config[port].iscsi_mac_lower);
8864 bnx2x_set_mac_buf(bp->iscsi_mac, val, val2);
8865 #endif
8867 }
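/*
 * Port-level probe summary: lane/PHY configuration, the speed capability
 * mask and link_config are read from the shmem region written by the
 * bootcode, the supported/requested link settings are derived from them,
 * and finally the MAC address(es) are fetched. The BCM8727_NOC entry is
 * normalized above to a plain BCM8727 plus a feature flag so the link
 * code only has to know one PHY type.
 */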
8868 static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
8869 {
8870 int func = BP_FUNC(bp);
8871 u32 val, val2;
8872 int rc = 0;
8874 bnx2x_get_common_hwinfo(bp);
8876 bp->e1hov = 0;
8877 bp->e1hmf = 0;
8878 if (CHIP_IS_E1H(bp)) {
8879 bp->mf_config =
8880 SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
8882 val = (SHMEM_RD(bp, mf_cfg.func_mf_config[FUNC_0].e1hov_tag) &
8883 FUNC_MF_CFG_E1HOV_TAG_MASK);
8884 if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT)
8885 bp->e1hmf = 1;
8886 BNX2X_DEV_INFO("%s function mode\n",
8887 IS_E1HMF(bp) ? "multi" : "single");
8889 if (IS_E1HMF(bp)) {
8890 val = (SHMEM_RD(bp, mf_cfg.func_mf_config[func].
8891 e1hov_tag) &
8892 FUNC_MF_CFG_E1HOV_TAG_MASK);
8893 if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
8894 bp->e1hov = val;
8895 BNX2X_DEV_INFO("E1HOV for func %d is %d "
8896 "(0x%04x)\n",
8897 func, bp->e1hov, bp->e1hov);
8898 } else {
8899 BNX2X_ERR("!!! No valid E1HOV for func %d,"
8900 " aborting\n", func);
8901 rc = -EPERM;
8902 }
8903 } else {
8904 if (BP_E1HVN(bp)) {
8905 BNX2X_ERR("!!! VN %d in single function mode,"
8906 " aborting\n", BP_E1HVN(bp));
8907 rc = -EPERM;
8908 }
8909 }
8910 }
8912 if (!BP_NOMCP(bp)) {
8913 bnx2x_get_port_hwinfo(bp);
8915 bp->fw_seq = (SHMEM_RD(bp, func_mb[func].drv_mb_header) &
8916 DRV_MSG_SEQ_NUMBER_MASK);
8917 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
8918 }
8920 if (IS_E1HMF(bp)) {
8921 val2 = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_upper);
8922 val = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_lower);
8923 if ((val2 != FUNC_MF_CFG_UPPERMAC_DEFAULT) &&
8924 (val != FUNC_MF_CFG_LOWERMAC_DEFAULT)) {
8925 bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
8926 bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
8927 bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
8928 bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
8929 bp->dev->dev_addr[4] = (u8)(val >> 8 & 0xff);
8930 bp->dev->dev_addr[5] = (u8)(val & 0xff);
8931 memcpy(bp->link_params.mac_addr, bp->dev->dev_addr,
8932 ETH_ALEN);
8933 memcpy(bp->dev->perm_addr, bp->dev->dev_addr,
8934 ETH_ALEN);
8935 }
8937 return rc;
8938 }
8940 if (BP_NOMCP(bp)) {
8941 /* only supposed to happen on emulation/FPGA */
8942 BNX2X_ERR("warning random MAC workaround active\n");
8943 random_ether_addr(bp->dev->dev_addr);
8944 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
8945 }
8947 return rc;
8948 }
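/*
 * On E1H the outer-VLAN tag (E1HOV) read from the MF configuration
 * decides whether the function runs in multi-function mode; a VN > 0
 * without a valid tag is rejected with -EPERM above, since traffic could
 * not be steered to it.
 */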
8950 static int __devinit bnx2x_init_bp(struct bnx2x *bp)
8951 {
8952 int func = BP_FUNC(bp);
8953 int timer_interval;
8954 int rc;
8956 /* Disable interrupt handling until HW is initialized */
8957 atomic_set(&bp->intr_sem, 1);
8958 smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */
8960 mutex_init(&bp->port.phy_mutex);
8961 mutex_init(&bp->fw_mb_mutex);
8962 #ifdef BCM_CNIC
8963 mutex_init(&bp->cnic_mutex);
8964 #endif
8966 INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task);
8967 INIT_WORK(&bp->reset_task, bnx2x_reset_task);
8969 rc = bnx2x_get_hwinfo(bp);
8971 /* need to reset chip if undi was active */
8972 if (!BP_NOMCP(bp))
8973 bnx2x_undi_unload(bp);
8975 if (CHIP_REV_IS_FPGA(bp))
8976 printk(KERN_ERR PFX "FPGA detected\n");
8978 if (BP_NOMCP(bp) && (func == 0))
8979 printk(KERN_ERR PFX
8980 "MCP disabled, must load devices in order!\n");
8982 /* Set multi queue mode */
8983 if ((multi_mode != ETH_RSS_MODE_DISABLED) &&
8984 ((int_mode == INT_MODE_INTx) || (int_mode == INT_MODE_MSI))) {
8985 printk(KERN_ERR PFX
8986 "Multi disabled since int_mode requested is not MSI-X\n");
8987 multi_mode = ETH_RSS_MODE_DISABLED;
8988 }
8989 bp->multi_mode = multi_mode;
8992 /* Set TPA flags */
8993 if (disable_tpa) {
8994 bp->flags &= ~TPA_ENABLE_FLAG;
8995 bp->dev->features &= ~NETIF_F_LRO;
8996 } else {
8997 bp->flags |= TPA_ENABLE_FLAG;
8998 bp->dev->features |= NETIF_F_LRO;
8999 }
9001 if (CHIP_IS_E1(bp))
9002 bp->dropless_fc = 0;
9003 else
9004 bp->dropless_fc = dropless_fc;
9006 bp->mrrs = mrrs;
9008 bp->tx_ring_size = MAX_TX_AVAIL;
9009 bp->rx_ring_size = MAX_RX_AVAIL;
9011 bp->rx_csum = 1;
9013 bp->tx_ticks = 50;
9014 bp->rx_ticks = 25;
9016 timer_interval = (CHIP_REV_IS_SLOW(bp) ? 5*HZ : HZ);
9017 bp->current_interval = (poll ? poll : timer_interval);
9019 init_timer(&bp->timer);
9020 bp->timer.expires = jiffies + bp->current_interval;
9021 bp->timer.data = (unsigned long) bp;
9022 bp->timer.function = bnx2x_timer;
9024 return rc;
9025 }
9027 /*
9028 * ethtool service functions
9029 */
9031 /* All ethtool functions called with rtnl_lock */
9033 static int bnx2x_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
9034 {
9035 struct bnx2x *bp = netdev_priv(dev);
9037 cmd->supported = bp->port.supported;
9038 cmd->advertising = bp->port.advertising;
9040 if ((bp->state == BNX2X_STATE_OPEN) &&
9041 !(bp->flags & MF_FUNC_DIS) &&
9042 (bp->link_vars.link_up)) {
9043 cmd->speed = bp->link_vars.line_speed;
9044 cmd->duplex = bp->link_vars.duplex;
9045 if (IS_E1HMF(bp)) {
9046 u16 vn_max_rate;
9048 vn_max_rate =
9049 ((bp->mf_config & FUNC_MF_CFG_MAX_BW_MASK) >>
9050 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
9051 if (vn_max_rate < cmd->speed)
9052 cmd->speed = vn_max_rate;
9053 }
9054 } else {
9055 cmd->speed = bp->link_params.req_line_speed;
9056 cmd->duplex = bp->link_params.req_duplex;
9057 }
9059 if (bp->link_params.switch_cfg == SWITCH_CFG_10G) {
9060 u32 ext_phy_type =
9061 XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
9063 switch (ext_phy_type) {
9064 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
9065 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
9066 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
9067 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
9068 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
9069 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
9070 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
9071 cmd->port = PORT_FIBRE;
9072 break;
9074 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
9075 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481:
9076 cmd->port = PORT_TP;
9077 break;
9079 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
9080 BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
9081 bp->link_params.ext_phy_config);
9082 break;
9084 default:
9085 DP(NETIF_MSG_LINK, "BAD XGXS ext_phy_config 0x%x\n",
9086 bp->link_params.ext_phy_config);
9087 break;
9088 }
9089 } else
9090 cmd->port = PORT_TP;
9092 cmd->phy_address = bp->mdio.prtad;
9093 cmd->transceiver = XCVR_INTERNAL;
9095 if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
9096 cmd->autoneg = AUTONEG_ENABLE;
9097 else
9098 cmd->autoneg = AUTONEG_DISABLE;
9100 cmd->maxtxpkt = 0;
9101 cmd->maxrxpkt = 0;
9103 DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
9104 DP_LEVEL " supported 0x%x advertising 0x%x speed %d\n"
9105 DP_LEVEL " duplex %d port %d phy_address %d transceiver %d\n"
9106 DP_LEVEL " autoneg %d maxtxpkt %d maxrxpkt %d\n",
9107 cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
9108 cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
9109 cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);
9111 return 0;
9112 }
9114 static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
9115 {
9116 struct bnx2x *bp = netdev_priv(dev);
9117 u32 advertising;
9119 if (IS_E1HMF(bp))
9120 return 0;
9122 DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
9123 DP_LEVEL " supported 0x%x advertising 0x%x speed %d\n"
9124 DP_LEVEL " duplex %d port %d phy_address %d transceiver %d\n"
9125 DP_LEVEL " autoneg %d maxtxpkt %d maxrxpkt %d\n",
9126 cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
9127 cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
9128 cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);
9130 if (cmd->autoneg == AUTONEG_ENABLE) {
9131 if (!(bp->port.supported & SUPPORTED_Autoneg)) {
9132 DP(NETIF_MSG_LINK, "Autoneg not supported\n");
9133 return -EINVAL;
9134 }
9136 /* advertise the requested speed and duplex if supported */
9137 cmd->advertising &= bp->port.supported;
9139 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
9140 bp->link_params.req_duplex = DUPLEX_FULL;
9141 bp->port.advertising |= (ADVERTISED_Autoneg |
9142 cmd->advertising);
9144 } else { /* forced speed */
9145 /* advertise the requested speed and duplex if supported */
9146 switch (cmd->speed) {
9147 case SPEED_10:
9148 if (cmd->duplex == DUPLEX_FULL) {
9149 if (!(bp->port.supported &
9150 SUPPORTED_10baseT_Full)) {
9151 DP(NETIF_MSG_LINK,
9152 "10M full not supported\n");
9153 return -EINVAL;
9154 }
9156 advertising = (ADVERTISED_10baseT_Full |
9157 ADVERTISED_TP);
9158 } else {
9159 if (!(bp->port.supported &
9160 SUPPORTED_10baseT_Half)) {
9161 DP(NETIF_MSG_LINK,
9162 "10M half not supported\n");
9163 return -EINVAL;
9164 }
9166 advertising = (ADVERTISED_10baseT_Half |
9167 ADVERTISED_TP);
9168 }
9169 break;
9171 case SPEED_100:
9172 if (cmd->duplex == DUPLEX_FULL) {
9173 if (!(bp->port.supported &
9174 SUPPORTED_100baseT_Full)) {
9175 DP(NETIF_MSG_LINK,
9176 "100M full not supported\n");
9177 return -EINVAL;
9178 }
9180 advertising = (ADVERTISED_100baseT_Full |
9181 ADVERTISED_TP);
9182 } else {
9183 if (!(bp->port.supported &
9184 SUPPORTED_100baseT_Half)) {
9185 DP(NETIF_MSG_LINK,
9186 "100M half not supported\n");
9187 return -EINVAL;
9188 }
9190 advertising = (ADVERTISED_100baseT_Half |
9191 ADVERTISED_TP);
9192 }
9193 break;
9195 case SPEED_1000:
9196 if (cmd->duplex != DUPLEX_FULL) {
9197 DP(NETIF_MSG_LINK, "1G half not supported\n");
9198 return -EINVAL;
9199 }
9201 if (!(bp->port.supported & SUPPORTED_1000baseT_Full)) {
9202 DP(NETIF_MSG_LINK, "1G full not supported\n");
9203 return -EINVAL;
9204 }
9206 advertising = (ADVERTISED_1000baseT_Full |
9207 ADVERTISED_TP);
9208 break;
9210 case SPEED_2500:
9211 if (cmd->duplex != DUPLEX_FULL) {
9212 DP(NETIF_MSG_LINK,
9213 "2.5G half not supported\n");
9214 return -EINVAL;
9215 }
9217 if (!(bp->port.supported & SUPPORTED_2500baseX_Full)) {
9218 DP(NETIF_MSG_LINK,
9219 "2.5G full not supported\n");
9220 return -EINVAL;
9221 }
9223 advertising = (ADVERTISED_2500baseX_Full |
9224 ADVERTISED_TP);
9225 break;
9227 case SPEED_10000:
9228 if (cmd->duplex != DUPLEX_FULL) {
9229 DP(NETIF_MSG_LINK, "10G half not supported\n");
9230 return -EINVAL;
9231 }
9233 if (!(bp->port.supported & SUPPORTED_10000baseT_Full)) {
9234 DP(NETIF_MSG_LINK, "10G full not supported\n");
9235 return -EINVAL;
9236 }
9238 advertising = (ADVERTISED_10000baseT_Full |
9239 ADVERTISED_FIBRE);
9240 break;
9242 default:
9243 DP(NETIF_MSG_LINK, "Unsupported speed\n");
9244 return -EINVAL;
9245 }
9247 bp->link_params.req_line_speed = cmd->speed;
9248 bp->link_params.req_duplex = cmd->duplex;
9249 bp->port.advertising = advertising;
9250 }
9252 DP(NETIF_MSG_LINK, "req_line_speed %d\n"
9253 DP_LEVEL " req_duplex %d advertising 0x%x\n",
9254 bp->link_params.req_line_speed, bp->link_params.req_duplex,
9255 bp->port.advertising);
9257 if (netif_running(dev)) {
9258 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
9259 bnx2x_link_set(bp);
9260 }
9262 return 0;
9263 }
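/*
 * Note: with autoneg the user's advertising mask is intersected with
 * bp->port.supported, while a forced speed/duplex must match a supported
 * mode exactly or the request is rejected; the new settings only take
 * effect through bnx2x_link_set() when the interface is running.
 */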
9265 #define IS_E1_ONLINE(info) (((info) & RI_E1_ONLINE) == RI_E1_ONLINE)
9266 #define IS_E1H_ONLINE(info) (((info) & RI_E1H_ONLINE) == RI_E1H_ONLINE)
9268 static int bnx2x_get_regs_len(struct net_device *dev)
9269 {
9270 struct bnx2x *bp = netdev_priv(dev);
9271 int regdump_len = 0;
9272 int i;
9274 if (CHIP_IS_E1(bp)) {
9275 for (i = 0; i < REGS_COUNT; i++)
9276 if (IS_E1_ONLINE(reg_addrs[i].info))
9277 regdump_len += reg_addrs[i].size;
9279 for (i = 0; i < WREGS_COUNT_E1; i++)
9280 if (IS_E1_ONLINE(wreg_addrs_e1[i].info))
9281 regdump_len += wreg_addrs_e1[i].size *
9282 (1 + wreg_addrs_e1[i].read_regs_count);
9283 } else { /* E1H */
9285 for (i = 0; i < REGS_COUNT; i++)
9286 if (IS_E1H_ONLINE(reg_addrs[i].info))
9287 regdump_len += reg_addrs[i].size;
9289 for (i = 0; i < WREGS_COUNT_E1H; i++)
9290 if (IS_E1H_ONLINE(wreg_addrs_e1h[i].info))
9291 regdump_len += wreg_addrs_e1h[i].size *
9292 (1 + wreg_addrs_e1h[i].read_regs_count);
9293 }
9295 regdump_len += sizeof(struct dump_hdr);
9297 return regdump_len;
9298 }
9300 static void bnx2x_get_regs(struct net_device *dev,
9301 struct ethtool_regs *regs, void *_p)
9302 {
9303 u32 *p = _p, i, j;
9304 struct bnx2x *bp = netdev_priv(dev);
9305 struct dump_hdr dump_hdr = {0};
9307 regs->version = 0;
9308 memset(p, 0, regs->len);
9310 if (!netif_running(bp->dev))
9311 return;
9313 dump_hdr.hdr_size = (sizeof(struct dump_hdr) / 4) - 1;
9314 dump_hdr.dump_sign = dump_sign_all;
9315 dump_hdr.xstorm_waitp = REG_RD(bp, XSTORM_WAITP_ADDR);
9316 dump_hdr.tstorm_waitp = REG_RD(bp, TSTORM_WAITP_ADDR);
9317 dump_hdr.ustorm_waitp = REG_RD(bp, USTORM_WAITP_ADDR);
9318 dump_hdr.cstorm_waitp = REG_RD(bp, CSTORM_WAITP_ADDR);
9319 dump_hdr.info = CHIP_IS_E1(bp) ? RI_E1_ONLINE : RI_E1H_ONLINE;
9321 memcpy(p, &dump_hdr, sizeof(struct dump_hdr));
9322 p += dump_hdr.hdr_size + 1;
9324 if (CHIP_IS_E1(bp)) {
9325 for (i = 0; i < REGS_COUNT; i++)
9326 if (IS_E1_ONLINE(reg_addrs[i].info))
9327 for (j = 0; j < reg_addrs[i].size; j++)
9328 *p++ = REG_RD(bp,
9329 reg_addrs[i].addr + j*4);
9331 } else { /* E1H */
9332 for (i = 0; i < REGS_COUNT; i++)
9333 if (IS_E1H_ONLINE(reg_addrs[i].info))
9334 for (j = 0; j < reg_addrs[i].size; j++)
9335 *p++ = REG_RD(bp,
9336 reg_addrs[i].addr + j*4);
9337 }
9338 }
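/*
 * The register dump starts with a struct dump_hdr (its hdr_size counts
 * 32-bit words minus one, matching the "p += hdr_size + 1" above),
 * followed by the raw values of every register marked online for the
 * running chip revision, in reg_addrs[] order.
 */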
9340 #define PHY_FW_VER_LEN 10
9342 static void bnx2x_get_drvinfo(struct net_device *dev,
9343 struct ethtool_drvinfo *info)
9344 {
9345 struct bnx2x *bp = netdev_priv(dev);
9346 u8 phy_fw_ver[PHY_FW_VER_LEN];
9348 strcpy(info->driver, DRV_MODULE_NAME);
9349 strcpy(info->version, DRV_MODULE_VERSION);
9351 phy_fw_ver[0] = '\0';
9352 if (bp->port.pmf) {
9353 bnx2x_acquire_phy_lock(bp);
9354 bnx2x_get_ext_phy_fw_version(&bp->link_params,
9355 (bp->state != BNX2X_STATE_CLOSED),
9356 phy_fw_ver, PHY_FW_VER_LEN);
9357 bnx2x_release_phy_lock(bp);
9358 }
9360 snprintf(info->fw_version, 32, "BC:%d.%d.%d%s%s",
9361 (bp->common.bc_ver & 0xff0000) >> 16,
9362 (bp->common.bc_ver & 0xff00) >> 8,
9363 (bp->common.bc_ver & 0xff),
9364 ((phy_fw_ver[0] != '\0') ? " PHY:" : ""), phy_fw_ver);
9365 strcpy(info->bus_info, pci_name(bp->pdev));
9366 info->n_stats = BNX2X_NUM_STATS;
9367 info->testinfo_len = BNX2X_NUM_TESTS;
9368 info->eedump_len = bp->common.flash_size;
9369 info->regdump_len = bnx2x_get_regs_len(dev);
9370 }
9372 static void bnx2x_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
9373 {
9374 struct bnx2x *bp = netdev_priv(dev);
9376 if (bp->flags & NO_WOL_FLAG) {
9377 wol->supported = 0;
9378 wol->wolopts = 0;
9379 } else {
9380 wol->supported = WAKE_MAGIC;
9381 if (bp->wol)
9382 wol->wolopts = WAKE_MAGIC;
9383 else
9384 wol->wolopts = 0;
9385 }
9386 memset(&wol->sopass, 0, sizeof(wol->sopass));
9387 }
9389 static int bnx2x_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
9390 {
9391 struct bnx2x *bp = netdev_priv(dev);
9393 if (wol->wolopts & ~WAKE_MAGIC)
9394 return -EINVAL;
9396 if (wol->wolopts & WAKE_MAGIC) {
9397 if (bp->flags & NO_WOL_FLAG)
9398 return -EINVAL;
9400 bp->wol = 1;
9401 } else
9402 bp->wol = 0;
9404 return 0;
9405 }
9407 static u32 bnx2x_get_msglevel(struct net_device *dev)
9408 {
9409 struct bnx2x *bp = netdev_priv(dev);
9411 return bp->msglevel;
9412 }
9414 static void bnx2x_set_msglevel(struct net_device *dev, u32 level)
9415 {
9416 struct bnx2x *bp = netdev_priv(dev);
9418 if (capable(CAP_NET_ADMIN))
9419 bp->msglevel = level;
9420 }
9422 static int bnx2x_nway_reset(struct net_device *dev)
9423 {
9424 struct bnx2x *bp = netdev_priv(dev);
9426 if (!bp->port.pmf)
9427 return 0;
9429 if (netif_running(dev)) {
9430 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
9431 bnx2x_link_set(bp);
9432 }
9434 return 0;
9435 }
9437 static u32 bnx2x_get_link(struct net_device *dev)
9438 {
9439 struct bnx2x *bp = netdev_priv(dev);
9441 if (bp->flags & MF_FUNC_DIS)
9442 return 0;
9444 return bp->link_vars.link_up;
9445 }
9447 static int bnx2x_get_eeprom_len(struct net_device *dev)
9448 {
9449 struct bnx2x *bp = netdev_priv(dev);
9451 return bp->common.flash_size;
9452 }
9454 static int bnx2x_acquire_nvram_lock(struct bnx2x *bp)
9455 {
9456 int port = BP_PORT(bp);
9457 int count, i;
9458 u32 val = 0;
9460 /* adjust timeout for emulation/FPGA */
9461 count = NVRAM_TIMEOUT_COUNT;
9462 if (CHIP_REV_IS_SLOW(bp))
9463 count *= 100;
9465 /* request access to nvram interface */
9466 REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
9467 (MCPR_NVM_SW_ARB_ARB_REQ_SET1 << port));
9469 for (i = 0; i < count*10; i++) {
9470 val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
9471 if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))
9472 break;
9474 udelay(5);
9475 }
9477 if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))) {
9478 DP(BNX2X_MSG_NVM, "cannot get access to nvram interface\n");
9479 return -EBUSY;
9480 }
9482 return 0;
9483 }
9485 static int bnx2x_release_nvram_lock(struct bnx2x *bp)
9486 {
9487 int port = BP_PORT(bp);
9488 int count, i;
9489 u32 val = 0;
9491 /* adjust timeout for emulation/FPGA */
9492 count = NVRAM_TIMEOUT_COUNT;
9493 if (CHIP_REV_IS_SLOW(bp))
9494 count *= 100;
9496 /* relinquish nvram interface */
9497 REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
9498 (MCPR_NVM_SW_ARB_ARB_REQ_CLR1 << port));
9500 for (i = 0; i < count*10; i++) {
9501 val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
9502 if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)))
9503 break;
9505 udelay(5);
9506 }
9508 if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)) {
9509 DP(BNX2X_MSG_NVM, "cannot free access to nvram interface\n");
9510 return -EBUSY;
9511 }
9513 return 0;
9514 }
9516 static void bnx2x_enable_nvram_access(struct bnx2x *bp)
9517 {
9518 u32 val;
9520 val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
9522 /* enable both bits, even on read */
9523 REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
9524 (val | MCPR_NVM_ACCESS_ENABLE_EN |
9525 MCPR_NVM_ACCESS_ENABLE_WR_EN));
9526 }
9528 static void bnx2x_disable_nvram_access(struct bnx2x *bp)
9529 {
9530 u32 val;
9532 val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
9534 /* disable both bits, even after read */
9535 REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
9536 (val & ~(MCPR_NVM_ACCESS_ENABLE_EN |
9537 MCPR_NVM_ACCESS_ENABLE_WR_EN)));
9538 }
9540 static int bnx2x_nvram_read_dword(struct bnx2x *bp, u32 offset, __be32 *ret_val,
9541 u32 cmd_flags)
9542 {
9543 int count, i, rc;
9544 u32 val;
9546 /* build the command word */
9547 cmd_flags |= MCPR_NVM_COMMAND_DOIT;
9549 /* need to clear DONE bit separately */
9550 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);
9552 /* address of the NVRAM to read from */
9553 REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
9554 (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));
9556 /* issue a read command */
9557 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);
9559 /* adjust timeout for emulation/FPGA */
9560 count = NVRAM_TIMEOUT_COUNT;
9561 if (CHIP_REV_IS_SLOW(bp))
9562 count *= 100;
9564 /* wait for completion */
9565 *ret_val = 0;
9566 rc = -EBUSY;
9567 for (i = 0; i < count; i++) {
9568 udelay(5);
9569 val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
9571 if (val & MCPR_NVM_COMMAND_DONE) {
9572 val = REG_RD(bp, MCP_REG_MCPR_NVM_READ);
9573 /* we read nvram data in cpu order
9574 * but ethtool sees it as an array of bytes
9575 * converting to big-endian will do the work */
9576 *ret_val = cpu_to_be32(val);
9577 rc = 0;
9578 break;
9579 }
9580 }
9582 return rc;
9583 }
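/*
 * NVRAM dword access protocol, as used above: clear the DONE bit, write
 * the byte offset, issue DOIT (plus FIRST/LAST framing flags), then poll
 * the COMMAND register until DONE. Data comes back in CPU order and is
 * converted to big-endian so that ethtool sees a plain byte stream.
 */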
9585 static int bnx2x_nvram_read(struct bnx2x *bp, u32 offset, u8 *ret_buf,
9586 int buf_size)
9587 {
9588 int rc;
9589 u32 cmd_flags;
9590 __be32 val;
9592 if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
9593 DP(BNX2X_MSG_NVM,
9594 "Invalid parameter: offset 0x%x buf_size 0x%x\n",
9595 offset, buf_size);
9596 return -EINVAL;
9597 }
9599 if (offset + buf_size > bp->common.flash_size) {
9600 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
9601 " buf_size (0x%x) > flash_size (0x%x)\n",
9602 offset, buf_size, bp->common.flash_size);
9603 return -EINVAL;
9604 }
9606 /* request access to nvram interface */
9607 rc = bnx2x_acquire_nvram_lock(bp);
9608 if (rc)
9609 return rc;
9611 /* enable access to nvram interface */
9612 bnx2x_enable_nvram_access(bp);
9614 /* read the first word(s) */
9615 cmd_flags = MCPR_NVM_COMMAND_FIRST;
9616 while ((buf_size > sizeof(u32)) && (rc == 0)) {
9617 rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
9618 memcpy(ret_buf, &val, 4);
9620 /* advance to the next dword */
9621 offset += sizeof(u32);
9622 ret_buf += sizeof(u32);
9623 buf_size -= sizeof(u32);
9624 cmd_flags = 0;
9625 }
9627 if (rc == 0) {
9628 cmd_flags |= MCPR_NVM_COMMAND_LAST;
9629 rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
9630 memcpy(ret_buf, &val, 4);
9631 }
9633 /* disable access to nvram interface */
9634 bnx2x_disable_nvram_access(bp);
9635 bnx2x_release_nvram_lock(bp);
9637 return rc;
9638 }
9640 static int bnx2x_get_eeprom(struct net_device *dev,
9641 struct ethtool_eeprom *eeprom, u8 *eebuf)
9642 {
9643 struct bnx2x *bp = netdev_priv(dev);
9644 int rc;
9646 if (!netif_running(dev))
9647 return -EAGAIN;
9649 DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
9650 DP_LEVEL " magic 0x%x offset 0x%x (%d) len 0x%x (%d)\n",
9651 eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
9652 eeprom->len, eeprom->len);
9654 /* parameters already validated in ethtool_get_eeprom */
9656 rc = bnx2x_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
9658 return rc;
9659 }
9661 static int bnx2x_nvram_write_dword(struct bnx2x *bp, u32 offset, u32 val,
9662 u32 cmd_flags)
9663 {
9664 int count, i, rc;
9666 /* build the command word */
9667 cmd_flags |= MCPR_NVM_COMMAND_DOIT | MCPR_NVM_COMMAND_WR;
9669 /* need to clear DONE bit separately */
9670 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);
9672 /* write the data */
9673 REG_WR(bp, MCP_REG_MCPR_NVM_WRITE, val);
9675 /* address of the NVRAM to write to */
9676 REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
9677 (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));
9679 /* issue the write command */
9680 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);
9682 /* adjust timeout for emulation/FPGA */
9683 count = NVRAM_TIMEOUT_COUNT;
9684 if (CHIP_REV_IS_SLOW(bp))
9685 count *= 100;
9687 /* wait for completion */
9688 rc = -EBUSY;
9689 for (i = 0; i < count; i++) {
9690 udelay(5);
9691 val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
9692 if (val & MCPR_NVM_COMMAND_DONE) {
9693 rc = 0;
9694 break;
9695 }
9696 }
9698 return rc;
9699 }
9701 #define BYTE_OFFSET(offset) (8 * (offset & 0x03))
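/*
 * BYTE_OFFSET() gives the bit position of a byte inside its aligned
 * dword. Illustrative example: offset 0x102 -> BYTE_OFFSET == 16, so a
 * single-byte write below reads the dword at aligned offset 0x100,
 * clears bits 23:16 and merges the new byte there before writing the
 * dword back.
 */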
9703 static int bnx2x_nvram_write1(struct bnx2x *bp, u32 offset, u8 *data_buf,
9704 int buf_size)
9705 {
9706 int rc;
9707 u32 cmd_flags;
9708 u32 align_offset;
9709 __be32 val;
9711 if (offset + buf_size > bp->common.flash_size) {
9712 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
9713 " buf_size (0x%x) > flash_size (0x%x)\n",
9714 offset, buf_size, bp->common.flash_size);
9715 return -EINVAL;
9716 }
9718 /* request access to nvram interface */
9719 rc = bnx2x_acquire_nvram_lock(bp);
9720 if (rc)
9721 return rc;
9723 /* enable access to nvram interface */
9724 bnx2x_enable_nvram_access(bp);
9726 cmd_flags = (MCPR_NVM_COMMAND_FIRST | MCPR_NVM_COMMAND_LAST);
9727 align_offset = (offset & ~0x03);
9728 rc = bnx2x_nvram_read_dword(bp, align_offset, &val, cmd_flags);
9730 if (rc == 0) {
9731 val &= ~(0xff << BYTE_OFFSET(offset));
9732 val |= (*data_buf << BYTE_OFFSET(offset));
9734 /* nvram data is returned as an array of bytes
9735 * convert it back to cpu order */
9736 val = be32_to_cpu(val);
9738 rc = bnx2x_nvram_write_dword(bp, align_offset, val,
9739 cmd_flags);
9740 }
9742 /* disable access to nvram interface */
9743 bnx2x_disable_nvram_access(bp);
9744 bnx2x_release_nvram_lock(bp);
9746 return rc;
9747 }
9749 static int bnx2x_nvram_write(struct bnx2x *bp, u32 offset, u8 *data_buf,
9750 int buf_size)
9751 {
9752 int rc;
9753 u32 cmd_flags;
9754 u32 val;
9755 u32 written_so_far;
9757 if (buf_size == 1) /* ethtool */
9758 return bnx2x_nvram_write1(bp, offset, data_buf, buf_size);
9760 if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
9761 DP(BNX2X_MSG_NVM,
9762 "Invalid parameter: offset 0x%x buf_size 0x%x\n",
9763 offset, buf_size);
9764 return -EINVAL;
9765 }
9767 if (offset + buf_size > bp->common.flash_size) {
9768 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
9769 " buf_size (0x%x) > flash_size (0x%x)\n",
9770 offset, buf_size, bp->common.flash_size);
9771 return -EINVAL;
9772 }
9774 /* request access to nvram interface */
9775 rc = bnx2x_acquire_nvram_lock(bp);
9776 if (rc)
9777 return rc;
9779 /* enable access to nvram interface */
9780 bnx2x_enable_nvram_access(bp);
9782 written_so_far = 0;
9783 cmd_flags = MCPR_NVM_COMMAND_FIRST;
9784 while ((written_so_far < buf_size) && (rc == 0)) {
9785 if (written_so_far == (buf_size - sizeof(u32)))
9786 cmd_flags |= MCPR_NVM_COMMAND_LAST;
9787 else if (((offset + 4) % NVRAM_PAGE_SIZE) == 0)
9788 cmd_flags |= MCPR_NVM_COMMAND_LAST;
9789 else if ((offset % NVRAM_PAGE_SIZE) == 0)
9790 cmd_flags |= MCPR_NVM_COMMAND_FIRST;
9792 memcpy(&val, data_buf, 4);
9794 rc = bnx2x_nvram_write_dword(bp, offset, val, cmd_flags);
9796 /* advance to the next dword */
9797 offset += sizeof(u32);
9798 data_buf += sizeof(u32);
9799 written_so_far += sizeof(u32);
9800 cmd_flags = 0;
9801 }
9803 /* disable access to nvram interface */
9804 bnx2x_disable_nvram_access(bp);
9805 bnx2x_release_nvram_lock(bp);
9807 return rc;
9808 }
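/*
 * Writes are framed per flash page: LAST is set on the final dword of
 * the buffer and whenever the next dword would cross a NVRAM_PAGE_SIZE
 * boundary, and FIRST is set again at each page start, so one ethtool
 * request may be split into several page-sized NVRAM transactions.
 */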
9810 static int bnx2x_set_eeprom(struct net_device *dev,
9811 struct ethtool_eeprom *eeprom, u8 *eebuf)
9812 {
9813 struct bnx2x *bp = netdev_priv(dev);
9814 int port = BP_PORT(bp);
9815 int rc = 0;
9817 if (!netif_running(dev))
9818 return -EAGAIN;
9820 DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
9821 DP_LEVEL " magic 0x%x offset 0x%x (%d) len 0x%x (%d)\n",
9822 eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
9823 eeprom->len, eeprom->len);
9825 /* parameters already validated in ethtool_set_eeprom */
9827 /* PHY eeprom can be accessed only by the PMF */
9828 if ((eeprom->magic >= 0x50485900) && (eeprom->magic <= 0x504859FF) &&
9829 !bp->port.pmf)
9830 return -EINVAL;
9832 if (eeprom->magic == 0x50485950) {
9833 /* 'PHYP' (0x50485950): prepare phy for FW upgrade */
9834 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
9836 bnx2x_acquire_phy_lock(bp);
9837 rc |= bnx2x_link_reset(&bp->link_params,
9838 &bp->link_vars, 0);
9839 if (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config) ==
9840 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101)
9841 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_0,
9842 MISC_REGISTERS_GPIO_HIGH, port);
9843 bnx2x_release_phy_lock(bp);
9844 bnx2x_link_report(bp);
9846 } else if (eeprom->magic == 0x50485952) {
9847 /* 'PHYR' (0x50485952): re-init link after FW upgrade */
9848 if (bp->state == BNX2X_STATE_OPEN) {
9849 bnx2x_acquire_phy_lock(bp);
9850 rc |= bnx2x_link_reset(&bp->link_params,
9851 &bp->link_vars, 1);
9853 rc |= bnx2x_phy_init(&bp->link_params,
9854 &bp->link_vars);
9855 bnx2x_release_phy_lock(bp);
9856 bnx2x_calc_fc_adv(bp);
9857 }
9858 } else if (eeprom->magic == 0x53985943) {
9859 /* 'PHYC' (0x53985943): PHY FW upgrade completed */
9860 if (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config) ==
9861 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101) {
9862 u8 ext_phy_addr =
9863 XGXS_EXT_PHY_ADDR(bp->link_params.ext_phy_config);
9865 /* DSP Remove Download Mode */
9866 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_0,
9867 MISC_REGISTERS_GPIO_LOW, port);
9869 bnx2x_acquire_phy_lock(bp);
9871 bnx2x_sfx7101_sp_sw_reset(bp, port, ext_phy_addr);
9873 /* wait 0.5 sec to allow it to run */
9874 msleep(500);
9875 bnx2x_ext_phy_hw_reset(bp, port);
9876 msleep(500);
9877 bnx2x_release_phy_lock(bp);
9878 }
9879 } else
9880 rc = bnx2x_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
9882 return rc;
9883 }
9885 static int bnx2x_get_coalesce(struct net_device *dev,
9886 struct ethtool_coalesce *coal)
9887 {
9888 struct bnx2x *bp = netdev_priv(dev);
9890 memset(coal, 0, sizeof(struct ethtool_coalesce));
9892 coal->rx_coalesce_usecs = bp->rx_ticks;
9893 coal->tx_coalesce_usecs = bp->tx_ticks;
9895 return 0;
9896 }
9898 #define BNX2X_MAX_COALES_TOUT (0xf0*12) /* Maximal coalescing timeout in us */
9899 static int bnx2x_set_coalesce(struct net_device *dev,
9900 struct ethtool_coalesce *coal)
9901 {
9902 struct bnx2x *bp = netdev_priv(dev);
9904 bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
9905 if (bp->rx_ticks > BNX2X_MAX_COALES_TOUT)
9906 bp->rx_ticks = BNX2X_MAX_COALES_TOUT;
9908 bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
9909 if (bp->tx_ticks > BNX2X_MAX_COALES_TOUT)
9910 bp->tx_ticks = BNX2X_MAX_COALES_TOUT;
9912 if (netif_running(dev))
9913 bnx2x_update_coalesce(bp);
9915 return 0;
9916 }
9918 static void bnx2x_get_ringparam(struct net_device *dev,
9919 struct ethtool_ringparam *ering)
9920 {
9921 struct bnx2x *bp = netdev_priv(dev);
9923 ering->rx_max_pending = MAX_RX_AVAIL;
9924 ering->rx_mini_max_pending = 0;
9925 ering->rx_jumbo_max_pending = 0;
9927 ering->rx_pending = bp->rx_ring_size;
9928 ering->rx_mini_pending = 0;
9929 ering->rx_jumbo_pending = 0;
9931 ering->tx_max_pending = MAX_TX_AVAIL;
9932 ering->tx_pending = bp->tx_ring_size;
9933 }
9935 static int bnx2x_set_ringparam(struct net_device *dev,
9936 struct ethtool_ringparam *ering)
9937 {
9938 struct bnx2x *bp = netdev_priv(dev);
9939 int rc = 0;
9941 if ((ering->rx_pending > MAX_RX_AVAIL) ||
9942 (ering->tx_pending > MAX_TX_AVAIL) ||
9943 (ering->tx_pending <= MAX_SKB_FRAGS + 4))
9944 return -EINVAL;
9946 bp->rx_ring_size = ering->rx_pending;
9947 bp->tx_ring_size = ering->tx_pending;
9949 if (netif_running(dev)) {
9950 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
9951 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
9952 }
9954 return rc;
9955 }
9957 static void bnx2x_get_pauseparam(struct net_device *dev,
9958 struct ethtool_pauseparam *epause)
9959 {
9960 struct bnx2x *bp = netdev_priv(dev);
9962 epause->autoneg = (bp->link_params.req_flow_ctrl ==
9963 BNX2X_FLOW_CTRL_AUTO) &&
9964 (bp->link_params.req_line_speed == SPEED_AUTO_NEG);
9966 epause->rx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) ==
9967 BNX2X_FLOW_CTRL_RX);
9968 epause->tx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX) ==
9969 BNX2X_FLOW_CTRL_TX);
9971 DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
9972 DP_LEVEL " autoneg %d rx_pause %d tx_pause %d\n",
9973 epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
9974 }
9976 static int bnx2x_set_pauseparam(struct net_device *dev,
9977 struct ethtool_pauseparam *epause)
9978 {
9979 struct bnx2x *bp = netdev_priv(dev);
9981 if (IS_E1HMF(bp))
9982 return 0;
9984 DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
9985 DP_LEVEL " autoneg %d rx_pause %d tx_pause %d\n",
9986 epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
9988 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO;
9990 if (epause->rx_pause)
9991 bp->link_params.req_flow_ctrl |= BNX2X_FLOW_CTRL_RX;
9993 if (epause->tx_pause)
9994 bp->link_params.req_flow_ctrl |= BNX2X_FLOW_CTRL_TX;
9996 if (bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO)
9997 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_NONE;
9999 if (epause->autoneg) {
10000 if (!(bp->port.supported & SUPPORTED_Autoneg)) {
10001 DP(NETIF_MSG_LINK, "autoneg not supported\n");
10002 return -EINVAL;
10003 }
10005 if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
10006 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO;
10007 }
10009 DP(NETIF_MSG_LINK,
10010 "req_flow_ctrl 0x%x\n", bp->link_params.req_flow_ctrl);
10012 if (netif_running(dev)) {
10013 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
10014 bnx2x_link_set(bp);
10015 }
10017 return 0;
10018 }
10020 static int bnx2x_set_flags(struct net_device *dev, u32 data)
10021 {
10022 struct bnx2x *bp = netdev_priv(dev);
10023 int changed = 0;
10024 int rc = 0;
10026 /* TPA requires Rx CSUM offloading */
10027 if ((data & ETH_FLAG_LRO) && bp->rx_csum) {
10028 if (!(dev->features & NETIF_F_LRO)) {
10029 dev->features |= NETIF_F_LRO;
10030 bp->flags |= TPA_ENABLE_FLAG;
10031 changed = 1;
10032 }
10034 } else if (dev->features & NETIF_F_LRO) {
10035 dev->features &= ~NETIF_F_LRO;
10036 bp->flags &= ~TPA_ENABLE_FLAG;
10037 changed = 1;
10038 }
10040 if (changed && netif_running(dev)) {
10041 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
10042 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
10043 }
10045 return rc;
10046 }
10048 static u32 bnx2x_get_rx_csum(struct net_device *dev)
10049 {
10050 struct bnx2x *bp = netdev_priv(dev);
10052 return bp->rx_csum;
10053 }
10055 static int bnx2x_set_rx_csum(struct net_device *dev, u32 data)
10056 {
10057 struct bnx2x *bp = netdev_priv(dev);
10058 int rc = 0;
10060 bp->rx_csum = data;
10062 /* Disable TPA, when Rx CSUM is disabled. Otherwise all
10063 TPA'ed packets will be discarded due to wrong TCP CSUM */
10064 if (!data) {
10065 u32 flags = ethtool_op_get_flags(dev);
10067 rc = bnx2x_set_flags(dev, (flags & ~ETH_FLAG_LRO));
10068 }
10070 return rc;
10071 }
10073 static int bnx2x_set_tso(struct net_device *dev, u32 data)
10074 {
10075 if (data) {
10076 dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
10077 dev->features |= NETIF_F_TSO6;
10078 } else {
10079 dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO_ECN);
10080 dev->features &= ~NETIF_F_TSO6;
10081 }
10083 return 0;
10084 }
10086 static const struct {
10087 char string[ETH_GSTRING_LEN];
10088 } bnx2x_tests_str_arr[BNX2X_NUM_TESTS] = {
10089 { "register_test (offline)" },
10090 { "memory_test (offline)" },
10091 { "loopback_test (offline)" },
10092 { "nvram_test (online)" },
10093 { "interrupt_test (online)" },
10094 { "link_test (online)" },
10095 { "idle check (online)" }
10096 };
10098 static int bnx2x_test_registers(struct bnx2x *bp)
10099 {
10100 int idx, i, rc = -ENODEV;
10101 u32 wr_val = 0;
10102 int port = BP_PORT(bp);
10103 static const struct {
10104 u32 offset0;
10105 u32 offset1;
10106 u32 mask;
10107 } reg_tbl[] = {
10108 /* 0 */ { BRB1_REG_PAUSE_LOW_THRESHOLD_0, 4, 0x000003ff },
10109 { DORQ_REG_DB_ADDR0, 4, 0xffffffff },
10110 { HC_REG_AGG_INT_0, 4, 0x000003ff },
10111 { PBF_REG_MAC_IF0_ENABLE, 4, 0x00000001 },
10112 { PBF_REG_P0_INIT_CRD, 4, 0x000007ff },
10113 { PRS_REG_CID_PORT_0, 4, 0x00ffffff },
10114 { PXP2_REG_PSWRQ_CDU0_L2P, 4, 0x000fffff },
10115 { PXP2_REG_RQ_CDU0_EFIRST_MEM_ADDR, 8, 0x0003ffff },
10116 { PXP2_REG_PSWRQ_TM0_L2P, 4, 0x000fffff },
10117 { PXP2_REG_RQ_USDM0_EFIRST_MEM_ADDR, 8, 0x0003ffff },
10118 /* 10 */ { PXP2_REG_PSWRQ_TSDM0_L2P, 4, 0x000fffff },
10119 { QM_REG_CONNNUM_0, 4, 0x000fffff },
10120 { TM_REG_LIN0_MAX_ACTIVE_CID, 4, 0x0003ffff },
10121 { SRC_REG_KEYRSS0_0, 40, 0xffffffff },
10122 { SRC_REG_KEYRSS0_7, 40, 0xffffffff },
10123 { XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD00, 4, 0x00000001 },
10124 { XCM_REG_WU_DA_CNT_CMD00, 4, 0x00000003 },
10125 { XCM_REG_GLB_DEL_ACK_MAX_CNT_0, 4, 0x000000ff },
10126 { NIG_REG_LLH0_T_BIT, 4, 0x00000001 },
10127 { NIG_REG_EMAC0_IN_EN, 4, 0x00000001 },
10128 /* 20 */ { NIG_REG_BMAC0_IN_EN, 4, 0x00000001 },
10129 { NIG_REG_XCM0_OUT_EN, 4, 0x00000001 },
10130 { NIG_REG_BRB0_OUT_EN, 4, 0x00000001 },
10131 { NIG_REG_LLH0_XCM_MASK, 4, 0x00000007 },
10132 { NIG_REG_LLH0_ACPI_PAT_6_LEN, 68, 0x000000ff },
10133 { NIG_REG_LLH0_ACPI_PAT_0_CRC, 68, 0xffffffff },
10134 { NIG_REG_LLH0_DEST_MAC_0_0, 160, 0xffffffff },
10135 { NIG_REG_LLH0_DEST_IP_0_1, 160, 0xffffffff },
10136 { NIG_REG_LLH0_IPV4_IPV6_0, 160, 0x00000001 },
10137 { NIG_REG_LLH0_DEST_UDP_0, 160, 0x0000ffff },
10138 /* 30 */ { NIG_REG_LLH0_DEST_TCP_0, 160, 0x0000ffff },
10139 { NIG_REG_LLH0_VLAN_ID_0, 160, 0x00000fff },
10140 { NIG_REG_XGXS_SERDES0_MODE_SEL, 4, 0x00000001 },
10141 { NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0, 4, 0x00000001 },
10142 { NIG_REG_STATUS_INTERRUPT_PORT0, 4, 0x07ffffff },
10143 { NIG_REG_XGXS0_CTRL_EXTREMOTEMDIOST, 24, 0x00000001 },
10144 { NIG_REG_SERDES0_CTRL_PHY_ADDR, 16, 0x0000001f },
10146 { 0xffffffff, 0, 0x00000000 }
10147 };
10149 if (!netif_running(bp->dev))
10150 return rc;
10152 /* Repeat the test twice:
10153 First by writing 0x00000000, second by writing 0xffffffff */
10154 for (idx = 0; idx < 2; idx++) {
10156 switch (idx) {
10157 case 0:
10158 wr_val = 0;
10159 break;
10160 case 1:
10161 wr_val = 0xffffffff;
10162 break;
10163 }
10165 for (i = 0; reg_tbl[i].offset0 != 0xffffffff; i++) {
10166 u32 offset, mask, save_val, val;
10168 offset = reg_tbl[i].offset0 + port*reg_tbl[i].offset1;
10169 mask = reg_tbl[i].mask;
10171 save_val = REG_RD(bp, offset);
10173 REG_WR(bp, offset, wr_val);
10174 val = REG_RD(bp, offset);
10176 /* Restore the original register's value */
10177 REG_WR(bp, offset, save_val);
10179 /* verify that the value is as expected */
10180 if ((val & mask) != (wr_val & mask))
10181 goto test_reg_exit;
10182 }
10183 }
10185 rc = 0;
10187 test_reg_exit:
10188 return rc;
10189 }
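/*
 * The register self-test is a walking 0s/1s pattern: each table entry is
 * saved, overwritten with 0x00000000 and then 0xffffffff, read back
 * through its mask and restored, so a stuck bit in any of the listed
 * per-port registers fails the test without leaving the register
 * modified.
 */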
10191 static int bnx2x_test_memory(struct bnx2x *bp)
10192 {
10193 int i, j, rc = -ENODEV;
10194 u32 val;
10195 static const struct {
10196 u32 offset;
10197 int size;
10198 } mem_tbl[] = {
10199 { CCM_REG_XX_DESCR_TABLE, CCM_REG_XX_DESCR_TABLE_SIZE },
10200 { CFC_REG_ACTIVITY_COUNTER, CFC_REG_ACTIVITY_COUNTER_SIZE },
10201 { CFC_REG_LINK_LIST, CFC_REG_LINK_LIST_SIZE },
10202 { DMAE_REG_CMD_MEM, DMAE_REG_CMD_MEM_SIZE },
10203 { TCM_REG_XX_DESCR_TABLE, TCM_REG_XX_DESCR_TABLE_SIZE },
10204 { UCM_REG_XX_DESCR_TABLE, UCM_REG_XX_DESCR_TABLE_SIZE },
10205 { XCM_REG_XX_DESCR_TABLE, XCM_REG_XX_DESCR_TABLE_SIZE },
10207 { 0xffffffff, 0 }
10208 };
10209 static const struct {
10210 char *name;
10211 u32 offset;
10212 u32 e1_mask;
10213 u32 e1h_mask;
10214 } prty_tbl[] = {
10215 { "CCM_PRTY_STS", CCM_REG_CCM_PRTY_STS, 0x3ffc0, 0 },
10216 { "CFC_PRTY_STS", CFC_REG_CFC_PRTY_STS, 0x2, 0x2 },
10217 { "DMAE_PRTY_STS", DMAE_REG_DMAE_PRTY_STS, 0, 0 },
10218 { "TCM_PRTY_STS", TCM_REG_TCM_PRTY_STS, 0x3ffc0, 0 },
10219 { "UCM_PRTY_STS", UCM_REG_UCM_PRTY_STS, 0x3ffc0, 0 },
10220 { "XCM_PRTY_STS", XCM_REG_XCM_PRTY_STS, 0x3ffc1, 0 },
10222 { NULL, 0xffffffff, 0, 0 }
10223 };
10225 if (!netif_running(bp->dev))
10226 return rc;
10228 /* Go through all the memories */
10229 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++)
10230 for (j = 0; j < mem_tbl[i].size; j++)
10231 REG_RD(bp, mem_tbl[i].offset + j*4);
10233 /* Check the parity status */
10234 for (i = 0; prty_tbl[i].offset != 0xffffffff; i++) {
10235 val = REG_RD(bp, prty_tbl[i].offset);
10236 if ((CHIP_IS_E1(bp) && (val & ~(prty_tbl[i].e1_mask))) ||
10237 (CHIP_IS_E1H(bp) && (val & ~(prty_tbl[i].e1h_mask)))) {
10238 DP(NETIF_MSG_HW,
10239 "%s is 0x%x\n", prty_tbl[i].name, val);
10240 goto test_mem_exit;
10241 }
10242 }
10244 rc = 0;
10246 test_mem_exit:
10247 return rc;
10248 }
10250 static void bnx2x_wait_for_link(struct bnx2x *bp, u8 link_up)
10251 {
10252 int cnt = 1000;
10254 if (link_up)
10255 while (bnx2x_link_test(bp) && cnt--)
10256 msleep(10);
10257 }
10259 static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode, u8 link_up)
10260 {
10261 unsigned int pkt_size, num_pkts, i;
10262 struct sk_buff *skb;
10263 unsigned char *packet;
10264 struct bnx2x_fastpath *fp_rx = &bp->fp[0];
10265 struct bnx2x_fastpath *fp_tx = &bp->fp[bp->num_rx_queues];
10266 u16 tx_start_idx, tx_idx;
10267 u16 rx_start_idx, rx_idx;
10268 u16 pkt_prod, bd_prod;
10269 struct sw_tx_bd *tx_buf;
10270 struct eth_tx_start_bd *tx_start_bd;
10271 struct eth_tx_parse_bd *pbd = NULL;
10272 dma_addr_t mapping;
10273 union eth_rx_cqe *cqe;
10274 u8 cqe_fp_flags;
10275 struct sw_rx_bd *rx_buf;
10276 u16 len;
10277 int rc = -ENODEV;
10279 /* check the loopback mode */
10280 switch (loopback_mode) {
10281 case BNX2X_PHY_LOOPBACK:
10282 if (bp->link_params.loopback_mode != LOOPBACK_XGXS_10)
10283 return -EINVAL;
10284 break;
10285 case BNX2X_MAC_LOOPBACK:
10286 bp->link_params.loopback_mode = LOOPBACK_BMAC;
10287 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
10288 break;
10289 default:
10290 return -EINVAL;
10291 }
10293 /* prepare the loopback packet */
10294 pkt_size = (((bp->dev->mtu < ETH_MAX_PACKET_SIZE) ?
10295 bp->dev->mtu : ETH_MAX_PACKET_SIZE) + ETH_HLEN);
10296 skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
10297 if (!skb) {
10298 rc = -ENOMEM;
10299 goto test_loopback_exit;
10300 }
10301 packet = skb_put(skb, pkt_size);
10302 memcpy(packet, bp->dev->dev_addr, ETH_ALEN);
10303 memset(packet + ETH_ALEN, 0, ETH_ALEN);
10304 memset(packet + 2*ETH_ALEN, 0x77, (ETH_HLEN - 2*ETH_ALEN));
10305 for (i = ETH_HLEN; i < pkt_size; i++)
10306 packet[i] = (unsigned char) (i & 0xff);
10308 /* send the loopback packet */
10309 num_pkts = 0;
10310 tx_start_idx = le16_to_cpu(*fp_tx->tx_cons_sb);
10311 rx_start_idx = le16_to_cpu(*fp_rx->rx_cons_sb);
10313 pkt_prod = fp_tx->tx_pkt_prod++;
10314 tx_buf = &fp_tx->tx_buf_ring[TX_BD(pkt_prod)];
10315 tx_buf->first_bd = fp_tx->tx_bd_prod;
10316 tx_buf->skb = skb;
10317 tx_buf->flags = 0;
10319 bd_prod = TX_BD(fp_tx->tx_bd_prod);
10320 tx_start_bd = &fp_tx->tx_desc_ring[bd_prod].start_bd;
10321 mapping = pci_map_single(bp->pdev, skb->data,
10322 skb_headlen(skb), PCI_DMA_TODEVICE);
10323 tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
10324 tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
10325 tx_start_bd->nbd = cpu_to_le16(2); /* start + pbd */
10326 tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb));
10327 tx_start_bd->vlan = cpu_to_le16(pkt_prod);
10328 tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
10329 tx_start_bd->general_data = ((UNICAST_ADDRESS <<
10330 ETH_TX_START_BD_ETH_ADDR_TYPE_SHIFT) | 1);
10332 /* turn on parsing and get a BD */
10333 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
10334 pbd = &fp_tx->tx_desc_ring[bd_prod].parse_bd;
10336 memset(pbd, 0, sizeof(struct eth_tx_parse_bd));
10338 wmb();
10340 fp_tx->tx_db.data.prod += 2;
10341 barrier();
10342 DOORBELL(bp, fp_tx->index - bp->num_rx_queues, fp_tx->tx_db.raw);
10344 mmiowb();
10346 num_pkts++;
10347 fp_tx->tx_bd_prod += 2; /* start + pbd */
10348 bp->dev->trans_start = jiffies;
10350 udelay(100);
10352 tx_idx = le16_to_cpu(*fp_tx->tx_cons_sb);
10353 if (tx_idx != tx_start_idx + num_pkts)
10354 goto test_loopback_exit;
10356 rx_idx = le16_to_cpu(*fp_rx->rx_cons_sb);
10357 if (rx_idx != rx_start_idx + num_pkts)
10358 goto test_loopback_exit;
10360 cqe = &fp_rx->rx_comp_ring[RCQ_BD(fp_rx->rx_comp_cons)];
10361 cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
10362 if (CQE_TYPE(cqe_fp_flags) || (cqe_fp_flags & ETH_RX_ERROR_FALGS))
10363 goto test_loopback_rx_exit;
10365 len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
10366 if (len != pkt_size)
10367 goto test_loopback_rx_exit;
10369 rx_buf = &fp_rx->rx_buf_ring[RX_BD(fp_rx->rx_bd_cons)];
10370 skb = rx_buf->skb;
10371 skb_reserve(skb, cqe->fast_path_cqe.placement_offset);
10372 for (i = ETH_HLEN; i < pkt_size; i++)
10373 if (*(skb->data + i) != (unsigned char) (i & 0xff))
10374 goto test_loopback_rx_exit;
10376 rc = 0;
10378 test_loopback_rx_exit:
10380 fp_rx->rx_bd_cons = NEXT_RX_IDX(fp_rx->rx_bd_cons);
10381 fp_rx->rx_bd_prod = NEXT_RX_IDX(fp_rx->rx_bd_prod);
10382 fp_rx->rx_comp_cons = NEXT_RCQ_IDX(fp_rx->rx_comp_cons);
10383 fp_rx->rx_comp_prod = NEXT_RCQ_IDX(fp_rx->rx_comp_prod);
10385 /* Update producers */
10386 bnx2x_update_rx_prod(bp, fp_rx, fp_rx->rx_bd_prod, fp_rx->rx_comp_prod,
10387 fp_rx->rx_sge_prod);
10389 test_loopback_exit:
10390 bp->link_params.loopback_mode = LOOPBACK_NONE;
10392 return rc;
10393 }
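/*
 * Loopback flow: one self-addressed frame (payload byte i == i & 0xff)
 * is posted on the first Tx queue as a start BD + parsing BD pair; after
 * the doorbell, the Tx and Rx status-block indices must each advance by
 * exactly one packet and the received bytes are compared against the
 * generated pattern.
 */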
10395 static int bnx2x_test_loopback(struct bnx2x *bp, u8 link_up)
10396 {
10397 int rc = 0, res;
10399 if (!netif_running(bp->dev))
10400 return BNX2X_LOOPBACK_FAILED;
10402 bnx2x_netif_stop(bp, 1);
10403 bnx2x_acquire_phy_lock(bp);
10405 res = bnx2x_run_loopback(bp, BNX2X_PHY_LOOPBACK, link_up);
10406 if (res) {
10407 DP(NETIF_MSG_PROBE, " PHY loopback failed (res %d)\n", res);
10408 rc |= BNX2X_PHY_LOOPBACK_FAILED;
10409 }
10411 res = bnx2x_run_loopback(bp, BNX2X_MAC_LOOPBACK, link_up);
10412 if (res) {
10413 DP(NETIF_MSG_PROBE, " MAC loopback failed (res %d)\n", res);
10414 rc |= BNX2X_MAC_LOOPBACK_FAILED;
10415 }
10417 bnx2x_release_phy_lock(bp);
10418 bnx2x_netif_start(bp);
10420 return rc;
10421 }
10423 #define CRC32_RESIDUAL 0xdebb20e3
10425 static int bnx2x_test_nvram(struct bnx2x *bp)
10426 {
10427 static const struct {
10428 int offset;
10429 int size;
10430 } nvram_tbl[] = {
10431 { 0, 0x14 }, /* bootstrap */
10432 { 0x14, 0xec }, /* dir */
10433 { 0x100, 0x350 }, /* manuf_info */
10434 { 0x450, 0xf0 }, /* feature_info */
10435 { 0x640, 0x64 }, /* upgrade_key_info */
10436 { 0x6a4, 0x64 },
10437 { 0x708, 0x70 }, /* manuf_key_info */
10438 { 0x778, 0x70 },
10439 { 0, 0 }
10440 };
10441 __be32 buf[0x350 / 4];
10442 u8 *data = (u8 *)buf;
10443 int i, rc;
10444 u32 magic, crc;
10446 rc = bnx2x_nvram_read(bp, 0, data, 4);
10447 if (rc) {
10448 DP(NETIF_MSG_PROBE, "magic value read (rc %d)\n", rc);
10449 goto test_nvram_exit;
10450 }
10452 magic = be32_to_cpu(buf[0]);
10453 if (magic != 0x669955aa) {
10454 DP(NETIF_MSG_PROBE, "magic value (0x%08x)\n", magic);
10455 rc = -ENODEV;
10456 goto test_nvram_exit;
10457 }
10459 for (i = 0; nvram_tbl[i].size; i++) {
10461 rc = bnx2x_nvram_read(bp, nvram_tbl[i].offset, data,
10462 nvram_tbl[i].size);
10463 if (rc) {
10464 DP(NETIF_MSG_PROBE,
10465 "nvram_tbl[%d] read data (rc %d)\n", i, rc);
10466 goto test_nvram_exit;
10467 }
10469 crc = ether_crc_le(nvram_tbl[i].size, data);
10470 if (crc != CRC32_RESIDUAL) {
10471 DP(NETIF_MSG_PROBE,
10472 "nvram_tbl[%d] crc value (0x%08x)\n", i, crc);
10473 rc = -ENODEV;
10474 goto test_nvram_exit;
10475 }
10476 }
10478 test_nvram_exit:
10479 return rc;
10480 }
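/*
 * Each protected NVRAM region stores its CRC32 in its last dword, so
 * running ether_crc_le() over the whole region (data plus the stored
 * CRC) must yield the constant CRC32 residual 0xdebb20e3; any other
 * value means the region is corrupt.
 */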
10482 static int bnx2x_test_intr(struct bnx2x *bp)
10483 {
10484 struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
10485 int i, rc;
10487 if (!netif_running(bp->dev))
10488 return -ENODEV;
10490 config->hdr.length = 0;
10491 if (CHIP_IS_E1(bp))
10492 config->hdr.offset = (BP_PORT(bp) ? 32 : 0);
10494 config->hdr.offset = BP_FUNC(bp);
10495 config->hdr.client_id = bp->fp->cl_id;
10496 config->hdr.reserved1 = 0;
10498 bp->set_mac_pending++;
10499 smp_wmb();
10500 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
10501 U64_HI(bnx2x_sp_mapping(bp, mac_config)),
10502 U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
10503 if (rc == 0) {
10504 for (i = 0; i < 10; i++) {
10505 if (!bp->set_mac_pending)
10506 break;
10507 smp_rmb();
10508 msleep_interruptible(10);
10509 }
10510 if (i == 10)
10511 rc = -ENODEV;
10512 }
10514 return rc;
10515 }
10517 static void bnx2x_self_test(struct net_device *dev,
10518 struct ethtool_test *etest, u64 *buf)
10519 {
10520 struct bnx2x *bp = netdev_priv(dev);
10522 memset(buf, 0, sizeof(u64) * BNX2X_NUM_TESTS);
10524 if (!netif_running(dev))
10525 return;
10527 /* offline tests are not supported in MF mode */
10528 if (IS_E1HMF(bp))
10529 etest->flags &= ~ETH_TEST_FL_OFFLINE;
10531 if (etest->flags & ETH_TEST_FL_OFFLINE) {
10532 int port = BP_PORT(bp);
10533 u32 val;
10534 u8 link_up;
10536 /* save current value of input enable for TX port IF */
10537 val = REG_RD(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4);
10538 /* disable input for TX port IF */
10539 REG_WR(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4, 0);
10541 link_up = bp->link_vars.link_up;
10542 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
10543 bnx2x_nic_load(bp, LOAD_DIAG);
10544 /* wait until link state is restored */
10545 bnx2x_wait_for_link(bp, link_up);
10547 if (bnx2x_test_registers(bp) != 0) {
10548 buf[0] = 1;
10549 etest->flags |= ETH_TEST_FL_FAILED;
10550 }
10551 if (bnx2x_test_memory(bp) != 0) {
10552 buf[1] = 1;
10553 etest->flags |= ETH_TEST_FL_FAILED;
10554 }
10555 buf[2] = bnx2x_test_loopback(bp, link_up);
10556 if (buf[2] != 0)
10557 etest->flags |= ETH_TEST_FL_FAILED;
10559 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
10561 /* restore input for TX port IF */
10562 REG_WR(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4, val);
10564 bnx2x_nic_load(bp, LOAD_NORMAL);
10565 /* wait until link state is restored */
10566 bnx2x_wait_for_link(bp, link_up);
10567 }
10568 if (bnx2x_test_nvram(bp) != 0) {
10569 buf[3] = 1;
10570 etest->flags |= ETH_TEST_FL_FAILED;
10571 }
10572 if (bnx2x_test_intr(bp) != 0) {
10573 buf[4] = 1;
10574 etest->flags |= ETH_TEST_FL_FAILED;
10575 }
10577 if (bnx2x_link_test(bp) != 0) {
10578 buf[5] = 1;
10579 etest->flags |= ETH_TEST_FL_FAILED;
10580 }
10582 #ifdef BNX2X_EXTRA_DEBUG
10583 bnx2x_panic_dump(bp);
10584 #endif
10585 }
10587 static const struct {
10588 long offset;
10589 int size;
10590 u8 string[ETH_GSTRING_LEN];
10591 } bnx2x_q_stats_arr[BNX2X_NUM_Q_STATS] = {
10592 /* 1 */ { Q_STATS_OFFSET32(total_bytes_received_hi), 8, "[%d]: rx_bytes" },
10593 { Q_STATS_OFFSET32(error_bytes_received_hi),
10594 8, "[%d]: rx_error_bytes" },
10595 { Q_STATS_OFFSET32(total_unicast_packets_received_hi),
10596 8, "[%d]: rx_ucast_packets" },
10597 { Q_STATS_OFFSET32(total_multicast_packets_received_hi),
10598 8, "[%d]: rx_mcast_packets" },
10599 { Q_STATS_OFFSET32(total_broadcast_packets_received_hi),
10600 8, "[%d]: rx_bcast_packets" },
10601 { Q_STATS_OFFSET32(no_buff_discard_hi), 8, "[%d]: rx_discards" },
10602 { Q_STATS_OFFSET32(rx_err_discard_pkt),
10603 4, "[%d]: rx_phy_ip_err_discards"},
10604 { Q_STATS_OFFSET32(rx_skb_alloc_failed),
10605 4, "[%d]: rx_skb_alloc_discard" },
10606 { Q_STATS_OFFSET32(hw_csum_err), 4, "[%d]: rx_csum_offload_errors" },
10608 /* 10 */{ Q_STATS_OFFSET32(total_bytes_transmitted_hi), 8, "[%d]: tx_bytes" },
10609 { Q_STATS_OFFSET32(total_unicast_packets_transmitted_hi),
10610 8, "[%d]: tx_packets" }
10611 };
10613 static const struct {
10614 long offset;
10615 int size;
10616 u32 flags;
10617 #define STATS_FLAGS_PORT 1
10618 #define STATS_FLAGS_FUNC 2
10619 #define STATS_FLAGS_BOTH (STATS_FLAGS_FUNC | STATS_FLAGS_PORT)
10620 u8 string[ETH_GSTRING_LEN];
10621 } bnx2x_stats_arr[BNX2X_NUM_STATS] = {
10622 /* 1 */ { STATS_OFFSET32(total_bytes_received_hi),
10623 8, STATS_FLAGS_BOTH, "rx_bytes" },
10624 { STATS_OFFSET32(error_bytes_received_hi),
10625 8, STATS_FLAGS_BOTH, "rx_error_bytes" },
10626 { STATS_OFFSET32(total_unicast_packets_received_hi),
10627 8, STATS_FLAGS_BOTH, "rx_ucast_packets" },
10628 { STATS_OFFSET32(total_multicast_packets_received_hi),
10629 8, STATS_FLAGS_BOTH, "rx_mcast_packets" },
10630 { STATS_OFFSET32(total_broadcast_packets_received_hi),
10631 8, STATS_FLAGS_BOTH, "rx_bcast_packets" },
10632 { STATS_OFFSET32(rx_stat_dot3statsfcserrors_hi),
10633 8, STATS_FLAGS_PORT, "rx_crc_errors" },
10634 { STATS_OFFSET32(rx_stat_dot3statsalignmenterrors_hi),
10635 8, STATS_FLAGS_PORT, "rx_align_errors" },
10636 { STATS_OFFSET32(rx_stat_etherstatsundersizepkts_hi),
10637 8, STATS_FLAGS_PORT, "rx_undersize_packets" },
10638 { STATS_OFFSET32(etherstatsoverrsizepkts_hi),
10639 8, STATS_FLAGS_PORT, "rx_oversize_packets" },
10640 /* 10 */{ STATS_OFFSET32(rx_stat_etherstatsfragments_hi),
10641 8, STATS_FLAGS_PORT, "rx_fragments" },
10642 { STATS_OFFSET32(rx_stat_etherstatsjabbers_hi),
10643 8, STATS_FLAGS_PORT, "rx_jabbers" },
10644 { STATS_OFFSET32(no_buff_discard_hi),
10645 8, STATS_FLAGS_BOTH, "rx_discards" },
10646 { STATS_OFFSET32(mac_filter_discard),
10647 4, STATS_FLAGS_PORT, "rx_filtered_packets" },
10648 { STATS_OFFSET32(xxoverflow_discard),
10649 4, STATS_FLAGS_PORT, "rx_fw_discards" },
10650 { STATS_OFFSET32(brb_drop_hi),
10651 8, STATS_FLAGS_PORT, "rx_brb_discard" },
10652 { STATS_OFFSET32(brb_truncate_hi),
10653 8, STATS_FLAGS_PORT, "rx_brb_truncate" },
10654 { STATS_OFFSET32(pause_frames_received_hi),
10655 8, STATS_FLAGS_PORT, "rx_pause_frames" },
10656 { STATS_OFFSET32(rx_stat_maccontrolframesreceived_hi),
10657 8, STATS_FLAGS_PORT, "rx_mac_ctrl_frames" },
10658 { STATS_OFFSET32(nig_timer_max),
10659 4, STATS_FLAGS_PORT, "rx_constant_pause_events" },
10660 /* 20 */{ STATS_OFFSET32(rx_err_discard_pkt),
10661 4, STATS_FLAGS_BOTH, "rx_phy_ip_err_discards"},
10662 { STATS_OFFSET32(rx_skb_alloc_failed),
10663 4, STATS_FLAGS_BOTH, "rx_skb_alloc_discard" },
10664 { STATS_OFFSET32(hw_csum_err),
10665 4, STATS_FLAGS_BOTH, "rx_csum_offload_errors" },
10667 { STATS_OFFSET32(total_bytes_transmitted_hi),
10668 8, STATS_FLAGS_BOTH, "tx_bytes" },
10669 { STATS_OFFSET32(tx_stat_ifhcoutbadoctets_hi),
10670 8, STATS_FLAGS_PORT, "tx_error_bytes" },
10671 { STATS_OFFSET32(total_unicast_packets_transmitted_hi),
10672 8, STATS_FLAGS_BOTH, "tx_packets" },
10673 { STATS_OFFSET32(tx_stat_dot3statsinternalmactransmiterrors_hi),
10674 8, STATS_FLAGS_PORT, "tx_mac_errors" },
10675 { STATS_OFFSET32(rx_stat_dot3statscarriersenseerrors_hi),
10676 8, STATS_FLAGS_PORT, "tx_carrier_errors" },
10677 { STATS_OFFSET32(tx_stat_dot3statssinglecollisionframes_hi),
10678 8, STATS_FLAGS_PORT, "tx_single_collisions" },
10679 { STATS_OFFSET32(tx_stat_dot3statsmultiplecollisionframes_hi),
10680 8, STATS_FLAGS_PORT, "tx_multi_collisions" },
10681 /* 30 */{ STATS_OFFSET32(tx_stat_dot3statsdeferredtransmissions_hi),
10682 8, STATS_FLAGS_PORT, "tx_deferred" },
10683 { STATS_OFFSET32(tx_stat_dot3statsexcessivecollisions_hi),
10684 8, STATS_FLAGS_PORT, "tx_excess_collisions" },
10685 { STATS_OFFSET32(tx_stat_dot3statslatecollisions_hi),
10686 8, STATS_FLAGS_PORT, "tx_late_collisions" },
10687 { STATS_OFFSET32(tx_stat_etherstatscollisions_hi),
10688 8, STATS_FLAGS_PORT, "tx_total_collisions" },
10689 { STATS_OFFSET32(tx_stat_etherstatspkts64octets_hi),
10690 8, STATS_FLAGS_PORT, "tx_64_byte_packets" },
10691 { STATS_OFFSET32(tx_stat_etherstatspkts65octetsto127octets_hi),
10692 8, STATS_FLAGS_PORT, "tx_65_to_127_byte_packets" },
10693 { STATS_OFFSET32(tx_stat_etherstatspkts128octetsto255octets_hi),
10694 8, STATS_FLAGS_PORT, "tx_128_to_255_byte_packets" },
10695 { STATS_OFFSET32(tx_stat_etherstatspkts256octetsto511octets_hi),
10696 8, STATS_FLAGS_PORT, "tx_256_to_511_byte_packets" },
10697 { STATS_OFFSET32(tx_stat_etherstatspkts512octetsto1023octets_hi),
10698 8, STATS_FLAGS_PORT, "tx_512_to_1023_byte_packets" },
10699 { STATS_OFFSET32(etherstatspkts1024octetsto1522octets_hi),
10700 8, STATS_FLAGS_PORT, "tx_1024_to_1522_byte_packets" },
10701 /* 40 */{ STATS_OFFSET32(etherstatspktsover1522octets_hi),
10702 8, STATS_FLAGS_PORT, "tx_1523_to_9022_byte_packets" },
10703 { STATS_OFFSET32(pause_frames_sent_hi),
10704 8, STATS_FLAGS_PORT, "tx_pause_frames" }
10705 };
10707 #define IS_PORT_STAT(i) \
10708 ((bnx2x_stats_arr[i].flags & STATS_FLAGS_BOTH) == STATS_FLAGS_PORT)
10709 #define IS_FUNC_STAT(i) (bnx2x_stats_arr[i].flags & STATS_FLAGS_FUNC)
10710 #define IS_E1HMF_MODE_STAT(bp) \
10711 (IS_E1HMF(bp) && !(bp->msglevel & BNX2X_MSG_STATS))
10713 static int bnx2x_get_sset_count(struct net_device *dev, int stringset)
10714 {
10715 struct bnx2x *bp = netdev_priv(dev);
10716 int i, num_stats;
10718 switch (stringset) {
10719 case ETH_SS_STATS:
10720 if (is_multi(bp)) {
10721 num_stats = BNX2X_NUM_Q_STATS * bp->num_rx_queues;
10722 if (!IS_E1HMF_MODE_STAT(bp))
10723 num_stats += BNX2X_NUM_STATS;
10724 } else {
10725 if (IS_E1HMF_MODE_STAT(bp)) {
10726 num_stats = 0;
10727 for (i = 0; i < BNX2X_NUM_STATS; i++)
10728 if (IS_FUNC_STAT(i))
10729 num_stats++;
10730 } else
10731 num_stats = BNX2X_NUM_STATS;
10732 }
10733 return num_stats;
10735 case ETH_SS_TEST:
10736 return BNX2X_NUM_TESTS;
10738 default:
10739 return -EINVAL;
10740 }
10741 }
10743 static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
10744 {
10745 struct bnx2x *bp = netdev_priv(dev);
10746 int i, j, k;
10748 switch (stringset) {
10749 case ETH_SS_STATS:
10750 if (is_multi(bp)) {
10751 k = 0;
10752 for_each_rx_queue(bp, i) {
10753 for (j = 0; j < BNX2X_NUM_Q_STATS; j++)
10754 sprintf(buf + (k + j)*ETH_GSTRING_LEN,
10755 bnx2x_q_stats_arr[j].string, i);
10756 k += BNX2X_NUM_Q_STATS;
10757 }
10758 if (IS_E1HMF_MODE_STAT(bp))
10759 break;
10760 for (j = 0; j < BNX2X_NUM_STATS; j++)
10761 strcpy(buf + (k + j)*ETH_GSTRING_LEN,
10762 bnx2x_stats_arr[j].string);
10763 } else {
10764 for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
10765 if (IS_E1HMF_MODE_STAT(bp) && IS_PORT_STAT(i))
10766 continue;
10767 strcpy(buf + j*ETH_GSTRING_LEN,
10768 bnx2x_stats_arr[i].string);
10769 j++;
10770 }
10771 }
10772 break;
10774 case ETH_SS_TEST:
10775 memcpy(buf, bnx2x_tests_str_arr, sizeof(bnx2x_tests_str_arr));
10776 break;
10777 }
10778 }
10780 static void bnx2x_get_ethtool_stats(struct net_device *dev,
10781 struct ethtool_stats *stats, u64 *buf)
10782 {
10783 struct bnx2x *bp = netdev_priv(dev);
10784 u32 *hw_stats, *offset;
10785 int i, j, k;
10787 if (is_multi(bp)) {
10788 k = 0;
10789 for_each_rx_queue(bp, i) {
10790 hw_stats = (u32 *)&bp->fp[i].eth_q_stats;
10791 for (j = 0; j < BNX2X_NUM_Q_STATS; j++) {
10792 if (bnx2x_q_stats_arr[j].size == 0) {
10793 /* skip this counter */
10794 buf[k + j] = 0;
10795 continue;
10796 }
10797 offset = (hw_stats +
10798 bnx2x_q_stats_arr[j].offset);
10799 if (bnx2x_q_stats_arr[j].size == 4) {
10800 /* 4-byte counter */
10801 buf[k + j] = (u64) *offset;
10802 continue;
10803 }
10804 /* 8-byte counter */
10805 buf[k + j] = HILO_U64(*offset, *(offset + 1));
10806 }
10807 k += BNX2X_NUM_Q_STATS;
10808 }
10809 if (IS_E1HMF_MODE_STAT(bp))
10810 return;
10811 hw_stats = (u32 *)&bp->eth_stats;
10812 for (j = 0; j < BNX2X_NUM_STATS; j++) {
10813 if (bnx2x_stats_arr[j].size == 0) {
10814 /* skip this counter */
10815 buf[k + j] = 0;
10816 continue;
10817 }
10818 offset = (hw_stats + bnx2x_stats_arr[j].offset);
10819 if (bnx2x_stats_arr[j].size == 4) {
10820 /* 4-byte counter */
10821 buf[k + j] = (u64) *offset;
10822 continue;
10823 }
10824 /* 8-byte counter */
10825 buf[k + j] = HILO_U64(*offset, *(offset + 1));
10826 }
10827 } else {
10828 hw_stats = (u32 *)&bp->eth_stats;
10829 for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
10830 if (IS_E1HMF_MODE_STAT(bp) && IS_PORT_STAT(i))
10831 continue;
10832 if (bnx2x_stats_arr[i].size == 0) {
10833 /* skip this counter */
10834 buf[j] = 0;
10835 j++;
10836 continue;
10837 }
10838 offset = (hw_stats + bnx2x_stats_arr[i].offset);
10839 if (bnx2x_stats_arr[i].size == 4) {
10840 /* 4-byte counter */
10841 buf[j] = (u64) *offset;
10842 j++;
10843 continue;
10844 }
10845 /* 8-byte counter */
10846 buf[j] = HILO_U64(*offset, *(offset + 1));
10847 j++;
10848 }
10849 }
10850 }
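/*
 * Firmware keeps 64-bit counters as two consecutive 32-bit words (most
 * significant word first, hence the _hi offsets in the tables above);
 * HILO_U64() recombines them, while 4-byte counters are simply widened
 * to u64 for ethtool.
 */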
10852 static int bnx2x_phys_id(struct net_device *dev, u32 data)
10853 {
10854 struct bnx2x *bp = netdev_priv(dev);
10855 int port = BP_PORT(bp);
10856 int i;
10858 if (!netif_running(dev))
10859 return 0;
10861 if (!bp->port.pmf)
10862 return 0;
10864 if (data == 0)
10865 data = 2;
10867 for (i = 0; i < (data * 2); i++) {
10868 if ((i % 2) == 0)
10869 bnx2x_set_led(bp, port, LED_MODE_OPER, SPEED_1000,
10870 bp->link_params.hw_led_mode,
10871 bp->link_params.chip_id);
10872 else
10873 bnx2x_set_led(bp, port, LED_MODE_OFF, 0,
10874 bp->link_params.hw_led_mode,
10875 bp->link_params.chip_id);
10877 msleep_interruptible(500);
10878 if (signal_pending(current))
10879 break;
10880 }
10882 if (bp->link_vars.link_up)
10883 bnx2x_set_led(bp, port, LED_MODE_OPER,
10884 bp->link_vars.line_speed,
10885 bp->link_params.hw_led_mode,
10886 bp->link_params.chip_id);
10888 return 0;
10889 }
10891 static const struct ethtool_ops bnx2x_ethtool_ops = {
10892 .get_settings = bnx2x_get_settings,
10893 .set_settings = bnx2x_set_settings,
10894 .get_drvinfo = bnx2x_get_drvinfo,
10895 .get_regs_len = bnx2x_get_regs_len,
10896 .get_regs = bnx2x_get_regs,
10897 .get_wol = bnx2x_get_wol,
10898 .set_wol = bnx2x_set_wol,
10899 .get_msglevel = bnx2x_get_msglevel,
10900 .set_msglevel = bnx2x_set_msglevel,
10901 .nway_reset = bnx2x_nway_reset,
10902 .get_link = bnx2x_get_link,
10903 .get_eeprom_len = bnx2x_get_eeprom_len,
10904 .get_eeprom = bnx2x_get_eeprom,
10905 .set_eeprom = bnx2x_set_eeprom,
10906 .get_coalesce = bnx2x_get_coalesce,
10907 .set_coalesce = bnx2x_set_coalesce,
10908 .get_ringparam = bnx2x_get_ringparam,
10909 .set_ringparam = bnx2x_set_ringparam,
10910 .get_pauseparam = bnx2x_get_pauseparam,
10911 .set_pauseparam = bnx2x_set_pauseparam,
10912 .get_rx_csum = bnx2x_get_rx_csum,
10913 .set_rx_csum = bnx2x_set_rx_csum,
10914 .get_tx_csum = ethtool_op_get_tx_csum,
10915 .set_tx_csum = ethtool_op_set_tx_hw_csum,
10916 .set_flags = bnx2x_set_flags,
10917 .get_flags = ethtool_op_get_flags,
10918 .get_sg = ethtool_op_get_sg,
10919 .set_sg = ethtool_op_set_sg,
10920 .get_tso = ethtool_op_get_tso,
10921 .set_tso = bnx2x_set_tso,
10922 .self_test = bnx2x_self_test,
10923 .get_sset_count = bnx2x_get_sset_count,
10924 .get_strings = bnx2x_get_strings,
10925 .phys_id = bnx2x_phys_id,
10926 .get_ethtool_stats = bnx2x_get_ethtool_stats,
10929 /* end of ethtool_ops */
10931 /****************************************************************************
10932 * General service functions
10933 ****************************************************************************/
10935 static int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
10939 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);
10943 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
10944 ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
10945 PCI_PM_CTRL_PME_STATUS));
10947 if (pmcsr & PCI_PM_CTRL_STATE_MASK)
10948 /* delay required during transition out of D3hot */
10953 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
10957 pmcsr |= PCI_PM_CTRL_PME_ENABLE;
10959 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
10962 /* No more memory access after this point until
10963 * device is brought back to D0.
10973 static inline int bnx2x_has_rx_work(struct bnx2x_fastpath *fp)
10977 /* Tell compiler that status block fields can change */
10979 rx_cons_sb = le16_to_cpu(*fp->rx_cons_sb);
10980 if ((rx_cons_sb & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
10982 return (fp->rx_comp_cons != rx_cons_sb);
10986 * net_device service functions
10989 static int bnx2x_poll(struct napi_struct *napi, int budget)
10991 struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
10993 struct bnx2x *bp = fp->bp;
10996 #ifdef BNX2X_STOP_ON_ERROR
10997 if (unlikely(bp->panic))
11001 prefetch(fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)].skb);
11002 prefetch((char *)(fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)].skb) + 256);
11004 bnx2x_update_fpsb_idx(fp);
11006 if (bnx2x_has_rx_work(fp)) {
11007 work_done = bnx2x_rx_int(fp, budget);
11009 /* must not complete if we consumed full budget */
11010 if (work_done >= budget)
11014 /* bnx2x_has_rx_work() reads the status block, so we need to
11015 * ensure that the status block indices have actually been read
11016 * (bnx2x_update_fpsb_idx) prior to this check (bnx2x_has_rx_work);
11017 * otherwise we could write the "newer" value of the status block to
11018 * the IGU: if there was a DMA right after bnx2x_has_rx_work and
11019 * there is no rmb, the memory read (bnx2x_update_fpsb_idx)
11020 * may be postponed to right before bnx2x_ack_sb. In that case
11021 * there will never be another interrupt until the next update
11022 * of the status block, even though there is still unhandled work.
11026 if (!bnx2x_has_rx_work(fp)) {
11027 #ifdef BNX2X_STOP_ON_ERROR
11030 napi_complete(napi);
11032 bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID,
11033 le16_to_cpu(fp->fp_u_idx), IGU_INT_NOP, 1);
11034 bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID,
11035 le16_to_cpu(fp->fp_c_idx), IGU_INT_ENABLE, 1);
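/* A compressed sketch of the ordering the comment above describes
 * (illustrative pseudo-C with abbreviated names; rmb() is the kernel
 * read barrier):
 *
 *	u_idx = fp->status_blk->...status_block_index;	// update_fpsb_idx
 *	rmb();				// keep this read ahead of the re-check
 *	if (!bnx2x_has_rx_work(fp))	// re-reads rx_cons_sb
 *		bnx2x_ack_sb(..., u_idx, IGU_INT_ENABLE, 1);
 *
 * Without the barrier the index read may drift down to just before the
 * ack, so a status block DMA-ed in between would be acked even though
 * its work was never handled.
 */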
11043 /* We split the first BD into a headers BD and a data BD to ease
11044 * the pain of our fellow microcode engineers, and we use one
11045 * mapping for both BDs.
11046 * So far this has only been observed to happen
11047 * in Other Operating Systems(TM)
11049 static noinline u16 bnx2x_tx_split(struct bnx2x *bp,
11050 struct bnx2x_fastpath *fp,
11051 struct sw_tx_bd *tx_buf,
11052 struct eth_tx_start_bd **tx_bd, u16 hlen,
11053 u16 bd_prod, int nbd)
11055 struct eth_tx_start_bd *h_tx_bd = *tx_bd;
11056 struct eth_tx_bd *d_tx_bd;
11057 dma_addr_t mapping;
11058 int old_len = le16_to_cpu(h_tx_bd->nbytes);
11060 /* first fix first BD */
11061 h_tx_bd->nbd = cpu_to_le16(nbd);
11062 h_tx_bd->nbytes = cpu_to_le16(hlen);
11064 DP(NETIF_MSG_TX_QUEUED, "TSO split header size is %d "
11065 "(%x:%x) nbd %d\n", h_tx_bd->nbytes, h_tx_bd->addr_hi,
11066 h_tx_bd->addr_lo, h_tx_bd->nbd);
11068 /* now get a new data BD
11069 * (after the pbd) and fill it */
11070 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
11071 d_tx_bd = &fp->tx_desc_ring[bd_prod].reg_bd;
11073 mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi),
11074 le32_to_cpu(h_tx_bd->addr_lo)) + hlen;
11076 d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
11077 d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
11078 d_tx_bd->nbytes = cpu_to_le16(old_len - hlen);
11080 /* this marks the BD as one that has no individual mapping */
11081 tx_buf->flags |= BNX2X_TSO_SPLIT_BD;
11083 DP(NETIF_MSG_TX_QUEUED,
11084 "TSO split data size is %d (%x:%x)\n",
11085 d_tx_bd->nbytes, d_tx_bd->addr_hi, d_tx_bd->addr_lo);
11088 *tx_bd = (struct eth_tx_start_bd *)d_tx_bd;
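/* The arithmetic of the split above, as a sketch: if the original BD
 * covered old_len bytes at DMA address M, the headers BD now covers
 * [M, M + hlen) and the new data BD covers [M + hlen, M + old_len),
 * so a single mapping serves both halves:
 *
 *	d_addr   = h_addr + hlen;	// same mapping, offset past headers
 *	d_nbytes = old_len - hlen;	// remainder of the original BD
 */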
11093 static inline u16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
11096 csum = (u16) ~csum_fold(csum_sub(csum,
11097 csum_partial(t_header - fix, fix, 0)));
11100 csum = (u16) ~csum_fold(csum_add(csum,
11101 csum_partial(t_header, -fix, 0)));
11103 return swab16(csum);
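/* What bnx2x_csum_fix() compensates for, informally: the partial checksum
 * we were handed starts "fix" bytes before (fix > 0) or after (fix < 0)
 * the transport header, so the one's-complement sum over those bytes is
 * subtracted or added back, then folded to 16 bits. Sketch of the
 * fix > 0 branch (illustrative names):
 *
 *	fixed = ~fold(sub(csum, sum_of_bytes(t_header - fix, fix)));
 *
 * The final swab16() apparently matches the byte order the parsing BD
 * expects; note the same swab16() applied to tcp_hdr(skb)->check below.
 */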
11106 static inline u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
11110 if (skb->ip_summed != CHECKSUM_PARTIAL)
11114 if (skb->protocol == htons(ETH_P_IPV6)) {
11116 if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
11117 rc |= XMIT_CSUM_TCP;
11121 if (ip_hdr(skb)->protocol == IPPROTO_TCP)
11122 rc |= XMIT_CSUM_TCP;
11126 if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
11129 else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
11135 #if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
11136 /* Check if the packet requires linearization (packet is too fragmented);
11137 no need to check fragmentation if page size > 8K (there will be no
11138 violation of FW restrictions) */
11139 static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
11144 int first_bd_sz = 0;
11146 /* 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */
11147 if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - 3)) {
11149 if (xmit_type & XMIT_GSO) {
11150 unsigned short lso_mss = skb_shinfo(skb)->gso_size;
11151 /* Check if LSO packet needs to be copied:
11152 3 = 1 (for headers BD) + 2 (for PBD and last BD) */
11153 int wnd_size = MAX_FETCH_BD - 3;
11154 /* Number of windows to check */
11155 int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size;
11160 /* Headers length */
11161 hlen = (int)(skb_transport_header(skb) - skb->data) +
11164 /* Amount of data (w/o headers) on the linear part of the SKB */
11165 first_bd_sz = skb_headlen(skb) - hlen;
11167 wnd_sum = first_bd_sz;
11169 /* Calculate the first sum - it's special */
11170 for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++)
11172 skb_shinfo(skb)->frags[frag_idx].size;
11174 /* If there was data on the linear part of the skb - check it */
11175 if (first_bd_sz > 0) {
11176 if (unlikely(wnd_sum < lso_mss)) {
11181 wnd_sum -= first_bd_sz;
11184 /* Others are easier: run through the frag list and
11185 check all windows */
11186 for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) {
11188 skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1].size;
11190 if (unlikely(wnd_sum < lso_mss)) {
11195 skb_shinfo(skb)->frags[wnd_idx].size;
11198 /* in the non-LSO case, a too fragmented packet should always
11205 if (unlikely(to_copy))
11206 DP(NETIF_MSG_TX_QUEUED,
11207 "Linearization IS REQUIRED for %s packet. "
11208 "num_frags %d hlen %d first_bd_sz %d\n",
11209 (xmit_type & XMIT_GSO) ? "LSO" : "non-LSO",
11210 skb_shinfo(skb)->nr_frags, hlen, first_bd_sz);
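/* The loops above implement a sliding-window check: every window of
 * wnd_size consecutive fragments (with the linear data folded into the
 * first window) must carry at least lso_mss bytes, otherwise the skb is
 * linearized. A self-contained sketch of the core test, with plain
 * arrays standing in for skb_shinfo() (illustrative, userspace C):
 *
 *	static int window_violates(const int *frag_sz, int nfrags,
 *				   int wnd_size, int mss)
 *	{
 *		int sum = 0, i;
 *
 *		for (i = 0; i < wnd_size && i < nfrags; i++)
 *			sum += frag_sz[i];		// first window
 *		for (i = 0; ; i++) {
 *			if (sum < mss)
 *				return 1;		// FW rule violated
 *			if (i + wnd_size >= nfrags)
 *				return 0;		// all windows passed
 *			sum += frag_sz[i + wnd_size] - frag_sz[i];
 *		}
 *	}
 */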
11216 /* called with netif_tx_lock
11217 * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
11218 * netif_wake_queue()
11220 static netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
11222 struct bnx2x *bp = netdev_priv(dev);
11223 struct bnx2x_fastpath *fp, *fp_stat;
11224 struct netdev_queue *txq;
11225 struct sw_tx_bd *tx_buf;
11226 struct eth_tx_start_bd *tx_start_bd;
11227 struct eth_tx_bd *tx_data_bd, *total_pkt_bd = NULL;
11228 struct eth_tx_parse_bd *pbd = NULL;
11229 u16 pkt_prod, bd_prod;
11231 dma_addr_t mapping;
11232 u32 xmit_type = bnx2x_xmit_type(bp, skb);
11235 __le16 pkt_size = 0;
11237 #ifdef BNX2X_STOP_ON_ERROR
11238 if (unlikely(bp->panic))
11239 return NETDEV_TX_BUSY;
11242 fp_index = skb_get_queue_mapping(skb);
11243 txq = netdev_get_tx_queue(dev, fp_index);
11245 fp = &bp->fp[fp_index + bp->num_rx_queues];
11246 fp_stat = &bp->fp[fp_index];
11248 if (unlikely(bnx2x_tx_avail(fp) < (skb_shinfo(skb)->nr_frags + 3))) {
11249 fp_stat->eth_q_stats.driver_xoff++;
11250 netif_tx_stop_queue(txq);
11251 BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
11252 return NETDEV_TX_BUSY;
11255 DP(NETIF_MSG_TX_QUEUED, "SKB: summed %x protocol %x protocol(%x,%x)"
11256 " gso type %x xmit_type %x\n",
11257 skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
11258 ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type);
11260 #if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
11261 /* First, check if we need to linearize the skb (due to FW
11262 restrictions). No need to check fragmentation if page size > 8K
11263 (there will be no violation of FW restrictions) */
11264 if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) {
11265 /* Statistics of linearization */
11267 if (skb_linearize(skb) != 0) {
11268 DP(NETIF_MSG_TX_QUEUED, "SKB linearization failed - "
11269 "silently dropping this SKB\n");
11270 dev_kfree_skb_any(skb);
11271 return NETDEV_TX_OK;
11277 Please read carefully. First we use one BD which we mark as start,
11278 then we have a parsing info BD (used for TSO or xsum),
11279 and only then we have the rest of the TSO BDs.
11280 (don't forget to mark the last one as last,
11281 and to unmap only AFTER you write to the BD ...)
11282 And above all, all PBD sizes are in words - NOT DWORDS!
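/* In BD-chain terms, the comment above describes this per-packet layout
 * (a sketch):
 *
 *	start BD --> parsing BD (PBD) --> data BD ... data BD (marked last)
 *
 * The start BD carries the flags/VLAN and the linear data, the PBD
 * carries the csum/TSO parsing info (lengths in 16-bit words, hence the
 * warning above), and the remaining BDs carry the fragments.
 */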
11285 pkt_prod = fp->tx_pkt_prod++;
11286 bd_prod = TX_BD(fp->tx_bd_prod);
11288 /* get a tx_buf and first BD */
11289 tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)];
11290 tx_start_bd = &fp->tx_desc_ring[bd_prod].start_bd;
11292 tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
11293 tx_start_bd->general_data = (UNICAST_ADDRESS <<
11294 ETH_TX_START_BD_ETH_ADDR_TYPE_SHIFT);
11296 tx_start_bd->general_data |= (1 << ETH_TX_START_BD_HDR_NBDS_SHIFT);
11298 /* remember the first BD of the packet */
11299 tx_buf->first_bd = fp->tx_bd_prod;
11303 DP(NETIF_MSG_TX_QUEUED,
11304 "sending pkt %u @%p next_idx %u bd %u @%p\n",
11305 pkt_prod, tx_buf, fp->tx_pkt_prod, bd_prod, tx_start_bd);
11308 if ((bp->vlgrp != NULL) && vlan_tx_tag_present(skb) &&
11309 (bp->flags & HW_VLAN_TX_FLAG)) {
11310 tx_start_bd->vlan = cpu_to_le16(vlan_tx_tag_get(skb));
11311 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_VLAN_TAG;
11314 tx_start_bd->vlan = cpu_to_le16(pkt_prod);
11316 /* turn on parsing and get a BD */
11317 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
11318 pbd = &fp->tx_desc_ring[bd_prod].parse_bd;
11320 memset(pbd, 0, sizeof(struct eth_tx_parse_bd));
11322 if (xmit_type & XMIT_CSUM) {
11323 hlen = (skb_network_header(skb) - skb->data) / 2;
11325 /* for now NS flag is not used in Linux */
11327 (hlen | ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
11328 ETH_TX_PARSE_BD_LLC_SNAP_EN_SHIFT));
11330 pbd->ip_hlen = (skb_transport_header(skb) -
11331 skb_network_header(skb)) / 2;
11333 hlen += pbd->ip_hlen + tcp_hdrlen(skb) / 2;
11335 pbd->total_hlen = cpu_to_le16(hlen);
11338 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_L4_CSUM;
11340 if (xmit_type & XMIT_CSUM_V4)
11341 tx_start_bd->bd_flags.as_bitfield |=
11342 ETH_TX_BD_FLAGS_IP_CSUM;
11344 tx_start_bd->bd_flags.as_bitfield |=
11345 ETH_TX_BD_FLAGS_IPV6;
11347 if (xmit_type & XMIT_CSUM_TCP) {
11348 pbd->tcp_pseudo_csum = swab16(tcp_hdr(skb)->check);
11351 s8 fix = SKB_CS_OFF(skb); /* signed! */
11353 pbd->global_data |= ETH_TX_PARSE_BD_UDP_CS_FLG;
11355 DP(NETIF_MSG_TX_QUEUED,
11356 "hlen %d fix %d csum before fix %x\n",
11357 le16_to_cpu(pbd->total_hlen), fix, SKB_CS(skb));
11359 /* HW bug: fixup the CSUM */
11360 pbd->tcp_pseudo_csum =
11361 bnx2x_csum_fix(skb_transport_header(skb),
11364 DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n",
11365 pbd->tcp_pseudo_csum);
11369 mapping = pci_map_single(bp->pdev, skb->data,
11370 skb_headlen(skb), PCI_DMA_TODEVICE);
11372 tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
11373 tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
11374 nbd = skb_shinfo(skb)->nr_frags + 2; /* start_bd + pbd + frags */
11375 tx_start_bd->nbd = cpu_to_le16(nbd);
11376 tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb));
11377 pkt_size = tx_start_bd->nbytes;
11379 DP(NETIF_MSG_TX_QUEUED, "first bd @%p addr (%x:%x) nbd %d"
11380 " nbytes %d flags %x vlan %x\n",
11381 tx_start_bd, tx_start_bd->addr_hi, tx_start_bd->addr_lo,
11382 le16_to_cpu(tx_start_bd->nbd), le16_to_cpu(tx_start_bd->nbytes),
11383 tx_start_bd->bd_flags.as_bitfield, le16_to_cpu(tx_start_bd->vlan));
11385 if (xmit_type & XMIT_GSO) {
11387 DP(NETIF_MSG_TX_QUEUED,
11388 "TSO packet len %d hlen %d total len %d tso size %d\n",
11389 skb->len, hlen, skb_headlen(skb),
11390 skb_shinfo(skb)->gso_size);
11392 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;
11394 if (unlikely(skb_headlen(skb) > hlen))
11395 bd_prod = bnx2x_tx_split(bp, fp, tx_buf, &tx_start_bd,
11396 hlen, bd_prod, ++nbd);
11398 pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
11399 pbd->tcp_send_seq = swab32(tcp_hdr(skb)->seq);
11400 pbd->tcp_flags = pbd_tcp_flags(skb);
11402 if (xmit_type & XMIT_GSO_V4) {
11403 pbd->ip_id = swab16(ip_hdr(skb)->id);
11404 pbd->tcp_pseudo_csum =
11405 swab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
11406 ip_hdr(skb)->daddr,
11407 0, IPPROTO_TCP, 0));
11410 pbd->tcp_pseudo_csum =
11411 swab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
11412 &ipv6_hdr(skb)->daddr,
11413 0, IPPROTO_TCP, 0));
11415 pbd->global_data |= ETH_TX_PARSE_BD_PSEUDO_CS_WITHOUT_LEN;
11417 tx_data_bd = (struct eth_tx_bd *)tx_start_bd;
11419 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
11420 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
11422 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
11423 tx_data_bd = &fp->tx_desc_ring[bd_prod].reg_bd;
11424 if (total_pkt_bd == NULL)
11425 total_pkt_bd = &fp->tx_desc_ring[bd_prod].reg_bd;
11427 mapping = pci_map_page(bp->pdev, frag->page, frag->page_offset,
11428 frag->size, PCI_DMA_TODEVICE);
11430 tx_data_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
11431 tx_data_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
11432 tx_data_bd->nbytes = cpu_to_le16(frag->size);
11433 le16_add_cpu(&pkt_size, frag->size);
11435 DP(NETIF_MSG_TX_QUEUED,
11436 "frag %d bd @%p addr (%x:%x) nbytes %d\n",
11437 i, tx_data_bd, tx_data_bd->addr_hi, tx_data_bd->addr_lo,
11438 le16_to_cpu(tx_data_bd->nbytes));
11441 DP(NETIF_MSG_TX_QUEUED, "last bd @%p\n", tx_data_bd);
11443 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
11445 /* now send a tx doorbell, counting the next BD
11446 * if the packet contains or ends with it
11448 if (TX_BD_POFF(bd_prod) < nbd)
11451 if (total_pkt_bd != NULL)
11452 total_pkt_bd->total_pkt_bytes = pkt_size;
11455 DP(NETIF_MSG_TX_QUEUED,
11456 "PBD @%p ip_data %x ip_hlen %u ip_id %u lso_mss %u"
11457 " tcp_flags %x xsum %x seq %u hlen %u\n",
11458 pbd, pbd->global_data, pbd->ip_hlen, pbd->ip_id,
11459 pbd->lso_mss, pbd->tcp_flags, pbd->tcp_pseudo_csum,
11460 pbd->tcp_send_seq, le16_to_cpu(pbd->total_hlen));
11462 DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d bd %u\n", nbd, bd_prod);
11465 * Make sure that the BD data is updated before updating the producer
11466 * since FW might read the BD right after the producer is updated.
11467 * This is only applicable for weak-ordered memory model archs such
11468 * as IA-64. The following barrier is also mandatory since the FW
11469 * assumes packets must have BDs.
11473 fp->tx_db.data.prod += nbd;
11475 DOORBELL(bp, fp->index - bp->num_rx_queues, fp->tx_db.raw);
11479 fp->tx_bd_prod += nbd;
11481 if (unlikely(bnx2x_tx_avail(fp) < MAX_SKB_FRAGS + 3)) {
11482 netif_tx_stop_queue(txq);
11483 /* We want bnx2x_tx_int to "see" the updated tx_bd_prod
11484 if we put Tx into XOFF state. */
11486 fp_stat->eth_q_stats.driver_xoff++;
11487 if (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3)
11488 netif_tx_wake_queue(txq);
11492 return NETDEV_TX_OK;
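/* The stop/recheck/wake sequence above is the usual lockless guard
 * against a permanent Tx stall: stop the queue, make XOFF and the new
 * tx_bd_prod visible (the barrier the comment refers to, presumably a
 * full one, is elided here), then re-check in case a completion freed
 * space in the meantime. As a sketch:
 *
 *	netif_tx_stop_queue(txq);
 *	smp_mb();				// publish before re-reading
 *	if (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3)
 *		netif_tx_wake_queue(txq);	// raced with a completion
 */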
11495 /* called with rtnl_lock */
11496 static int bnx2x_open(struct net_device *dev)
11498 struct bnx2x *bp = netdev_priv(dev);
11500 netif_carrier_off(dev);
11502 bnx2x_set_power_state(bp, PCI_D0);
11504 return bnx2x_nic_load(bp, LOAD_OPEN);
11507 /* called with rtnl_lock */
11508 static int bnx2x_close(struct net_device *dev)
11510 struct bnx2x *bp = netdev_priv(dev);
11512 /* Unload the driver, release IRQs */
11513 bnx2x_nic_unload(bp, UNLOAD_CLOSE);
11514 if (atomic_read(&bp->pdev->enable_cnt) == 1)
11515 if (!CHIP_REV_IS_SLOW(bp))
11516 bnx2x_set_power_state(bp, PCI_D3hot);
11521 /* called with netif_tx_lock from dev_mcast.c */
11522 static void bnx2x_set_rx_mode(struct net_device *dev)
11524 struct bnx2x *bp = netdev_priv(dev);
11525 u32 rx_mode = BNX2X_RX_MODE_NORMAL;
11526 int port = BP_PORT(bp);
11528 if (bp->state != BNX2X_STATE_OPEN) {
11529 DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
11533 DP(NETIF_MSG_IFUP, "dev->flags = %x\n", dev->flags);
11535 if (dev->flags & IFF_PROMISC)
11536 rx_mode = BNX2X_RX_MODE_PROMISC;
11538 else if ((dev->flags & IFF_ALLMULTI) ||
11539 ((dev->mc_count > BNX2X_MAX_MULTICAST) && CHIP_IS_E1(bp)))
11540 rx_mode = BNX2X_RX_MODE_ALLMULTI;
11542 else { /* some multicasts */
11543 if (CHIP_IS_E1(bp)) {
11544 int i, old, offset;
11545 struct dev_mc_list *mclist;
11546 struct mac_configuration_cmd *config =
11547 bnx2x_sp(bp, mcast_config);
11549 for (i = 0, mclist = dev->mc_list;
11550 mclist && (i < dev->mc_count);
11551 i++, mclist = mclist->next) {
11553 config->config_table[i].
11554 cam_entry.msb_mac_addr =
11555 swab16(*(u16 *)&mclist->dmi_addr[0]);
11556 config->config_table[i].
11557 cam_entry.middle_mac_addr =
11558 swab16(*(u16 *)&mclist->dmi_addr[2]);
11559 config->config_table[i].
11560 cam_entry.lsb_mac_addr =
11561 swab16(*(u16 *)&mclist->dmi_addr[4]);
11562 config->config_table[i].cam_entry.flags =
11564 config->config_table[i].
11565 target_table_entry.flags = 0;
11566 config->config_table[i].target_table_entry.
11567 clients_bit_vector =
11568 cpu_to_le32(1 << BP_L_ID(bp));
11569 config->config_table[i].
11570 target_table_entry.vlan_id = 0;
11573 "setting MCAST[%d] (%04x:%04x:%04x)\n", i,
11574 config->config_table[i].
11575 cam_entry.msb_mac_addr,
11576 config->config_table[i].
11577 cam_entry.middle_mac_addr,
11578 config->config_table[i].
11579 cam_entry.lsb_mac_addr);
11581 old = config->hdr.length;
11583 for (; i < old; i++) {
11584 if (CAM_IS_INVALID(config->
11585 config_table[i])) {
11586 /* already invalidated */
11590 CAM_INVALIDATE(config->
11595 if (CHIP_REV_IS_SLOW(bp))
11596 offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
11598 offset = BNX2X_MAX_MULTICAST*(1 + port);
11600 config->hdr.length = i;
11601 config->hdr.offset = offset;
11602 config->hdr.client_id = bp->fp->cl_id;
11603 config->hdr.reserved1 = 0;
11605 bp->set_mac_pending++;
11608 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
11609 U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
11610 U64_LO(bnx2x_sp_mapping(bp, mcast_config)),
11613 /* Accept one or more multicasts */
11614 struct dev_mc_list *mclist;
11615 u32 mc_filter[MC_HASH_SIZE];
11616 u32 crc, bit, regidx;
11619 memset(mc_filter, 0, 4 * MC_HASH_SIZE);
11621 for (i = 0, mclist = dev->mc_list;
11622 mclist && (i < dev->mc_count);
11623 i++, mclist = mclist->next) {
11625 DP(NETIF_MSG_IFUP, "Adding mcast MAC: %pM\n",
11628 crc = crc32c_le(0, mclist->dmi_addr, ETH_ALEN);
11629 bit = (crc >> 24) & 0xff;
11632 mc_filter[regidx] |= (1 << bit);
11635 for (i = 0; i < MC_HASH_SIZE; i++)
11636 REG_WR(bp, MC_HASH_OFFSET(bp, i),
11641 bp->rx_mode = rx_mode;
11642 bnx2x_set_storm_rx_mode(bp);
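/* A minimal sketch of the hash-filter math in the non-E1 branch above,
 * assuming the elided lines derive the register index and bit position
 * from the 8-bit hash in the usual way (illustrative, userspace C):
 *
 *	#include <stdint.h>
 *
 *	static void mc_hash_set(uint32_t mc_filter[8], uint32_t crc)
 *	{
 *		uint32_t bit = (crc >> 24) & 0xff;	// 256 buckets
 *
 *		mc_filter[bit >> 5] |= 1u << (bit & 0x1f); // 8 x 32-bit regs
 *	}
 *
 * where crc is crc32c_le() over the 6-byte MAC, as in the code above.
 */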
11645 /* called with rtnl_lock */
11646 static int bnx2x_change_mac_addr(struct net_device *dev, void *p)
11648 struct sockaddr *addr = p;
11649 struct bnx2x *bp = netdev_priv(dev);
11651 if (!is_valid_ether_addr((u8 *)(addr->sa_data)))
11654 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
11655 if (netif_running(dev)) {
11656 if (CHIP_IS_E1(bp))
11657 bnx2x_set_eth_mac_addr_e1(bp, 1);
11659 bnx2x_set_eth_mac_addr_e1h(bp, 1);
11665 /* called with rtnl_lock */
11666 static int bnx2x_mdio_read(struct net_device *netdev, int prtad,
11667 int devad, u16 addr)
11669 struct bnx2x *bp = netdev_priv(netdev);
11672 u32 phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
11674 DP(NETIF_MSG_LINK, "mdio_read: prtad 0x%x, devad 0x%x, addr 0x%x\n",
11675 prtad, devad, addr);
11677 if (prtad != bp->mdio.prtad) {
11678 DP(NETIF_MSG_LINK, "prtad mismatch (cmd:0x%x != bp:0x%x)\n",
11679 prtad, bp->mdio.prtad);
11683 /* The HW expects different devad if CL22 is used */
11684 devad = (devad == MDIO_DEVAD_NONE) ? DEFAULT_PHY_DEV_ADDR : devad;
11686 bnx2x_acquire_phy_lock(bp);
11687 rc = bnx2x_cl45_read(bp, BP_PORT(bp), phy_type, prtad,
11688 devad, addr, &value);
11689 bnx2x_release_phy_lock(bp);
11690 DP(NETIF_MSG_LINK, "mdio_read_val 0x%x rc = 0x%x\n", value, rc);
11697 /* called with rtnl_lock */
11698 static int bnx2x_mdio_write(struct net_device *netdev, int prtad, int devad,
11699 u16 addr, u16 value)
11701 struct bnx2x *bp = netdev_priv(netdev);
11702 u32 ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
11705 DP(NETIF_MSG_LINK, "mdio_write: prtad 0x%x, devad 0x%x, addr 0x%x,"
11706 " value 0x%x\n", prtad, devad, addr, value);
11708 if (prtad != bp->mdio.prtad) {
11709 DP(NETIF_MSG_LINK, "prtad mismatch (cmd:0x%x != bp:0x%x)\n",
11710 prtad, bp->mdio.prtad);
11714 /* The HW expects different devad if CL22 is used */
11715 devad = (devad == MDIO_DEVAD_NONE) ? DEFAULT_PHY_DEV_ADDR : devad;
11717 bnx2x_acquire_phy_lock(bp);
11718 rc = bnx2x_cl45_write(bp, BP_PORT(bp), ext_phy_type, prtad,
11719 devad, addr, value);
11720 bnx2x_release_phy_lock(bp);
11724 /* called with rtnl_lock */
11725 static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
11727 struct bnx2x *bp = netdev_priv(dev);
11728 struct mii_ioctl_data *mdio = if_mii(ifr);
11730 DP(NETIF_MSG_LINK, "ioctl: phy id 0x%x, reg 0x%x, val_in 0x%x\n",
11731 mdio->phy_id, mdio->reg_num, mdio->val_in);
11733 if (!netif_running(dev))
11736 return mdio_mii_ioctl(&bp->mdio, mdio, cmd);
11739 /* called with rtnl_lock */
11740 static int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
11742 struct bnx2x *bp = netdev_priv(dev);
11745 if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
11746 ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE))
11749 /* This does not race with packet allocation
11750 * because the actual alloc size is
11751 * only updated as part of load
11753 dev->mtu = new_mtu;
11755 if (netif_running(dev)) {
11756 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
11757 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
11763 static void bnx2x_tx_timeout(struct net_device *dev)
11765 struct bnx2x *bp = netdev_priv(dev);
11767 #ifdef BNX2X_STOP_ON_ERROR
11771 /* This allows the netif to be shut down gracefully before resetting */
11772 schedule_work(&bp->reset_task);
11776 /* called with rtnl_lock */
11777 static void bnx2x_vlan_rx_register(struct net_device *dev,
11778 struct vlan_group *vlgrp)
11780 struct bnx2x *bp = netdev_priv(dev);
11784 /* Set flags according to the required capabilities */
11785 bp->flags &= ~(HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG);
11787 if (dev->features & NETIF_F_HW_VLAN_TX)
11788 bp->flags |= HW_VLAN_TX_FLAG;
11790 if (dev->features & NETIF_F_HW_VLAN_RX)
11791 bp->flags |= HW_VLAN_RX_FLAG;
11793 if (netif_running(dev))
11794 bnx2x_set_client_config(bp);
11799 #if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
11800 static void poll_bnx2x(struct net_device *dev)
11802 struct bnx2x *bp = netdev_priv(dev);
11804 disable_irq(bp->pdev->irq);
11805 bnx2x_interrupt(bp->pdev->irq, dev);
11806 enable_irq(bp->pdev->irq);
11810 static const struct net_device_ops bnx2x_netdev_ops = {
11811 .ndo_open = bnx2x_open,
11812 .ndo_stop = bnx2x_close,
11813 .ndo_start_xmit = bnx2x_start_xmit,
11814 .ndo_set_multicast_list = bnx2x_set_rx_mode,
11815 .ndo_set_mac_address = bnx2x_change_mac_addr,
11816 .ndo_validate_addr = eth_validate_addr,
11817 .ndo_do_ioctl = bnx2x_ioctl,
11818 .ndo_change_mtu = bnx2x_change_mtu,
11819 .ndo_tx_timeout = bnx2x_tx_timeout,
11821 .ndo_vlan_rx_register = bnx2x_vlan_rx_register,
11823 #if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
11824 .ndo_poll_controller = poll_bnx2x,
11828 static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
11829 struct net_device *dev)
11834 SET_NETDEV_DEV(dev, &pdev->dev);
11835 bp = netdev_priv(dev);
11840 bp->func = PCI_FUNC(pdev->devfn);
11842 rc = pci_enable_device(pdev);
11844 printk(KERN_ERR PFX "Cannot enable PCI device, aborting\n");
11848 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
11849 printk(KERN_ERR PFX "Cannot find PCI device base address,"
11852 goto err_out_disable;
11855 if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
11856 printk(KERN_ERR PFX "Cannot find second PCI device"
11857 " base address, aborting\n");
11859 goto err_out_disable;
11862 if (atomic_read(&pdev->enable_cnt) == 1) {
11863 rc = pci_request_regions(pdev, DRV_MODULE_NAME);
11865 printk(KERN_ERR PFX "Cannot obtain PCI resources,"
11867 goto err_out_disable;
11870 pci_set_master(pdev);
11871 pci_save_state(pdev);
11874 bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
11875 if (bp->pm_cap == 0) {
11876 printk(KERN_ERR PFX "Cannot find power management"
11877 " capability, aborting\n");
11879 goto err_out_release;
11882 bp->pcie_cap = pci_find_capability(pdev, PCI_CAP_ID_EXP);
11883 if (bp->pcie_cap == 0) {
11884 printk(KERN_ERR PFX "Cannot find PCI Express capability,"
11887 goto err_out_release;
11890 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) == 0) {
11891 bp->flags |= USING_DAC_FLAG;
11892 if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)) != 0) {
11893 printk(KERN_ERR PFX "pci_set_consistent_dma_mask"
11894 " failed, aborting\n");
11896 goto err_out_release;
11899 } else if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) {
11900 printk(KERN_ERR PFX "System does not support DMA,"
11903 goto err_out_release;
11906 dev->mem_start = pci_resource_start(pdev, 0);
11907 dev->base_addr = dev->mem_start;
11908 dev->mem_end = pci_resource_end(pdev, 0);
11910 dev->irq = pdev->irq;
11912 bp->regview = pci_ioremap_bar(pdev, 0);
11913 if (!bp->regview) {
11914 printk(KERN_ERR PFX "Cannot map register space, aborting\n");
11916 goto err_out_release;
11919 bp->doorbells = ioremap_nocache(pci_resource_start(pdev, 2),
11920 min_t(u64, BNX2X_DB_SIZE,
11921 pci_resource_len(pdev, 2)));
11922 if (!bp->doorbells) {
11923 printk(KERN_ERR PFX "Cannot map doorbell space, aborting\n");
11925 goto err_out_unmap;
11928 bnx2x_set_power_state(bp, PCI_D0);
11930 /* clean indirect addresses */
11931 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
11932 PCICFG_VENDOR_ID_OFFSET);
11933 REG_WR(bp, PXP2_REG_PGL_ADDR_88_F0 + BP_PORT(bp)*16, 0);
11934 REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F0 + BP_PORT(bp)*16, 0);
11935 REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0 + BP_PORT(bp)*16, 0);
11936 REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0 + BP_PORT(bp)*16, 0);
11938 dev->watchdog_timeo = TX_TIMEOUT;
11940 dev->netdev_ops = &bnx2x_netdev_ops;
11941 dev->ethtool_ops = &bnx2x_ethtool_ops;
11942 dev->features |= NETIF_F_SG;
11943 dev->features |= NETIF_F_HW_CSUM;
11944 if (bp->flags & USING_DAC_FLAG)
11945 dev->features |= NETIF_F_HIGHDMA;
11946 dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
11947 dev->features |= NETIF_F_TSO6;
11949 dev->features |= (NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX);
11950 bp->flags |= (HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG);
11952 dev->vlan_features |= NETIF_F_SG;
11953 dev->vlan_features |= NETIF_F_HW_CSUM;
11954 if (bp->flags & USING_DAC_FLAG)
11955 dev->vlan_features |= NETIF_F_HIGHDMA;
11956 dev->vlan_features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
11957 dev->vlan_features |= NETIF_F_TSO6;
11960 /* get_port_hwinfo() will set prtad and mmds properly */
11961 bp->mdio.prtad = MDIO_PRTAD_NONE;
11963 bp->mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22;
11964 bp->mdio.dev = dev;
11965 bp->mdio.mdio_read = bnx2x_mdio_read;
11966 bp->mdio.mdio_write = bnx2x_mdio_write;
11972 iounmap(bp->regview);
11973 bp->regview = NULL;
11975 if (bp->doorbells) {
11976 iounmap(bp->doorbells);
11977 bp->doorbells = NULL;
11981 if (atomic_read(&pdev->enable_cnt) == 1)
11982 pci_release_regions(pdev);
11985 pci_disable_device(pdev);
11986 pci_set_drvdata(pdev, NULL);
11992 static void __devinit bnx2x_get_pcie_width_speed(struct bnx2x *bp,
11993 int *width, int *speed)
11995 u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);
11997 *width = (val & PCICFG_LINK_WIDTH) >> PCICFG_LINK_WIDTH_SHIFT;
11999 /* return value of 1=2.5GHz 2=5GHz */
12000 *speed = (val & PCICFG_LINK_SPEED) >> PCICFG_LINK_SPEED_SHIFT;
12003 static int __devinit bnx2x_check_firmware(struct bnx2x *bp)
12005 const struct firmware *firmware = bp->firmware;
12006 struct bnx2x_fw_file_hdr *fw_hdr;
12007 struct bnx2x_fw_file_section *sections;
12008 u32 offset, len, num_ops;
12013 if (firmware->size < sizeof(struct bnx2x_fw_file_hdr))
12016 fw_hdr = (struct bnx2x_fw_file_hdr *)firmware->data;
12017 sections = (struct bnx2x_fw_file_section *)fw_hdr;
12019 /* Make sure none of the offsets and sizes make us read beyond
12020 * the end of the firmware data */
12021 for (i = 0; i < sizeof(*fw_hdr) / sizeof(*sections); i++) {
12022 offset = be32_to_cpu(sections[i].offset);
12023 len = be32_to_cpu(sections[i].len);
12024 if (offset + len > firmware->size) {
12025 printk(KERN_ERR PFX "Section %d length is out of "
12031 /* Likewise for the init_ops offsets */
12032 offset = be32_to_cpu(fw_hdr->init_ops_offsets.offset);
12033 ops_offsets = (u16 *)(firmware->data + offset);
12034 num_ops = be32_to_cpu(fw_hdr->init_ops.len) / sizeof(struct raw_op);
12036 for (i = 0; i < be32_to_cpu(fw_hdr->init_ops_offsets.len) / 2; i++) {
12037 if (be16_to_cpu(ops_offsets[i]) > num_ops) {
12038 printk(KERN_ERR PFX "Section offset %d is out of "
12044 /* Check FW version */
12045 offset = be32_to_cpu(fw_hdr->fw_version.offset);
12046 fw_ver = firmware->data + offset;
12047 if ((fw_ver[0] != BCM_5710_FW_MAJOR_VERSION) ||
12048 (fw_ver[1] != BCM_5710_FW_MINOR_VERSION) ||
12049 (fw_ver[2] != BCM_5710_FW_REVISION_VERSION) ||
12050 (fw_ver[3] != BCM_5710_FW_ENGINEERING_VERSION)) {
12051 printk(KERN_ERR PFX "Bad FW version:%d.%d.%d.%d."
12052 " Should be %d.%d.%d.%d\n",
12053 fw_ver[0], fw_ver[1], fw_ver[2],
12054 fw_ver[3], BCM_5710_FW_MAJOR_VERSION,
12055 BCM_5710_FW_MINOR_VERSION,
12056 BCM_5710_FW_REVISION_VERSION,
12057 BCM_5710_FW_ENGINEERING_VERSION);
12064 static inline void be32_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
12066 const __be32 *source = (const __be32 *)_source;
12067 u32 *target = (u32 *)_target;
12070 for (i = 0; i < n/4; i++)
12071 target[i] = be32_to_cpu(source[i]);
12075 Ops array is stored in the following format:
12076 {op(8bit), offset(24bit, big endian), data(32bit, big endian)}
12078 static inline void bnx2x_prep_ops(const u8 *_source, u8 *_target, u32 n)
12080 const __be32 *source = (const __be32 *)_source;
12081 struct raw_op *target = (struct raw_op *)_target;
12084 for (i = 0, j = 0; i < n/8; i++, j += 2) {
12085 tmp = be32_to_cpu(source[j]);
12086 target[i].op = (tmp >> 24) & 0xff;
12087 target[i].offset = tmp & 0xffffff;
12088 target[i].raw_data = be32_to_cpu(source[j+1]);
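/* A worked example of the unpacking above: after be32_to_cpu(), a first
 * word of 0x1a00b0c0 yields op = 0x1a and offset = 0x00b0c0, and the
 * second word is taken whole as raw_data. Standalone sketch:
 *
 *	static void prep_one(uint32_t w0, uint32_t w1, struct raw_op *t)
 *	{
 *		t->op       = (w0 >> 24) & 0xff;	// top 8 bits
 *		t->offset   = w0 & 0xffffff;		// low 24 bits
 *		t->raw_data = w1;
 *	}
 */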
12092 static inline void be16_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
12094 const __be16 *source = (const __be16 *)_source;
12095 u16 *target = (u16 *)_target;
12098 for (i = 0; i < n/2; i++)
12099 target[i] = be16_to_cpu(source[i]);
12102 #define BNX2X_ALLOC_AND_SET(arr, lbl, func) \
12104 u32 len = be32_to_cpu(fw_hdr->arr.len); \
12105 bp->arr = kmalloc(len, GFP_KERNEL); \
12107 printk(KERN_ERR PFX "Failed to allocate %d bytes " \
12108 "for "#arr"\n", len); \
12111 func(bp->firmware->data + be32_to_cpu(fw_hdr->arr.offset), \
12112 (u8 *)bp->arr, len); \
12115 static int __devinit bnx2x_init_firmware(struct bnx2x *bp, struct device *dev)
12117 char fw_file_name[40] = {0};
12118 struct bnx2x_fw_file_hdr *fw_hdr;
12121 /* Create a FW file name */
12122 if (CHIP_IS_E1(bp))
12123 offset = sprintf(fw_file_name, FW_FILE_PREFIX_E1);
12125 offset = sprintf(fw_file_name, FW_FILE_PREFIX_E1H);
12127 sprintf(fw_file_name + offset, "%d.%d.%d.%d.fw",
12128 BCM_5710_FW_MAJOR_VERSION,
12129 BCM_5710_FW_MINOR_VERSION,
12130 BCM_5710_FW_REVISION_VERSION,
12131 BCM_5710_FW_ENGINEERING_VERSION);
12133 printk(KERN_INFO PFX "Loading %s\n", fw_file_name);
12135 rc = request_firmware(&bp->firmware, fw_file_name, dev);
12137 printk(KERN_ERR PFX "Can't load firmware file %s\n",
12139 goto request_firmware_exit;
12142 rc = bnx2x_check_firmware(bp);
12144 printk(KERN_ERR PFX "Corrupt firmware file %s\n", fw_file_name);
12145 goto request_firmware_exit;
12148 fw_hdr = (struct bnx2x_fw_file_hdr *)bp->firmware->data;
12150 /* Initialize the pointers to the init arrays */
12152 BNX2X_ALLOC_AND_SET(init_data, request_firmware_exit, be32_to_cpu_n);
12155 BNX2X_ALLOC_AND_SET(init_ops, init_ops_alloc_err, bnx2x_prep_ops);
12158 BNX2X_ALLOC_AND_SET(init_ops_offsets, init_offsets_alloc_err,
12161 /* STORMs firmware */
12162 INIT_TSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
12163 be32_to_cpu(fw_hdr->tsem_int_table_data.offset);
12164 INIT_TSEM_PRAM_DATA(bp) = bp->firmware->data +
12165 be32_to_cpu(fw_hdr->tsem_pram_data.offset);
12166 INIT_USEM_INT_TABLE_DATA(bp) = bp->firmware->data +
12167 be32_to_cpu(fw_hdr->usem_int_table_data.offset);
12168 INIT_USEM_PRAM_DATA(bp) = bp->firmware->data +
12169 be32_to_cpu(fw_hdr->usem_pram_data.offset);
12170 INIT_XSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
12171 be32_to_cpu(fw_hdr->xsem_int_table_data.offset);
12172 INIT_XSEM_PRAM_DATA(bp) = bp->firmware->data +
12173 be32_to_cpu(fw_hdr->xsem_pram_data.offset);
12174 INIT_CSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
12175 be32_to_cpu(fw_hdr->csem_int_table_data.offset);
12176 INIT_CSEM_PRAM_DATA(bp) = bp->firmware->data +
12177 be32_to_cpu(fw_hdr->csem_pram_data.offset);
12181 init_offsets_alloc_err:
12182 kfree(bp->init_ops);
12183 init_ops_alloc_err:
12184 kfree(bp->init_data);
12185 request_firmware_exit:
12186 release_firmware(bp->firmware);
12192 static int __devinit bnx2x_init_one(struct pci_dev *pdev,
12193 const struct pci_device_id *ent)
12195 struct net_device *dev = NULL;
12197 int pcie_width, pcie_speed;
12200 /* dev zeroed in init_etherdev */
12201 dev = alloc_etherdev_mq(sizeof(*bp), MAX_CONTEXT);
12203 printk(KERN_ERR PFX "Cannot allocate net device\n");
12207 bp = netdev_priv(dev);
12208 bp->msglevel = debug;
12210 pci_set_drvdata(pdev, dev);
12212 rc = bnx2x_init_dev(pdev, dev);
12218 rc = bnx2x_init_bp(bp);
12220 goto init_one_exit;
12222 /* Set init arrays */
12223 rc = bnx2x_init_firmware(bp, &pdev->dev);
12225 printk(KERN_ERR PFX "Error loading firmware\n");
12226 goto init_one_exit;
12229 rc = register_netdev(dev);
12231 dev_err(&pdev->dev, "Cannot register net device\n");
12232 goto init_one_exit;
12235 bnx2x_get_pcie_width_speed(bp, &pcie_width, &pcie_speed);
12236 printk(KERN_INFO "%s: %s (%c%d) PCI-E x%d %s found at mem %lx,"
12237 " IRQ %d, ", dev->name, board_info[ent->driver_data].name,
12238 (CHIP_REV(bp) >> 12) + 'A', (CHIP_METAL(bp) >> 4),
12239 pcie_width, (pcie_speed == 2) ? "5GHz (Gen2)" : "2.5GHz",
12240 dev->base_addr, bp->pdev->irq);
12241 printk(KERN_CONT "node addr %pM\n", dev->dev_addr);
12247 iounmap(bp->regview);
12250 iounmap(bp->doorbells);
12254 if (atomic_read(&pdev->enable_cnt) == 1)
12255 pci_release_regions(pdev);
12257 pci_disable_device(pdev);
12258 pci_set_drvdata(pdev, NULL);
12263 static void __devexit bnx2x_remove_one(struct pci_dev *pdev)
12265 struct net_device *dev = pci_get_drvdata(pdev);
12269 printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
12272 bp = netdev_priv(dev);
12274 unregister_netdev(dev);
12276 kfree(bp->init_ops_offsets);
12277 kfree(bp->init_ops);
12278 kfree(bp->init_data);
12279 release_firmware(bp->firmware);
12282 iounmap(bp->regview);
12285 iounmap(bp->doorbells);
12289 if (atomic_read(&pdev->enable_cnt) == 1)
12290 pci_release_regions(pdev);
12292 pci_disable_device(pdev);
12293 pci_set_drvdata(pdev, NULL);
12296 static int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
12298 struct net_device *dev = pci_get_drvdata(pdev);
12302 printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
12305 bp = netdev_priv(dev);
12309 pci_save_state(pdev);
12311 if (!netif_running(dev)) {
12316 netif_device_detach(dev);
12318 bnx2x_nic_unload(bp, UNLOAD_CLOSE);
12320 bnx2x_set_power_state(bp, pci_choose_state(pdev, state));
12327 static int bnx2x_resume(struct pci_dev *pdev)
12329 struct net_device *dev = pci_get_drvdata(pdev);
12334 printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
12337 bp = netdev_priv(dev);
12341 pci_restore_state(pdev);
12343 if (!netif_running(dev)) {
12348 bnx2x_set_power_state(bp, PCI_D0);
12349 netif_device_attach(dev);
12351 rc = bnx2x_nic_load(bp, LOAD_OPEN);
12358 static int bnx2x_eeh_nic_unload(struct bnx2x *bp)
12362 bp->state = BNX2X_STATE_ERROR;
12364 bp->rx_mode = BNX2X_RX_MODE_NONE;
12366 bnx2x_netif_stop(bp, 0);
12368 del_timer_sync(&bp->timer);
12369 bp->stats_state = STATS_STATE_DISABLED;
12370 DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");
12373 bnx2x_free_irq(bp);
12375 if (CHIP_IS_E1(bp)) {
12376 struct mac_configuration_cmd *config =
12377 bnx2x_sp(bp, mcast_config);
12379 for (i = 0; i < config->hdr.length; i++)
12380 CAM_INVALIDATE(config->config_table[i]);
12383 /* Free SKBs, SGEs, TPA pool and driver internals */
12384 bnx2x_free_skbs(bp);
12385 for_each_rx_queue(bp, i)
12386 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
12387 for_each_rx_queue(bp, i)
12388 netif_napi_del(&bnx2x_fp(bp, i, napi));
12389 bnx2x_free_mem(bp);
12391 bp->state = BNX2X_STATE_CLOSED;
12393 netif_carrier_off(bp->dev);
12398 static void bnx2x_eeh_recover(struct bnx2x *bp)
12402 mutex_init(&bp->port.phy_mutex);
12404 bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
12405 bp->link_params.shmem_base = bp->common.shmem_base;
12406 BNX2X_DEV_INFO("shmem offset is 0x%x\n", bp->common.shmem_base);
12408 if (!bp->common.shmem_base ||
12409 (bp->common.shmem_base < 0xA0000) ||
12410 (bp->common.shmem_base >= 0xC0000)) {
12411 BNX2X_DEV_INFO("MCP not active\n");
12412 bp->flags |= NO_MCP_FLAG;
12416 val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
12417 if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
12418 != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
12419 BNX2X_ERR("BAD MCP validity signature\n");
12421 if (!BP_NOMCP(bp)) {
12422 bp->fw_seq = (SHMEM_RD(bp, func_mb[BP_FUNC(bp)].drv_mb_header)
12423 & DRV_MSG_SEQ_NUMBER_MASK);
12424 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
12429 * bnx2x_io_error_detected - called when PCI error is detected
12430 * @pdev: Pointer to PCI device
12431 * @state: The current pci connection state
12433 * This function is called after a PCI bus error affecting
12434 * this device has been detected.
12436 static pci_ers_result_t bnx2x_io_error_detected(struct pci_dev *pdev,
12437 pci_channel_state_t state)
12439 struct net_device *dev = pci_get_drvdata(pdev);
12440 struct bnx2x *bp = netdev_priv(dev);
12444 netif_device_detach(dev);
12446 if (state == pci_channel_io_perm_failure) {
12448 return PCI_ERS_RESULT_DISCONNECT;
12451 if (netif_running(dev))
12452 bnx2x_eeh_nic_unload(bp);
12454 pci_disable_device(pdev);
12458 /* Request a slot reset */
12459 return PCI_ERS_RESULT_NEED_RESET;
12463 * bnx2x_io_slot_reset - called after the PCI bus has been reset
12464 * @pdev: Pointer to PCI device
12466 * Restart the card from scratch, as if from a cold boot.
12468 static pci_ers_result_t bnx2x_io_slot_reset(struct pci_dev *pdev)
12470 struct net_device *dev = pci_get_drvdata(pdev);
12471 struct bnx2x *bp = netdev_priv(dev);
12475 if (pci_enable_device(pdev)) {
12476 dev_err(&pdev->dev,
12477 "Cannot re-enable PCI device after reset\n");
12479 return PCI_ERS_RESULT_DISCONNECT;
12482 pci_set_master(pdev);
12483 pci_restore_state(pdev);
12485 if (netif_running(dev))
12486 bnx2x_set_power_state(bp, PCI_D0);
12490 return PCI_ERS_RESULT_RECOVERED;
12494 * bnx2x_io_resume - called when traffic can start flowing again
12495 * @pdev: Pointer to PCI device
12497 * This callback is called when the error recovery driver tells us that
12498 * it's OK to resume normal operation.
12500 static void bnx2x_io_resume(struct pci_dev *pdev)
12502 struct net_device *dev = pci_get_drvdata(pdev);
12503 struct bnx2x *bp = netdev_priv(dev);
12507 bnx2x_eeh_recover(bp);
12509 if (netif_running(dev))
12510 bnx2x_nic_load(bp, LOAD_NORMAL);
12512 netif_device_attach(dev);
12517 static struct pci_error_handlers bnx2x_err_handler = {
12518 .error_detected = bnx2x_io_error_detected,
12519 .slot_reset = bnx2x_io_slot_reset,
12520 .resume = bnx2x_io_resume,
12523 static struct pci_driver bnx2x_pci_driver = {
12524 .name = DRV_MODULE_NAME,
12525 .id_table = bnx2x_pci_tbl,
12526 .probe = bnx2x_init_one,
12527 .remove = __devexit_p(bnx2x_remove_one),
12528 .suspend = bnx2x_suspend,
12529 .resume = bnx2x_resume,
12530 .err_handler = &bnx2x_err_handler,
12533 static int __init bnx2x_init(void)
12537 printk(KERN_INFO "%s", version);
12539 bnx2x_wq = create_singlethread_workqueue("bnx2x");
12540 if (bnx2x_wq == NULL) {
12541 printk(KERN_ERR PFX "Cannot create workqueue\n");
12545 ret = pci_register_driver(&bnx2x_pci_driver);
12547 printk(KERN_ERR PFX "Cannot register driver\n");
12548 destroy_workqueue(bnx2x_wq);
12553 static void __exit bnx2x_cleanup(void)
12555 pci_unregister_driver(&bnx2x_pci_driver);
12557 destroy_workqueue(bnx2x_wq);
12560 module_init(bnx2x_init);
12561 module_exit(bnx2x_cleanup);
12565 /* count denotes the number of new completions we have seen */
12566 static void bnx2x_cnic_sp_post(struct bnx2x *bp, int count)
12568 struct eth_spe *spe;
12570 #ifdef BNX2X_STOP_ON_ERROR
12571 if (unlikely(bp->panic))
12575 spin_lock_bh(&bp->spq_lock);
12576 bp->cnic_spq_pending -= count;
12578 for (; bp->cnic_spq_pending < bp->cnic_eth_dev.max_kwqe_pending;
12579 bp->cnic_spq_pending++) {
12581 if (!bp->cnic_kwq_pending)
12584 spe = bnx2x_sp_get_next(bp);
12585 *spe = *bp->cnic_kwq_cons;
12587 bp->cnic_kwq_pending--;
12589 DP(NETIF_MSG_TIMER, "pending on SPQ %d, on KWQ %d count %d\n",
12590 bp->cnic_spq_pending, bp->cnic_kwq_pending, count);
12592 if (bp->cnic_kwq_cons == bp->cnic_kwq_last)
12593 bp->cnic_kwq_cons = bp->cnic_kwq;
12595 bp->cnic_kwq_cons++;
12597 bnx2x_sp_prod_update(bp);
12598 spin_unlock_bh(&bp->spq_lock);
12601 static int bnx2x_cnic_sp_queue(struct net_device *dev,
12602 struct kwqe_16 *kwqes[], u32 count)
12604 struct bnx2x *bp = netdev_priv(dev);
12607 #ifdef BNX2X_STOP_ON_ERROR
12608 if (unlikely(bp->panic))
12612 spin_lock_bh(&bp->spq_lock);
12614 for (i = 0; i < count; i++) {
12615 struct eth_spe *spe = (struct eth_spe *)kwqes[i];
12617 if (bp->cnic_kwq_pending == MAX_SP_DESC_CNT)
12620 *bp->cnic_kwq_prod = *spe;
12622 bp->cnic_kwq_pending++;
12624 DP(NETIF_MSG_TIMER, "L5 SPQE %x %x %x:%x pos %d\n",
12625 spe->hdr.conn_and_cmd_data, spe->hdr.type,
12626 spe->data.mac_config_addr.hi,
12627 spe->data.mac_config_addr.lo,
12628 bp->cnic_kwq_pending);
12630 if (bp->cnic_kwq_prod == bp->cnic_kwq_last)
12631 bp->cnic_kwq_prod = bp->cnic_kwq;
12633 bp->cnic_kwq_prod++;
12636 spin_unlock_bh(&bp->spq_lock);
12638 if (bp->cnic_spq_pending < bp->cnic_eth_dev.max_kwqe_pending)
12639 bnx2x_cnic_sp_post(bp, 0);
12644 static int bnx2x_cnic_ctl_send(struct bnx2x *bp, struct cnic_ctl_info *ctl)
12646 struct cnic_ops *c_ops;
12649 mutex_lock(&bp->cnic_mutex);
12650 c_ops = bp->cnic_ops;
12652 rc = c_ops->cnic_ctl(bp->cnic_data, ctl);
12653 mutex_unlock(&bp->cnic_mutex);
12658 static int bnx2x_cnic_ctl_send_bh(struct bnx2x *bp, struct cnic_ctl_info *ctl)
12660 struct cnic_ops *c_ops;
12664 c_ops = rcu_dereference(bp->cnic_ops);
12666 rc = c_ops->cnic_ctl(bp->cnic_data, ctl);
12673 * for commands that have no data
12675 static int bnx2x_cnic_notify(struct bnx2x *bp, int cmd)
12677 struct cnic_ctl_info ctl = {0};
12681 return bnx2x_cnic_ctl_send(bp, &ctl);
12684 static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid)
12686 struct cnic_ctl_info ctl;
12688 /* first we tell CNIC and only then we count this as a completion */
12689 ctl.cmd = CNIC_CTL_COMPLETION_CMD;
12690 ctl.data.comp.cid = cid;
12692 bnx2x_cnic_ctl_send_bh(bp, &ctl);
12693 bnx2x_cnic_sp_post(bp, 1);
12696 static int bnx2x_drv_ctl(struct net_device *dev, struct drv_ctl_info *ctl)
12698 struct bnx2x *bp = netdev_priv(dev);
12701 switch (ctl->cmd) {
12702 case DRV_CTL_CTXTBL_WR_CMD: {
12703 u32 index = ctl->data.io.offset;
12704 dma_addr_t addr = ctl->data.io.dma_addr;
12706 bnx2x_ilt_wr(bp, index, addr);
12710 case DRV_CTL_COMPLETION_CMD: {
12711 int count = ctl->data.comp.comp_count;
12713 bnx2x_cnic_sp_post(bp, count);
12717 /* rtnl_lock is held. */
12718 case DRV_CTL_START_L2_CMD: {
12719 u32 cli = ctl->data.ring.client_id;
12721 bp->rx_mode_cl_mask |= (1 << cli);
12722 bnx2x_set_storm_rx_mode(bp);
12726 /* rtnl_lock is held. */
12727 case DRV_CTL_STOP_L2_CMD: {
12728 u32 cli = ctl->data.ring.client_id;
12730 bp->rx_mode_cl_mask &= ~(1 << cli);
12731 bnx2x_set_storm_rx_mode(bp);
12736 BNX2X_ERR("unknown command %x\n", ctl->cmd);
12743 static void bnx2x_setup_cnic_irq_info(struct bnx2x *bp)
12745 struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
12747 if (bp->flags & USING_MSIX_FLAG) {
12748 cp->drv_state |= CNIC_DRV_STATE_USING_MSIX;
12749 cp->irq_arr[0].irq_flags |= CNIC_IRQ_FL_MSIX;
12750 cp->irq_arr[0].vector = bp->msix_table[1].vector;
12752 cp->drv_state &= ~CNIC_DRV_STATE_USING_MSIX;
12753 cp->irq_arr[0].irq_flags &= ~CNIC_IRQ_FL_MSIX;
12755 cp->irq_arr[0].status_blk = bp->cnic_sb;
12756 cp->irq_arr[0].status_blk_num = CNIC_SB_ID(bp);
12757 cp->irq_arr[1].status_blk = bp->def_status_blk;
12758 cp->irq_arr[1].status_blk_num = DEF_SB_ID;
12763 static int bnx2x_register_cnic(struct net_device *dev, struct cnic_ops *ops,
12766 struct bnx2x *bp = netdev_priv(dev);
12767 struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
12772 if (atomic_read(&bp->intr_sem) != 0)
12775 bp->cnic_kwq = kzalloc(PAGE_SIZE, GFP_KERNEL);
12779 bp->cnic_kwq_cons = bp->cnic_kwq;
12780 bp->cnic_kwq_prod = bp->cnic_kwq;
12781 bp->cnic_kwq_last = bp->cnic_kwq + MAX_SP_DESC_CNT;
12783 bp->cnic_spq_pending = 0;
12784 bp->cnic_kwq_pending = 0;
12786 bp->cnic_data = data;
12789 cp->drv_state = CNIC_DRV_STATE_REGD;
12791 bnx2x_init_sb(bp, bp->cnic_sb, bp->cnic_sb_mapping, CNIC_SB_ID(bp));
12793 bnx2x_setup_cnic_irq_info(bp);
12794 bnx2x_set_iscsi_eth_mac_addr(bp, 1);
12795 bp->cnic_flags |= BNX2X_CNIC_FLAG_MAC_SET;
12796 rcu_assign_pointer(bp->cnic_ops, ops);
12801 static int bnx2x_unregister_cnic(struct net_device *dev)
12803 struct bnx2x *bp = netdev_priv(dev);
12804 struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
12806 mutex_lock(&bp->cnic_mutex);
12807 if (bp->cnic_flags & BNX2X_CNIC_FLAG_MAC_SET) {
12808 bp->cnic_flags &= ~BNX2X_CNIC_FLAG_MAC_SET;
12809 bnx2x_set_iscsi_eth_mac_addr(bp, 0);
12812 rcu_assign_pointer(bp->cnic_ops, NULL);
12813 mutex_unlock(&bp->cnic_mutex);
12815 kfree(bp->cnic_kwq);
12816 bp->cnic_kwq = NULL;
12821 struct cnic_eth_dev *bnx2x_cnic_probe(struct net_device *dev)
12823 struct bnx2x *bp = netdev_priv(dev);
12824 struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
12826 cp->drv_owner = THIS_MODULE;
12827 cp->chip_id = CHIP_ID(bp);
12828 cp->pdev = bp->pdev;
12829 cp->io_base = bp->regview;
12830 cp->io_base2 = bp->doorbells;
12831 cp->max_kwqe_pending = 8;
12832 cp->ctx_blk_size = CNIC_CTX_PER_ILT * sizeof(union cdu_context);
12833 cp->ctx_tbl_offset = FUNC_ILT_BASE(BP_FUNC(bp)) + 1;
12834 cp->ctx_tbl_len = CNIC_ILT_LINES;
12835 cp->starting_cid = BCM_CNIC_CID_START;
12836 cp->drv_submit_kwqes_16 = bnx2x_cnic_sp_queue;
12837 cp->drv_ctl = bnx2x_drv_ctl;
12838 cp->drv_register_cnic = bnx2x_register_cnic;
12839 cp->drv_unregister_cnic = bnx2x_unregister_cnic;
12843 EXPORT_SYMBOL(bnx2x_cnic_probe);
12845 #endif /* BCM_CNIC */