1 /* bnx2x_main.c: Broadcom Everest network driver.
3 * Copyright (c) 2007-2009 Broadcom Corporation
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation.
9 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
10 * Written by: Eliezer Tamir
11 * Based on code from Michael Chan's bnx2 driver
12 * UDP CSUM errata workaround by Arik Gendelman
13 * Slowpath and fastpath rework by Vladislav Zolotarov
14 * Statistics and Link management by Yitchak Gertner
18 #include <linux/module.h>
19 #include <linux/moduleparam.h>
20 #include <linux/kernel.h>
21 #include <linux/device.h> /* for dev_info() */
22 #include <linux/timer.h>
23 #include <linux/errno.h>
24 #include <linux/ioport.h>
25 #include <linux/slab.h>
26 #include <linux/vmalloc.h>
27 #include <linux/interrupt.h>
28 #include <linux/pci.h>
29 #include <linux/init.h>
30 #include <linux/netdevice.h>
31 #include <linux/etherdevice.h>
32 #include <linux/skbuff.h>
33 #include <linux/dma-mapping.h>
34 #include <linux/bitops.h>
35 #include <linux/irq.h>
36 #include <linux/delay.h>
37 #include <asm/byteorder.h>
38 #include <linux/time.h>
39 #include <linux/ethtool.h>
40 #include <linux/mii.h>
41 #include <linux/if_vlan.h>
44 #include <net/checksum.h>
45 #include <net/ip6_checksum.h>
46 #include <linux/workqueue.h>
47 #include <linux/crc32.h>
48 #include <linux/crc32c.h>
49 #include <linux/prefetch.h>
50 #include <linux/zlib.h>
52 #include <linux/stringify.h>
56 #include "bnx2x_init.h"
57 #include "bnx2x_init_ops.h"
58 #include "bnx2x_dump.h"
60 #define DRV_MODULE_VERSION "1.52.1-5"
61 #define DRV_MODULE_RELDATE "2009/11/09"
62 #define BNX2X_BC_VER 0x040200
64 #include <linux/firmware.h>
65 #include "bnx2x_fw_file_hdr.h"
67 #define FW_FILE_VERSION \
68 __stringify(BCM_5710_FW_MAJOR_VERSION) "." \
69 __stringify(BCM_5710_FW_MINOR_VERSION) "." \
70 __stringify(BCM_5710_FW_REVISION_VERSION) "." \
71 __stringify(BCM_5710_FW_ENGINEERING_VERSION)
72 #define FW_FILE_NAME_E1 "bnx2x-e1-" FW_FILE_VERSION ".fw"
73 #define FW_FILE_NAME_E1H "bnx2x-e1h-" FW_FILE_VERSION ".fw"
75 /* Time in jiffies before concluding the transmitter is hung */
76 #define TX_TIMEOUT (5*HZ)
78 static char version[] __devinitdata =
79 "Broadcom NetXtreme II 5771x 10Gigabit Ethernet Driver "
80 DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
82 MODULE_AUTHOR("Eliezer Tamir");
83 MODULE_DESCRIPTION("Broadcom NetXtreme II BCM57710/57711/57711E Driver");
84 MODULE_LICENSE("GPL");
85 MODULE_VERSION(DRV_MODULE_VERSION);
86 MODULE_FIRMWARE(FW_FILE_NAME_E1);
87 MODULE_FIRMWARE(FW_FILE_NAME_E1H);
89 static int multi_mode = 1;
90 module_param(multi_mode, int, 0);
91 MODULE_PARM_DESC(multi_mode, " Multi queue mode "
92 "(0 Disable; 1 Enable (default))");
94 static int num_queues;
95 module_param(num_queues, int, 0);
96 MODULE_PARM_DESC(num_queues, " Number of queues for multi_mode=1"
97 " (default is as a number of CPUs)");
99 static int disable_tpa;
100 module_param(disable_tpa, int, 0);
101 MODULE_PARM_DESC(disable_tpa, " Disable the TPA (LRO) feature");
104 module_param(int_mode, int, 0);
105 MODULE_PARM_DESC(int_mode, " Force interrupt mode (1 INT#x; 2 MSI)");
107 static int dropless_fc;
108 module_param(dropless_fc, int, 0);
109 MODULE_PARM_DESC(dropless_fc, " Pause on exhausted host ring");
112 module_param(poll, int, 0);
113 MODULE_PARM_DESC(poll, " Use polling (for debug)");
115 static int mrrs = -1;
116 module_param(mrrs, int, 0);
117 MODULE_PARM_DESC(mrrs, " Force Max Read Req Size (0..3) (for debug)");
120 module_param(debug, int, 0);
121 MODULE_PARM_DESC(debug, " Default debug msglevel");
123 static int load_count[3]; /* 0-common, 1-port0, 2-port1 */
125 static struct workqueue_struct *bnx2x_wq;
127 enum bnx2x_board_type {
133 /* indexed by board_type, above */
136 } board_info[] __devinitdata = {
137 { "Broadcom NetXtreme II BCM57710 XGb" },
138 { "Broadcom NetXtreme II BCM57711 XGb" },
139 { "Broadcom NetXtreme II BCM57711E XGb" }
143 static const struct pci_device_id bnx2x_pci_tbl[] = {
144 { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57710), BCM57710 },
145 { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711), BCM57711 },
146 { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711E), BCM57711E },
150 MODULE_DEVICE_TABLE(pci, bnx2x_pci_tbl);
152 /****************************************************************************
153 * General service functions
154 ****************************************************************************/
157 * locking is done by mcp
159 void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val)
161 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
162 pci_write_config_dword(bp->pdev, PCICFG_GRC_DATA, val);
163 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
164 PCICFG_VENDOR_ID_OFFSET);
167 static u32 bnx2x_reg_rd_ind(struct bnx2x *bp, u32 addr)
171 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
172 pci_read_config_dword(bp->pdev, PCICFG_GRC_DATA, &val);
173 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
174 PCICFG_VENDOR_ID_OFFSET);
179 static const u32 dmae_reg_go_c[] = {
180 DMAE_REG_GO_C0, DMAE_REG_GO_C1, DMAE_REG_GO_C2, DMAE_REG_GO_C3,
181 DMAE_REG_GO_C4, DMAE_REG_GO_C5, DMAE_REG_GO_C6, DMAE_REG_GO_C7,
182 DMAE_REG_GO_C8, DMAE_REG_GO_C9, DMAE_REG_GO_C10, DMAE_REG_GO_C11,
183 DMAE_REG_GO_C12, DMAE_REG_GO_C13, DMAE_REG_GO_C14, DMAE_REG_GO_C15
186 /* copy command into DMAE command memory and set DMAE command go */
187 static void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae,
193 cmd_offset = (DMAE_REG_CMD_MEM + sizeof(struct dmae_command) * idx);
194 for (i = 0; i < (sizeof(struct dmae_command)/4); i++) {
195 REG_WR(bp, cmd_offset + i*4, *(((u32 *)dmae) + i));
197 DP(BNX2X_MSG_OFF, "DMAE cmd[%d].%d (0x%08x) : 0x%08x\n",
198 idx, i, cmd_offset + i*4, *(((u32 *)dmae) + i));
200 REG_WR(bp, dmae_reg_go_c[idx], 1);
203 void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
206 struct dmae_command dmae;
207 u32 *wb_comp = bnx2x_sp(bp, wb_comp);
210 if (!bp->dmae_ready) {
211 u32 *data = bnx2x_sp(bp, wb_data[0]);
213 DP(BNX2X_MSG_OFF, "DMAE is not ready (dst_addr %08x len32 %d)"
214 " using indirect\n", dst_addr, len32);
215 bnx2x_init_ind_wr(bp, dst_addr, data, len32);
219 memset(&dmae, 0, sizeof(struct dmae_command));
221 dmae.opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
222 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
223 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
225 DMAE_CMD_ENDIANITY_B_DW_SWAP |
227 DMAE_CMD_ENDIANITY_DW_SWAP |
229 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
230 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
231 dmae.src_addr_lo = U64_LO(dma_addr);
232 dmae.src_addr_hi = U64_HI(dma_addr);
233 dmae.dst_addr_lo = dst_addr >> 2;
234 dmae.dst_addr_hi = 0;
236 dmae.comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
237 dmae.comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
238 dmae.comp_val = DMAE_COMP_VAL;
240 DP(BNX2X_MSG_OFF, "DMAE: opcode 0x%08x\n"
241 DP_LEVEL "src_addr [%x:%08x] len [%d *4] "
242 "dst_addr [%x:%08x (%08x)]\n"
243 DP_LEVEL "comp_addr [%x:%08x] comp_val 0x%08x\n",
244 dmae.opcode, dmae.src_addr_hi, dmae.src_addr_lo,
245 dmae.len, dmae.dst_addr_hi, dmae.dst_addr_lo, dst_addr,
246 dmae.comp_addr_hi, dmae.comp_addr_lo, dmae.comp_val);
247 DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
248 bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
249 bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);
251 mutex_lock(&bp->dmae_mutex);
255 bnx2x_post_dmae(bp, &dmae, INIT_DMAE_C(bp));
259 while (*wb_comp != DMAE_COMP_VAL) {
260 DP(BNX2X_MSG_OFF, "wb_comp 0x%08x\n", *wb_comp);
263 BNX2X_ERR("DMAE timeout!\n");
267 /* adjust delay for emulation/FPGA */
268 if (CHIP_REV_IS_SLOW(bp))
274 mutex_unlock(&bp->dmae_mutex);
277 void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
279 struct dmae_command dmae;
280 u32 *wb_comp = bnx2x_sp(bp, wb_comp);
283 if (!bp->dmae_ready) {
284 u32 *data = bnx2x_sp(bp, wb_data[0]);
287 DP(BNX2X_MSG_OFF, "DMAE is not ready (src_addr %08x len32 %d)"
288 " using indirect\n", src_addr, len32);
289 for (i = 0; i < len32; i++)
290 data[i] = bnx2x_reg_rd_ind(bp, src_addr + i*4);
294 memset(&dmae, 0, sizeof(struct dmae_command));
296 dmae.opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
297 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
298 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
300 DMAE_CMD_ENDIANITY_B_DW_SWAP |
302 DMAE_CMD_ENDIANITY_DW_SWAP |
304 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
305 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
306 dmae.src_addr_lo = src_addr >> 2;
307 dmae.src_addr_hi = 0;
308 dmae.dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_data));
309 dmae.dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_data));
311 dmae.comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
312 dmae.comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
313 dmae.comp_val = DMAE_COMP_VAL;
315 DP(BNX2X_MSG_OFF, "DMAE: opcode 0x%08x\n"
316 DP_LEVEL "src_addr [%x:%08x] len [%d *4] "
317 "dst_addr [%x:%08x (%08x)]\n"
318 DP_LEVEL "comp_addr [%x:%08x] comp_val 0x%08x\n",
319 dmae.opcode, dmae.src_addr_hi, dmae.src_addr_lo,
320 dmae.len, dmae.dst_addr_hi, dmae.dst_addr_lo, src_addr,
321 dmae.comp_addr_hi, dmae.comp_addr_lo, dmae.comp_val);
323 mutex_lock(&bp->dmae_mutex);
325 memset(bnx2x_sp(bp, wb_data[0]), 0, sizeof(u32) * 4);
328 bnx2x_post_dmae(bp, &dmae, INIT_DMAE_C(bp));
332 while (*wb_comp != DMAE_COMP_VAL) {
335 BNX2X_ERR("DMAE timeout!\n");
339 /* adjust delay for emulation/FPGA */
340 if (CHIP_REV_IS_SLOW(bp))
345 DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
346 bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
347 bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);
349 mutex_unlock(&bp->dmae_mutex);
352 void bnx2x_write_dmae_phys_len(struct bnx2x *bp, dma_addr_t phys_addr,
357 while (len > DMAE_LEN32_WR_MAX) {
358 bnx2x_write_dmae(bp, phys_addr + offset,
359 addr + offset, DMAE_LEN32_WR_MAX);
360 offset += DMAE_LEN32_WR_MAX * 4;
361 len -= DMAE_LEN32_WR_MAX;
364 bnx2x_write_dmae(bp, phys_addr + offset, addr + offset, len);
367 /* used only for slowpath so not inlined */
368 static void bnx2x_wb_wr(struct bnx2x *bp, int reg, u32 val_hi, u32 val_lo)
372 wb_write[0] = val_hi;
373 wb_write[1] = val_lo;
374 REG_WR_DMAE(bp, reg, wb_write, 2);
378 static u64 bnx2x_wb_rd(struct bnx2x *bp, int reg)
382 REG_RD_DMAE(bp, reg, wb_data, 2);
384 return HILO_U64(wb_data[0], wb_data[1]);
388 static int bnx2x_mc_assert(struct bnx2x *bp)
392 u32 row0, row1, row2, row3;
395 last_idx = REG_RD8(bp, BAR_XSTRORM_INTMEM +
396 XSTORM_ASSERT_LIST_INDEX_OFFSET);
398 BNX2X_ERR("XSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
400 /* print the asserts */
401 for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
403 row0 = REG_RD(bp, BAR_XSTRORM_INTMEM +
404 XSTORM_ASSERT_LIST_OFFSET(i));
405 row1 = REG_RD(bp, BAR_XSTRORM_INTMEM +
406 XSTORM_ASSERT_LIST_OFFSET(i) + 4);
407 row2 = REG_RD(bp, BAR_XSTRORM_INTMEM +
408 XSTORM_ASSERT_LIST_OFFSET(i) + 8);
409 row3 = REG_RD(bp, BAR_XSTRORM_INTMEM +
410 XSTORM_ASSERT_LIST_OFFSET(i) + 12);
412 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
413 BNX2X_ERR("XSTORM_ASSERT_INDEX 0x%x = 0x%08x"
414 " 0x%08x 0x%08x 0x%08x\n",
415 i, row3, row2, row1, row0);
423 last_idx = REG_RD8(bp, BAR_TSTRORM_INTMEM +
424 TSTORM_ASSERT_LIST_INDEX_OFFSET);
426 BNX2X_ERR("TSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
428 /* print the asserts */
429 for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
431 row0 = REG_RD(bp, BAR_TSTRORM_INTMEM +
432 TSTORM_ASSERT_LIST_OFFSET(i));
433 row1 = REG_RD(bp, BAR_TSTRORM_INTMEM +
434 TSTORM_ASSERT_LIST_OFFSET(i) + 4);
435 row2 = REG_RD(bp, BAR_TSTRORM_INTMEM +
436 TSTORM_ASSERT_LIST_OFFSET(i) + 8);
437 row3 = REG_RD(bp, BAR_TSTRORM_INTMEM +
438 TSTORM_ASSERT_LIST_OFFSET(i) + 12);
440 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
441 BNX2X_ERR("TSTORM_ASSERT_INDEX 0x%x = 0x%08x"
442 " 0x%08x 0x%08x 0x%08x\n",
443 i, row3, row2, row1, row0);
451 last_idx = REG_RD8(bp, BAR_CSTRORM_INTMEM +
452 CSTORM_ASSERT_LIST_INDEX_OFFSET);
454 BNX2X_ERR("CSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
456 /* print the asserts */
457 for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
459 row0 = REG_RD(bp, BAR_CSTRORM_INTMEM +
460 CSTORM_ASSERT_LIST_OFFSET(i));
461 row1 = REG_RD(bp, BAR_CSTRORM_INTMEM +
462 CSTORM_ASSERT_LIST_OFFSET(i) + 4);
463 row2 = REG_RD(bp, BAR_CSTRORM_INTMEM +
464 CSTORM_ASSERT_LIST_OFFSET(i) + 8);
465 row3 = REG_RD(bp, BAR_CSTRORM_INTMEM +
466 CSTORM_ASSERT_LIST_OFFSET(i) + 12);
468 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
469 BNX2X_ERR("CSTORM_ASSERT_INDEX 0x%x = 0x%08x"
470 " 0x%08x 0x%08x 0x%08x\n",
471 i, row3, row2, row1, row0);
479 last_idx = REG_RD8(bp, BAR_USTRORM_INTMEM +
480 USTORM_ASSERT_LIST_INDEX_OFFSET);
482 BNX2X_ERR("USTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
484 /* print the asserts */
485 for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
487 row0 = REG_RD(bp, BAR_USTRORM_INTMEM +
488 USTORM_ASSERT_LIST_OFFSET(i));
489 row1 = REG_RD(bp, BAR_USTRORM_INTMEM +
490 USTORM_ASSERT_LIST_OFFSET(i) + 4);
491 row2 = REG_RD(bp, BAR_USTRORM_INTMEM +
492 USTORM_ASSERT_LIST_OFFSET(i) + 8);
493 row3 = REG_RD(bp, BAR_USTRORM_INTMEM +
494 USTORM_ASSERT_LIST_OFFSET(i) + 12);
496 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
497 BNX2X_ERR("USTORM_ASSERT_INDEX 0x%x = 0x%08x"
498 " 0x%08x 0x%08x 0x%08x\n",
499 i, row3, row2, row1, row0);
509 static void bnx2x_fw_dump(struct bnx2x *bp)
515 mark = REG_RD(bp, MCP_REG_MCPR_SCRATCH + 0xf104);
516 mark = ((mark + 0x3) & ~0x3);
517 printk(KERN_ERR PFX "begin fw dump (mark 0x%x)\n", mark);
519 printk(KERN_ERR PFX);
520 for (offset = mark - 0x08000000; offset <= 0xF900; offset += 0x8*4) {
521 for (word = 0; word < 8; word++)
522 data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
525 printk(KERN_CONT "%s", (char *)data);
527 for (offset = 0xF108; offset <= mark - 0x08000000; offset += 0x8*4) {
528 for (word = 0; word < 8; word++)
529 data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
532 printk(KERN_CONT "%s", (char *)data);
534 printk(KERN_ERR PFX "end of fw dump\n");
537 static void bnx2x_panic_dump(struct bnx2x *bp)
542 bp->stats_state = STATS_STATE_DISABLED;
543 DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");
545 BNX2X_ERR("begin crash dump -----------------\n");
549 BNX2X_ERR("def_c_idx(%u) def_u_idx(%u) def_x_idx(%u)"
550 " def_t_idx(%u) def_att_idx(%u) attn_state(%u)"
551 " spq_prod_idx(%u)\n",
552 bp->def_c_idx, bp->def_u_idx, bp->def_x_idx, bp->def_t_idx,
553 bp->def_att_idx, bp->attn_state, bp->spq_prod_idx);
556 for_each_queue(bp, i) {
557 struct bnx2x_fastpath *fp = &bp->fp[i];
559 BNX2X_ERR("fp%d: rx_bd_prod(%x) rx_bd_cons(%x)"
560 " *rx_bd_cons_sb(%x) rx_comp_prod(%x)"
561 " rx_comp_cons(%x) *rx_cons_sb(%x)\n",
562 i, fp->rx_bd_prod, fp->rx_bd_cons,
563 le16_to_cpu(*fp->rx_bd_cons_sb), fp->rx_comp_prod,
564 fp->rx_comp_cons, le16_to_cpu(*fp->rx_cons_sb));
565 BNX2X_ERR(" rx_sge_prod(%x) last_max_sge(%x)"
566 " fp_u_idx(%x) *sb_u_idx(%x)\n",
567 fp->rx_sge_prod, fp->last_max_sge,
568 le16_to_cpu(fp->fp_u_idx),
569 fp->status_blk->u_status_block.status_block_index);
573 for_each_queue(bp, i) {
574 struct bnx2x_fastpath *fp = &bp->fp[i];
576 BNX2X_ERR("fp%d: tx_pkt_prod(%x) tx_pkt_cons(%x)"
577 " tx_bd_prod(%x) tx_bd_cons(%x) *tx_cons_sb(%x)\n",
578 i, fp->tx_pkt_prod, fp->tx_pkt_cons, fp->tx_bd_prod,
579 fp->tx_bd_cons, le16_to_cpu(*fp->tx_cons_sb));
580 BNX2X_ERR(" fp_c_idx(%x) *sb_c_idx(%x)"
581 " tx_db_prod(%x)\n", le16_to_cpu(fp->fp_c_idx),
582 fp->status_blk->c_status_block.status_block_index,
583 fp->tx_db.data.prod);
588 for_each_queue(bp, i) {
589 struct bnx2x_fastpath *fp = &bp->fp[i];
591 start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10);
592 end = RX_BD(le16_to_cpu(*fp->rx_cons_sb) + 503);
593 for (j = start; j != end; j = RX_BD(j + 1)) {
594 u32 *rx_bd = (u32 *)&fp->rx_desc_ring[j];
595 struct sw_rx_bd *sw_bd = &fp->rx_buf_ring[j];
597 BNX2X_ERR("fp%d: rx_bd[%x]=[%x:%x] sw_bd=[%p]\n",
598 i, j, rx_bd[1], rx_bd[0], sw_bd->skb);
601 start = RX_SGE(fp->rx_sge_prod);
602 end = RX_SGE(fp->last_max_sge);
603 for (j = start; j != end; j = RX_SGE(j + 1)) {
604 u32 *rx_sge = (u32 *)&fp->rx_sge_ring[j];
605 struct sw_rx_page *sw_page = &fp->rx_page_ring[j];
607 BNX2X_ERR("fp%d: rx_sge[%x]=[%x:%x] sw_page=[%p]\n",
608 i, j, rx_sge[1], rx_sge[0], sw_page->page);
611 start = RCQ_BD(fp->rx_comp_cons - 10);
612 end = RCQ_BD(fp->rx_comp_cons + 503);
613 for (j = start; j != end; j = RCQ_BD(j + 1)) {
614 u32 *cqe = (u32 *)&fp->rx_comp_ring[j];
616 BNX2X_ERR("fp%d: cqe[%x]=[%x:%x:%x:%x]\n",
617 i, j, cqe[0], cqe[1], cqe[2], cqe[3]);
622 for_each_queue(bp, i) {
623 struct bnx2x_fastpath *fp = &bp->fp[i];
625 start = TX_BD(le16_to_cpu(*fp->tx_cons_sb) - 10);
626 end = TX_BD(le16_to_cpu(*fp->tx_cons_sb) + 245);
627 for (j = start; j != end; j = TX_BD(j + 1)) {
628 struct sw_tx_bd *sw_bd = &fp->tx_buf_ring[j];
630 BNX2X_ERR("fp%d: packet[%x]=[%p,%x]\n",
631 i, j, sw_bd->skb, sw_bd->first_bd);
634 start = TX_BD(fp->tx_bd_cons - 10);
635 end = TX_BD(fp->tx_bd_cons + 254);
636 for (j = start; j != end; j = TX_BD(j + 1)) {
637 u32 *tx_bd = (u32 *)&fp->tx_desc_ring[j];
639 BNX2X_ERR("fp%d: tx_bd[%x]=[%x:%x:%x:%x]\n",
640 i, j, tx_bd[0], tx_bd[1], tx_bd[2], tx_bd[3]);
646 BNX2X_ERR("end crash dump -----------------\n");
649 static void bnx2x_int_enable(struct bnx2x *bp)
651 int port = BP_PORT(bp);
652 u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
653 u32 val = REG_RD(bp, addr);
654 int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
655 int msi = (bp->flags & USING_MSI_FLAG) ? 1 : 0;
658 val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
659 HC_CONFIG_0_REG_INT_LINE_EN_0);
660 val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
661 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
663 val &= ~HC_CONFIG_0_REG_INT_LINE_EN_0;
664 val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
665 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
666 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
668 val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
669 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
670 HC_CONFIG_0_REG_INT_LINE_EN_0 |
671 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
673 DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
676 REG_WR(bp, addr, val);
678 val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0;
681 DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x) mode %s\n",
682 val, port, addr, (msix ? "MSI-X" : (msi ? "MSI" : "INTx")));
684 REG_WR(bp, addr, val);
686 * Ensure that HC_CONFIG is written before leading/trailing edge config
691 if (CHIP_IS_E1H(bp)) {
692 /* init leading/trailing edge */
694 val = (0xee0f | (1 << (BP_E1HVN(bp) + 4)));
696 /* enable nig and gpio3 attention */
701 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
702 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
705 /* Make sure that interrupts are indeed enabled from here on */
709 static void bnx2x_int_disable(struct bnx2x *bp)
711 int port = BP_PORT(bp);
712 u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
713 u32 val = REG_RD(bp, addr);
715 val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
716 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
717 HC_CONFIG_0_REG_INT_LINE_EN_0 |
718 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
720 DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
723 /* flush all outstanding writes */
726 REG_WR(bp, addr, val);
727 if (REG_RD(bp, addr) != val)
728 BNX2X_ERR("BUG! proper val not read from IGU!\n");
731 static void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw)
733 int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
736 /* disable interrupt handling */
737 atomic_inc(&bp->intr_sem);
738 smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */
741 /* prevent the HW from sending interrupts */
742 bnx2x_int_disable(bp);
744 /* make sure all ISRs are done */
746 synchronize_irq(bp->msix_table[0].vector);
751 for_each_queue(bp, i)
752 synchronize_irq(bp->msix_table[i + offset].vector);
754 synchronize_irq(bp->pdev->irq);
756 /* make sure sp_task is not running */
757 cancel_delayed_work(&bp->sp_task);
758 flush_workqueue(bnx2x_wq);
764 * General service functions
767 static inline void bnx2x_ack_sb(struct bnx2x *bp, u8 sb_id,
768 u8 storm, u16 index, u8 op, u8 update)
770 u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
771 COMMAND_REG_INT_ACK);
772 struct igu_ack_register igu_ack;
774 igu_ack.status_block_index = index;
775 igu_ack.sb_id_and_flags =
776 ((sb_id << IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT) |
777 (storm << IGU_ACK_REGISTER_STORM_ID_SHIFT) |
778 (update << IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT) |
779 (op << IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT));
781 DP(BNX2X_MSG_OFF, "write 0x%08x to HC addr 0x%x\n",
782 (*(u32 *)&igu_ack), hc_addr);
783 REG_WR(bp, hc_addr, (*(u32 *)&igu_ack));
785 /* Make sure that ACK is written */
790 static inline void bnx2x_update_fpsb_idx(struct bnx2x_fastpath *fp)
792 struct host_status_block *fpsb = fp->status_blk;
794 barrier(); /* status block is written to by the chip */
795 fp->fp_c_idx = fpsb->c_status_block.status_block_index;
796 fp->fp_u_idx = fpsb->u_status_block.status_block_index;
799 static u16 bnx2x_ack_int(struct bnx2x *bp)
801 u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
802 COMMAND_REG_SIMD_MASK);
803 u32 result = REG_RD(bp, hc_addr);
805 DP(BNX2X_MSG_OFF, "read 0x%08x from HC addr 0x%x\n",
813 * fast path service functions
816 static inline int bnx2x_has_tx_work_unload(struct bnx2x_fastpath *fp)
818 /* Tell compiler that consumer and producer can change */
820 return (fp->tx_pkt_prod != fp->tx_pkt_cons);
823 /* free skb in the packet ring at pos idx
824 * return idx of last bd freed
826 static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fastpath *fp,
829 struct sw_tx_bd *tx_buf = &fp->tx_buf_ring[idx];
830 struct eth_tx_start_bd *tx_start_bd;
831 struct eth_tx_bd *tx_data_bd;
832 struct sk_buff *skb = tx_buf->skb;
833 u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
836 /* prefetch skb end pointer to speedup dev_kfree_skb() */
839 DP(BNX2X_MSG_OFF, "pkt_idx %d buff @(%p)->skb %p\n",
843 DP(BNX2X_MSG_OFF, "free bd_idx %d\n", bd_idx);
844 tx_start_bd = &fp->tx_desc_ring[bd_idx].start_bd;
845 pci_unmap_single(bp->pdev, BD_UNMAP_ADDR(tx_start_bd),
846 BD_UNMAP_LEN(tx_start_bd), PCI_DMA_TODEVICE);
848 nbd = le16_to_cpu(tx_start_bd->nbd) - 1;
849 #ifdef BNX2X_STOP_ON_ERROR
850 if ((nbd - 1) > (MAX_SKB_FRAGS + 2)) {
851 BNX2X_ERR("BAD nbd!\n");
855 new_cons = nbd + tx_buf->first_bd;
857 /* Get the next bd */
858 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
860 /* Skip a parse bd... */
862 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
864 /* ...and the TSO split header bd since they have no mapping */
865 if (tx_buf->flags & BNX2X_TSO_SPLIT_BD) {
867 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
873 DP(BNX2X_MSG_OFF, "free frag bd_idx %d\n", bd_idx);
874 tx_data_bd = &fp->tx_desc_ring[bd_idx].reg_bd;
875 pci_unmap_page(bp->pdev, BD_UNMAP_ADDR(tx_data_bd),
876 BD_UNMAP_LEN(tx_data_bd), PCI_DMA_TODEVICE);
878 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
884 tx_buf->first_bd = 0;
890 static inline u16 bnx2x_tx_avail(struct bnx2x_fastpath *fp)
896 barrier(); /* Tell compiler that prod and cons can change */
897 prod = fp->tx_bd_prod;
898 cons = fp->tx_bd_cons;
900 /* NUM_TX_RINGS = number of "next-page" entries
901 It will be used as a threshold */
902 used = SUB_S16(prod, cons) + (s16)NUM_TX_RINGS;
904 #ifdef BNX2X_STOP_ON_ERROR
906 WARN_ON(used > fp->bp->tx_ring_size);
907 WARN_ON((fp->bp->tx_ring_size - used) > MAX_TX_AVAIL);
910 return (s16)(fp->bp->tx_ring_size) - used;
913 static inline int bnx2x_has_tx_work(struct bnx2x_fastpath *fp)
917 /* Tell compiler that status block fields can change */
919 hw_cons = le16_to_cpu(*fp->tx_cons_sb);
920 return hw_cons != fp->tx_pkt_cons;
923 static int bnx2x_tx_int(struct bnx2x_fastpath *fp)
925 struct bnx2x *bp = fp->bp;
926 struct netdev_queue *txq;
927 u16 hw_cons, sw_cons, bd_cons = fp->tx_bd_cons;
929 #ifdef BNX2X_STOP_ON_ERROR
930 if (unlikely(bp->panic))
934 txq = netdev_get_tx_queue(bp->dev, fp->index);
935 hw_cons = le16_to_cpu(*fp->tx_cons_sb);
936 sw_cons = fp->tx_pkt_cons;
938 while (sw_cons != hw_cons) {
941 pkt_cons = TX_BD(sw_cons);
943 /* prefetch(bp->tx_buf_ring[pkt_cons].skb); */
945 DP(NETIF_MSG_TX_DONE, "hw_cons %u sw_cons %u pkt_cons %u\n",
946 hw_cons, sw_cons, pkt_cons);
948 /* if (NEXT_TX_IDX(sw_cons) != hw_cons) {
950 prefetch(fp->tx_buf_ring[NEXT_TX_IDX(sw_cons)].skb);
953 bd_cons = bnx2x_free_tx_pkt(bp, fp, pkt_cons);
957 fp->tx_pkt_cons = sw_cons;
958 fp->tx_bd_cons = bd_cons;
960 /* TBD need a thresh? */
961 if (unlikely(netif_tx_queue_stopped(txq))) {
963 /* Need to make the tx_bd_cons update visible to start_xmit()
964 * before checking for netif_tx_queue_stopped(). Without the
965 * memory barrier, there is a small possibility that
966 * start_xmit() will miss it and cause the queue to be stopped
971 if ((netif_tx_queue_stopped(txq)) &&
972 (bp->state == BNX2X_STATE_OPEN) &&
973 (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3))
974 netif_tx_wake_queue(txq);
980 static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid);
983 static void bnx2x_sp_event(struct bnx2x_fastpath *fp,
984 union eth_rx_cqe *rr_cqe)
986 struct bnx2x *bp = fp->bp;
987 int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data);
988 int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data);
991 "fp %d cid %d got ramrod #%d state is %x type is %d\n",
992 fp->index, cid, command, bp->state,
993 rr_cqe->ramrod_cqe.ramrod_type);
998 switch (command | fp->state) {
999 case (RAMROD_CMD_ID_ETH_CLIENT_SETUP |
1000 BNX2X_FP_STATE_OPENING):
1001 DP(NETIF_MSG_IFUP, "got MULTI[%d] setup ramrod\n",
1003 fp->state = BNX2X_FP_STATE_OPEN;
1006 case (RAMROD_CMD_ID_ETH_HALT | BNX2X_FP_STATE_HALTING):
1007 DP(NETIF_MSG_IFDOWN, "got MULTI[%d] halt ramrod\n",
1009 fp->state = BNX2X_FP_STATE_HALTED;
1013 BNX2X_ERR("unexpected MC reply (%d) "
1014 "fp->state is %x\n", command, fp->state);
1017 mb(); /* force bnx2x_wait_ramrod() to see the change */
1021 switch (command | bp->state) {
1022 case (RAMROD_CMD_ID_ETH_PORT_SETUP | BNX2X_STATE_OPENING_WAIT4_PORT):
1023 DP(NETIF_MSG_IFUP, "got setup ramrod\n");
1024 bp->state = BNX2X_STATE_OPEN;
1027 case (RAMROD_CMD_ID_ETH_HALT | BNX2X_STATE_CLOSING_WAIT4_HALT):
1028 DP(NETIF_MSG_IFDOWN, "got halt ramrod\n");
1029 bp->state = BNX2X_STATE_CLOSING_WAIT4_DELETE;
1030 fp->state = BNX2X_FP_STATE_HALTED;
1033 case (RAMROD_CMD_ID_ETH_CFC_DEL | BNX2X_STATE_CLOSING_WAIT4_HALT):
1034 DP(NETIF_MSG_IFDOWN, "got delete ramrod for MULTI[%d]\n", cid);
1035 bnx2x_fp(bp, cid, state) = BNX2X_FP_STATE_CLOSED;
1039 case (RAMROD_CMD_ID_ETH_CFC_DEL | BNX2X_STATE_OPEN):
1040 DP(NETIF_MSG_IFDOWN, "got delete ramrod for CID %d\n", cid);
1041 bnx2x_cnic_cfc_comp(bp, cid);
1045 case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_OPEN):
1046 case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_DIAG):
1047 DP(NETIF_MSG_IFUP, "got set mac ramrod\n");
1048 bp->set_mac_pending--;
1052 case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_CLOSING_WAIT4_HALT):
1053 DP(NETIF_MSG_IFDOWN, "got (un)set mac ramrod\n");
1054 bp->set_mac_pending--;
1059 BNX2X_ERR("unexpected MC reply (%d) bp->state is %x\n",
1060 command, bp->state);
1063 mb(); /* force bnx2x_wait_ramrod() to see the change */
1066 static inline void bnx2x_free_rx_sge(struct bnx2x *bp,
1067 struct bnx2x_fastpath *fp, u16 index)
1069 struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
1070 struct page *page = sw_buf->page;
1071 struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
1073 /* Skip "next page" elements */
1077 pci_unmap_page(bp->pdev, pci_unmap_addr(sw_buf, mapping),
1078 SGE_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);
1079 __free_pages(page, PAGES_PER_SGE_SHIFT);
1081 sw_buf->page = NULL;
1086 static inline void bnx2x_free_rx_sge_range(struct bnx2x *bp,
1087 struct bnx2x_fastpath *fp, int last)
1091 for (i = 0; i < last; i++)
1092 bnx2x_free_rx_sge(bp, fp, i);
1095 static inline int bnx2x_alloc_rx_sge(struct bnx2x *bp,
1096 struct bnx2x_fastpath *fp, u16 index)
1098 struct page *page = alloc_pages(GFP_ATOMIC, PAGES_PER_SGE_SHIFT);
1099 struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
1100 struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
1103 if (unlikely(page == NULL))
1106 mapping = pci_map_page(bp->pdev, page, 0, SGE_PAGE_SIZE*PAGES_PER_SGE,
1107 PCI_DMA_FROMDEVICE);
1108 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
1109 __free_pages(page, PAGES_PER_SGE_SHIFT);
1113 sw_buf->page = page;
1114 pci_unmap_addr_set(sw_buf, mapping, mapping);
1116 sge->addr_hi = cpu_to_le32(U64_HI(mapping));
1117 sge->addr_lo = cpu_to_le32(U64_LO(mapping));
1122 static inline int bnx2x_alloc_rx_skb(struct bnx2x *bp,
1123 struct bnx2x_fastpath *fp, u16 index)
1125 struct sk_buff *skb;
1126 struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
1127 struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
1130 skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
1131 if (unlikely(skb == NULL))
1134 mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_size,
1135 PCI_DMA_FROMDEVICE);
1136 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
1142 pci_unmap_addr_set(rx_buf, mapping, mapping);
1144 rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
1145 rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
1150 /* note that we are not allocating a new skb,
1151 * we are just moving one from cons to prod
1152 * we are not creating a new mapping,
1153 * so there is no need to check for dma_mapping_error().
1155 static void bnx2x_reuse_rx_skb(struct bnx2x_fastpath *fp,
1156 struct sk_buff *skb, u16 cons, u16 prod)
1158 struct bnx2x *bp = fp->bp;
1159 struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
1160 struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
1161 struct eth_rx_bd *cons_bd = &fp->rx_desc_ring[cons];
1162 struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
1164 pci_dma_sync_single_for_device(bp->pdev,
1165 pci_unmap_addr(cons_rx_buf, mapping),
1166 RX_COPY_THRESH, PCI_DMA_FROMDEVICE);
1168 prod_rx_buf->skb = cons_rx_buf->skb;
1169 pci_unmap_addr_set(prod_rx_buf, mapping,
1170 pci_unmap_addr(cons_rx_buf, mapping));
1171 *prod_bd = *cons_bd;
1174 static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
1177 u16 last_max = fp->last_max_sge;
1179 if (SUB_S16(idx, last_max) > 0)
1180 fp->last_max_sge = idx;
1183 static void bnx2x_clear_sge_mask_next_elems(struct bnx2x_fastpath *fp)
1187 for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
1188 int idx = RX_SGE_CNT * i - 1;
1190 for (j = 0; j < 2; j++) {
1191 SGE_MASK_CLEAR_BIT(fp, idx);
1197 static void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
1198 struct eth_fast_path_rx_cqe *fp_cqe)
1200 struct bnx2x *bp = fp->bp;
1201 u16 sge_len = SGE_PAGE_ALIGN(le16_to_cpu(fp_cqe->pkt_len) -
1202 le16_to_cpu(fp_cqe->len_on_bd)) >>
1204 u16 last_max, last_elem, first_elem;
1211 /* First mark all used pages */
1212 for (i = 0; i < sge_len; i++)
1213 SGE_MASK_CLEAR_BIT(fp, RX_SGE(le16_to_cpu(fp_cqe->sgl[i])));
1215 DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
1216 sge_len - 1, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));
1218 /* Here we assume that the last SGE index is the biggest */
1219 prefetch((void *)(fp->sge_mask));
1220 bnx2x_update_last_max_sge(fp, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));
1222 last_max = RX_SGE(fp->last_max_sge);
1223 last_elem = last_max >> RX_SGE_MASK_ELEM_SHIFT;
1224 first_elem = RX_SGE(fp->rx_sge_prod) >> RX_SGE_MASK_ELEM_SHIFT;
1226 /* If ring is not full */
1227 if (last_elem + 1 != first_elem)
1230 /* Now update the prod */
1231 for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
1232 if (likely(fp->sge_mask[i]))
1235 fp->sge_mask[i] = RX_SGE_MASK_ELEM_ONE_MASK;
1236 delta += RX_SGE_MASK_ELEM_SZ;
1240 fp->rx_sge_prod += delta;
1241 /* clear page-end entries */
1242 bnx2x_clear_sge_mask_next_elems(fp);
1245 DP(NETIF_MSG_RX_STATUS,
1246 "fp->last_max_sge = %d fp->rx_sge_prod = %d\n",
1247 fp->last_max_sge, fp->rx_sge_prod);
1250 static inline void bnx2x_init_sge_ring_bit_mask(struct bnx2x_fastpath *fp)
1252 /* Set the mask to all 1-s: it's faster to compare to 0 than to 0xf-s */
1253 memset(fp->sge_mask, 0xff,
1254 (NUM_RX_SGE >> RX_SGE_MASK_ELEM_SHIFT)*sizeof(u64));
1256 /* Clear the two last indices in the page to 1:
1257 these are the indices that correspond to the "next" element,
1258 hence will never be indicated and should be removed from
1259 the calculations. */
1260 bnx2x_clear_sge_mask_next_elems(fp);
1263 static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
1264 struct sk_buff *skb, u16 cons, u16 prod)
1266 struct bnx2x *bp = fp->bp;
1267 struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
1268 struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
1269 struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
1272 /* move empty skb from pool to prod and map it */
1273 prod_rx_buf->skb = fp->tpa_pool[queue].skb;
1274 mapping = pci_map_single(bp->pdev, fp->tpa_pool[queue].skb->data,
1275 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
1276 pci_unmap_addr_set(prod_rx_buf, mapping, mapping);
1278 /* move partial skb from cons to pool (don't unmap yet) */
1279 fp->tpa_pool[queue] = *cons_rx_buf;
1281 /* mark bin state as start - print error if current state != stop */
1282 if (fp->tpa_state[queue] != BNX2X_TPA_STOP)
1283 BNX2X_ERR("start of bin not in stop [%d]\n", queue);
1285 fp->tpa_state[queue] = BNX2X_TPA_START;
1287 /* point prod_bd to new skb */
1288 prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
1289 prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
1291 #ifdef BNX2X_STOP_ON_ERROR
1292 fp->tpa_queue_used |= (1 << queue);
1293 #ifdef __powerpc64__
1294 DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n",
1296 DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
1298 fp->tpa_queue_used);
1302 static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
1303 struct sk_buff *skb,
1304 struct eth_fast_path_rx_cqe *fp_cqe,
1307 struct sw_rx_page *rx_pg, old_rx_pg;
1308 u16 len_on_bd = le16_to_cpu(fp_cqe->len_on_bd);
1309 u32 i, frag_len, frag_size, pages;
1313 frag_size = le16_to_cpu(fp_cqe->pkt_len) - len_on_bd;
1314 pages = SGE_PAGE_ALIGN(frag_size) >> SGE_PAGE_SHIFT;
1316 /* This is needed in order to enable forwarding support */
1318 skb_shinfo(skb)->gso_size = min((u32)SGE_PAGE_SIZE,
1319 max(frag_size, (u32)len_on_bd));
1321 #ifdef BNX2X_STOP_ON_ERROR
1323 min((u32)8, (u32)MAX_SKB_FRAGS) * SGE_PAGE_SIZE * PAGES_PER_SGE) {
1324 BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
1326 BNX2X_ERR("fp_cqe->pkt_len = %d fp_cqe->len_on_bd = %d\n",
1327 fp_cqe->pkt_len, len_on_bd);
1333 /* Run through the SGL and compose the fragmented skb */
1334 for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
1335 u16 sge_idx = RX_SGE(le16_to_cpu(fp_cqe->sgl[j]));
1337 /* FW gives the indices of the SGE as if the ring is an array
1338 (meaning that "next" element will consume 2 indices) */
1339 frag_len = min(frag_size, (u32)(SGE_PAGE_SIZE*PAGES_PER_SGE));
1340 rx_pg = &fp->rx_page_ring[sge_idx];
1343 /* If we fail to allocate a substitute page, we simply stop
1344 where we are and drop the whole packet */
1345 err = bnx2x_alloc_rx_sge(bp, fp, sge_idx);
1346 if (unlikely(err)) {
1347 fp->eth_q_stats.rx_skb_alloc_failed++;
1351 /* Unmap the page as we r going to pass it to the stack */
1352 pci_unmap_page(bp->pdev, pci_unmap_addr(&old_rx_pg, mapping),
1353 SGE_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);
1355 /* Add one frag and update the appropriate fields in the skb */
1356 skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);
1358 skb->data_len += frag_len;
1359 skb->truesize += frag_len;
1360 skb->len += frag_len;
1362 frag_size -= frag_len;
1368 static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
1369 u16 queue, int pad, int len, union eth_rx_cqe *cqe,
1372 struct sw_rx_bd *rx_buf = &fp->tpa_pool[queue];
1373 struct sk_buff *skb = rx_buf->skb;
1375 struct sk_buff *new_skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
1377 /* Unmap skb in the pool anyway, as we are going to change
1378 pool entry status to BNX2X_TPA_STOP even if new skb allocation
1380 pci_unmap_single(bp->pdev, pci_unmap_addr(rx_buf, mapping),
1381 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
1383 if (likely(new_skb)) {
1384 /* fix ip xsum and give it to the stack */
1385 /* (no need to map the new skb) */
1388 (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
1389 PARSING_FLAGS_VLAN);
1390 int is_not_hwaccel_vlan_cqe =
1391 (is_vlan_cqe && (!(bp->flags & HW_VLAN_RX_FLAG)));
1395 prefetch(((char *)(skb)) + 128);
1397 #ifdef BNX2X_STOP_ON_ERROR
1398 if (pad + len > bp->rx_buf_size) {
1399 BNX2X_ERR("skb_put is about to fail... "
1400 "pad %d len %d rx_buf_size %d\n",
1401 pad, len, bp->rx_buf_size);
1407 skb_reserve(skb, pad);
1410 skb->protocol = eth_type_trans(skb, bp->dev);
1411 skb->ip_summed = CHECKSUM_UNNECESSARY;
1416 iph = (struct iphdr *)skb->data;
1418 /* If there is no Rx VLAN offloading -
1419 take VLAN tag into an account */
1420 if (unlikely(is_not_hwaccel_vlan_cqe))
1421 iph = (struct iphdr *)((u8 *)iph + VLAN_HLEN);
1424 iph->check = ip_fast_csum((u8 *)iph, iph->ihl);
1427 if (!bnx2x_fill_frag_skb(bp, fp, skb,
1428 &cqe->fast_path_cqe, cqe_idx)) {
1430 if ((bp->vlgrp != NULL) && is_vlan_cqe &&
1431 (!is_not_hwaccel_vlan_cqe))
1432 vlan_hwaccel_receive_skb(skb, bp->vlgrp,
1433 le16_to_cpu(cqe->fast_path_cqe.
1437 netif_receive_skb(skb);
1439 DP(NETIF_MSG_RX_STATUS, "Failed to allocate new pages"
1440 " - dropping packet!\n");
1445 /* put new skb in bin */
1446 fp->tpa_pool[queue].skb = new_skb;
1449 /* else drop the packet and keep the buffer in the bin */
1450 DP(NETIF_MSG_RX_STATUS,
1451 "Failed to allocate new skb - dropping packet!\n");
1452 fp->eth_q_stats.rx_skb_alloc_failed++;
1455 fp->tpa_state[queue] = BNX2X_TPA_STOP;
1458 static inline void bnx2x_update_rx_prod(struct bnx2x *bp,
1459 struct bnx2x_fastpath *fp,
1460 u16 bd_prod, u16 rx_comp_prod,
1463 struct ustorm_eth_rx_producers rx_prods = {0};
1466 /* Update producers */
1467 rx_prods.bd_prod = bd_prod;
1468 rx_prods.cqe_prod = rx_comp_prod;
1469 rx_prods.sge_prod = rx_sge_prod;
1472 * Make sure that the BD and SGE data is updated before updating the
1473 * producers since FW might read the BD/SGE right after the producer
1475 * This is only applicable for weak-ordered memory model archs such
1476 * as IA-64. The following barrier is also mandatory since FW will
1477 * assumes BDs must have buffers.
1481 for (i = 0; i < sizeof(struct ustorm_eth_rx_producers)/4; i++)
1482 REG_WR(bp, BAR_USTRORM_INTMEM +
1483 USTORM_RX_PRODS_OFFSET(BP_PORT(bp), fp->cl_id) + i*4,
1484 ((u32 *)&rx_prods)[i]);
1486 mmiowb(); /* keep prod updates ordered */
1488 DP(NETIF_MSG_RX_STATUS,
1489 "queue[%d]: wrote bd_prod %u cqe_prod %u sge_prod %u\n",
1490 fp->index, bd_prod, rx_comp_prod, rx_sge_prod);
1493 static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
1495 struct bnx2x *bp = fp->bp;
1496 u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
1497 u16 hw_comp_cons, sw_comp_cons, sw_comp_prod;
1500 #ifdef BNX2X_STOP_ON_ERROR
1501 if (unlikely(bp->panic))
1505 /* CQ "next element" is of the size of the regular element,
1506 that's why it's ok here */
1507 hw_comp_cons = le16_to_cpu(*fp->rx_cons_sb);
1508 if ((hw_comp_cons & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
1511 bd_cons = fp->rx_bd_cons;
1512 bd_prod = fp->rx_bd_prod;
1513 bd_prod_fw = bd_prod;
1514 sw_comp_cons = fp->rx_comp_cons;
1515 sw_comp_prod = fp->rx_comp_prod;
1517 /* Memory barrier necessary as speculative reads of the rx
1518 * buffer can be ahead of the index in the status block
1522 DP(NETIF_MSG_RX_STATUS,
1523 "queue[%d]: hw_comp_cons %u sw_comp_cons %u\n",
1524 fp->index, hw_comp_cons, sw_comp_cons);
1526 while (sw_comp_cons != hw_comp_cons) {
1527 struct sw_rx_bd *rx_buf = NULL;
1528 struct sk_buff *skb;
1529 union eth_rx_cqe *cqe;
1533 comp_ring_cons = RCQ_BD(sw_comp_cons);
1534 bd_prod = RX_BD(bd_prod);
1535 bd_cons = RX_BD(bd_cons);
1537 /* Prefetch the page containing the BD descriptor
1538 at producer's index. It will be needed when new skb is
1540 prefetch((void *)(PAGE_ALIGN((unsigned long)
1541 (&fp->rx_desc_ring[bd_prod])) -
1544 cqe = &fp->rx_comp_ring[comp_ring_cons];
1545 cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
1547 DP(NETIF_MSG_RX_STATUS, "CQE type %x err %x status %x"
1548 " queue %x vlan %x len %u\n", CQE_TYPE(cqe_fp_flags),
1549 cqe_fp_flags, cqe->fast_path_cqe.status_flags,
1550 le32_to_cpu(cqe->fast_path_cqe.rss_hash_result),
1551 le16_to_cpu(cqe->fast_path_cqe.vlan_tag),
1552 le16_to_cpu(cqe->fast_path_cqe.pkt_len));
1554 /* is this a slowpath msg? */
1555 if (unlikely(CQE_TYPE(cqe_fp_flags))) {
1556 bnx2x_sp_event(fp, cqe);
1559 /* this is an rx packet */
1561 rx_buf = &fp->rx_buf_ring[bd_cons];
1564 prefetch((u8 *)skb + 256);
1565 len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
1566 pad = cqe->fast_path_cqe.placement_offset;
1568 /* If CQE is marked both TPA_START and TPA_END
1569 it is a non-TPA CQE */
1570 if ((!fp->disable_tpa) &&
1571 (TPA_TYPE(cqe_fp_flags) !=
1572 (TPA_TYPE_START | TPA_TYPE_END))) {
1573 u16 queue = cqe->fast_path_cqe.queue_index;
1575 if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_START) {
1576 DP(NETIF_MSG_RX_STATUS,
1577 "calling tpa_start on queue %d\n",
1580 bnx2x_tpa_start(fp, queue, skb,
1585 if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_END) {
1586 DP(NETIF_MSG_RX_STATUS,
1587 "calling tpa_stop on queue %d\n",
1590 if (!BNX2X_RX_SUM_FIX(cqe))
1591 BNX2X_ERR("STOP on none TCP "
1594 /* This is a size of the linear data
1596 len = le16_to_cpu(cqe->fast_path_cqe.
1598 bnx2x_tpa_stop(bp, fp, queue, pad,
1599 len, cqe, comp_ring_cons);
1600 #ifdef BNX2X_STOP_ON_ERROR
1605 bnx2x_update_sge_prod(fp,
1606 &cqe->fast_path_cqe);
1611 pci_dma_sync_single_for_device(bp->pdev,
1612 pci_unmap_addr(rx_buf, mapping),
1613 pad + RX_COPY_THRESH,
1614 PCI_DMA_FROMDEVICE);
1616 prefetch(((char *)(skb)) + 128);
1618 /* is this an error packet? */
1619 if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
1620 DP(NETIF_MSG_RX_ERR,
1621 "ERROR flags %x rx packet %u\n",
1622 cqe_fp_flags, sw_comp_cons);
1623 fp->eth_q_stats.rx_err_discard_pkt++;
1627 /* Since we don't have a jumbo ring
1628 * copy small packets if mtu > 1500
1630 if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
1631 (len <= RX_COPY_THRESH)) {
1632 struct sk_buff *new_skb;
1634 new_skb = netdev_alloc_skb(bp->dev,
1636 if (new_skb == NULL) {
1637 DP(NETIF_MSG_RX_ERR,
1638 "ERROR packet dropped "
1639 "because of alloc failure\n");
1640 fp->eth_q_stats.rx_skb_alloc_failed++;
1645 skb_copy_from_linear_data_offset(skb, pad,
1646 new_skb->data + pad, len);
1647 skb_reserve(new_skb, pad);
1648 skb_put(new_skb, len);
1650 bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
1655 if (likely(bnx2x_alloc_rx_skb(bp, fp, bd_prod) == 0)) {
1656 pci_unmap_single(bp->pdev,
1657 pci_unmap_addr(rx_buf, mapping),
1659 PCI_DMA_FROMDEVICE);
1660 skb_reserve(skb, pad);
1664 DP(NETIF_MSG_RX_ERR,
1665 "ERROR packet dropped because "
1666 "of alloc failure\n");
1667 fp->eth_q_stats.rx_skb_alloc_failed++;
1669 bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
1673 skb->protocol = eth_type_trans(skb, bp->dev);
1675 skb->ip_summed = CHECKSUM_NONE;
1677 if (likely(BNX2X_RX_CSUM_OK(cqe)))
1678 skb->ip_summed = CHECKSUM_UNNECESSARY;
1680 fp->eth_q_stats.hw_csum_err++;
1684 skb_record_rx_queue(skb, fp->index);
1687 if ((bp->vlgrp != NULL) && (bp->flags & HW_VLAN_RX_FLAG) &&
1688 (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
1689 PARSING_FLAGS_VLAN))
1690 vlan_hwaccel_receive_skb(skb, bp->vlgrp,
1691 le16_to_cpu(cqe->fast_path_cqe.vlan_tag));
1694 netif_receive_skb(skb);
1700 bd_cons = NEXT_RX_IDX(bd_cons);
1701 bd_prod = NEXT_RX_IDX(bd_prod);
1702 bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
1705 sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
1706 sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);
1708 if (rx_pkt == budget)
1712 fp->rx_bd_cons = bd_cons;
1713 fp->rx_bd_prod = bd_prod_fw;
1714 fp->rx_comp_cons = sw_comp_cons;
1715 fp->rx_comp_prod = sw_comp_prod;
1717 /* Update producers */
1718 bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
1721 fp->rx_pkt += rx_pkt;
1727 static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
1729 struct bnx2x_fastpath *fp = fp_cookie;
1730 struct bnx2x *bp = fp->bp;
1732 /* Return here if interrupt is disabled */
1733 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
1734 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
1738 DP(BNX2X_MSG_FP, "got an MSI-X interrupt on IDX:SB [%d:%d]\n",
1739 fp->index, fp->sb_id);
1740 bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);
1742 #ifdef BNX2X_STOP_ON_ERROR
1743 if (unlikely(bp->panic))
1747 /* Handle Rx and Tx according to MSI-X vector */
1748 prefetch(fp->rx_cons_sb);
1749 prefetch(fp->tx_cons_sb);
1750 prefetch(&fp->status_blk->u_status_block.status_block_index);
1751 prefetch(&fp->status_blk->c_status_block.status_block_index);
1752 napi_schedule(&bnx2x_fp(bp, fp->index, napi));
1757 static irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
1759 struct bnx2x *bp = netdev_priv(dev_instance);
1760 u16 status = bnx2x_ack_int(bp);
1764 /* Return here if interrupt is shared and it's not for us */
1765 if (unlikely(status == 0)) {
1766 DP(NETIF_MSG_INTR, "not our interrupt!\n");
1769 DP(NETIF_MSG_INTR, "got an interrupt status 0x%x\n", status);
1771 /* Return here if interrupt is disabled */
1772 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
1773 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
1777 #ifdef BNX2X_STOP_ON_ERROR
1778 if (unlikely(bp->panic))
1782 for (i = 0; i < BNX2X_NUM_QUEUES(bp); i++) {
1783 struct bnx2x_fastpath *fp = &bp->fp[i];
1785 mask = 0x2 << fp->sb_id;
1786 if (status & mask) {
1787 /* Handle Rx and Tx according to SB id */
1788 prefetch(fp->rx_cons_sb);
1789 prefetch(&fp->status_blk->u_status_block.
1790 status_block_index);
1791 prefetch(fp->tx_cons_sb);
1792 prefetch(&fp->status_blk->c_status_block.
1793 status_block_index);
1794 napi_schedule(&bnx2x_fp(bp, fp->index, napi));
1800 mask = 0x2 << CNIC_SB_ID(bp);
1801 if (status & (mask | 0x1)) {
1802 struct cnic_ops *c_ops = NULL;
1805 c_ops = rcu_dereference(bp->cnic_ops);
1807 c_ops->cnic_handler(bp->cnic_data, NULL);
1814 if (unlikely(status & 0x1)) {
1815 queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
1823 DP(NETIF_MSG_INTR, "got an unknown interrupt! (status %u)\n",
1829 /* end of fast path */
1831 static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event);
1836 * General service functions
1839 static int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource)
1842 u32 resource_bit = (1 << resource);
1843 int func = BP_FUNC(bp);
1844 u32 hw_lock_control_reg;
1847 /* Validating that the resource is within range */
1848 if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1850 "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1851 resource, HW_LOCK_MAX_RESOURCE_VALUE);
1856 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1858 hw_lock_control_reg =
1859 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1862 /* Validating that the resource is not already taken */
1863 lock_status = REG_RD(bp, hw_lock_control_reg);
1864 if (lock_status & resource_bit) {
1865 DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n",
1866 lock_status, resource_bit);
1870 /* Try for 5 second every 5ms */
1871 for (cnt = 0; cnt < 1000; cnt++) {
1872 /* Try to acquire the lock */
1873 REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
1874 lock_status = REG_RD(bp, hw_lock_control_reg);
1875 if (lock_status & resource_bit)
1880 DP(NETIF_MSG_HW, "Timeout\n");
1884 static int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource)
1887 u32 resource_bit = (1 << resource);
1888 int func = BP_FUNC(bp);
1889 u32 hw_lock_control_reg;
1891 /* Validating that the resource is within range */
1892 if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1894 "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1895 resource, HW_LOCK_MAX_RESOURCE_VALUE);
1900 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1902 hw_lock_control_reg =
1903 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1906 /* Validating that the resource is currently taken */
1907 lock_status = REG_RD(bp, hw_lock_control_reg);
1908 if (!(lock_status & resource_bit)) {
1909 DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n",
1910 lock_status, resource_bit);
1914 REG_WR(bp, hw_lock_control_reg, resource_bit);
1918 /* HW Lock for shared dual port PHYs */
1919 static void bnx2x_acquire_phy_lock(struct bnx2x *bp)
1921 mutex_lock(&bp->port.phy_mutex);
1923 if (bp->port.need_hw_lock)
1924 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
1927 static void bnx2x_release_phy_lock(struct bnx2x *bp)
1929 if (bp->port.need_hw_lock)
1930 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
1932 mutex_unlock(&bp->port.phy_mutex);
1935 int bnx2x_get_gpio(struct bnx2x *bp, int gpio_num, u8 port)
1937 /* The GPIO should be swapped if swap register is set and active */
1938 int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
1939 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
1940 int gpio_shift = gpio_num +
1941 (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
1942 u32 gpio_mask = (1 << gpio_shift);
1946 if (gpio_num > MISC_REGISTERS_GPIO_3) {
1947 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
1951 /* read GPIO value */
1952 gpio_reg = REG_RD(bp, MISC_REG_GPIO);
1954 /* get the requested pin value */
1955 if ((gpio_reg & gpio_mask) == gpio_mask)
1960 DP(NETIF_MSG_LINK, "pin %d value 0x%x\n", gpio_num, value);
1965 int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
1967 /* The GPIO should be swapped if swap register is set and active */
1968 int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
1969 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
1970 int gpio_shift = gpio_num +
1971 (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
1972 u32 gpio_mask = (1 << gpio_shift);
1975 if (gpio_num > MISC_REGISTERS_GPIO_3) {
1976 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
1980 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
1981 /* read GPIO and mask except the float bits */
1982 gpio_reg = (REG_RD(bp, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT);
1985 case MISC_REGISTERS_GPIO_OUTPUT_LOW:
1986 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output low\n",
1987 gpio_num, gpio_shift);
1988 /* clear FLOAT and set CLR */
1989 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1990 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS);
1993 case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
1994 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output high\n",
1995 gpio_num, gpio_shift);
1996 /* clear FLOAT and set SET */
1997 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1998 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_SET_POS);
2001 case MISC_REGISTERS_GPIO_INPUT_HI_Z:
2002 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> input\n",
2003 gpio_num, gpio_shift);
2005 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
2012 REG_WR(bp, MISC_REG_GPIO, gpio_reg);
2013 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
2018 int bnx2x_set_gpio_int(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
2020 /* The GPIO should be swapped if swap register is set and active */
2021 int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
2022 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
2023 int gpio_shift = gpio_num +
2024 (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
2025 u32 gpio_mask = (1 << gpio_shift);
2028 if (gpio_num > MISC_REGISTERS_GPIO_3) {
2029 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
2033 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
2035 gpio_reg = REG_RD(bp, MISC_REG_GPIO_INT);
2038 case MISC_REGISTERS_GPIO_INT_OUTPUT_CLR:
2039 DP(NETIF_MSG_LINK, "Clear GPIO INT %d (shift %d) -> "
2040 "output low\n", gpio_num, gpio_shift);
2041 /* clear SET and set CLR */
2042 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
2043 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
2046 case MISC_REGISTERS_GPIO_INT_OUTPUT_SET:
2047 DP(NETIF_MSG_LINK, "Set GPIO INT %d (shift %d) -> "
2048 "output high\n", gpio_num, gpio_shift);
2049 /* clear CLR and set SET */
2050 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
2051 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
2058 REG_WR(bp, MISC_REG_GPIO_INT, gpio_reg);
2059 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
2064 static int bnx2x_set_spio(struct bnx2x *bp, int spio_num, u32 mode)
2066 u32 spio_mask = (1 << spio_num);
2069 if ((spio_num < MISC_REGISTERS_SPIO_4) ||
2070 (spio_num > MISC_REGISTERS_SPIO_7)) {
2071 BNX2X_ERR("Invalid SPIO %d\n", spio_num);
2075 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
2076 /* read SPIO and mask except the float bits */
2077 spio_reg = (REG_RD(bp, MISC_REG_SPIO) & MISC_REGISTERS_SPIO_FLOAT);
2080 case MISC_REGISTERS_SPIO_OUTPUT_LOW:
2081 DP(NETIF_MSG_LINK, "Set SPIO %d -> output low\n", spio_num);
2082 /* clear FLOAT and set CLR */
2083 spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
2084 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_CLR_POS);
2087 case MISC_REGISTERS_SPIO_OUTPUT_HIGH:
2088 DP(NETIF_MSG_LINK, "Set SPIO %d -> output high\n", spio_num);
2089 /* clear FLOAT and set SET */
2090 spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
2091 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_SET_POS);
2094 case MISC_REGISTERS_SPIO_INPUT_HI_Z:
2095 DP(NETIF_MSG_LINK, "Set SPIO %d -> input\n", spio_num);
2097 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
2104 REG_WR(bp, MISC_REG_SPIO, spio_reg);
2105 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
2110 static void bnx2x_calc_fc_adv(struct bnx2x *bp)
2112 switch (bp->link_vars.ieee_fc &
2113 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK) {
2114 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE:
2115 bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
2119 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH:
2120 bp->port.advertising |= (ADVERTISED_Asym_Pause |
2124 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC:
2125 bp->port.advertising |= ADVERTISED_Asym_Pause;
2129 bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
2135 static void bnx2x_link_report(struct bnx2x *bp)
2137 if (bp->flags & MF_FUNC_DIS) {
2138 netif_carrier_off(bp->dev);
2139 printk(KERN_ERR PFX "%s NIC Link is Down\n", bp->dev->name);
2143 if (bp->link_vars.link_up) {
2146 if (bp->state == BNX2X_STATE_OPEN)
2147 netif_carrier_on(bp->dev);
2148 printk(KERN_INFO PFX "%s NIC Link is Up, ", bp->dev->name);
2150 line_speed = bp->link_vars.line_speed;
2155 ((bp->mf_config & FUNC_MF_CFG_MAX_BW_MASK) >>
2156 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
2157 if (vn_max_rate < line_speed)
2158 line_speed = vn_max_rate;
2160 printk("%d Mbps ", line_speed);
2162 if (bp->link_vars.duplex == DUPLEX_FULL)
2163 printk("full duplex");
2165 printk("half duplex");
2167 if (bp->link_vars.flow_ctrl != BNX2X_FLOW_CTRL_NONE) {
2168 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) {
2169 printk(", receive ");
2170 if (bp->link_vars.flow_ctrl &
2172 printk("& transmit ");
2174 printk(", transmit ");
2176 printk("flow control ON");
2180 } else { /* link_down */
2181 netif_carrier_off(bp->dev);
2182 printk(KERN_ERR PFX "%s NIC Link is Down\n", bp->dev->name);
2186 static u8 bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode)
2188 if (!BP_NOMCP(bp)) {
2191 /* Initialize link parameters structure variables */
2192 /* It is recommended to turn off RX FC for jumbo frames
2193 for better performance */
2194 if (bp->dev->mtu > 5000)
2195 bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_TX;
2197 bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;
2199 bnx2x_acquire_phy_lock(bp);
2201 if (load_mode == LOAD_DIAG)
2202 bp->link_params.loopback_mode = LOOPBACK_XGXS_10;
2204 rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars);
2206 bnx2x_release_phy_lock(bp);
2208 bnx2x_calc_fc_adv(bp);
2210 if (CHIP_REV_IS_SLOW(bp) && bp->link_vars.link_up) {
2211 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2212 bnx2x_link_report(bp);
2217 BNX2X_ERR("Bootcode is missing - can not initialize link\n");
2221 static void bnx2x_link_set(struct bnx2x *bp)
2223 if (!BP_NOMCP(bp)) {
2224 bnx2x_acquire_phy_lock(bp);
2225 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
2226 bnx2x_release_phy_lock(bp);
2228 bnx2x_calc_fc_adv(bp);
2230 BNX2X_ERR("Bootcode is missing - can not set link\n");
2233 static void bnx2x__link_reset(struct bnx2x *bp)
2235 if (!BP_NOMCP(bp)) {
2236 bnx2x_acquire_phy_lock(bp);
2237 bnx2x_link_reset(&bp->link_params, &bp->link_vars, 1);
2238 bnx2x_release_phy_lock(bp);
2240 BNX2X_ERR("Bootcode is missing - can not reset link\n");
2243 static u8 bnx2x_link_test(struct bnx2x *bp)
2247 bnx2x_acquire_phy_lock(bp);
2248 rc = bnx2x_test_link(&bp->link_params, &bp->link_vars);
2249 bnx2x_release_phy_lock(bp);
2254 static void bnx2x_init_port_minmax(struct bnx2x *bp)
2256 u32 r_param = bp->link_vars.line_speed / 8;
2257 u32 fair_periodic_timeout_usec;
2260 memset(&(bp->cmng.rs_vars), 0,
2261 sizeof(struct rate_shaping_vars_per_port));
2262 memset(&(bp->cmng.fair_vars), 0, sizeof(struct fairness_vars_per_port));
2264 /* 100 usec in SDM ticks = 25 since each tick is 4 usec */
2265 bp->cmng.rs_vars.rs_periodic_timeout = RS_PERIODIC_TIMEOUT_USEC / 4;
2267 /* this is the threshold below which no timer arming will occur
2268 1.25 coefficient is for the threshold to be a little bigger
2269 than the real time, to compensate for timer in-accuracy */
2270 bp->cmng.rs_vars.rs_threshold =
2271 (RS_PERIODIC_TIMEOUT_USEC * r_param * 5) / 4;
2273 /* resolution of fairness timer */
2274 fair_periodic_timeout_usec = QM_ARB_BYTES / r_param;
2275 /* for 10G it is 1000usec. for 1G it is 10000usec. */
2276 t_fair = T_FAIR_COEF / bp->link_vars.line_speed;
2278 /* this is the threshold below which we won't arm the timer anymore */
2279 bp->cmng.fair_vars.fair_threshold = QM_ARB_BYTES;
2281 /* we multiply by 1e3/8 to get bytes/msec.
2282 We don't want the credits to pass a credit
2283 of the t_fair*FAIR_MEM (algorithm resolution) */
2284 bp->cmng.fair_vars.upper_bound = r_param * t_fair * FAIR_MEM;
2285 /* since each tick is 4 usec */
2286 bp->cmng.fair_vars.fairness_timeout = fair_periodic_timeout_usec / 4;
2289 /* Calculates the sum of vn_min_rates.
2290 It's needed for further normalizing of the min_rates.
2292 sum of vn_min_rates.
2294 0 - if all the min_rates are 0.
2295 In the later case fainess algorithm should be deactivated.
2296 If not all min_rates are zero then those that are zeroes will be set to 1.
2298 static void bnx2x_calc_vn_weight_sum(struct bnx2x *bp)
2301 int port = BP_PORT(bp);
2304 bp->vn_weight_sum = 0;
2305 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
2306 int func = 2*vn + port;
2307 u32 vn_cfg = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
2308 u32 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
2309 FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
2311 /* Skip hidden vns */
2312 if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE)
2315 /* If min rate is zero - set it to 1 */
2317 vn_min_rate = DEF_MIN_RATE;
2321 bp->vn_weight_sum += vn_min_rate;
2324 /* ... only if all min rates are zeros - disable fairness */
2326 bp->cmng.flags.cmng_enables &=
2327 ~CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
2328 DP(NETIF_MSG_IFUP, "All MIN values are zeroes"
2329 " fairness will be disabled\n");
2331 bp->cmng.flags.cmng_enables |=
2332 CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
2335 static void bnx2x_init_vn_minmax(struct bnx2x *bp, int func)
2337 struct rate_shaping_vars_per_vn m_rs_vn;
2338 struct fairness_vars_per_vn m_fair_vn;
2339 u32 vn_cfg = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
2340 u16 vn_min_rate, vn_max_rate;
2343 /* If function is hidden - set min and max to zeroes */
2344 if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) {
2349 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
2350 FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
2351 /* If min rate is zero - set it to 1 */
2353 vn_min_rate = DEF_MIN_RATE;
2354 vn_max_rate = ((vn_cfg & FUNC_MF_CFG_MAX_BW_MASK) >>
2355 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
2358 "func %d: vn_min_rate %d vn_max_rate %d vn_weight_sum %d\n",
2359 func, vn_min_rate, vn_max_rate, bp->vn_weight_sum);
2361 memset(&m_rs_vn, 0, sizeof(struct rate_shaping_vars_per_vn));
2362 memset(&m_fair_vn, 0, sizeof(struct fairness_vars_per_vn));
2364 /* global vn counter - maximal Mbps for this vn */
2365 m_rs_vn.vn_counter.rate = vn_max_rate;
2367 /* quota - number of bytes transmitted in this period */
2368 m_rs_vn.vn_counter.quota =
2369 (vn_max_rate * RS_PERIODIC_TIMEOUT_USEC) / 8;
2371 if (bp->vn_weight_sum) {
2372 /* credit for each period of the fairness algorithm:
2373 number of bytes in T_FAIR (the vn share the port rate).
2374 vn_weight_sum should not be larger than 10000, thus
2375 T_FAIR_COEF / (8 * vn_weight_sum) will always be greater
2377 m_fair_vn.vn_credit_delta =
2378 max((u32)(vn_min_rate * (T_FAIR_COEF /
2379 (8 * bp->vn_weight_sum))),
2380 (u32)(bp->cmng.fair_vars.fair_threshold * 2));
2381 DP(NETIF_MSG_IFUP, "m_fair_vn.vn_credit_delta=%d\n",
2382 m_fair_vn.vn_credit_delta);
2385 /* Store it to internal memory */
2386 for (i = 0; i < sizeof(struct rate_shaping_vars_per_vn)/4; i++)
2387 REG_WR(bp, BAR_XSTRORM_INTMEM +
2388 XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func) + i * 4,
2389 ((u32 *)(&m_rs_vn))[i]);
2391 for (i = 0; i < sizeof(struct fairness_vars_per_vn)/4; i++)
2392 REG_WR(bp, BAR_XSTRORM_INTMEM +
2393 XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func) + i * 4,
2394 ((u32 *)(&m_fair_vn))[i]);
2398 /* This function is called upon link interrupt */
2399 static void bnx2x_link_attn(struct bnx2x *bp)
2401 /* Make sure that we are synced with the current statistics */
2402 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2404 bnx2x_link_update(&bp->link_params, &bp->link_vars);
2406 if (bp->link_vars.link_up) {
2408 /* dropless flow control */
2409 if (CHIP_IS_E1H(bp) && bp->dropless_fc) {
2410 int port = BP_PORT(bp);
2411 u32 pause_enabled = 0;
2413 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
2416 REG_WR(bp, BAR_USTRORM_INTMEM +
2417 USTORM_ETH_PAUSE_ENABLED_OFFSET(port),
2421 if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
2422 struct host_port_stats *pstats;
2424 pstats = bnx2x_sp(bp, port_stats);
2425 /* reset old bmac stats */
2426 memset(&(pstats->mac_stx[0]), 0,
2427 sizeof(struct mac_stx));
2429 if (bp->state == BNX2X_STATE_OPEN)
2430 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2433 /* indicate link status */
2434 bnx2x_link_report(bp);
2437 int port = BP_PORT(bp);
2441 /* Set the attention towards other drivers on the same port */
2442 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
2443 if (vn == BP_E1HVN(bp))
2446 func = ((vn << 1) | port);
2447 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
2448 (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
2451 if (bp->link_vars.link_up) {
2454 /* Init rate shaping and fairness contexts */
2455 bnx2x_init_port_minmax(bp);
2457 for (vn = VN_0; vn < E1HVN_MAX; vn++)
2458 bnx2x_init_vn_minmax(bp, 2*vn + port);
2460 /* Store it to internal memory */
2462 i < sizeof(struct cmng_struct_per_port) / 4; i++)
2463 REG_WR(bp, BAR_XSTRORM_INTMEM +
2464 XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i*4,
2465 ((u32 *)(&bp->cmng))[i]);
2470 static void bnx2x__link_status_update(struct bnx2x *bp)
2472 if ((bp->state != BNX2X_STATE_OPEN) || (bp->flags & MF_FUNC_DIS))
2475 bnx2x_link_status_update(&bp->link_params, &bp->link_vars);
2477 if (bp->link_vars.link_up)
2478 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2480 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2482 bnx2x_calc_vn_weight_sum(bp);
2484 /* indicate link status */
2485 bnx2x_link_report(bp);
2488 static void bnx2x_pmf_update(struct bnx2x *bp)
2490 int port = BP_PORT(bp);
2494 DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
2496 /* enable nig attention */
2497 val = (0xff0f | (1 << (BP_E1HVN(bp) + 4)));
2498 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
2499 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
2501 bnx2x_stats_handle(bp, STATS_EVENT_PMF);
2509 * General service functions
2512 /* send the MCP a request, block until there is a reply */
2513 u32 bnx2x_fw_command(struct bnx2x *bp, u32 command)
2515 int func = BP_FUNC(bp);
2516 u32 seq = ++bp->fw_seq;
2519 u8 delay = CHIP_REV_IS_SLOW(bp) ? 100 : 10;
2521 mutex_lock(&bp->fw_mb_mutex);
2522 SHMEM_WR(bp, func_mb[func].drv_mb_header, (command | seq));
2523 DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB\n", (command | seq));
2526 /* let the FW do it's magic ... */
2529 rc = SHMEM_RD(bp, func_mb[func].fw_mb_header);
2531 /* Give the FW up to 5 second (500*10ms) */
2532 } while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 500));
2534 DP(BNX2X_MSG_MCP, "[after %d ms] read (%x) seq is (%x) from FW MB\n",
2535 cnt*delay, rc, seq);
2537 /* is this a reply to our command? */
2538 if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK))
2539 rc &= FW_MSG_CODE_MASK;
2542 BNX2X_ERR("FW failed to respond!\n");
2546 mutex_unlock(&bp->fw_mb_mutex);
2551 static void bnx2x_set_storm_rx_mode(struct bnx2x *bp);
2552 static void bnx2x_set_eth_mac_addr_e1h(struct bnx2x *bp, int set);
2553 static void bnx2x_set_rx_mode(struct net_device *dev);
2555 static void bnx2x_e1h_disable(struct bnx2x *bp)
2557 int port = BP_PORT(bp);
2559 netif_tx_disable(bp->dev);
2560 bp->dev->trans_start = jiffies; /* prevent tx timeout */
2562 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
2564 netif_carrier_off(bp->dev);
2567 static void bnx2x_e1h_enable(struct bnx2x *bp)
2569 int port = BP_PORT(bp);
2571 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
2573 /* Tx queue should be only reenabled */
2574 netif_tx_wake_all_queues(bp->dev);
2577 * Should not call netif_carrier_on since it will be called if the link
2578 * is up when checking for link state
2582 static void bnx2x_update_min_max(struct bnx2x *bp)
2584 int port = BP_PORT(bp);
2587 /* Init rate shaping and fairness contexts */
2588 bnx2x_init_port_minmax(bp);
2590 bnx2x_calc_vn_weight_sum(bp);
2592 for (vn = VN_0; vn < E1HVN_MAX; vn++)
2593 bnx2x_init_vn_minmax(bp, 2*vn + port);
2598 /* Set the attention towards other drivers on the same port */
2599 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
2600 if (vn == BP_E1HVN(bp))
2603 func = ((vn << 1) | port);
2604 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
2605 (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
2608 /* Store it to internal memory */
2609 for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++)
2610 REG_WR(bp, BAR_XSTRORM_INTMEM +
2611 XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i*4,
2612 ((u32 *)(&bp->cmng))[i]);
2616 static void bnx2x_dcc_event(struct bnx2x *bp, u32 dcc_event)
2618 DP(BNX2X_MSG_MCP, "dcc_event 0x%x\n", dcc_event);
2620 if (dcc_event & DRV_STATUS_DCC_DISABLE_ENABLE_PF) {
2623 * This is the only place besides the function initialization
2624 * where the bp->flags can change so it is done without any
2627 if (bp->mf_config & FUNC_MF_CFG_FUNC_DISABLED) {
2628 DP(NETIF_MSG_IFDOWN, "mf_cfg function disabled\n");
2629 bp->flags |= MF_FUNC_DIS;
2631 bnx2x_e1h_disable(bp);
2633 DP(NETIF_MSG_IFUP, "mf_cfg function enabled\n");
2634 bp->flags &= ~MF_FUNC_DIS;
2636 bnx2x_e1h_enable(bp);
2638 dcc_event &= ~DRV_STATUS_DCC_DISABLE_ENABLE_PF;
2640 if (dcc_event & DRV_STATUS_DCC_BANDWIDTH_ALLOCATION) {
2642 bnx2x_update_min_max(bp);
2643 dcc_event &= ~DRV_STATUS_DCC_BANDWIDTH_ALLOCATION;
2646 /* Report results to MCP */
2648 bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_FAILURE);
2650 bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_OK);
2653 /* must be called under the spq lock */
2654 static inline struct eth_spe *bnx2x_sp_get_next(struct bnx2x *bp)
2656 struct eth_spe *next_spe = bp->spq_prod_bd;
2658 if (bp->spq_prod_bd == bp->spq_last_bd) {
2659 bp->spq_prod_bd = bp->spq;
2660 bp->spq_prod_idx = 0;
2661 DP(NETIF_MSG_TIMER, "end of spq\n");
2669 /* must be called under the spq lock */
2670 static inline void bnx2x_sp_prod_update(struct bnx2x *bp)
2672 int func = BP_FUNC(bp);
2674 /* Make sure that BD data is updated before writing the producer */
2677 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func),
2682 /* the slow path queue is odd since completions arrive on the fastpath ring */
2683 static int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
2684 u32 data_hi, u32 data_lo, int common)
2686 struct eth_spe *spe;
2688 DP(BNX2X_MSG_SP/*NETIF_MSG_TIMER*/,
2689 "SPQE (%x:%x) command %d hw_cid %x data (%x:%x) left %x\n",
2690 (u32)U64_HI(bp->spq_mapping), (u32)(U64_LO(bp->spq_mapping) +
2691 (void *)bp->spq_prod_bd - (void *)bp->spq), command,
2692 HW_CID(bp, cid), data_hi, data_lo, bp->spq_left);
2694 #ifdef BNX2X_STOP_ON_ERROR
2695 if (unlikely(bp->panic))
2699 spin_lock_bh(&bp->spq_lock);
2701 if (!bp->spq_left) {
2702 BNX2X_ERR("BUG! SPQ ring full!\n");
2703 spin_unlock_bh(&bp->spq_lock);
2708 spe = bnx2x_sp_get_next(bp);
2710 /* CID needs port number to be encoded int it */
2711 spe->hdr.conn_and_cmd_data =
2712 cpu_to_le32(((command << SPE_HDR_CMD_ID_SHIFT) |
2714 spe->hdr.type = cpu_to_le16(ETH_CONNECTION_TYPE);
2717 cpu_to_le16((1 << SPE_HDR_COMMON_RAMROD_SHIFT));
2719 spe->data.mac_config_addr.hi = cpu_to_le32(data_hi);
2720 spe->data.mac_config_addr.lo = cpu_to_le32(data_lo);
2724 bnx2x_sp_prod_update(bp);
2725 spin_unlock_bh(&bp->spq_lock);
2729 /* acquire split MCP access lock register */
2730 static int bnx2x_acquire_alr(struct bnx2x *bp)
2737 for (j = 0; j < i*10; j++) {
2739 REG_WR(bp, GRCBASE_MCP + 0x9c, val);
2740 val = REG_RD(bp, GRCBASE_MCP + 0x9c);
2741 if (val & (1L << 31))
2746 if (!(val & (1L << 31))) {
2747 BNX2X_ERR("Cannot acquire MCP access lock register\n");
2754 /* release split MCP access lock register */
2755 static void bnx2x_release_alr(struct bnx2x *bp)
2759 REG_WR(bp, GRCBASE_MCP + 0x9c, val);
2762 static inline u16 bnx2x_update_dsb_idx(struct bnx2x *bp)
2764 struct host_def_status_block *def_sb = bp->def_status_blk;
2767 barrier(); /* status block is written to by the chip */
2768 if (bp->def_att_idx != def_sb->atten_status_block.attn_bits_index) {
2769 bp->def_att_idx = def_sb->atten_status_block.attn_bits_index;
2772 if (bp->def_c_idx != def_sb->c_def_status_block.status_block_index) {
2773 bp->def_c_idx = def_sb->c_def_status_block.status_block_index;
2776 if (bp->def_u_idx != def_sb->u_def_status_block.status_block_index) {
2777 bp->def_u_idx = def_sb->u_def_status_block.status_block_index;
2780 if (bp->def_x_idx != def_sb->x_def_status_block.status_block_index) {
2781 bp->def_x_idx = def_sb->x_def_status_block.status_block_index;
2784 if (bp->def_t_idx != def_sb->t_def_status_block.status_block_index) {
2785 bp->def_t_idx = def_sb->t_def_status_block.status_block_index;
2792 * slow path service functions
2795 static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
2797 int port = BP_PORT(bp);
2798 u32 hc_addr = (HC_REG_COMMAND_REG + port*32 +
2799 COMMAND_REG_ATTN_BITS_SET);
2800 u32 aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
2801 MISC_REG_AEU_MASK_ATTN_FUNC_0;
2802 u32 nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 :
2803 NIG_REG_MASK_INTERRUPT_PORT0;
2807 if (bp->attn_state & asserted)
2808 BNX2X_ERR("IGU ERROR\n");
2810 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2811 aeu_mask = REG_RD(bp, aeu_addr);
2813 DP(NETIF_MSG_HW, "aeu_mask %x newly asserted %x\n",
2814 aeu_mask, asserted);
2815 aeu_mask &= ~(asserted & 0xff);
2816 DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
2818 REG_WR(bp, aeu_addr, aeu_mask);
2819 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2821 DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
2822 bp->attn_state |= asserted;
2823 DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
2825 if (asserted & ATTN_HARD_WIRED_MASK) {
2826 if (asserted & ATTN_NIG_FOR_FUNC) {
2828 bnx2x_acquire_phy_lock(bp);
2830 /* save nig interrupt mask */
2831 nig_mask = REG_RD(bp, nig_int_mask_addr);
2832 REG_WR(bp, nig_int_mask_addr, 0);
2834 bnx2x_link_attn(bp);
2836 /* handle unicore attn? */
2838 if (asserted & ATTN_SW_TIMER_4_FUNC)
2839 DP(NETIF_MSG_HW, "ATTN_SW_TIMER_4_FUNC!\n");
2841 if (asserted & GPIO_2_FUNC)
2842 DP(NETIF_MSG_HW, "GPIO_2_FUNC!\n");
2844 if (asserted & GPIO_3_FUNC)
2845 DP(NETIF_MSG_HW, "GPIO_3_FUNC!\n");
2847 if (asserted & GPIO_4_FUNC)
2848 DP(NETIF_MSG_HW, "GPIO_4_FUNC!\n");
2851 if (asserted & ATTN_GENERAL_ATTN_1) {
2852 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_1!\n");
2853 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_1, 0x0);
2855 if (asserted & ATTN_GENERAL_ATTN_2) {
2856 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_2!\n");
2857 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_2, 0x0);
2859 if (asserted & ATTN_GENERAL_ATTN_3) {
2860 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_3!\n");
2861 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_3, 0x0);
2864 if (asserted & ATTN_GENERAL_ATTN_4) {
2865 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_4!\n");
2866 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_4, 0x0);
2868 if (asserted & ATTN_GENERAL_ATTN_5) {
2869 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_5!\n");
2870 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_5, 0x0);
2872 if (asserted & ATTN_GENERAL_ATTN_6) {
2873 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_6!\n");
2874 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_6, 0x0);
2878 } /* if hardwired */
2880 DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
2882 REG_WR(bp, hc_addr, asserted);
2884 /* now set back the mask */
2885 if (asserted & ATTN_NIG_FOR_FUNC) {
2886 REG_WR(bp, nig_int_mask_addr, nig_mask);
2887 bnx2x_release_phy_lock(bp);
2891 static inline void bnx2x_fan_failure(struct bnx2x *bp)
2893 int port = BP_PORT(bp);
2895 /* mark the failure */
2896 bp->link_params.ext_phy_config &= ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
2897 bp->link_params.ext_phy_config |= PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE;
2898 SHMEM_WR(bp, dev_info.port_hw_config[port].external_phy_config,
2899 bp->link_params.ext_phy_config);
2901 /* log the failure */
2902 printk(KERN_ERR PFX "Fan Failure on Network Controller %s has caused"
2903 " the driver to shutdown the card to prevent permanent"
2904 " damage. Please contact Dell Support for assistance\n",
2908 static inline void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
2910 int port = BP_PORT(bp);
2912 u32 val, swap_val, swap_override;
2914 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
2915 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
2917 if (attn & AEU_INPUTS_ATTN_BITS_SPIO5) {
2919 val = REG_RD(bp, reg_offset);
2920 val &= ~AEU_INPUTS_ATTN_BITS_SPIO5;
2921 REG_WR(bp, reg_offset, val);
2923 BNX2X_ERR("SPIO5 hw attention\n");
2925 /* Fan failure attention */
2926 switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
2927 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
2928 /* Low power mode is controlled by GPIO 2 */
2929 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
2930 MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
2931 /* The PHY reset is controlled by GPIO 1 */
2932 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
2933 MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
2936 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
2937 /* The PHY reset is controlled by GPIO 1 */
2938 /* fake the port number to cancel the swap done in
2940 swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
2941 swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
2942 port = (swap_val && swap_override) ^ 1;
2943 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
2944 MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
2950 bnx2x_fan_failure(bp);
2953 if (attn & (AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 |
2954 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1)) {
2955 bnx2x_acquire_phy_lock(bp);
2956 bnx2x_handle_module_detect_int(&bp->link_params);
2957 bnx2x_release_phy_lock(bp);
2960 if (attn & HW_INTERRUT_ASSERT_SET_0) {
2962 val = REG_RD(bp, reg_offset);
2963 val &= ~(attn & HW_INTERRUT_ASSERT_SET_0);
2964 REG_WR(bp, reg_offset, val);
2966 BNX2X_ERR("FATAL HW block attention set0 0x%x\n",
2967 (u32)(attn & HW_INTERRUT_ASSERT_SET_0));
2972 static inline void bnx2x_attn_int_deasserted1(struct bnx2x *bp, u32 attn)
2976 if (attn & AEU_INPUTS_ATTN_BITS_DOORBELLQ_HW_INTERRUPT) {
2978 val = REG_RD(bp, DORQ_REG_DORQ_INT_STS_CLR);
2979 BNX2X_ERR("DB hw attention 0x%x\n", val);
2980 /* DORQ discard attention */
2982 BNX2X_ERR("FATAL error from DORQ\n");
2985 if (attn & HW_INTERRUT_ASSERT_SET_1) {
2987 int port = BP_PORT(bp);
2990 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 :
2991 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1);
2993 val = REG_RD(bp, reg_offset);
2994 val &= ~(attn & HW_INTERRUT_ASSERT_SET_1);
2995 REG_WR(bp, reg_offset, val);
2997 BNX2X_ERR("FATAL HW block attention set1 0x%x\n",
2998 (u32)(attn & HW_INTERRUT_ASSERT_SET_1));
3003 static inline void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn)
3007 if (attn & AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) {
3009 val = REG_RD(bp, CFC_REG_CFC_INT_STS_CLR);
3010 BNX2X_ERR("CFC hw attention 0x%x\n", val);
3011 /* CFC error attention */
3013 BNX2X_ERR("FATAL error from CFC\n");
3016 if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) {
3018 val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_0);
3019 BNX2X_ERR("PXP hw attention 0x%x\n", val);
3020 /* RQ_USDMDP_FIFO_OVERFLOW */
3022 BNX2X_ERR("FATAL error from PXP\n");
3025 if (attn & HW_INTERRUT_ASSERT_SET_2) {
3027 int port = BP_PORT(bp);
3030 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 :
3031 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2);
3033 val = REG_RD(bp, reg_offset);
3034 val &= ~(attn & HW_INTERRUT_ASSERT_SET_2);
3035 REG_WR(bp, reg_offset, val);
3037 BNX2X_ERR("FATAL HW block attention set2 0x%x\n",
3038 (u32)(attn & HW_INTERRUT_ASSERT_SET_2));
3043 static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
3047 if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) {
3049 if (attn & BNX2X_PMF_LINK_ASSERT) {
3050 int func = BP_FUNC(bp);
3052 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
3053 bp->mf_config = SHMEM_RD(bp,
3054 mf_cfg.func_mf_config[func].config);
3055 val = SHMEM_RD(bp, func_mb[func].drv_status);
3056 if (val & DRV_STATUS_DCC_EVENT_MASK)
3058 (val & DRV_STATUS_DCC_EVENT_MASK));
3059 bnx2x__link_status_update(bp);
3060 if ((bp->port.pmf == 0) && (val & DRV_STATUS_PMF))
3061 bnx2x_pmf_update(bp);
3063 } else if (attn & BNX2X_MC_ASSERT_BITS) {
3065 BNX2X_ERR("MC assert!\n");
3066 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_10, 0);
3067 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_9, 0);
3068 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_8, 0);
3069 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_7, 0);
3072 } else if (attn & BNX2X_MCP_ASSERT) {
3074 BNX2X_ERR("MCP assert!\n");
3075 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_11, 0);
3079 BNX2X_ERR("Unknown HW assert! (attn 0x%x)\n", attn);
3082 if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) {
3083 BNX2X_ERR("LATCHED attention 0x%08x (masked)\n", attn);
3084 if (attn & BNX2X_GRC_TIMEOUT) {
3085 val = CHIP_IS_E1H(bp) ?
3086 REG_RD(bp, MISC_REG_GRC_TIMEOUT_ATTN) : 0;
3087 BNX2X_ERR("GRC time-out 0x%08x\n", val);
3089 if (attn & BNX2X_GRC_RSV) {
3090 val = CHIP_IS_E1H(bp) ?
3091 REG_RD(bp, MISC_REG_GRC_RSV_ATTN) : 0;
3092 BNX2X_ERR("GRC reserved 0x%08x\n", val);
3094 REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff);
3098 static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
3100 struct attn_route attn;
3101 struct attn_route group_mask;
3102 int port = BP_PORT(bp);
3108 /* need to take HW lock because MCP or other port might also
3109 try to handle this event */
3110 bnx2x_acquire_alr(bp);
3112 attn.sig[0] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
3113 attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
3114 attn.sig[2] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
3115 attn.sig[3] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
3116 DP(NETIF_MSG_HW, "attn: %08x %08x %08x %08x\n",
3117 attn.sig[0], attn.sig[1], attn.sig[2], attn.sig[3]);
3119 for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
3120 if (deasserted & (1 << index)) {
3121 group_mask = bp->attn_group[index];
3123 DP(NETIF_MSG_HW, "group[%d]: %08x %08x %08x %08x\n",
3124 index, group_mask.sig[0], group_mask.sig[1],
3125 group_mask.sig[2], group_mask.sig[3]);
3127 bnx2x_attn_int_deasserted3(bp,
3128 attn.sig[3] & group_mask.sig[3]);
3129 bnx2x_attn_int_deasserted1(bp,
3130 attn.sig[1] & group_mask.sig[1]);
3131 bnx2x_attn_int_deasserted2(bp,
3132 attn.sig[2] & group_mask.sig[2]);
3133 bnx2x_attn_int_deasserted0(bp,
3134 attn.sig[0] & group_mask.sig[0]);
3136 if ((attn.sig[0] & group_mask.sig[0] &
3137 HW_PRTY_ASSERT_SET_0) ||
3138 (attn.sig[1] & group_mask.sig[1] &
3139 HW_PRTY_ASSERT_SET_1) ||
3140 (attn.sig[2] & group_mask.sig[2] &
3141 HW_PRTY_ASSERT_SET_2))
3142 BNX2X_ERR("FATAL HW block parity attention\n");
3146 bnx2x_release_alr(bp);
3148 reg_addr = (HC_REG_COMMAND_REG + port*32 + COMMAND_REG_ATTN_BITS_CLR);
3151 DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
3153 REG_WR(bp, reg_addr, val);
3155 if (~bp->attn_state & deasserted)
3156 BNX2X_ERR("IGU ERROR\n");
3158 reg_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
3159 MISC_REG_AEU_MASK_ATTN_FUNC_0;
3161 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
3162 aeu_mask = REG_RD(bp, reg_addr);
3164 DP(NETIF_MSG_HW, "aeu_mask %x newly deasserted %x\n",
3165 aeu_mask, deasserted);
3166 aeu_mask |= (deasserted & 0xff);
3167 DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
3169 REG_WR(bp, reg_addr, aeu_mask);
3170 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
3172 DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
3173 bp->attn_state &= ~deasserted;
3174 DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
3177 static void bnx2x_attn_int(struct bnx2x *bp)
3179 /* read local copy of bits */
3180 u32 attn_bits = le32_to_cpu(bp->def_status_blk->atten_status_block.
3182 u32 attn_ack = le32_to_cpu(bp->def_status_blk->atten_status_block.
3184 u32 attn_state = bp->attn_state;
3186 /* look for changed bits */
3187 u32 asserted = attn_bits & ~attn_ack & ~attn_state;
3188 u32 deasserted = ~attn_bits & attn_ack & attn_state;
3191 "attn_bits %x attn_ack %x asserted %x deasserted %x\n",
3192 attn_bits, attn_ack, asserted, deasserted);
3194 if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state))
3195 BNX2X_ERR("BAD attention state\n");
3197 /* handle bits that were raised */
3199 bnx2x_attn_int_asserted(bp, asserted);
3202 bnx2x_attn_int_deasserted(bp, deasserted);
3205 static void bnx2x_sp_task(struct work_struct *work)
3207 struct bnx2x *bp = container_of(work, struct bnx2x, sp_task.work);
3211 /* Return here if interrupt is disabled */
3212 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
3213 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
3217 status = bnx2x_update_dsb_idx(bp);
3218 /* if (status == 0) */
3219 /* BNX2X_ERR("spurious slowpath interrupt!\n"); */
3221 DP(NETIF_MSG_INTR, "got a slowpath interrupt (updated %x)\n", status);
3227 bnx2x_ack_sb(bp, DEF_SB_ID, ATTENTION_ID, le16_to_cpu(bp->def_att_idx),
3229 bnx2x_ack_sb(bp, DEF_SB_ID, USTORM_ID, le16_to_cpu(bp->def_u_idx),
3231 bnx2x_ack_sb(bp, DEF_SB_ID, CSTORM_ID, le16_to_cpu(bp->def_c_idx),
3233 bnx2x_ack_sb(bp, DEF_SB_ID, XSTORM_ID, le16_to_cpu(bp->def_x_idx),
3235 bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, le16_to_cpu(bp->def_t_idx),
3240 static irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
3242 struct net_device *dev = dev_instance;
3243 struct bnx2x *bp = netdev_priv(dev);
3245 /* Return here if interrupt is disabled */
3246 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
3247 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
3251 bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, 0, IGU_INT_DISABLE, 0);
3253 #ifdef BNX2X_STOP_ON_ERROR
3254 if (unlikely(bp->panic))
3260 struct cnic_ops *c_ops;
3263 c_ops = rcu_dereference(bp->cnic_ops);
3265 c_ops->cnic_handler(bp->cnic_data, NULL);
3269 queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
3274 /* end of slow path */
3278 /****************************************************************************
3280 ****************************************************************************/
3282 /* sum[hi:lo] += add[hi:lo] */
3283 #define ADD_64(s_hi, a_hi, s_lo, a_lo) \
3286 s_hi += a_hi + ((s_lo < a_lo) ? 1 : 0); \
3289 /* difference = minuend - subtrahend */
3290 #define DIFF_64(d_hi, m_hi, s_hi, d_lo, m_lo, s_lo) \
3292 if (m_lo < s_lo) { \
3294 d_hi = m_hi - s_hi; \
3296 /* we can 'loan' 1 */ \
3298 d_lo = m_lo + (UINT_MAX - s_lo) + 1; \
3300 /* m_hi <= s_hi */ \
3305 /* m_lo >= s_lo */ \
3306 if (m_hi < s_hi) { \
3310 /* m_hi >= s_hi */ \
3311 d_hi = m_hi - s_hi; \
3312 d_lo = m_lo - s_lo; \
3317 #define UPDATE_STAT64(s, t) \
3319 DIFF_64(diff.hi, new->s##_hi, pstats->mac_stx[0].t##_hi, \
3320 diff.lo, new->s##_lo, pstats->mac_stx[0].t##_lo); \
3321 pstats->mac_stx[0].t##_hi = new->s##_hi; \
3322 pstats->mac_stx[0].t##_lo = new->s##_lo; \
3323 ADD_64(pstats->mac_stx[1].t##_hi, diff.hi, \
3324 pstats->mac_stx[1].t##_lo, diff.lo); \
3327 #define UPDATE_STAT64_NIG(s, t) \
3329 DIFF_64(diff.hi, new->s##_hi, old->s##_hi, \
3330 diff.lo, new->s##_lo, old->s##_lo); \
3331 ADD_64(estats->t##_hi, diff.hi, \
3332 estats->t##_lo, diff.lo); \
3335 /* sum[hi:lo] += add */
3336 #define ADD_EXTEND_64(s_hi, s_lo, a) \
3339 s_hi += (s_lo < a) ? 1 : 0; \
3342 #define UPDATE_EXTEND_STAT(s) \
3344 ADD_EXTEND_64(pstats->mac_stx[1].s##_hi, \
3345 pstats->mac_stx[1].s##_lo, \
3349 #define UPDATE_EXTEND_TSTAT(s, t) \
3351 diff = le32_to_cpu(tclient->s) - le32_to_cpu(old_tclient->s); \
3352 old_tclient->s = tclient->s; \
3353 ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3356 #define UPDATE_EXTEND_USTAT(s, t) \
3358 diff = le32_to_cpu(uclient->s) - le32_to_cpu(old_uclient->s); \
3359 old_uclient->s = uclient->s; \
3360 ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3363 #define UPDATE_EXTEND_XSTAT(s, t) \
3365 diff = le32_to_cpu(xclient->s) - le32_to_cpu(old_xclient->s); \
3366 old_xclient->s = xclient->s; \
3367 ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3370 /* minuend -= subtrahend */
3371 #define SUB_64(m_hi, s_hi, m_lo, s_lo) \
3373 DIFF_64(m_hi, m_hi, s_hi, m_lo, m_lo, s_lo); \
3376 /* minuend[hi:lo] -= subtrahend */
3377 #define SUB_EXTEND_64(m_hi, m_lo, s) \
3379 SUB_64(m_hi, 0, m_lo, s); \
3382 #define SUB_EXTEND_USTAT(s, t) \
3384 diff = le32_to_cpu(uclient->s) - le32_to_cpu(old_uclient->s); \
3385 SUB_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3389 * General service functions
3392 static inline long bnx2x_hilo(u32 *hiref)
3394 u32 lo = *(hiref + 1);
3395 #if (BITS_PER_LONG == 64)
3398 return HILO_U64(hi, lo);
3405 * Init service functions
3408 static void bnx2x_storm_stats_post(struct bnx2x *bp)
3410 if (!bp->stats_pending) {
3411 struct eth_query_ramrod_data ramrod_data = {0};
3414 ramrod_data.drv_counter = bp->stats_counter++;
3415 ramrod_data.collect_port = bp->port.pmf ? 1 : 0;
3416 for_each_queue(bp, i)
3417 ramrod_data.ctr_id_vector |= (1 << bp->fp[i].cl_id);
3419 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_STAT_QUERY, 0,
3420 ((u32 *)&ramrod_data)[1],
3421 ((u32 *)&ramrod_data)[0], 0);
3423 /* stats ramrod has it's own slot on the spq */
3425 bp->stats_pending = 1;
3430 static void bnx2x_hw_stats_post(struct bnx2x *bp)
3432 struct dmae_command *dmae = &bp->stats_dmae;
3433 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3435 *stats_comp = DMAE_COMP_VAL;
3436 if (CHIP_REV_IS_SLOW(bp))
3440 if (bp->executer_idx) {
3441 int loader_idx = PMF_DMAE_C(bp);
3443 memset(dmae, 0, sizeof(struct dmae_command));
3445 dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3446 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3447 DMAE_CMD_DST_RESET |
3449 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3451 DMAE_CMD_ENDIANITY_DW_SWAP |
3453 (BP_PORT(bp) ? DMAE_CMD_PORT_1 :
3455 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3456 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, dmae[0]));
3457 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, dmae[0]));
3458 dmae->dst_addr_lo = (DMAE_REG_CMD_MEM +
3459 sizeof(struct dmae_command) *
3460 (loader_idx + 1)) >> 2;
3461 dmae->dst_addr_hi = 0;
3462 dmae->len = sizeof(struct dmae_command) >> 2;
3465 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx + 1] >> 2;
3466 dmae->comp_addr_hi = 0;
3470 bnx2x_post_dmae(bp, dmae, loader_idx);
3472 } else if (bp->func_stx) {
3474 bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
3478 static int bnx2x_stats_comp(struct bnx2x *bp)
3480 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3484 while (*stats_comp != DMAE_COMP_VAL) {
3486 BNX2X_ERR("timeout waiting for stats finished\n");
3496 * Statistics service functions
3499 static void bnx2x_stats_pmf_update(struct bnx2x *bp)
3501 struct dmae_command *dmae;
3503 int loader_idx = PMF_DMAE_C(bp);
3504 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3507 if (!IS_E1HMF(bp) || !bp->port.pmf || !bp->port.port_stx) {
3508 BNX2X_ERR("BUG!\n");
3512 bp->executer_idx = 0;
3514 opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3516 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3518 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3520 DMAE_CMD_ENDIANITY_DW_SWAP |
3522 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3523 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3525 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3526 dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
3527 dmae->src_addr_lo = bp->port.port_stx >> 2;
3528 dmae->src_addr_hi = 0;
3529 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3530 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3531 dmae->len = DMAE_LEN32_RD_MAX;
3532 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3533 dmae->comp_addr_hi = 0;
3536 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3537 dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
3538 dmae->src_addr_lo = (bp->port.port_stx >> 2) + DMAE_LEN32_RD_MAX;
3539 dmae->src_addr_hi = 0;
3540 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats) +
3541 DMAE_LEN32_RD_MAX * 4);
3542 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats) +
3543 DMAE_LEN32_RD_MAX * 4);
3544 dmae->len = (sizeof(struct host_port_stats) >> 2) - DMAE_LEN32_RD_MAX;
3545 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3546 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3547 dmae->comp_val = DMAE_COMP_VAL;
3550 bnx2x_hw_stats_post(bp);
3551 bnx2x_stats_comp(bp);
3554 static void bnx2x_port_stats_init(struct bnx2x *bp)
3556 struct dmae_command *dmae;
3557 int port = BP_PORT(bp);
3558 int vn = BP_E1HVN(bp);
3560 int loader_idx = PMF_DMAE_C(bp);
3562 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3565 if (!bp->link_vars.link_up || !bp->port.pmf) {
3566 BNX2X_ERR("BUG!\n");
3570 bp->executer_idx = 0;
3573 opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3574 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3575 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3577 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3579 DMAE_CMD_ENDIANITY_DW_SWAP |
3581 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3582 (vn << DMAE_CMD_E1HVN_SHIFT));
3584 if (bp->port.port_stx) {
3586 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3587 dmae->opcode = opcode;
3588 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3589 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3590 dmae->dst_addr_lo = bp->port.port_stx >> 2;
3591 dmae->dst_addr_hi = 0;
3592 dmae->len = sizeof(struct host_port_stats) >> 2;
3593 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3594 dmae->comp_addr_hi = 0;
3600 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3601 dmae->opcode = opcode;
3602 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
3603 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
3604 dmae->dst_addr_lo = bp->func_stx >> 2;
3605 dmae->dst_addr_hi = 0;
3606 dmae->len = sizeof(struct host_func_stats) >> 2;
3607 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3608 dmae->comp_addr_hi = 0;
3613 opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3614 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3615 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3617 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3619 DMAE_CMD_ENDIANITY_DW_SWAP |
3621 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3622 (vn << DMAE_CMD_E1HVN_SHIFT));
3624 if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
3626 mac_addr = (port ? NIG_REG_INGRESS_BMAC1_MEM :
3627 NIG_REG_INGRESS_BMAC0_MEM);
3629 /* BIGMAC_REGISTER_TX_STAT_GTPKT ..
3630 BIGMAC_REGISTER_TX_STAT_GTBYT */
3631 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3632 dmae->opcode = opcode;
3633 dmae->src_addr_lo = (mac_addr +
3634 BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
3635 dmae->src_addr_hi = 0;
3636 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
3637 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
3638 dmae->len = (8 + BIGMAC_REGISTER_TX_STAT_GTBYT -
3639 BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
3640 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3641 dmae->comp_addr_hi = 0;
3644 /* BIGMAC_REGISTER_RX_STAT_GR64 ..
3645 BIGMAC_REGISTER_RX_STAT_GRIPJ */
3646 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3647 dmae->opcode = opcode;
3648 dmae->src_addr_lo = (mac_addr +
3649 BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
3650 dmae->src_addr_hi = 0;
3651 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
3652 offsetof(struct bmac_stats, rx_stat_gr64_lo));
3653 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
3654 offsetof(struct bmac_stats, rx_stat_gr64_lo));
3655 dmae->len = (8 + BIGMAC_REGISTER_RX_STAT_GRIPJ -
3656 BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
3657 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3658 dmae->comp_addr_hi = 0;
3661 } else if (bp->link_vars.mac_type == MAC_TYPE_EMAC) {
3663 mac_addr = (port ? GRCBASE_EMAC1 : GRCBASE_EMAC0);
3665 /* EMAC_REG_EMAC_RX_STAT_AC (EMAC_REG_EMAC_RX_STAT_AC_COUNT)*/
3666 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3667 dmae->opcode = opcode;
3668 dmae->src_addr_lo = (mac_addr +
3669 EMAC_REG_EMAC_RX_STAT_AC) >> 2;
3670 dmae->src_addr_hi = 0;
3671 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
3672 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
3673 dmae->len = EMAC_REG_EMAC_RX_STAT_AC_COUNT;
3674 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3675 dmae->comp_addr_hi = 0;
3678 /* EMAC_REG_EMAC_RX_STAT_AC_28 */
3679 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3680 dmae->opcode = opcode;
3681 dmae->src_addr_lo = (mac_addr +
3682 EMAC_REG_EMAC_RX_STAT_AC_28) >> 2;
3683 dmae->src_addr_hi = 0;
3684 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
3685 offsetof(struct emac_stats, rx_stat_falsecarriererrors));
3686 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
3687 offsetof(struct emac_stats, rx_stat_falsecarriererrors));
3689 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3690 dmae->comp_addr_hi = 0;
3693 /* EMAC_REG_EMAC_TX_STAT_AC (EMAC_REG_EMAC_TX_STAT_AC_COUNT)*/
3694 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3695 dmae->opcode = opcode;
3696 dmae->src_addr_lo = (mac_addr +
3697 EMAC_REG_EMAC_TX_STAT_AC) >> 2;
3698 dmae->src_addr_hi = 0;
3699 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
3700 offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
3701 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
3702 offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
3703 dmae->len = EMAC_REG_EMAC_TX_STAT_AC_COUNT;
3704 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3705 dmae->comp_addr_hi = 0;
3710 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3711 dmae->opcode = opcode;
3712 dmae->src_addr_lo = (port ? NIG_REG_STAT1_BRB_DISCARD :
3713 NIG_REG_STAT0_BRB_DISCARD) >> 2;
3714 dmae->src_addr_hi = 0;
3715 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats));
3716 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats));
3717 dmae->len = (sizeof(struct nig_stats) - 4*sizeof(u32)) >> 2;
3718 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3719 dmae->comp_addr_hi = 0;
3722 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3723 dmae->opcode = opcode;
3724 dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT0 :
3725 NIG_REG_STAT0_EGRESS_MAC_PKT0) >> 2;
3726 dmae->src_addr_hi = 0;
3727 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
3728 offsetof(struct nig_stats, egress_mac_pkt0_lo));
3729 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
3730 offsetof(struct nig_stats, egress_mac_pkt0_lo));
3731 dmae->len = (2*sizeof(u32)) >> 2;
3732 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3733 dmae->comp_addr_hi = 0;
3736 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3737 dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3738 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
3739 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3741 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3743 DMAE_CMD_ENDIANITY_DW_SWAP |
3745 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3746 (vn << DMAE_CMD_E1HVN_SHIFT));
3747 dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT1 :
3748 NIG_REG_STAT0_EGRESS_MAC_PKT1) >> 2;
3749 dmae->src_addr_hi = 0;
3750 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
3751 offsetof(struct nig_stats, egress_mac_pkt1_lo));
3752 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
3753 offsetof(struct nig_stats, egress_mac_pkt1_lo));
3754 dmae->len = (2*sizeof(u32)) >> 2;
3755 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3756 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3757 dmae->comp_val = DMAE_COMP_VAL;
3762 static void bnx2x_func_stats_init(struct bnx2x *bp)
3764 struct dmae_command *dmae = &bp->stats_dmae;
3765 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3768 if (!bp->func_stx) {
3769 BNX2X_ERR("BUG!\n");
3773 bp->executer_idx = 0;
3774 memset(dmae, 0, sizeof(struct dmae_command));
3776 dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3777 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
3778 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3780 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3782 DMAE_CMD_ENDIANITY_DW_SWAP |
3784 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3785 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3786 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
3787 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
3788 dmae->dst_addr_lo = bp->func_stx >> 2;
3789 dmae->dst_addr_hi = 0;
3790 dmae->len = sizeof(struct host_func_stats) >> 2;
3791 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3792 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3793 dmae->comp_val = DMAE_COMP_VAL;
3798 static void bnx2x_stats_start(struct bnx2x *bp)
3801 bnx2x_port_stats_init(bp);
3803 else if (bp->func_stx)
3804 bnx2x_func_stats_init(bp);
3806 bnx2x_hw_stats_post(bp);
3807 bnx2x_storm_stats_post(bp);
3810 static void bnx2x_stats_pmf_start(struct bnx2x *bp)
3812 bnx2x_stats_comp(bp);
3813 bnx2x_stats_pmf_update(bp);
3814 bnx2x_stats_start(bp);
3817 static void bnx2x_stats_restart(struct bnx2x *bp)
3819 bnx2x_stats_comp(bp);
3820 bnx2x_stats_start(bp);
3823 static void bnx2x_bmac_stats_update(struct bnx2x *bp)
3825 struct bmac_stats *new = bnx2x_sp(bp, mac_stats.bmac_stats);
3826 struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3827 struct bnx2x_eth_stats *estats = &bp->eth_stats;
3833 UPDATE_STAT64(rx_stat_grerb, rx_stat_ifhcinbadoctets);
3834 UPDATE_STAT64(rx_stat_grfcs, rx_stat_dot3statsfcserrors);
3835 UPDATE_STAT64(rx_stat_grund, rx_stat_etherstatsundersizepkts);
3836 UPDATE_STAT64(rx_stat_grovr, rx_stat_dot3statsframestoolong);
3837 UPDATE_STAT64(rx_stat_grfrg, rx_stat_etherstatsfragments);
3838 UPDATE_STAT64(rx_stat_grjbr, rx_stat_etherstatsjabbers);
3839 UPDATE_STAT64(rx_stat_grxcf, rx_stat_maccontrolframesreceived);
3840 UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffstateentered);
3841 UPDATE_STAT64(rx_stat_grxpf, rx_stat_bmac_xpf);
3842 UPDATE_STAT64(tx_stat_gtxpf, tx_stat_outxoffsent);
3843 UPDATE_STAT64(tx_stat_gtxpf, tx_stat_flowcontroldone);
3844 UPDATE_STAT64(tx_stat_gt64, tx_stat_etherstatspkts64octets);
3845 UPDATE_STAT64(tx_stat_gt127,
3846 tx_stat_etherstatspkts65octetsto127octets);
3847 UPDATE_STAT64(tx_stat_gt255,
3848 tx_stat_etherstatspkts128octetsto255octets);
3849 UPDATE_STAT64(tx_stat_gt511,
3850 tx_stat_etherstatspkts256octetsto511octets);
3851 UPDATE_STAT64(tx_stat_gt1023,
3852 tx_stat_etherstatspkts512octetsto1023octets);
3853 UPDATE_STAT64(tx_stat_gt1518,
3854 tx_stat_etherstatspkts1024octetsto1522octets);
3855 UPDATE_STAT64(tx_stat_gt2047, tx_stat_bmac_2047);
3856 UPDATE_STAT64(tx_stat_gt4095, tx_stat_bmac_4095);
3857 UPDATE_STAT64(tx_stat_gt9216, tx_stat_bmac_9216);
3858 UPDATE_STAT64(tx_stat_gt16383, tx_stat_bmac_16383);
3859 UPDATE_STAT64(tx_stat_gterr,
3860 tx_stat_dot3statsinternalmactransmiterrors);
3861 UPDATE_STAT64(tx_stat_gtufl, tx_stat_bmac_ufl);
3863 estats->pause_frames_received_hi =
3864 pstats->mac_stx[1].rx_stat_bmac_xpf_hi;
3865 estats->pause_frames_received_lo =
3866 pstats->mac_stx[1].rx_stat_bmac_xpf_lo;
3868 estats->pause_frames_sent_hi =
3869 pstats->mac_stx[1].tx_stat_outxoffsent_hi;
3870 estats->pause_frames_sent_lo =
3871 pstats->mac_stx[1].tx_stat_outxoffsent_lo;
3874 static void bnx2x_emac_stats_update(struct bnx2x *bp)
3876 struct emac_stats *new = bnx2x_sp(bp, mac_stats.emac_stats);
3877 struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3878 struct bnx2x_eth_stats *estats = &bp->eth_stats;
3880 UPDATE_EXTEND_STAT(rx_stat_ifhcinbadoctets);
3881 UPDATE_EXTEND_STAT(tx_stat_ifhcoutbadoctets);
3882 UPDATE_EXTEND_STAT(rx_stat_dot3statsfcserrors);
3883 UPDATE_EXTEND_STAT(rx_stat_dot3statsalignmenterrors);
3884 UPDATE_EXTEND_STAT(rx_stat_dot3statscarriersenseerrors);
3885 UPDATE_EXTEND_STAT(rx_stat_falsecarriererrors);
3886 UPDATE_EXTEND_STAT(rx_stat_etherstatsundersizepkts);
3887 UPDATE_EXTEND_STAT(rx_stat_dot3statsframestoolong);
3888 UPDATE_EXTEND_STAT(rx_stat_etherstatsfragments);
3889 UPDATE_EXTEND_STAT(rx_stat_etherstatsjabbers);
3890 UPDATE_EXTEND_STAT(rx_stat_maccontrolframesreceived);
3891 UPDATE_EXTEND_STAT(rx_stat_xoffstateentered);
3892 UPDATE_EXTEND_STAT(rx_stat_xonpauseframesreceived);
3893 UPDATE_EXTEND_STAT(rx_stat_xoffpauseframesreceived);
3894 UPDATE_EXTEND_STAT(tx_stat_outxonsent);
3895 UPDATE_EXTEND_STAT(tx_stat_outxoffsent);
3896 UPDATE_EXTEND_STAT(tx_stat_flowcontroldone);
3897 UPDATE_EXTEND_STAT(tx_stat_etherstatscollisions);
3898 UPDATE_EXTEND_STAT(tx_stat_dot3statssinglecollisionframes);
3899 UPDATE_EXTEND_STAT(tx_stat_dot3statsmultiplecollisionframes);
3900 UPDATE_EXTEND_STAT(tx_stat_dot3statsdeferredtransmissions);
3901 UPDATE_EXTEND_STAT(tx_stat_dot3statsexcessivecollisions);
3902 UPDATE_EXTEND_STAT(tx_stat_dot3statslatecollisions);
3903 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts64octets);
3904 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts65octetsto127octets);
3905 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts128octetsto255octets);
3906 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts256octetsto511octets);
3907 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts512octetsto1023octets);
3908 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts1024octetsto1522octets);
3909 UPDATE_EXTEND_STAT(tx_stat_etherstatspktsover1522octets);
3910 UPDATE_EXTEND_STAT(tx_stat_dot3statsinternalmactransmiterrors);
3912 estats->pause_frames_received_hi =
3913 pstats->mac_stx[1].rx_stat_xonpauseframesreceived_hi;
3914 estats->pause_frames_received_lo =
3915 pstats->mac_stx[1].rx_stat_xonpauseframesreceived_lo;
3916 ADD_64(estats->pause_frames_received_hi,
3917 pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_hi,
3918 estats->pause_frames_received_lo,
3919 pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_lo);
3921 estats->pause_frames_sent_hi =
3922 pstats->mac_stx[1].tx_stat_outxonsent_hi;
3923 estats->pause_frames_sent_lo =
3924 pstats->mac_stx[1].tx_stat_outxonsent_lo;
3925 ADD_64(estats->pause_frames_sent_hi,
3926 pstats->mac_stx[1].tx_stat_outxoffsent_hi,
3927 estats->pause_frames_sent_lo,
3928 pstats->mac_stx[1].tx_stat_outxoffsent_lo);
3931 static int bnx2x_hw_stats_update(struct bnx2x *bp)
3933 struct nig_stats *new = bnx2x_sp(bp, nig_stats);
3934 struct nig_stats *old = &(bp->port.old_nig_stats);
3935 struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3936 struct bnx2x_eth_stats *estats = &bp->eth_stats;
3943 if (bp->link_vars.mac_type == MAC_TYPE_BMAC)
3944 bnx2x_bmac_stats_update(bp);
3946 else if (bp->link_vars.mac_type == MAC_TYPE_EMAC)
3947 bnx2x_emac_stats_update(bp);
3949 else { /* unreached */
3950 BNX2X_ERR("stats updated by DMAE but no MAC active\n");
3954 ADD_EXTEND_64(pstats->brb_drop_hi, pstats->brb_drop_lo,
3955 new->brb_discard - old->brb_discard);
3956 ADD_EXTEND_64(estats->brb_truncate_hi, estats->brb_truncate_lo,
3957 new->brb_truncate - old->brb_truncate);
3959 UPDATE_STAT64_NIG(egress_mac_pkt0,
3960 etherstatspkts1024octetsto1522octets);
3961 UPDATE_STAT64_NIG(egress_mac_pkt1, etherstatspktsover1522octets);
3963 memcpy(old, new, sizeof(struct nig_stats));
3965 memcpy(&(estats->rx_stat_ifhcinbadoctets_hi), &(pstats->mac_stx[1]),
3966 sizeof(struct mac_stx));
3967 estats->brb_drop_hi = pstats->brb_drop_hi;
3968 estats->brb_drop_lo = pstats->brb_drop_lo;
3970 pstats->host_port_stats_start = ++pstats->host_port_stats_end;
3972 nig_timer_max = SHMEM_RD(bp, port_mb[BP_PORT(bp)].stat_nig_timer);
3973 if (nig_timer_max != estats->nig_timer_max) {
3974 estats->nig_timer_max = nig_timer_max;
3975 BNX2X_ERR("NIG timer max (%u)\n", estats->nig_timer_max);
3981 static int bnx2x_storm_stats_update(struct bnx2x *bp)
3983 struct eth_stats_query *stats = bnx2x_sp(bp, fw_stats);
3984 struct tstorm_per_port_stats *tport =
3985 &stats->tstorm_common.port_statistics;
3986 struct host_func_stats *fstats = bnx2x_sp(bp, func_stats);
3987 struct bnx2x_eth_stats *estats = &bp->eth_stats;
3990 memcpy(&(fstats->total_bytes_received_hi),
3991 &(bnx2x_sp(bp, func_stats_base)->total_bytes_received_hi),
3992 sizeof(struct host_func_stats) - 2*sizeof(u32));
3993 estats->error_bytes_received_hi = 0;
3994 estats->error_bytes_received_lo = 0;
3995 estats->etherstatsoverrsizepkts_hi = 0;
3996 estats->etherstatsoverrsizepkts_lo = 0;
3997 estats->no_buff_discard_hi = 0;
3998 estats->no_buff_discard_lo = 0;
4000 for_each_queue(bp, i) {
4001 struct bnx2x_fastpath *fp = &bp->fp[i];
4002 int cl_id = fp->cl_id;
4003 struct tstorm_per_client_stats *tclient =
4004 &stats->tstorm_common.client_statistics[cl_id];
4005 struct tstorm_per_client_stats *old_tclient = &fp->old_tclient;
4006 struct ustorm_per_client_stats *uclient =
4007 &stats->ustorm_common.client_statistics[cl_id];
4008 struct ustorm_per_client_stats *old_uclient = &fp->old_uclient;
4009 struct xstorm_per_client_stats *xclient =
4010 &stats->xstorm_common.client_statistics[cl_id];
4011 struct xstorm_per_client_stats *old_xclient = &fp->old_xclient;
4012 struct bnx2x_eth_q_stats *qstats = &fp->eth_q_stats;
4015 /* are storm stats valid? */
4016 if ((u16)(le16_to_cpu(xclient->stats_counter) + 1) !=
4017 bp->stats_counter) {
4018 DP(BNX2X_MSG_STATS, "[%d] stats not updated by xstorm"
4019 " xstorm counter (%d) != stats_counter (%d)\n",
4020 i, xclient->stats_counter, bp->stats_counter);
4023 if ((u16)(le16_to_cpu(tclient->stats_counter) + 1) !=
4024 bp->stats_counter) {
4025 DP(BNX2X_MSG_STATS, "[%d] stats not updated by tstorm"
4026 " tstorm counter (%d) != stats_counter (%d)\n",
4027 i, tclient->stats_counter, bp->stats_counter);
4030 if ((u16)(le16_to_cpu(uclient->stats_counter) + 1) !=
4031 bp->stats_counter) {
4032 DP(BNX2X_MSG_STATS, "[%d] stats not updated by ustorm"
4033 " ustorm counter (%d) != stats_counter (%d)\n",
4034 i, uclient->stats_counter, bp->stats_counter);
4038 qstats->total_bytes_received_hi =
4039 le32_to_cpu(tclient->rcv_broadcast_bytes.hi);
4040 qstats->total_bytes_received_lo =
4041 le32_to_cpu(tclient->rcv_broadcast_bytes.lo);
4043 ADD_64(qstats->total_bytes_received_hi,
4044 le32_to_cpu(tclient->rcv_multicast_bytes.hi),
4045 qstats->total_bytes_received_lo,
4046 le32_to_cpu(tclient->rcv_multicast_bytes.lo));
4048 ADD_64(qstats->total_bytes_received_hi,
4049 le32_to_cpu(tclient->rcv_unicast_bytes.hi),
4050 qstats->total_bytes_received_lo,
4051 le32_to_cpu(tclient->rcv_unicast_bytes.lo));
4053 qstats->valid_bytes_received_hi =
4054 qstats->total_bytes_received_hi;
4055 qstats->valid_bytes_received_lo =
4056 qstats->total_bytes_received_lo;
4058 qstats->error_bytes_received_hi =
4059 le32_to_cpu(tclient->rcv_error_bytes.hi);
4060 qstats->error_bytes_received_lo =
4061 le32_to_cpu(tclient->rcv_error_bytes.lo);
4063 ADD_64(qstats->total_bytes_received_hi,
4064 qstats->error_bytes_received_hi,
4065 qstats->total_bytes_received_lo,
4066 qstats->error_bytes_received_lo);
4068 UPDATE_EXTEND_TSTAT(rcv_unicast_pkts,
4069 total_unicast_packets_received);
4070 UPDATE_EXTEND_TSTAT(rcv_multicast_pkts,
4071 total_multicast_packets_received);
4072 UPDATE_EXTEND_TSTAT(rcv_broadcast_pkts,
4073 total_broadcast_packets_received);
4074 UPDATE_EXTEND_TSTAT(packets_too_big_discard,
4075 etherstatsoverrsizepkts);
4076 UPDATE_EXTEND_TSTAT(no_buff_discard, no_buff_discard);
4078 SUB_EXTEND_USTAT(ucast_no_buff_pkts,
4079 total_unicast_packets_received);
4080 SUB_EXTEND_USTAT(mcast_no_buff_pkts,
4081 total_multicast_packets_received);
4082 SUB_EXTEND_USTAT(bcast_no_buff_pkts,
4083 total_broadcast_packets_received);
4084 UPDATE_EXTEND_USTAT(ucast_no_buff_pkts, no_buff_discard);
4085 UPDATE_EXTEND_USTAT(mcast_no_buff_pkts, no_buff_discard);
4086 UPDATE_EXTEND_USTAT(bcast_no_buff_pkts, no_buff_discard);
4088 qstats->total_bytes_transmitted_hi =
4089 le32_to_cpu(xclient->unicast_bytes_sent.hi);
4090 qstats->total_bytes_transmitted_lo =
4091 le32_to_cpu(xclient->unicast_bytes_sent.lo);
4093 ADD_64(qstats->total_bytes_transmitted_hi,
4094 le32_to_cpu(xclient->multicast_bytes_sent.hi),
4095 qstats->total_bytes_transmitted_lo,
4096 le32_to_cpu(xclient->multicast_bytes_sent.lo));
4098 ADD_64(qstats->total_bytes_transmitted_hi,
4099 le32_to_cpu(xclient->broadcast_bytes_sent.hi),
4100 qstats->total_bytes_transmitted_lo,
4101 le32_to_cpu(xclient->broadcast_bytes_sent.lo));
4103 UPDATE_EXTEND_XSTAT(unicast_pkts_sent,
4104 total_unicast_packets_transmitted);
4105 UPDATE_EXTEND_XSTAT(multicast_pkts_sent,
4106 total_multicast_packets_transmitted);
4107 UPDATE_EXTEND_XSTAT(broadcast_pkts_sent,
4108 total_broadcast_packets_transmitted);
4110 old_tclient->checksum_discard = tclient->checksum_discard;
4111 old_tclient->ttl0_discard = tclient->ttl0_discard;
4113 ADD_64(fstats->total_bytes_received_hi,
4114 qstats->total_bytes_received_hi,
4115 fstats->total_bytes_received_lo,
4116 qstats->total_bytes_received_lo);
4117 ADD_64(fstats->total_bytes_transmitted_hi,
4118 qstats->total_bytes_transmitted_hi,
4119 fstats->total_bytes_transmitted_lo,
4120 qstats->total_bytes_transmitted_lo);
4121 ADD_64(fstats->total_unicast_packets_received_hi,
4122 qstats->total_unicast_packets_received_hi,
4123 fstats->total_unicast_packets_received_lo,
4124 qstats->total_unicast_packets_received_lo);
4125 ADD_64(fstats->total_multicast_packets_received_hi,
4126 qstats->total_multicast_packets_received_hi,
4127 fstats->total_multicast_packets_received_lo,
4128 qstats->total_multicast_packets_received_lo);
4129 ADD_64(fstats->total_broadcast_packets_received_hi,
4130 qstats->total_broadcast_packets_received_hi,
4131 fstats->total_broadcast_packets_received_lo,
4132 qstats->total_broadcast_packets_received_lo);
4133 ADD_64(fstats->total_unicast_packets_transmitted_hi,
4134 qstats->total_unicast_packets_transmitted_hi,
4135 fstats->total_unicast_packets_transmitted_lo,
4136 qstats->total_unicast_packets_transmitted_lo);
4137 ADD_64(fstats->total_multicast_packets_transmitted_hi,
4138 qstats->total_multicast_packets_transmitted_hi,
4139 fstats->total_multicast_packets_transmitted_lo,
4140 qstats->total_multicast_packets_transmitted_lo);
4141 ADD_64(fstats->total_broadcast_packets_transmitted_hi,
4142 qstats->total_broadcast_packets_transmitted_hi,
4143 fstats->total_broadcast_packets_transmitted_lo,
4144 qstats->total_broadcast_packets_transmitted_lo);
4145 ADD_64(fstats->valid_bytes_received_hi,
4146 qstats->valid_bytes_received_hi,
4147 fstats->valid_bytes_received_lo,
4148 qstats->valid_bytes_received_lo);
4150 ADD_64(estats->error_bytes_received_hi,
4151 qstats->error_bytes_received_hi,
4152 estats->error_bytes_received_lo,
4153 qstats->error_bytes_received_lo);
4154 ADD_64(estats->etherstatsoverrsizepkts_hi,
4155 qstats->etherstatsoverrsizepkts_hi,
4156 estats->etherstatsoverrsizepkts_lo,
4157 qstats->etherstatsoverrsizepkts_lo);
4158 ADD_64(estats->no_buff_discard_hi, qstats->no_buff_discard_hi,
4159 estats->no_buff_discard_lo, qstats->no_buff_discard_lo);
4162 ADD_64(fstats->total_bytes_received_hi,
4163 estats->rx_stat_ifhcinbadoctets_hi,
4164 fstats->total_bytes_received_lo,
4165 estats->rx_stat_ifhcinbadoctets_lo);
4167 memcpy(estats, &(fstats->total_bytes_received_hi),
4168 sizeof(struct host_func_stats) - 2*sizeof(u32));
4170 ADD_64(estats->etherstatsoverrsizepkts_hi,
4171 estats->rx_stat_dot3statsframestoolong_hi,
4172 estats->etherstatsoverrsizepkts_lo,
4173 estats->rx_stat_dot3statsframestoolong_lo);
4174 ADD_64(estats->error_bytes_received_hi,
4175 estats->rx_stat_ifhcinbadoctets_hi,
4176 estats->error_bytes_received_lo,
4177 estats->rx_stat_ifhcinbadoctets_lo);
4180 estats->mac_filter_discard =
4181 le32_to_cpu(tport->mac_filter_discard);
4182 estats->xxoverflow_discard =
4183 le32_to_cpu(tport->xxoverflow_discard);
4184 estats->brb_truncate_discard =
4185 le32_to_cpu(tport->brb_truncate_discard);
4186 estats->mac_discard = le32_to_cpu(tport->mac_discard);
4189 fstats->host_func_stats_start = ++fstats->host_func_stats_end;
4191 bp->stats_pending = 0;
4196 static void bnx2x_net_stats_update(struct bnx2x *bp)
4198 struct bnx2x_eth_stats *estats = &bp->eth_stats;
4199 struct net_device_stats *nstats = &bp->dev->stats;
4202 nstats->rx_packets =
4203 bnx2x_hilo(&estats->total_unicast_packets_received_hi) +
4204 bnx2x_hilo(&estats->total_multicast_packets_received_hi) +
4205 bnx2x_hilo(&estats->total_broadcast_packets_received_hi);
4207 nstats->tx_packets =
4208 bnx2x_hilo(&estats->total_unicast_packets_transmitted_hi) +
4209 bnx2x_hilo(&estats->total_multicast_packets_transmitted_hi) +
4210 bnx2x_hilo(&estats->total_broadcast_packets_transmitted_hi);
4212 nstats->rx_bytes = bnx2x_hilo(&estats->total_bytes_received_hi);
4214 nstats->tx_bytes = bnx2x_hilo(&estats->total_bytes_transmitted_hi);
4216 nstats->rx_dropped = estats->mac_discard;
4217 for_each_queue(bp, i)
4218 nstats->rx_dropped +=
4219 le32_to_cpu(bp->fp[i].old_tclient.checksum_discard);
4221 nstats->tx_dropped = 0;
4224 bnx2x_hilo(&estats->total_multicast_packets_received_hi);
4226 nstats->collisions =
4227 bnx2x_hilo(&estats->tx_stat_etherstatscollisions_hi);
4229 nstats->rx_length_errors =
4230 bnx2x_hilo(&estats->rx_stat_etherstatsundersizepkts_hi) +
4231 bnx2x_hilo(&estats->etherstatsoverrsizepkts_hi);
4232 nstats->rx_over_errors = bnx2x_hilo(&estats->brb_drop_hi) +
4233 bnx2x_hilo(&estats->brb_truncate_hi);
4234 nstats->rx_crc_errors =
4235 bnx2x_hilo(&estats->rx_stat_dot3statsfcserrors_hi);
4236 nstats->rx_frame_errors =
4237 bnx2x_hilo(&estats->rx_stat_dot3statsalignmenterrors_hi);
4238 nstats->rx_fifo_errors = bnx2x_hilo(&estats->no_buff_discard_hi);
4239 nstats->rx_missed_errors = estats->xxoverflow_discard;
4241 nstats->rx_errors = nstats->rx_length_errors +
4242 nstats->rx_over_errors +
4243 nstats->rx_crc_errors +
4244 nstats->rx_frame_errors +
4245 nstats->rx_fifo_errors +
4246 nstats->rx_missed_errors;
4248 nstats->tx_aborted_errors =
4249 bnx2x_hilo(&estats->tx_stat_dot3statslatecollisions_hi) +
4250 bnx2x_hilo(&estats->tx_stat_dot3statsexcessivecollisions_hi);
4251 nstats->tx_carrier_errors =
4252 bnx2x_hilo(&estats->rx_stat_dot3statscarriersenseerrors_hi);
4253 nstats->tx_fifo_errors = 0;
4254 nstats->tx_heartbeat_errors = 0;
4255 nstats->tx_window_errors = 0;
4257 nstats->tx_errors = nstats->tx_aborted_errors +
4258 nstats->tx_carrier_errors +
4259 bnx2x_hilo(&estats->tx_stat_dot3statsinternalmactransmiterrors_hi);
4262 static void bnx2x_drv_stats_update(struct bnx2x *bp)
4264 struct bnx2x_eth_stats *estats = &bp->eth_stats;
4267 estats->driver_xoff = 0;
4268 estats->rx_err_discard_pkt = 0;
4269 estats->rx_skb_alloc_failed = 0;
4270 estats->hw_csum_err = 0;
4271 for_each_queue(bp, i) {
4272 struct bnx2x_eth_q_stats *qstats = &bp->fp[i].eth_q_stats;
4274 estats->driver_xoff += qstats->driver_xoff;
4275 estats->rx_err_discard_pkt += qstats->rx_err_discard_pkt;
4276 estats->rx_skb_alloc_failed += qstats->rx_skb_alloc_failed;
4277 estats->hw_csum_err += qstats->hw_csum_err;
4281 static void bnx2x_stats_update(struct bnx2x *bp)
4283 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
4285 if (*stats_comp != DMAE_COMP_VAL)
4289 bnx2x_hw_stats_update(bp);
4291 if (bnx2x_storm_stats_update(bp) && (bp->stats_pending++ == 3)) {
4292 BNX2X_ERR("storm stats were not updated for 3 times\n");
4297 bnx2x_net_stats_update(bp);
4298 bnx2x_drv_stats_update(bp);
4300 if (bp->msglevel & NETIF_MSG_TIMER) {
4301 struct bnx2x_fastpath *fp0_rx = bp->fp;
4302 struct bnx2x_fastpath *fp0_tx = bp->fp;
4303 struct tstorm_per_client_stats *old_tclient =
4304 &bp->fp->old_tclient;
4305 struct bnx2x_eth_q_stats *qstats = &bp->fp->eth_q_stats;
4306 struct bnx2x_eth_stats *estats = &bp->eth_stats;
4307 struct net_device_stats *nstats = &bp->dev->stats;
4310 printk(KERN_DEBUG "%s:\n", bp->dev->name);
4311 printk(KERN_DEBUG " tx avail (%4x) tx hc idx (%x)"
4313 bnx2x_tx_avail(fp0_tx),
4314 le16_to_cpu(*fp0_tx->tx_cons_sb), nstats->tx_packets);
4315 printk(KERN_DEBUG " rx usage (%4x) rx hc idx (%x)"
4317 (u16)(le16_to_cpu(*fp0_rx->rx_cons_sb) -
4318 fp0_rx->rx_comp_cons),
4319 le16_to_cpu(*fp0_rx->rx_cons_sb), nstats->rx_packets);
4320 printk(KERN_DEBUG " %s (Xoff events %u) brb drops %u "
4321 "brb truncate %u\n",
4322 (netif_queue_stopped(bp->dev) ? "Xoff" : "Xon"),
4323 qstats->driver_xoff,
4324 estats->brb_drop_lo, estats->brb_truncate_lo);
4325 printk(KERN_DEBUG "tstats: checksum_discard %u "
4326 "packets_too_big_discard %lu no_buff_discard %lu "
4327 "mac_discard %u mac_filter_discard %u "
4328 "xxovrflow_discard %u brb_truncate_discard %u "
4329 "ttl0_discard %u\n",
4330 le32_to_cpu(old_tclient->checksum_discard),
4331 bnx2x_hilo(&qstats->etherstatsoverrsizepkts_hi),
4332 bnx2x_hilo(&qstats->no_buff_discard_hi),
4333 estats->mac_discard, estats->mac_filter_discard,
4334 estats->xxoverflow_discard, estats->brb_truncate_discard,
4335 le32_to_cpu(old_tclient->ttl0_discard));
4337 for_each_queue(bp, i) {
4338 printk(KERN_DEBUG "[%d]: %lu\t%lu\t%lu\n", i,
4339 bnx2x_fp(bp, i, tx_pkt),
4340 bnx2x_fp(bp, i, rx_pkt),
4341 bnx2x_fp(bp, i, rx_calls));
4345 bnx2x_hw_stats_post(bp);
4346 bnx2x_storm_stats_post(bp);
4349 static void bnx2x_port_stats_stop(struct bnx2x *bp)
4351 struct dmae_command *dmae;
4353 int loader_idx = PMF_DMAE_C(bp);
4354 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
4356 bp->executer_idx = 0;
4358 opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
4360 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
4362 DMAE_CMD_ENDIANITY_B_DW_SWAP |
4364 DMAE_CMD_ENDIANITY_DW_SWAP |
4366 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
4367 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
4369 if (bp->port.port_stx) {
4371 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4373 dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
4375 dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
4376 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
4377 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
4378 dmae->dst_addr_lo = bp->port.port_stx >> 2;
4379 dmae->dst_addr_hi = 0;
4380 dmae->len = sizeof(struct host_port_stats) >> 2;
4382 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
4383 dmae->comp_addr_hi = 0;
4386 dmae->comp_addr_lo =
4387 U64_LO(bnx2x_sp_mapping(bp, stats_comp));
4388 dmae->comp_addr_hi =
4389 U64_HI(bnx2x_sp_mapping(bp, stats_comp));
4390 dmae->comp_val = DMAE_COMP_VAL;
4398 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4399 dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
4400 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
4401 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
4402 dmae->dst_addr_lo = bp->func_stx >> 2;
4403 dmae->dst_addr_hi = 0;
4404 dmae->len = sizeof(struct host_func_stats) >> 2;
4405 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
4406 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
4407 dmae->comp_val = DMAE_COMP_VAL;
4413 static void bnx2x_stats_stop(struct bnx2x *bp)
4417 bnx2x_stats_comp(bp);
4420 update = (bnx2x_hw_stats_update(bp) == 0);
4422 update |= (bnx2x_storm_stats_update(bp) == 0);
4425 bnx2x_net_stats_update(bp);
4428 bnx2x_port_stats_stop(bp);
4430 bnx2x_hw_stats_post(bp);
4431 bnx2x_stats_comp(bp);
4435 static void bnx2x_stats_do_nothing(struct bnx2x *bp)
4439 static const struct {
4440 void (*action)(struct bnx2x *bp);
4441 enum bnx2x_stats_state next_state;
4442 } bnx2x_stats_stm[STATS_STATE_MAX][STATS_EVENT_MAX] = {
4445 /* DISABLED PMF */ {bnx2x_stats_pmf_update, STATS_STATE_DISABLED},
4446 /* LINK_UP */ {bnx2x_stats_start, STATS_STATE_ENABLED},
4447 /* UPDATE */ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED},
4448 /* STOP */ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED}
4451 /* ENABLED PMF */ {bnx2x_stats_pmf_start, STATS_STATE_ENABLED},
4452 /* LINK_UP */ {bnx2x_stats_restart, STATS_STATE_ENABLED},
4453 /* UPDATE */ {bnx2x_stats_update, STATS_STATE_ENABLED},
4454 /* STOP */ {bnx2x_stats_stop, STATS_STATE_DISABLED}
4458 static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event)
4460 enum bnx2x_stats_state state = bp->stats_state;
4462 bnx2x_stats_stm[state][event].action(bp);
4463 bp->stats_state = bnx2x_stats_stm[state][event].next_state;
4465 /* Make sure the state has been "changed" */
4468 if ((event != STATS_EVENT_UPDATE) || (bp->msglevel & NETIF_MSG_TIMER))
4469 DP(BNX2X_MSG_STATS, "state %d -> event %d -> state %d\n",
4470 state, event, bp->stats_state);
4473 static void bnx2x_port_stats_base_init(struct bnx2x *bp)
4475 struct dmae_command *dmae;
4476 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
4479 if (!bp->port.pmf || !bp->port.port_stx) {
4480 BNX2X_ERR("BUG!\n");
4484 bp->executer_idx = 0;
4486 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4487 dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
4488 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
4489 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
4491 DMAE_CMD_ENDIANITY_B_DW_SWAP |
4493 DMAE_CMD_ENDIANITY_DW_SWAP |
4495 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
4496 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
4497 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
4498 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
4499 dmae->dst_addr_lo = bp->port.port_stx >> 2;
4500 dmae->dst_addr_hi = 0;
4501 dmae->len = sizeof(struct host_port_stats) >> 2;
4502 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
4503 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
4504 dmae->comp_val = DMAE_COMP_VAL;
4507 bnx2x_hw_stats_post(bp);
4508 bnx2x_stats_comp(bp);
4511 static void bnx2x_func_stats_base_init(struct bnx2x *bp)
4513 int vn, vn_max = IS_E1HMF(bp) ? E1HVN_MAX : E1VN_MAX;
4514 int port = BP_PORT(bp);
4519 if (!bp->port.pmf || !bp->func_stx) {
4520 BNX2X_ERR("BUG!\n");
4524 /* save our func_stx */
4525 func_stx = bp->func_stx;
4527 for (vn = VN_0; vn < vn_max; vn++) {
4530 bp->func_stx = SHMEM_RD(bp, func_mb[func].fw_mb_param);
4531 bnx2x_func_stats_init(bp);
4532 bnx2x_hw_stats_post(bp);
4533 bnx2x_stats_comp(bp);
4536 /* restore our func_stx */
4537 bp->func_stx = func_stx;
4540 static void bnx2x_func_stats_base_update(struct bnx2x *bp)
4542 struct dmae_command *dmae = &bp->stats_dmae;
4543 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
4546 if (!bp->func_stx) {
4547 BNX2X_ERR("BUG!\n");
4551 bp->executer_idx = 0;
4552 memset(dmae, 0, sizeof(struct dmae_command));
4554 dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
4555 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
4556 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
4558 DMAE_CMD_ENDIANITY_B_DW_SWAP |
4560 DMAE_CMD_ENDIANITY_DW_SWAP |
4562 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
4563 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
4564 dmae->src_addr_lo = bp->func_stx >> 2;
4565 dmae->src_addr_hi = 0;
4566 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats_base));
4567 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats_base));
4568 dmae->len = sizeof(struct host_func_stats) >> 2;
4569 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
4570 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
4571 dmae->comp_val = DMAE_COMP_VAL;
4574 bnx2x_hw_stats_post(bp);
4575 bnx2x_stats_comp(bp);
4578 static void bnx2x_stats_init(struct bnx2x *bp)
4580 int port = BP_PORT(bp);
4581 int func = BP_FUNC(bp);
4584 bp->stats_pending = 0;
4585 bp->executer_idx = 0;
4586 bp->stats_counter = 0;
4588 /* port and func stats for management */
4589 if (!BP_NOMCP(bp)) {
4590 bp->port.port_stx = SHMEM_RD(bp, port_mb[port].port_stx);
4591 bp->func_stx = SHMEM_RD(bp, func_mb[func].fw_mb_param);
4594 bp->port.port_stx = 0;
4597 DP(BNX2X_MSG_STATS, "port_stx 0x%x func_stx 0x%x\n",
4598 bp->port.port_stx, bp->func_stx);
4601 memset(&(bp->port.old_nig_stats), 0, sizeof(struct nig_stats));
4602 bp->port.old_nig_stats.brb_discard =
4603 REG_RD(bp, NIG_REG_STAT0_BRB_DISCARD + port*0x38);
4604 bp->port.old_nig_stats.brb_truncate =
4605 REG_RD(bp, NIG_REG_STAT0_BRB_TRUNCATE + port*0x38);
4606 REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT0 + port*0x50,
4607 &(bp->port.old_nig_stats.egress_mac_pkt0_lo), 2);
4608 REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT1 + port*0x50,
4609 &(bp->port.old_nig_stats.egress_mac_pkt1_lo), 2);
4611 /* function stats */
4612 for_each_queue(bp, i) {
4613 struct bnx2x_fastpath *fp = &bp->fp[i];
4615 memset(&fp->old_tclient, 0,
4616 sizeof(struct tstorm_per_client_stats));
4617 memset(&fp->old_uclient, 0,
4618 sizeof(struct ustorm_per_client_stats));
4619 memset(&fp->old_xclient, 0,
4620 sizeof(struct xstorm_per_client_stats));
4621 memset(&fp->eth_q_stats, 0, sizeof(struct bnx2x_eth_q_stats));
4624 memset(&bp->dev->stats, 0, sizeof(struct net_device_stats));
4625 memset(&bp->eth_stats, 0, sizeof(struct bnx2x_eth_stats));
4627 bp->stats_state = STATS_STATE_DISABLED;
4630 if (bp->port.port_stx)
4631 bnx2x_port_stats_base_init(bp);
4634 bnx2x_func_stats_base_init(bp);
4636 } else if (bp->func_stx)
4637 bnx2x_func_stats_base_update(bp);
4640 static void bnx2x_timer(unsigned long data)
4642 struct bnx2x *bp = (struct bnx2x *) data;
4644 if (!netif_running(bp->dev))
4647 if (atomic_read(&bp->intr_sem) != 0)
4651 struct bnx2x_fastpath *fp = &bp->fp[0];
4655 rc = bnx2x_rx_int(fp, 1000);
4658 if (!BP_NOMCP(bp)) {
4659 int func = BP_FUNC(bp);
4663 ++bp->fw_drv_pulse_wr_seq;
4664 bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK;
4665 /* TBD - add SYSTEM_TIME */
4666 drv_pulse = bp->fw_drv_pulse_wr_seq;
4667 SHMEM_WR(bp, func_mb[func].drv_pulse_mb, drv_pulse);
4669 mcp_pulse = (SHMEM_RD(bp, func_mb[func].mcp_pulse_mb) &
4670 MCP_PULSE_SEQ_MASK);
4671 /* The delta between driver pulse and mcp response
4672 * should be 1 (before mcp response) or 0 (after mcp response)
4674 if ((drv_pulse != mcp_pulse) &&
4675 (drv_pulse != ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK))) {
4676 /* someone lost a heartbeat... */
4677 BNX2X_ERR("drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
4678 drv_pulse, mcp_pulse);
4682 if (bp->state == BNX2X_STATE_OPEN)
4683 bnx2x_stats_handle(bp, STATS_EVENT_UPDATE);
4686 mod_timer(&bp->timer, jiffies + bp->current_interval);
4689 /* end of Statistics */
4694 * nic init service functions
4697 static void bnx2x_zero_sb(struct bnx2x *bp, int sb_id)
4699 int port = BP_PORT(bp);
4702 bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
4703 CSTORM_SB_HOST_STATUS_BLOCK_U_OFFSET(port, sb_id), 0,
4704 CSTORM_SB_STATUS_BLOCK_U_SIZE / 4);
4705 bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
4706 CSTORM_SB_HOST_STATUS_BLOCK_C_OFFSET(port, sb_id), 0,
4707 CSTORM_SB_STATUS_BLOCK_C_SIZE / 4);
4710 static void bnx2x_init_sb(struct bnx2x *bp, struct host_status_block *sb,
4711 dma_addr_t mapping, int sb_id)
4713 int port = BP_PORT(bp);
4714 int func = BP_FUNC(bp);
4719 section = ((u64)mapping) + offsetof(struct host_status_block,
4721 sb->u_status_block.status_block_id = sb_id;
4723 REG_WR(bp, BAR_CSTRORM_INTMEM +
4724 CSTORM_SB_HOST_SB_ADDR_U_OFFSET(port, sb_id), U64_LO(section));
4725 REG_WR(bp, BAR_CSTRORM_INTMEM +
4726 ((CSTORM_SB_HOST_SB_ADDR_U_OFFSET(port, sb_id)) + 4),
4728 REG_WR8(bp, BAR_CSTRORM_INTMEM + FP_USB_FUNC_OFF +
4729 CSTORM_SB_HOST_STATUS_BLOCK_U_OFFSET(port, sb_id), func);
4731 for (index = 0; index < HC_USTORM_SB_NUM_INDICES; index++)
4732 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4733 CSTORM_SB_HC_DISABLE_U_OFFSET(port, sb_id, index), 1);
4736 section = ((u64)mapping) + offsetof(struct host_status_block,
4738 sb->c_status_block.status_block_id = sb_id;
4740 REG_WR(bp, BAR_CSTRORM_INTMEM +
4741 CSTORM_SB_HOST_SB_ADDR_C_OFFSET(port, sb_id), U64_LO(section));
4742 REG_WR(bp, BAR_CSTRORM_INTMEM +
4743 ((CSTORM_SB_HOST_SB_ADDR_C_OFFSET(port, sb_id)) + 4),
4745 REG_WR8(bp, BAR_CSTRORM_INTMEM + FP_CSB_FUNC_OFF +
4746 CSTORM_SB_HOST_STATUS_BLOCK_C_OFFSET(port, sb_id), func);
4748 for (index = 0; index < HC_CSTORM_SB_NUM_INDICES; index++)
4749 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4750 CSTORM_SB_HC_DISABLE_C_OFFSET(port, sb_id, index), 1);
4752 bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
4755 static void bnx2x_zero_def_sb(struct bnx2x *bp)
4757 int func = BP_FUNC(bp);
4759 bnx2x_init_fill(bp, TSEM_REG_FAST_MEMORY +
4760 TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4761 sizeof(struct tstorm_def_status_block)/4);
4762 bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
4763 CSTORM_DEF_SB_HOST_STATUS_BLOCK_U_OFFSET(func), 0,
4764 sizeof(struct cstorm_def_status_block_u)/4);
4765 bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
4766 CSTORM_DEF_SB_HOST_STATUS_BLOCK_C_OFFSET(func), 0,
4767 sizeof(struct cstorm_def_status_block_c)/4);
4768 bnx2x_init_fill(bp, XSEM_REG_FAST_MEMORY +
4769 XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4770 sizeof(struct xstorm_def_status_block)/4);
4773 static void bnx2x_init_def_sb(struct bnx2x *bp,
4774 struct host_def_status_block *def_sb,
4775 dma_addr_t mapping, int sb_id)
4777 int port = BP_PORT(bp);
4778 int func = BP_FUNC(bp);
4779 int index, val, reg_offset;
4783 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4784 atten_status_block);
4785 def_sb->atten_status_block.status_block_id = sb_id;
4789 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
4790 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
4792 for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
4793 bp->attn_group[index].sig[0] = REG_RD(bp,
4794 reg_offset + 0x10*index);
4795 bp->attn_group[index].sig[1] = REG_RD(bp,
4796 reg_offset + 0x4 + 0x10*index);
4797 bp->attn_group[index].sig[2] = REG_RD(bp,
4798 reg_offset + 0x8 + 0x10*index);
4799 bp->attn_group[index].sig[3] = REG_RD(bp,
4800 reg_offset + 0xc + 0x10*index);
4803 reg_offset = (port ? HC_REG_ATTN_MSG1_ADDR_L :
4804 HC_REG_ATTN_MSG0_ADDR_L);
4806 REG_WR(bp, reg_offset, U64_LO(section));
4807 REG_WR(bp, reg_offset + 4, U64_HI(section));
4809 reg_offset = (port ? HC_REG_ATTN_NUM_P1 : HC_REG_ATTN_NUM_P0);
4811 val = REG_RD(bp, reg_offset);
4813 REG_WR(bp, reg_offset, val);
4816 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4817 u_def_status_block);
4818 def_sb->u_def_status_block.status_block_id = sb_id;
4820 REG_WR(bp, BAR_CSTRORM_INTMEM +
4821 CSTORM_DEF_SB_HOST_SB_ADDR_U_OFFSET(func), U64_LO(section));
4822 REG_WR(bp, BAR_CSTRORM_INTMEM +
4823 ((CSTORM_DEF_SB_HOST_SB_ADDR_U_OFFSET(func)) + 4),
4825 REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_USB_FUNC_OFF +
4826 CSTORM_DEF_SB_HOST_STATUS_BLOCK_U_OFFSET(func), func);
4828 for (index = 0; index < HC_USTORM_DEF_SB_NUM_INDICES; index++)
4829 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4830 CSTORM_DEF_SB_HC_DISABLE_U_OFFSET(func, index), 1);
4833 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4834 c_def_status_block);
4835 def_sb->c_def_status_block.status_block_id = sb_id;
4837 REG_WR(bp, BAR_CSTRORM_INTMEM +
4838 CSTORM_DEF_SB_HOST_SB_ADDR_C_OFFSET(func), U64_LO(section));
4839 REG_WR(bp, BAR_CSTRORM_INTMEM +
4840 ((CSTORM_DEF_SB_HOST_SB_ADDR_C_OFFSET(func)) + 4),
4842 REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_CSB_FUNC_OFF +
4843 CSTORM_DEF_SB_HOST_STATUS_BLOCK_C_OFFSET(func), func);
4845 for (index = 0; index < HC_CSTORM_DEF_SB_NUM_INDICES; index++)
4846 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4847 CSTORM_DEF_SB_HC_DISABLE_C_OFFSET(func, index), 1);
4850 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4851 t_def_status_block);
4852 def_sb->t_def_status_block.status_block_id = sb_id;
4854 REG_WR(bp, BAR_TSTRORM_INTMEM +
4855 TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
4856 REG_WR(bp, BAR_TSTRORM_INTMEM +
4857 ((TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
4859 REG_WR8(bp, BAR_TSTRORM_INTMEM + DEF_TSB_FUNC_OFF +
4860 TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4862 for (index = 0; index < HC_TSTORM_DEF_SB_NUM_INDICES; index++)
4863 REG_WR16(bp, BAR_TSTRORM_INTMEM +
4864 TSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
4867 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4868 x_def_status_block);
4869 def_sb->x_def_status_block.status_block_id = sb_id;
4871 REG_WR(bp, BAR_XSTRORM_INTMEM +
4872 XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
4873 REG_WR(bp, BAR_XSTRORM_INTMEM +
4874 ((XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
4876 REG_WR8(bp, BAR_XSTRORM_INTMEM + DEF_XSB_FUNC_OFF +
4877 XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4879 for (index = 0; index < HC_XSTORM_DEF_SB_NUM_INDICES; index++)
4880 REG_WR16(bp, BAR_XSTRORM_INTMEM +
4881 XSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
4883 bp->stats_pending = 0;
4884 bp->set_mac_pending = 0;
4886 bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
4889 static void bnx2x_update_coalesce(struct bnx2x *bp)
4891 int port = BP_PORT(bp);
4894 for_each_queue(bp, i) {
4895 int sb_id = bp->fp[i].sb_id;
4897 /* HC_INDEX_U_ETH_RX_CQ_CONS */
4898 REG_WR8(bp, BAR_CSTRORM_INTMEM +
4899 CSTORM_SB_HC_TIMEOUT_U_OFFSET(port, sb_id,
4900 U_SB_ETH_RX_CQ_INDEX),
4901 bp->rx_ticks/(4 * BNX2X_BTR));
4902 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4903 CSTORM_SB_HC_DISABLE_U_OFFSET(port, sb_id,
4904 U_SB_ETH_RX_CQ_INDEX),
4905 (bp->rx_ticks/(4 * BNX2X_BTR)) ? 0 : 1);
4907 /* HC_INDEX_C_ETH_TX_CQ_CONS */
4908 REG_WR8(bp, BAR_CSTRORM_INTMEM +
4909 CSTORM_SB_HC_TIMEOUT_C_OFFSET(port, sb_id,
4910 C_SB_ETH_TX_CQ_INDEX),
4911 bp->tx_ticks/(4 * BNX2X_BTR));
4912 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4913 CSTORM_SB_HC_DISABLE_C_OFFSET(port, sb_id,
4914 C_SB_ETH_TX_CQ_INDEX),
4915 (bp->tx_ticks/(4 * BNX2X_BTR)) ? 0 : 1);
4919 static inline void bnx2x_free_tpa_pool(struct bnx2x *bp,
4920 struct bnx2x_fastpath *fp, int last)
4924 for (i = 0; i < last; i++) {
4925 struct sw_rx_bd *rx_buf = &(fp->tpa_pool[i]);
4926 struct sk_buff *skb = rx_buf->skb;
4929 DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i);
4933 if (fp->tpa_state[i] == BNX2X_TPA_START)
4934 pci_unmap_single(bp->pdev,
4935 pci_unmap_addr(rx_buf, mapping),
4936 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
4943 static void bnx2x_init_rx_rings(struct bnx2x *bp)
4945 int func = BP_FUNC(bp);
4946 int max_agg_queues = CHIP_IS_E1(bp) ? ETH_MAX_AGGREGATION_QUEUES_E1 :
4947 ETH_MAX_AGGREGATION_QUEUES_E1H;
4948 u16 ring_prod, cqe_ring_prod;
4951 bp->rx_buf_size = bp->dev->mtu + ETH_OVREHEAD + BNX2X_RX_ALIGN;
4953 "mtu %d rx_buf_size %d\n", bp->dev->mtu, bp->rx_buf_size);
4955 if (bp->flags & TPA_ENABLE_FLAG) {
4957 for_each_queue(bp, j) {
4958 struct bnx2x_fastpath *fp = &bp->fp[j];
4960 for (i = 0; i < max_agg_queues; i++) {
4961 fp->tpa_pool[i].skb =
4962 netdev_alloc_skb(bp->dev, bp->rx_buf_size);
4963 if (!fp->tpa_pool[i].skb) {
4964 BNX2X_ERR("Failed to allocate TPA "
4965 "skb pool for queue[%d] - "
4966 "disabling TPA on this "
4968 bnx2x_free_tpa_pool(bp, fp, i);
4969 fp->disable_tpa = 1;
4972 pci_unmap_addr_set((struct sw_rx_bd *)
4973 &bp->fp->tpa_pool[i],
4975 fp->tpa_state[i] = BNX2X_TPA_STOP;
4980 for_each_queue(bp, j) {
4981 struct bnx2x_fastpath *fp = &bp->fp[j];
4984 fp->rx_cons_sb = BNX2X_RX_SB_INDEX;
4985 fp->rx_bd_cons_sb = BNX2X_RX_SB_BD_INDEX;
4987 /* "next page" elements initialization */
4989 for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
4990 struct eth_rx_sge *sge;
4992 sge = &fp->rx_sge_ring[RX_SGE_CNT * i - 2];
4994 cpu_to_le32(U64_HI(fp->rx_sge_mapping +
4995 BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
4997 cpu_to_le32(U64_LO(fp->rx_sge_mapping +
4998 BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
5001 bnx2x_init_sge_ring_bit_mask(fp);
5004 for (i = 1; i <= NUM_RX_RINGS; i++) {
5005 struct eth_rx_bd *rx_bd;
5007 rx_bd = &fp->rx_desc_ring[RX_DESC_CNT * i - 2];
5009 cpu_to_le32(U64_HI(fp->rx_desc_mapping +
5010 BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
5012 cpu_to_le32(U64_LO(fp->rx_desc_mapping +
5013 BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
5017 for (i = 1; i <= NUM_RCQ_RINGS; i++) {
5018 struct eth_rx_cqe_next_page *nextpg;
5020 nextpg = (struct eth_rx_cqe_next_page *)
5021 &fp->rx_comp_ring[RCQ_DESC_CNT * i - 1];
5023 cpu_to_le32(U64_HI(fp->rx_comp_mapping +
5024 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
5026 cpu_to_le32(U64_LO(fp->rx_comp_mapping +
5027 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
5030 /* Allocate SGEs and initialize the ring elements */
5031 for (i = 0, ring_prod = 0;
5032 i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {
5034 if (bnx2x_alloc_rx_sge(bp, fp, ring_prod) < 0) {
5035 BNX2X_ERR("was only able to allocate "
5037 BNX2X_ERR("disabling TPA for queue[%d]\n", j);
5038 /* Cleanup already allocated elements */
5039 bnx2x_free_rx_sge_range(bp, fp, ring_prod);
5040 bnx2x_free_tpa_pool(bp, fp, max_agg_queues);
5041 fp->disable_tpa = 1;
5045 ring_prod = NEXT_SGE_IDX(ring_prod);
5047 fp->rx_sge_prod = ring_prod;
5049 /* Allocate BDs and initialize BD ring */
5050 fp->rx_comp_cons = 0;
5051 cqe_ring_prod = ring_prod = 0;
5052 for (i = 0; i < bp->rx_ring_size; i++) {
5053 if (bnx2x_alloc_rx_skb(bp, fp, ring_prod) < 0) {
5054 BNX2X_ERR("was only able to allocate "
5055 "%d rx skbs on queue[%d]\n", i, j);
5056 fp->eth_q_stats.rx_skb_alloc_failed++;
5059 ring_prod = NEXT_RX_IDX(ring_prod);
5060 cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
5061 WARN_ON(ring_prod <= i);
5064 fp->rx_bd_prod = ring_prod;
5065 /* must not have more available CQEs than BDs */
5066 fp->rx_comp_prod = min((u16)(NUM_RCQ_RINGS*RCQ_DESC_CNT),
5068 fp->rx_pkt = fp->rx_calls = 0;
5071 * this will generate an interrupt (to the TSTORM)
5072 * must only be done after chip is initialized
5074 bnx2x_update_rx_prod(bp, fp, ring_prod, fp->rx_comp_prod,
5079 REG_WR(bp, BAR_USTRORM_INTMEM +
5080 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
5081 U64_LO(fp->rx_comp_mapping));
5082 REG_WR(bp, BAR_USTRORM_INTMEM +
5083 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
5084 U64_HI(fp->rx_comp_mapping));
5088 static void bnx2x_init_tx_ring(struct bnx2x *bp)
5092 for_each_queue(bp, j) {
5093 struct bnx2x_fastpath *fp = &bp->fp[j];
5095 for (i = 1; i <= NUM_TX_RINGS; i++) {
5096 struct eth_tx_next_bd *tx_next_bd =
5097 &fp->tx_desc_ring[TX_DESC_CNT * i - 1].next_bd;
5099 tx_next_bd->addr_hi =
5100 cpu_to_le32(U64_HI(fp->tx_desc_mapping +
5101 BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
5102 tx_next_bd->addr_lo =
5103 cpu_to_le32(U64_LO(fp->tx_desc_mapping +
5104 BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
5107 fp->tx_db.data.header.header = DOORBELL_HDR_DB_TYPE;
5108 fp->tx_db.data.zero_fill1 = 0;
5109 fp->tx_db.data.prod = 0;
5111 fp->tx_pkt_prod = 0;
5112 fp->tx_pkt_cons = 0;
5115 fp->tx_cons_sb = BNX2X_TX_SB_INDEX;
5120 static void bnx2x_init_sp_ring(struct bnx2x *bp)
5122 int func = BP_FUNC(bp);
5124 spin_lock_init(&bp->spq_lock);
5126 bp->spq_left = MAX_SPQ_PENDING;
5127 bp->spq_prod_idx = 0;
5128 bp->dsb_sp_prod = BNX2X_SP_DSB_INDEX;
5129 bp->spq_prod_bd = bp->spq;
5130 bp->spq_last_bd = bp->spq_prod_bd + MAX_SP_DESC_CNT;
5132 REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func),
5133 U64_LO(bp->spq_mapping));
5135 XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func) + 4,
5136 U64_HI(bp->spq_mapping));
5138 REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PROD_OFFSET(func),
5142 static void bnx2x_init_context(struct bnx2x *bp)
5147 for_each_queue(bp, i) {
5148 struct eth_context *context = bnx2x_sp(bp, context[i].eth);
5149 struct bnx2x_fastpath *fp = &bp->fp[i];
5150 u8 cl_id = fp->cl_id;
5152 context->ustorm_st_context.common.sb_index_numbers =
5153 BNX2X_RX_SB_INDEX_NUM;
5154 context->ustorm_st_context.common.clientId = cl_id;
5155 context->ustorm_st_context.common.status_block_id = fp->sb_id;
5156 context->ustorm_st_context.common.flags =
5157 (USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_MC_ALIGNMENT |
5158 USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_STATISTICS);
5159 context->ustorm_st_context.common.statistics_counter_id =
5161 context->ustorm_st_context.common.mc_alignment_log_size =
5162 BNX2X_RX_ALIGN_SHIFT;
5163 context->ustorm_st_context.common.bd_buff_size =
5165 context->ustorm_st_context.common.bd_page_base_hi =
5166 U64_HI(fp->rx_desc_mapping);
5167 context->ustorm_st_context.common.bd_page_base_lo =
5168 U64_LO(fp->rx_desc_mapping);
5169 if (!fp->disable_tpa) {
5170 context->ustorm_st_context.common.flags |=
5171 USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_TPA;
5172 context->ustorm_st_context.common.sge_buff_size =
5173 (u16)min((u32)SGE_PAGE_SIZE*PAGES_PER_SGE,
5175 context->ustorm_st_context.common.sge_page_base_hi =
5176 U64_HI(fp->rx_sge_mapping);
5177 context->ustorm_st_context.common.sge_page_base_lo =
5178 U64_LO(fp->rx_sge_mapping);
5180 context->ustorm_st_context.common.max_sges_for_packet =
5181 SGE_PAGE_ALIGN(bp->dev->mtu) >> SGE_PAGE_SHIFT;
5182 context->ustorm_st_context.common.max_sges_for_packet =
5183 ((context->ustorm_st_context.common.
5184 max_sges_for_packet + PAGES_PER_SGE - 1) &
5185 (~(PAGES_PER_SGE - 1))) >> PAGES_PER_SGE_SHIFT;
5188 context->ustorm_ag_context.cdu_usage =
5189 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
5190 CDU_REGION_NUMBER_UCM_AG,
5191 ETH_CONNECTION_TYPE);
5193 context->xstorm_ag_context.cdu_reserved =
5194 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
5195 CDU_REGION_NUMBER_XCM_AG,
5196 ETH_CONNECTION_TYPE);
5200 for_each_queue(bp, i) {
5201 struct bnx2x_fastpath *fp = &bp->fp[i];
5202 struct eth_context *context =
5203 bnx2x_sp(bp, context[i].eth);
5205 context->cstorm_st_context.sb_index_number =
5206 C_SB_ETH_TX_CQ_INDEX;
5207 context->cstorm_st_context.status_block_id = fp->sb_id;
5209 context->xstorm_st_context.tx_bd_page_base_hi =
5210 U64_HI(fp->tx_desc_mapping);
5211 context->xstorm_st_context.tx_bd_page_base_lo =
5212 U64_LO(fp->tx_desc_mapping);
5213 context->xstorm_st_context.statistics_data = (fp->cl_id |
5214 XSTORM_ETH_ST_CONTEXT_STATISTICS_ENABLE);
5218 static void bnx2x_init_ind_table(struct bnx2x *bp)
5220 int func = BP_FUNC(bp);
5223 if (bp->multi_mode == ETH_RSS_MODE_DISABLED)
5227 "Initializing indirection table multi_mode %d\n", bp->multi_mode);
5228 for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++)
5229 REG_WR8(bp, BAR_TSTRORM_INTMEM +
5230 TSTORM_INDIRECTION_TABLE_OFFSET(func) + i,
5231 bp->fp->cl_id + (i % bp->num_queues));
5234 static void bnx2x_set_client_config(struct bnx2x *bp)
5236 struct tstorm_eth_client_config tstorm_client = {0};
5237 int port = BP_PORT(bp);
5240 tstorm_client.mtu = bp->dev->mtu;
5241 tstorm_client.config_flags =
5242 (TSTORM_ETH_CLIENT_CONFIG_STATSITICS_ENABLE |
5243 TSTORM_ETH_CLIENT_CONFIG_E1HOV_REM_ENABLE);
5245 if (bp->rx_mode && bp->vlgrp && (bp->flags & HW_VLAN_RX_FLAG)) {
5246 tstorm_client.config_flags |=
5247 TSTORM_ETH_CLIENT_CONFIG_VLAN_REM_ENABLE;
5248 DP(NETIF_MSG_IFUP, "vlan removal enabled\n");
5252 for_each_queue(bp, i) {
5253 tstorm_client.statistics_counter_id = bp->fp[i].cl_id;
5255 REG_WR(bp, BAR_TSTRORM_INTMEM +
5256 TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id),
5257 ((u32 *)&tstorm_client)[0]);
5258 REG_WR(bp, BAR_TSTRORM_INTMEM +
5259 TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id) + 4,
5260 ((u32 *)&tstorm_client)[1]);
5263 DP(BNX2X_MSG_OFF, "tstorm_client: 0x%08x 0x%08x\n",
5264 ((u32 *)&tstorm_client)[0], ((u32 *)&tstorm_client)[1]);
5267 static void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
5269 struct tstorm_eth_mac_filter_config tstorm_mac_filter = {0};
5270 int mode = bp->rx_mode;
5271 int mask = bp->rx_mode_cl_mask;
5272 int func = BP_FUNC(bp);
5273 int port = BP_PORT(bp);
5275 /* All but management unicast packets should pass to the host as well */
5277 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_BRCST |
5278 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_MLCST |
5279 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_VLAN |
5280 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_NO_VLAN;
5282 DP(NETIF_MSG_IFUP, "rx mode %d mask 0x%x\n", mode, mask);
5285 case BNX2X_RX_MODE_NONE: /* no Rx */
5286 tstorm_mac_filter.ucast_drop_all = mask;
5287 tstorm_mac_filter.mcast_drop_all = mask;
5288 tstorm_mac_filter.bcast_drop_all = mask;
5291 case BNX2X_RX_MODE_NORMAL:
5292 tstorm_mac_filter.bcast_accept_all = mask;
5295 case BNX2X_RX_MODE_ALLMULTI:
5296 tstorm_mac_filter.mcast_accept_all = mask;
5297 tstorm_mac_filter.bcast_accept_all = mask;
5300 case BNX2X_RX_MODE_PROMISC:
5301 tstorm_mac_filter.ucast_accept_all = mask;
5302 tstorm_mac_filter.mcast_accept_all = mask;
5303 tstorm_mac_filter.bcast_accept_all = mask;
5304 /* pass management unicast packets as well */
5305 llh_mask |= NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_UNCST;
5309 BNX2X_ERR("BAD rx mode (%d)\n", mode);
5314 (port ? NIG_REG_LLH1_BRB1_DRV_MASK : NIG_REG_LLH0_BRB1_DRV_MASK),
5317 for (i = 0; i < sizeof(struct tstorm_eth_mac_filter_config)/4; i++) {
5318 REG_WR(bp, BAR_TSTRORM_INTMEM +
5319 TSTORM_MAC_FILTER_CONFIG_OFFSET(func) + i * 4,
5320 ((u32 *)&tstorm_mac_filter)[i]);
5322 /* DP(NETIF_MSG_IFUP, "tstorm_mac_filter[%d]: 0x%08x\n", i,
5323 ((u32 *)&tstorm_mac_filter)[i]); */
5326 if (mode != BNX2X_RX_MODE_NONE)
5327 bnx2x_set_client_config(bp);
5330 static void bnx2x_init_internal_common(struct bnx2x *bp)
5334 /* Zero this manually as its initialization is
5335 currently missing in the initTool */
5336 for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++)
5337 REG_WR(bp, BAR_USTRORM_INTMEM +
5338 USTORM_AGG_DATA_OFFSET + i * 4, 0);
5341 static void bnx2x_init_internal_port(struct bnx2x *bp)
5343 int port = BP_PORT(bp);
5346 BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_U_OFFSET(port), BNX2X_BTR);
5348 BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_C_OFFSET(port), BNX2X_BTR);
5349 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
5350 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
5353 static void bnx2x_init_internal_func(struct bnx2x *bp)
5355 struct tstorm_eth_function_common_config tstorm_config = {0};
5356 struct stats_indication_flags stats_flags = {0};
5357 int port = BP_PORT(bp);
5358 int func = BP_FUNC(bp);
5364 tstorm_config.config_flags = MULTI_FLAGS(bp);
5365 tstorm_config.rss_result_mask = MULTI_MASK;
5368 /* Enable TPA if needed */
5369 if (bp->flags & TPA_ENABLE_FLAG)
5370 tstorm_config.config_flags |=
5371 TSTORM_ETH_FUNCTION_COMMON_CONFIG_ENABLE_TPA;
5374 tstorm_config.config_flags |=
5375 TSTORM_ETH_FUNCTION_COMMON_CONFIG_E1HOV_IN_CAM;
5377 tstorm_config.leading_client_id = BP_L_ID(bp);
5379 REG_WR(bp, BAR_TSTRORM_INTMEM +
5380 TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(func),
5381 (*(u32 *)&tstorm_config));
5383 bp->rx_mode = BNX2X_RX_MODE_NONE; /* no rx until link is up */
5384 bp->rx_mode_cl_mask = (1 << BP_L_ID(bp));
5385 bnx2x_set_storm_rx_mode(bp);
5387 for_each_queue(bp, i) {
5388 u8 cl_id = bp->fp[i].cl_id;
5390 /* reset xstorm per client statistics */
5391 offset = BAR_XSTRORM_INTMEM +
5392 XSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
5394 j < sizeof(struct xstorm_per_client_stats) / 4; j++)
5395 REG_WR(bp, offset + j*4, 0);
5397 /* reset tstorm per client statistics */
5398 offset = BAR_TSTRORM_INTMEM +
5399 TSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
5401 j < sizeof(struct tstorm_per_client_stats) / 4; j++)
5402 REG_WR(bp, offset + j*4, 0);
5404 /* reset ustorm per client statistics */
5405 offset = BAR_USTRORM_INTMEM +
5406 USTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
5408 j < sizeof(struct ustorm_per_client_stats) / 4; j++)
5409 REG_WR(bp, offset + j*4, 0);
5412 /* Init statistics related context */
5413 stats_flags.collect_eth = 1;
5415 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func),
5416 ((u32 *)&stats_flags)[0]);
5417 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func) + 4,
5418 ((u32 *)&stats_flags)[1]);
5420 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func),
5421 ((u32 *)&stats_flags)[0]);
5422 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func) + 4,
5423 ((u32 *)&stats_flags)[1]);
5425 REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(func),
5426 ((u32 *)&stats_flags)[0]);
5427 REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(func) + 4,
5428 ((u32 *)&stats_flags)[1]);
5430 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func),
5431 ((u32 *)&stats_flags)[0]);
5432 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func) + 4,
5433 ((u32 *)&stats_flags)[1]);
5435 REG_WR(bp, BAR_XSTRORM_INTMEM +
5436 XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
5437 U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
5438 REG_WR(bp, BAR_XSTRORM_INTMEM +
5439 XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
5440 U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
5442 REG_WR(bp, BAR_TSTRORM_INTMEM +
5443 TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
5444 U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
5445 REG_WR(bp, BAR_TSTRORM_INTMEM +
5446 TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
5447 U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
5449 REG_WR(bp, BAR_USTRORM_INTMEM +
5450 USTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
5451 U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
5452 REG_WR(bp, BAR_USTRORM_INTMEM +
5453 USTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
5454 U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
5456 if (CHIP_IS_E1H(bp)) {
5457 REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNCTION_MODE_OFFSET,
5459 REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNCTION_MODE_OFFSET,
5461 REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNCTION_MODE_OFFSET,
5463 REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNCTION_MODE_OFFSET,
5466 REG_WR16(bp, BAR_XSTRORM_INTMEM + XSTORM_E1HOV_OFFSET(func),
5470 /* Init CQ ring mapping and aggregation size, the FW limit is 8 frags */
5472 min((u32)(min((u32)8, (u32)MAX_SKB_FRAGS) *
5473 SGE_PAGE_SIZE * PAGES_PER_SGE),
5475 for_each_queue(bp, i) {
5476 struct bnx2x_fastpath *fp = &bp->fp[i];
5478 REG_WR(bp, BAR_USTRORM_INTMEM +
5479 USTORM_CQE_PAGE_BASE_OFFSET(port, fp->cl_id),
5480 U64_LO(fp->rx_comp_mapping));
5481 REG_WR(bp, BAR_USTRORM_INTMEM +
5482 USTORM_CQE_PAGE_BASE_OFFSET(port, fp->cl_id) + 4,
5483 U64_HI(fp->rx_comp_mapping));
5486 REG_WR(bp, BAR_USTRORM_INTMEM +
5487 USTORM_CQE_PAGE_NEXT_OFFSET(port, fp->cl_id),
5488 U64_LO(fp->rx_comp_mapping + BCM_PAGE_SIZE));
5489 REG_WR(bp, BAR_USTRORM_INTMEM +
5490 USTORM_CQE_PAGE_NEXT_OFFSET(port, fp->cl_id) + 4,
5491 U64_HI(fp->rx_comp_mapping + BCM_PAGE_SIZE));
5493 REG_WR16(bp, BAR_USTRORM_INTMEM +
5494 USTORM_MAX_AGG_SIZE_OFFSET(port, fp->cl_id),
5498 /* dropless flow control */
5499 if (CHIP_IS_E1H(bp)) {
5500 struct ustorm_eth_rx_pause_data_e1h rx_pause = {0};
5502 rx_pause.bd_thr_low = 250;
5503 rx_pause.cqe_thr_low = 250;
5505 rx_pause.sge_thr_low = 0;
5506 rx_pause.bd_thr_high = 350;
5507 rx_pause.cqe_thr_high = 350;
5508 rx_pause.sge_thr_high = 0;
5510 for_each_queue(bp, i) {
5511 struct bnx2x_fastpath *fp = &bp->fp[i];
5513 if (!fp->disable_tpa) {
5514 rx_pause.sge_thr_low = 150;
5515 rx_pause.sge_thr_high = 250;
5519 offset = BAR_USTRORM_INTMEM +
5520 USTORM_ETH_RING_PAUSE_DATA_OFFSET(port,
5523 j < sizeof(struct ustorm_eth_rx_pause_data_e1h)/4;
5525 REG_WR(bp, offset + j*4,
5526 ((u32 *)&rx_pause)[j]);
5530 memset(&(bp->cmng), 0, sizeof(struct cmng_struct_per_port));
5532 /* Init rate shaping and fairness contexts */
5536 /* During init there is no active link
5537 Until link is up, set link rate to 10Gbps */
5538 bp->link_vars.line_speed = SPEED_10000;
5539 bnx2x_init_port_minmax(bp);
5543 SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
5544 bnx2x_calc_vn_weight_sum(bp);
5546 for (vn = VN_0; vn < E1HVN_MAX; vn++)
5547 bnx2x_init_vn_minmax(bp, 2*vn + port);
5549 /* Enable rate shaping and fairness */
5550 bp->cmng.flags.cmng_enables |=
5551 CMNG_FLAGS_PER_PORT_RATE_SHAPING_VN;
5554 /* rate shaping and fairness are disabled */
5556 "single function mode minmax will be disabled\n");
5560 /* Store it to internal memory */
5562 for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++)
5563 REG_WR(bp, BAR_XSTRORM_INTMEM +
5564 XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i * 4,
5565 ((u32 *)(&bp->cmng))[i]);
5568 static void bnx2x_init_internal(struct bnx2x *bp, u32 load_code)
5570 switch (load_code) {
5571 case FW_MSG_CODE_DRV_LOAD_COMMON:
5572 bnx2x_init_internal_common(bp);
5575 case FW_MSG_CODE_DRV_LOAD_PORT:
5576 bnx2x_init_internal_port(bp);
5579 case FW_MSG_CODE_DRV_LOAD_FUNCTION:
5580 bnx2x_init_internal_func(bp);
5584 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
5589 static void bnx2x_nic_init(struct bnx2x *bp, u32 load_code)
5593 for_each_queue(bp, i) {
5594 struct bnx2x_fastpath *fp = &bp->fp[i];
5597 fp->state = BNX2X_FP_STATE_CLOSED;
5599 fp->cl_id = BP_L_ID(bp) + i;
5601 fp->sb_id = fp->cl_id + 1;
5603 fp->sb_id = fp->cl_id;
5606 "queue[%d]: bnx2x_init_sb(%p,%p) cl_id %d sb %d\n",
5607 i, bp, fp->status_blk, fp->cl_id, fp->sb_id);
5608 bnx2x_init_sb(bp, fp->status_blk, fp->status_blk_mapping,
5610 bnx2x_update_fpsb_idx(fp);
5613 /* ensure status block indices were read */
5617 bnx2x_init_def_sb(bp, bp->def_status_blk, bp->def_status_blk_mapping,
5619 bnx2x_update_dsb_idx(bp);
5620 bnx2x_update_coalesce(bp);
5621 bnx2x_init_rx_rings(bp);
5622 bnx2x_init_tx_ring(bp);
5623 bnx2x_init_sp_ring(bp);
5624 bnx2x_init_context(bp);
5625 bnx2x_init_internal(bp, load_code);
5626 bnx2x_init_ind_table(bp);
5627 bnx2x_stats_init(bp);
5629 /* At this point, we are ready for interrupts */
5630 atomic_set(&bp->intr_sem, 0);
5632 /* flush all before enabling interrupts */
5636 bnx2x_int_enable(bp);
5638 /* Check for SPIO5 */
5639 bnx2x_attn_int_deasserted0(bp,
5640 REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + BP_PORT(bp)*4) &
5641 AEU_INPUTS_ATTN_BITS_SPIO5);
5644 /* end of nic init */
5647 * gzip service functions
5650 static int bnx2x_gunzip_init(struct bnx2x *bp)
5652 bp->gunzip_buf = pci_alloc_consistent(bp->pdev, FW_BUF_SIZE,
5653 &bp->gunzip_mapping);
5654 if (bp->gunzip_buf == NULL)
5657 bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL);
5658 if (bp->strm == NULL)
5661 bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(),
5663 if (bp->strm->workspace == NULL)
5673 pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
5674 bp->gunzip_mapping);
5675 bp->gunzip_buf = NULL;
5678 printk(KERN_ERR PFX "%s: Cannot allocate firmware buffer for"
5679 " un-compression\n", bp->dev->name);
5683 static void bnx2x_gunzip_end(struct bnx2x *bp)
5685 kfree(bp->strm->workspace);
5690 if (bp->gunzip_buf) {
5691 pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
5692 bp->gunzip_mapping);
5693 bp->gunzip_buf = NULL;
5697 static int bnx2x_gunzip(struct bnx2x *bp, const u8 *zbuf, int len)
5701 /* check gzip header */
5702 if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED)) {
5703 BNX2X_ERR("Bad gzip header\n");
5711 if (zbuf[3] & FNAME)
5712 while ((zbuf[n++] != 0) && (n < len));
5714 bp->strm->next_in = (typeof(bp->strm->next_in))zbuf + n;
5715 bp->strm->avail_in = len - n;
5716 bp->strm->next_out = bp->gunzip_buf;
5717 bp->strm->avail_out = FW_BUF_SIZE;
5719 rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
5723 rc = zlib_inflate(bp->strm, Z_FINISH);
5724 if ((rc != Z_OK) && (rc != Z_STREAM_END))
5725 printk(KERN_ERR PFX "%s: Firmware decompression error: %s\n",
5726 bp->dev->name, bp->strm->msg);
5728 bp->gunzip_outlen = (FW_BUF_SIZE - bp->strm->avail_out);
5729 if (bp->gunzip_outlen & 0x3)
5730 printk(KERN_ERR PFX "%s: Firmware decompression error:"
5731 " gunzip_outlen (%d) not aligned\n",
5732 bp->dev->name, bp->gunzip_outlen);
5733 bp->gunzip_outlen >>= 2;
5735 zlib_inflateEnd(bp->strm);
5737 if (rc == Z_STREAM_END)
5743 /* nic load/unload */
5746 * General service functions
5749 /* send a NIG loopback debug packet */
5750 static void bnx2x_lb_pckt(struct bnx2x *bp)
5754 /* Ethernet source and destination addresses */
5755 wb_write[0] = 0x55555555;
5756 wb_write[1] = 0x55555555;
5757 wb_write[2] = 0x20; /* SOP */
5758 REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
5760 /* NON-IP protocol */
5761 wb_write[0] = 0x09000000;
5762 wb_write[1] = 0x55555555;
5763 wb_write[2] = 0x10; /* EOP, eop_bvalid = 0 */
5764 REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
5767 /* some of the internal memories
5768 * are not directly readable from the driver
5769 * to test them we send debug packets
5771 static int bnx2x_int_mem_test(struct bnx2x *bp)
5777 if (CHIP_REV_IS_FPGA(bp))
5779 else if (CHIP_REV_IS_EMUL(bp))
5784 DP(NETIF_MSG_HW, "start part1\n");
5786 /* Disable inputs of parser neighbor blocks */
5787 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
5788 REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
5789 REG_WR(bp, CFC_REG_DEBUG0, 0x1);
5790 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
5792 /* Write 0 to parser credits for CFC search request */
5793 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
5795 /* send Ethernet packet */
5798 /* TODO do i reset NIG statistic? */
5799 /* Wait until NIG register shows 1 packet of size 0x10 */
5800 count = 1000 * factor;
5803 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5804 val = *bnx2x_sp(bp, wb_data[0]);
5812 BNX2X_ERR("NIG timeout val = 0x%x\n", val);
5816 /* Wait until PRS register shows 1 packet */
5817 count = 1000 * factor;
5819 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5827 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
5831 /* Reset and init BRB, PRS */
5832 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
5834 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
5836 bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
5837 bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
5839 DP(NETIF_MSG_HW, "part2\n");
5841 /* Disable inputs of parser neighbor blocks */
5842 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
5843 REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
5844 REG_WR(bp, CFC_REG_DEBUG0, 0x1);
5845 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
5847 /* Write 0 to parser credits for CFC search request */
5848 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
5850 /* send 10 Ethernet packets */
5851 for (i = 0; i < 10; i++)
5854 /* Wait until NIG register shows 10 + 1
5855 packets of size 11*0x10 = 0xb0 */
5856 count = 1000 * factor;
5859 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5860 val = *bnx2x_sp(bp, wb_data[0]);
5868 BNX2X_ERR("NIG timeout val = 0x%x\n", val);
5872 /* Wait until PRS register shows 2 packets */
5873 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5875 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
5877 /* Write 1 to parser credits for CFC search request */
5878 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x1);
5880 /* Wait until PRS register shows 3 packets */
5881 msleep(10 * factor);
5882 /* Wait until NIG register shows 1 packet of size 0x10 */
5883 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5885 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
5887 /* clear NIG EOP FIFO */
5888 for (i = 0; i < 11; i++)
5889 REG_RD(bp, NIG_REG_INGRESS_EOP_LB_FIFO);
5890 val = REG_RD(bp, NIG_REG_INGRESS_EOP_LB_EMPTY);
5892 BNX2X_ERR("clear of NIG failed\n");
5896 /* Reset and init BRB, PRS, NIG */
5897 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
5899 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
5901 bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
5902 bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
5905 REG_WR(bp, PRS_REG_NIC_MODE, 1);
5908 /* Enable inputs of parser neighbor blocks */
5909 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x7fffffff);
5910 REG_WR(bp, TCM_REG_PRS_IFEN, 0x1);
5911 REG_WR(bp, CFC_REG_DEBUG0, 0x0);
5912 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x1);
5914 DP(NETIF_MSG_HW, "done\n");
5919 static void enable_blocks_attention(struct bnx2x *bp)
5921 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
5922 REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0);
5923 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
5924 REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
5925 REG_WR(bp, QM_REG_QM_INT_MASK, 0);
5926 REG_WR(bp, TM_REG_TM_INT_MASK, 0);
5927 REG_WR(bp, XSDM_REG_XSDM_INT_MASK_0, 0);
5928 REG_WR(bp, XSDM_REG_XSDM_INT_MASK_1, 0);
5929 REG_WR(bp, XCM_REG_XCM_INT_MASK, 0);
5930 /* REG_WR(bp, XSEM_REG_XSEM_INT_MASK_0, 0); */
5931 /* REG_WR(bp, XSEM_REG_XSEM_INT_MASK_1, 0); */
5932 REG_WR(bp, USDM_REG_USDM_INT_MASK_0, 0);
5933 REG_WR(bp, USDM_REG_USDM_INT_MASK_1, 0);
5934 REG_WR(bp, UCM_REG_UCM_INT_MASK, 0);
5935 /* REG_WR(bp, USEM_REG_USEM_INT_MASK_0, 0); */
5936 /* REG_WR(bp, USEM_REG_USEM_INT_MASK_1, 0); */
5937 REG_WR(bp, GRCBASE_UPB + PB_REG_PB_INT_MASK, 0);
5938 REG_WR(bp, CSDM_REG_CSDM_INT_MASK_0, 0);
5939 REG_WR(bp, CSDM_REG_CSDM_INT_MASK_1, 0);
5940 REG_WR(bp, CCM_REG_CCM_INT_MASK, 0);
5941 /* REG_WR(bp, CSEM_REG_CSEM_INT_MASK_0, 0); */
5942 /* REG_WR(bp, CSEM_REG_CSEM_INT_MASK_1, 0); */
5943 if (CHIP_REV_IS_FPGA(bp))
5944 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x580000);
5946 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x480000);
5947 REG_WR(bp, TSDM_REG_TSDM_INT_MASK_0, 0);
5948 REG_WR(bp, TSDM_REG_TSDM_INT_MASK_1, 0);
5949 REG_WR(bp, TCM_REG_TCM_INT_MASK, 0);
5950 /* REG_WR(bp, TSEM_REG_TSEM_INT_MASK_0, 0); */
5951 /* REG_WR(bp, TSEM_REG_TSEM_INT_MASK_1, 0); */
5952 REG_WR(bp, CDU_REG_CDU_INT_MASK, 0);
5953 REG_WR(bp, DMAE_REG_DMAE_INT_MASK, 0);
5954 /* REG_WR(bp, MISC_REG_MISC_INT_MASK, 0); */
5955 REG_WR(bp, PBF_REG_PBF_INT_MASK, 0X18); /* bit 3,4 masked */
5959 static void bnx2x_reset_common(struct bnx2x *bp)
5962 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
5964 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, 0x1403);
5967 static void bnx2x_init_pxp(struct bnx2x *bp)
5970 int r_order, w_order;
5972 pci_read_config_word(bp->pdev,
5973 bp->pcie_cap + PCI_EXP_DEVCTL, &devctl);
5974 DP(NETIF_MSG_HW, "read 0x%x from devctl\n", devctl);
5975 w_order = ((devctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5);
5977 r_order = ((devctl & PCI_EXP_DEVCTL_READRQ) >> 12);
5979 DP(NETIF_MSG_HW, "force read order to %d\n", bp->mrrs);
5983 bnx2x_init_pxp_arb(bp, r_order, w_order);
5986 static void bnx2x_setup_fan_failure_detection(struct bnx2x *bp)
5992 val = SHMEM_RD(bp, dev_info.shared_hw_config.config2) &
5993 SHARED_HW_CFG_FAN_FAILURE_MASK;
5995 if (val == SHARED_HW_CFG_FAN_FAILURE_ENABLED)
5999 * The fan failure mechanism is usually related to the PHY type since
6000 * the power consumption of the board is affected by the PHY. Currently,
6001 * fan is required for most designs with SFX7101, BCM8727 and BCM8481.
6003 else if (val == SHARED_HW_CFG_FAN_FAILURE_PHY_TYPE)
6004 for (port = PORT_0; port < PORT_MAX; port++) {
6006 SHMEM_RD(bp, dev_info.port_hw_config[port].
6007 external_phy_config) &
6008 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
6011 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101) ||
6013 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727) ||
6015 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481));
6018 DP(NETIF_MSG_HW, "fan detection setting: %d\n", is_required);
6020 if (is_required == 0)
6023 /* Fan failure is indicated by SPIO 5 */
6024 bnx2x_set_spio(bp, MISC_REGISTERS_SPIO_5,
6025 MISC_REGISTERS_SPIO_INPUT_HI_Z);
6027 /* set to active low mode */
6028 val = REG_RD(bp, MISC_REG_SPIO_INT);
6029 val |= ((1 << MISC_REGISTERS_SPIO_5) <<
6030 MISC_REGISTERS_SPIO_INT_OLD_SET_POS);
6031 REG_WR(bp, MISC_REG_SPIO_INT, val);
6033 /* enable interrupt to signal the IGU */
6034 val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN);
6035 val |= (1 << MISC_REGISTERS_SPIO_5);
6036 REG_WR(bp, MISC_REG_SPIO_EVENT_EN, val);
6039 static int bnx2x_init_common(struct bnx2x *bp)
6046 DP(BNX2X_MSG_MCP, "starting common init func %d\n", BP_FUNC(bp));
6048 bnx2x_reset_common(bp);
6049 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0xffffffff);
6050 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, 0xfffc);
6052 bnx2x_init_block(bp, MISC_BLOCK, COMMON_STAGE);
6053 if (CHIP_IS_E1H(bp))
6054 REG_WR(bp, MISC_REG_E1HMF_MODE, IS_E1HMF(bp));
6056 REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x100);
6058 REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x0);
6060 bnx2x_init_block(bp, PXP_BLOCK, COMMON_STAGE);
6061 if (CHIP_IS_E1(bp)) {
6062 /* enable HW interrupt from PXP on USDM overflow
6063 bit 16 on INT_MASK_0 */
6064 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
6067 bnx2x_init_block(bp, PXP2_BLOCK, COMMON_STAGE);
6071 REG_WR(bp, PXP2_REG_RQ_QM_ENDIAN_M, 1);
6072 REG_WR(bp, PXP2_REG_RQ_TM_ENDIAN_M, 1);
6073 REG_WR(bp, PXP2_REG_RQ_SRC_ENDIAN_M, 1);
6074 REG_WR(bp, PXP2_REG_RQ_CDU_ENDIAN_M, 1);
6075 REG_WR(bp, PXP2_REG_RQ_DBG_ENDIAN_M, 1);
6076 /* make sure this value is 0 */
6077 REG_WR(bp, PXP2_REG_RQ_HC_ENDIAN_M, 0);
6079 /* REG_WR(bp, PXP2_REG_RD_PBF_SWAP_MODE, 1); */
6080 REG_WR(bp, PXP2_REG_RD_QM_SWAP_MODE, 1);
6081 REG_WR(bp, PXP2_REG_RD_TM_SWAP_MODE, 1);
6082 REG_WR(bp, PXP2_REG_RD_SRC_SWAP_MODE, 1);
6083 REG_WR(bp, PXP2_REG_RD_CDURD_SWAP_MODE, 1);
6086 REG_WR(bp, PXP2_REG_RQ_CDU_P_SIZE, 2);
6088 REG_WR(bp, PXP2_REG_RQ_TM_P_SIZE, 5);
6089 REG_WR(bp, PXP2_REG_RQ_QM_P_SIZE, 5);
6090 REG_WR(bp, PXP2_REG_RQ_SRC_P_SIZE, 5);
6093 if (CHIP_REV_IS_FPGA(bp) && CHIP_IS_E1H(bp))
6094 REG_WR(bp, PXP2_REG_PGL_TAGS_LIMIT, 0x1);
6096 /* let the HW do it's magic ... */
6098 /* finish PXP init */
6099 val = REG_RD(bp, PXP2_REG_RQ_CFG_DONE);
6101 BNX2X_ERR("PXP2 CFG failed\n");
6104 val = REG_RD(bp, PXP2_REG_RD_INIT_DONE);
6106 BNX2X_ERR("PXP2 RD_INIT failed\n");
6110 REG_WR(bp, PXP2_REG_RQ_DISABLE_INPUTS, 0);
6111 REG_WR(bp, PXP2_REG_RD_DISABLE_INPUTS, 0);
6113 bnx2x_init_block(bp, DMAE_BLOCK, COMMON_STAGE);
6115 /* clean the DMAE memory */
6117 bnx2x_init_fill(bp, TSEM_REG_PRAM, 0, 8);
6119 bnx2x_init_block(bp, TCM_BLOCK, COMMON_STAGE);
6120 bnx2x_init_block(bp, UCM_BLOCK, COMMON_STAGE);
6121 bnx2x_init_block(bp, CCM_BLOCK, COMMON_STAGE);
6122 bnx2x_init_block(bp, XCM_BLOCK, COMMON_STAGE);
6124 bnx2x_read_dmae(bp, XSEM_REG_PASSIVE_BUFFER, 3);
6125 bnx2x_read_dmae(bp, CSEM_REG_PASSIVE_BUFFER, 3);
6126 bnx2x_read_dmae(bp, TSEM_REG_PASSIVE_BUFFER, 3);
6127 bnx2x_read_dmae(bp, USEM_REG_PASSIVE_BUFFER, 3);
6129 bnx2x_init_block(bp, QM_BLOCK, COMMON_STAGE);
6134 for (i = 0; i < 64; i++) {
6135 REG_WR(bp, QM_REG_BASEADDR + i*4, 1024 * 4 * (i%16));
6136 bnx2x_init_ind_wr(bp, QM_REG_PTRTBL + i*8, wb_write, 2);
6138 if (CHIP_IS_E1H(bp)) {
6139 REG_WR(bp, QM_REG_BASEADDR_EXT_A + i*4, 1024*4*(i%16));
6140 bnx2x_init_ind_wr(bp, QM_REG_PTRTBL_EXT_A + i*8,
6145 /* soft reset pulse */
6146 REG_WR(bp, QM_REG_SOFT_RESET, 1);
6147 REG_WR(bp, QM_REG_SOFT_RESET, 0);
6150 bnx2x_init_block(bp, TIMERS_BLOCK, COMMON_STAGE);
6153 bnx2x_init_block(bp, DQ_BLOCK, COMMON_STAGE);
6154 REG_WR(bp, DORQ_REG_DPM_CID_OFST, BCM_PAGE_SHIFT);
6155 if (!CHIP_REV_IS_SLOW(bp)) {
6156 /* enable hw interrupt from doorbell Q */
6157 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
6160 bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
6161 bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
6162 REG_WR(bp, PRS_REG_A_PRSU_20, 0xf);
6165 REG_WR(bp, PRS_REG_NIC_MODE, 1);
6167 if (CHIP_IS_E1H(bp))
6168 REG_WR(bp, PRS_REG_E1HOV_MODE, IS_E1HMF(bp));
6170 bnx2x_init_block(bp, TSDM_BLOCK, COMMON_STAGE);
6171 bnx2x_init_block(bp, CSDM_BLOCK, COMMON_STAGE);
6172 bnx2x_init_block(bp, USDM_BLOCK, COMMON_STAGE);
6173 bnx2x_init_block(bp, XSDM_BLOCK, COMMON_STAGE);
6175 bnx2x_init_fill(bp, TSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
6176 bnx2x_init_fill(bp, USEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
6177 bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
6178 bnx2x_init_fill(bp, XSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
6180 bnx2x_init_block(bp, TSEM_BLOCK, COMMON_STAGE);
6181 bnx2x_init_block(bp, USEM_BLOCK, COMMON_STAGE);
6182 bnx2x_init_block(bp, CSEM_BLOCK, COMMON_STAGE);
6183 bnx2x_init_block(bp, XSEM_BLOCK, COMMON_STAGE);
6186 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
6188 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
6191 bnx2x_init_block(bp, UPB_BLOCK, COMMON_STAGE);
6192 bnx2x_init_block(bp, XPB_BLOCK, COMMON_STAGE);
6193 bnx2x_init_block(bp, PBF_BLOCK, COMMON_STAGE);
6195 REG_WR(bp, SRC_REG_SOFT_RST, 1);
6196 for (i = SRC_REG_KEYRSS0_0; i <= SRC_REG_KEYRSS1_9; i += 4) {
6197 REG_WR(bp, i, 0xc0cac01a);
6198 /* TODO: replace with something meaningful */
6200 bnx2x_init_block(bp, SRCH_BLOCK, COMMON_STAGE);
6202 REG_WR(bp, SRC_REG_KEYSEARCH_0, 0x63285672);
6203 REG_WR(bp, SRC_REG_KEYSEARCH_1, 0x24b8f2cc);
6204 REG_WR(bp, SRC_REG_KEYSEARCH_2, 0x223aef9b);
6205 REG_WR(bp, SRC_REG_KEYSEARCH_3, 0x26001e3a);
6206 REG_WR(bp, SRC_REG_KEYSEARCH_4, 0x7ae91116);
6207 REG_WR(bp, SRC_REG_KEYSEARCH_5, 0x5ce5230b);
6208 REG_WR(bp, SRC_REG_KEYSEARCH_6, 0x298d8adf);
6209 REG_WR(bp, SRC_REG_KEYSEARCH_7, 0x6eb0ff09);
6210 REG_WR(bp, SRC_REG_KEYSEARCH_8, 0x1830f82f);
6211 REG_WR(bp, SRC_REG_KEYSEARCH_9, 0x01e46be7);
6213 REG_WR(bp, SRC_REG_SOFT_RST, 0);
6215 if (sizeof(union cdu_context) != 1024)
6216 /* we currently assume that a context is 1024 bytes */
6217 printk(KERN_ALERT PFX "please adjust the size of"
6218 " cdu_context(%ld)\n", (long)sizeof(union cdu_context));
6220 bnx2x_init_block(bp, CDU_BLOCK, COMMON_STAGE);
6221 val = (4 << 24) + (0 << 12) + 1024;
6222 REG_WR(bp, CDU_REG_CDU_GLOBAL_PARAMS, val);
6224 bnx2x_init_block(bp, CFC_BLOCK, COMMON_STAGE);
6225 REG_WR(bp, CFC_REG_INIT_REG, 0x7FF);
6226 /* enable context validation interrupt from CFC */
6227 REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
6229 /* set the thresholds to prevent CFC/CDU race */
6230 REG_WR(bp, CFC_REG_DEBUG0, 0x20020000);
6232 bnx2x_init_block(bp, HC_BLOCK, COMMON_STAGE);
6233 bnx2x_init_block(bp, MISC_AEU_BLOCK, COMMON_STAGE);
6235 bnx2x_init_block(bp, PXPCS_BLOCK, COMMON_STAGE);
6236 /* Reset PCIE errors for debug */
6237 REG_WR(bp, 0x2814, 0xffffffff);
6238 REG_WR(bp, 0x3820, 0xffffffff);
6240 bnx2x_init_block(bp, EMAC0_BLOCK, COMMON_STAGE);
6241 bnx2x_init_block(bp, EMAC1_BLOCK, COMMON_STAGE);
6242 bnx2x_init_block(bp, DBU_BLOCK, COMMON_STAGE);
6243 bnx2x_init_block(bp, DBG_BLOCK, COMMON_STAGE);
6245 bnx2x_init_block(bp, NIG_BLOCK, COMMON_STAGE);
6246 if (CHIP_IS_E1H(bp)) {
6247 REG_WR(bp, NIG_REG_LLH_MF_MODE, IS_E1HMF(bp));
6248 REG_WR(bp, NIG_REG_LLH_E1HOV_MODE, IS_E1HMF(bp));
6251 if (CHIP_REV_IS_SLOW(bp))
6254 /* finish CFC init */
6255 val = reg_poll(bp, CFC_REG_LL_INIT_DONE, 1, 100, 10);
6257 BNX2X_ERR("CFC LL_INIT failed\n");
6260 val = reg_poll(bp, CFC_REG_AC_INIT_DONE, 1, 100, 10);
6262 BNX2X_ERR("CFC AC_INIT failed\n");
6265 val = reg_poll(bp, CFC_REG_CAM_INIT_DONE, 1, 100, 10);
6267 BNX2X_ERR("CFC CAM_INIT failed\n");
6270 REG_WR(bp, CFC_REG_DEBUG0, 0);
6272 /* read NIG statistic
6273 to see if this is our first up since powerup */
6274 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
6275 val = *bnx2x_sp(bp, wb_data[0]);
6277 /* do internal memory self test */
6278 if ((CHIP_IS_E1(bp)) && (val == 0) && bnx2x_int_mem_test(bp)) {
6279 BNX2X_ERR("internal mem self test failed\n");
6283 switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
6284 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
6285 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
6286 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
6287 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
6288 bp->port.need_hw_lock = 1;
6295 bnx2x_setup_fan_failure_detection(bp);
6297 /* clear PXP2 attentions */
6298 REG_RD(bp, PXP2_REG_PXP2_INT_STS_CLR_0);
6300 enable_blocks_attention(bp);
6302 if (!BP_NOMCP(bp)) {
6303 bnx2x_acquire_phy_lock(bp);
6304 bnx2x_common_init_phy(bp, bp->common.shmem_base);
6305 bnx2x_release_phy_lock(bp);
6307 BNX2X_ERR("Bootcode is missing - can not initialize link\n");
6312 static int bnx2x_init_port(struct bnx2x *bp)
6314 int port = BP_PORT(bp);
6315 int init_stage = port ? PORT1_STAGE : PORT0_STAGE;
6319 DP(BNX2X_MSG_MCP, "starting port init port %x\n", port);
6321 REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
6323 bnx2x_init_block(bp, PXP_BLOCK, init_stage);
6324 bnx2x_init_block(bp, PXP2_BLOCK, init_stage);
6326 bnx2x_init_block(bp, TCM_BLOCK, init_stage);
6327 bnx2x_init_block(bp, UCM_BLOCK, init_stage);
6328 bnx2x_init_block(bp, CCM_BLOCK, init_stage);
6329 bnx2x_init_block(bp, XCM_BLOCK, init_stage);
6332 REG_WR(bp, QM_REG_CONNNUM_0 + port*4, 1024/16 - 1);
6334 bnx2x_init_block(bp, TIMERS_BLOCK, init_stage);
6335 REG_WR(bp, TM_REG_LIN0_SCAN_TIME + port*4, 20);
6336 REG_WR(bp, TM_REG_LIN0_MAX_ACTIVE_CID + port*4, 31);
6338 bnx2x_init_block(bp, DQ_BLOCK, init_stage);
6340 bnx2x_init_block(bp, BRB1_BLOCK, init_stage);
6341 if (CHIP_REV_IS_SLOW(bp) && !CHIP_IS_E1H(bp)) {
6342 /* no pause for emulation and FPGA */
6347 low = ((bp->flags & ONE_PORT_FLAG) ? 160 : 246);
6348 else if (bp->dev->mtu > 4096) {
6349 if (bp->flags & ONE_PORT_FLAG)
6353 /* (24*1024 + val*4)/256 */
6354 low = 96 + (val/64) + ((val % 64) ? 1 : 0);
6357 low = ((bp->flags & ONE_PORT_FLAG) ? 80 : 160);
6358 high = low + 56; /* 14*1024/256 */
6360 REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_0 + port*4, low);
6361 REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_0 + port*4, high);
6364 bnx2x_init_block(bp, PRS_BLOCK, init_stage);
6366 bnx2x_init_block(bp, TSDM_BLOCK, init_stage);
6367 bnx2x_init_block(bp, CSDM_BLOCK, init_stage);
6368 bnx2x_init_block(bp, USDM_BLOCK, init_stage);
6369 bnx2x_init_block(bp, XSDM_BLOCK, init_stage);
6371 bnx2x_init_block(bp, TSEM_BLOCK, init_stage);
6372 bnx2x_init_block(bp, USEM_BLOCK, init_stage);
6373 bnx2x_init_block(bp, CSEM_BLOCK, init_stage);
6374 bnx2x_init_block(bp, XSEM_BLOCK, init_stage);
6376 bnx2x_init_block(bp, UPB_BLOCK, init_stage);
6377 bnx2x_init_block(bp, XPB_BLOCK, init_stage);
6379 bnx2x_init_block(bp, PBF_BLOCK, init_stage);
6381 /* configure PBF to work without PAUSE mtu 9000 */
6382 REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 0);
6384 /* update threshold */
6385 REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, (9040/16));
6386 /* update init credit */
6387 REG_WR(bp, PBF_REG_P0_INIT_CRD + port*4, (9040/16) + 553 - 22);
6390 REG_WR(bp, PBF_REG_INIT_P0 + port*4, 1);
6392 REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0);
6395 bnx2x_init_block(bp, SRCH_BLOCK, init_stage);
6397 bnx2x_init_block(bp, CDU_BLOCK, init_stage);
6398 bnx2x_init_block(bp, CFC_BLOCK, init_stage);
6400 if (CHIP_IS_E1(bp)) {
6401 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
6402 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
6404 bnx2x_init_block(bp, HC_BLOCK, init_stage);
6406 bnx2x_init_block(bp, MISC_AEU_BLOCK, init_stage);
6407 /* init aeu_mask_attn_func_0/1:
6408 * - SF mode: bits 3-7 are masked. only bits 0-2 are in use
6409 * - MF mode: bit 3 is masked. bits 0-2 are in use as in SF
6410 * bits 4-7 are used for "per vn group attention" */
6411 REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4,
6412 (IS_E1HMF(bp) ? 0xF7 : 0x7));
6414 bnx2x_init_block(bp, PXPCS_BLOCK, init_stage);
6415 bnx2x_init_block(bp, EMAC0_BLOCK, init_stage);
6416 bnx2x_init_block(bp, EMAC1_BLOCK, init_stage);
6417 bnx2x_init_block(bp, DBU_BLOCK, init_stage);
6418 bnx2x_init_block(bp, DBG_BLOCK, init_stage);
6420 bnx2x_init_block(bp, NIG_BLOCK, init_stage);
6422 REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1);
6424 if (CHIP_IS_E1H(bp)) {
6425 /* 0x2 disable e1hov, 0x1 enable */
6426 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK_MF + port*4,
6427 (IS_E1HMF(bp) ? 0x1 : 0x2));
6430 REG_WR(bp, NIG_REG_LLFC_ENABLE_0 + port*4, 0);
6431 REG_WR(bp, NIG_REG_LLFC_OUT_EN_0 + port*4, 0);
6432 REG_WR(bp, NIG_REG_PAUSE_ENABLE_0 + port*4, 1);
6436 bnx2x_init_block(bp, MCP_BLOCK, init_stage);
6437 bnx2x_init_block(bp, DMAE_BLOCK, init_stage);
6439 switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
6440 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
6442 u32 swap_val, swap_override, aeu_gpio_mask, offset;
6444 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_3,
6445 MISC_REGISTERS_GPIO_INPUT_HI_Z, port);
6447 /* The GPIO should be swapped if the swap register is
6449 swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
6450 swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
6452 /* Select function upon port-swap configuration */
6454 offset = MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0;
6455 aeu_gpio_mask = (swap_val && swap_override) ?
6456 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1 :
6457 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0;
6459 offset = MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0;
6460 aeu_gpio_mask = (swap_val && swap_override) ?
6461 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 :
6462 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1;
6464 val = REG_RD(bp, offset);
6465 /* add GPIO3 to group */
6466 val |= aeu_gpio_mask;
6467 REG_WR(bp, offset, val);
6471 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
6472 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
6473 /* add SPIO 5 to group 0 */
6475 u32 reg_addr = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
6476 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
6477 val = REG_RD(bp, reg_addr);
6478 val |= AEU_INPUTS_ATTN_BITS_SPIO5;
6479 REG_WR(bp, reg_addr, val);
6487 bnx2x__link_reset(bp);
6492 #define ILT_PER_FUNC (768/2)
6493 #define FUNC_ILT_BASE(func) (func * ILT_PER_FUNC)
6494 /* the phys address is shifted right 12 bits and has an added
6495 1=valid bit added to the 53rd bit
6496 then since this is a wide register(TM)
6497 we split it into two 32 bit writes
6499 #define ONCHIP_ADDR1(x) ((u32)(((u64)x >> 12) & 0xFFFFFFFF))
6500 #define ONCHIP_ADDR2(x) ((u32)((1 << 20) | ((u64)x >> 44)))
6501 #define PXP_ONE_ILT(x) (((x) << 10) | x)
6502 #define PXP_ILT_RANGE(f, l) (((l) << 10) | f)
6505 #define CNIC_ILT_LINES 127
6506 #define CNIC_CTX_PER_ILT 16
6508 #define CNIC_ILT_LINES 0
6511 static void bnx2x_ilt_wr(struct bnx2x *bp, u32 index, dma_addr_t addr)
6515 if (CHIP_IS_E1H(bp))
6516 reg = PXP2_REG_RQ_ONCHIP_AT_B0 + index*8;
6518 reg = PXP2_REG_RQ_ONCHIP_AT + index*8;
6520 bnx2x_wb_wr(bp, reg, ONCHIP_ADDR1(addr), ONCHIP_ADDR2(addr));
6523 static int bnx2x_init_func(struct bnx2x *bp)
6525 int port = BP_PORT(bp);
6526 int func = BP_FUNC(bp);
6530 DP(BNX2X_MSG_MCP, "starting func init func %x\n", func);
6532 /* set MSI reconfigure capability */
6533 addr = (port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0);
6534 val = REG_RD(bp, addr);
6535 val |= HC_CONFIG_0_REG_MSI_ATTN_EN_0;
6536 REG_WR(bp, addr, val);
6538 i = FUNC_ILT_BASE(func);
6540 bnx2x_ilt_wr(bp, i, bnx2x_sp_mapping(bp, context));
6541 if (CHIP_IS_E1H(bp)) {
6542 REG_WR(bp, PXP2_REG_RQ_CDU_FIRST_ILT, i);
6543 REG_WR(bp, PXP2_REG_RQ_CDU_LAST_ILT, i + CNIC_ILT_LINES);
6545 REG_WR(bp, PXP2_REG_PSWRQ_CDU0_L2P + func*4,
6546 PXP_ILT_RANGE(i, i + CNIC_ILT_LINES));
6549 i += 1 + CNIC_ILT_LINES;
6550 bnx2x_ilt_wr(bp, i, bp->timers_mapping);
6552 REG_WR(bp, PXP2_REG_PSWRQ_TM0_L2P + func*4, PXP_ONE_ILT(i));
6554 REG_WR(bp, PXP2_REG_RQ_TM_FIRST_ILT, i);
6555 REG_WR(bp, PXP2_REG_RQ_TM_LAST_ILT, i);
6559 bnx2x_ilt_wr(bp, i, bp->qm_mapping);
6561 REG_WR(bp, PXP2_REG_PSWRQ_QM0_L2P + func*4, PXP_ONE_ILT(i));
6563 REG_WR(bp, PXP2_REG_RQ_QM_FIRST_ILT, i);
6564 REG_WR(bp, PXP2_REG_RQ_QM_LAST_ILT, i);
6568 bnx2x_ilt_wr(bp, i, bp->t1_mapping);
6570 REG_WR(bp, PXP2_REG_PSWRQ_SRC0_L2P + func*4, PXP_ONE_ILT(i));
6572 REG_WR(bp, PXP2_REG_RQ_SRC_FIRST_ILT, i);
6573 REG_WR(bp, PXP2_REG_RQ_SRC_LAST_ILT, i);
6576 /* tell the searcher where the T2 table is */
6577 REG_WR(bp, SRC_REG_COUNTFREE0 + port*4, 16*1024/64);
6579 bnx2x_wb_wr(bp, SRC_REG_FIRSTFREE0 + port*16,
6580 U64_LO(bp->t2_mapping), U64_HI(bp->t2_mapping));
6582 bnx2x_wb_wr(bp, SRC_REG_LASTFREE0 + port*16,
6583 U64_LO((u64)bp->t2_mapping + 16*1024 - 64),
6584 U64_HI((u64)bp->t2_mapping + 16*1024 - 64));
6586 REG_WR(bp, SRC_REG_NUMBER_HASH_BITS0 + port*4, 10);
6589 if (CHIP_IS_E1H(bp)) {
6590 bnx2x_init_block(bp, MISC_BLOCK, FUNC0_STAGE + func);
6591 bnx2x_init_block(bp, TCM_BLOCK, FUNC0_STAGE + func);
6592 bnx2x_init_block(bp, UCM_BLOCK, FUNC0_STAGE + func);
6593 bnx2x_init_block(bp, CCM_BLOCK, FUNC0_STAGE + func);
6594 bnx2x_init_block(bp, XCM_BLOCK, FUNC0_STAGE + func);
6595 bnx2x_init_block(bp, TSEM_BLOCK, FUNC0_STAGE + func);
6596 bnx2x_init_block(bp, USEM_BLOCK, FUNC0_STAGE + func);
6597 bnx2x_init_block(bp, CSEM_BLOCK, FUNC0_STAGE + func);
6598 bnx2x_init_block(bp, XSEM_BLOCK, FUNC0_STAGE + func);
6600 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
6601 REG_WR(bp, NIG_REG_LLH0_FUNC_VLAN_ID + port*8, bp->e1hov);
6604 /* HC init per function */
6605 if (CHIP_IS_E1H(bp)) {
6606 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
6608 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
6609 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
6611 bnx2x_init_block(bp, HC_BLOCK, FUNC0_STAGE + func);
6613 /* Reset PCIE errors for debug */
6614 REG_WR(bp, 0x2114, 0xffffffff);
6615 REG_WR(bp, 0x2120, 0xffffffff);
6620 static int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
6624 DP(BNX2X_MSG_MCP, "function %d load_code %x\n",
6625 BP_FUNC(bp), load_code);
6628 mutex_init(&bp->dmae_mutex);
6629 rc = bnx2x_gunzip_init(bp);
6633 switch (load_code) {
6634 case FW_MSG_CODE_DRV_LOAD_COMMON:
6635 rc = bnx2x_init_common(bp);
6640 case FW_MSG_CODE_DRV_LOAD_PORT:
6642 rc = bnx2x_init_port(bp);
6647 case FW_MSG_CODE_DRV_LOAD_FUNCTION:
6649 rc = bnx2x_init_func(bp);
6655 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
6659 if (!BP_NOMCP(bp)) {
6660 int func = BP_FUNC(bp);
6662 bp->fw_drv_pulse_wr_seq =
6663 (SHMEM_RD(bp, func_mb[func].drv_pulse_mb) &
6664 DRV_PULSE_SEQ_MASK);
6665 DP(BNX2X_MSG_MCP, "drv_pulse 0x%x\n", bp->fw_drv_pulse_wr_seq);
6668 /* this needs to be done before gunzip end */
6669 bnx2x_zero_def_sb(bp);
6670 for_each_queue(bp, i)
6671 bnx2x_zero_sb(bp, BP_L_ID(bp) + i);
6673 bnx2x_zero_sb(bp, BP_L_ID(bp) + i);
6677 bnx2x_gunzip_end(bp);
6682 static void bnx2x_free_mem(struct bnx2x *bp)
6685 #define BNX2X_PCI_FREE(x, y, size) \
6688 pci_free_consistent(bp->pdev, size, x, y); \
6694 #define BNX2X_FREE(x) \
6706 for_each_queue(bp, i) {
6709 BNX2X_PCI_FREE(bnx2x_fp(bp, i, status_blk),
6710 bnx2x_fp(bp, i, status_blk_mapping),
6711 sizeof(struct host_status_block));
6714 for_each_queue(bp, i) {
6716 /* fastpath rx rings: rx_buf rx_desc rx_comp */
6717 BNX2X_FREE(bnx2x_fp(bp, i, rx_buf_ring));
6718 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_desc_ring),
6719 bnx2x_fp(bp, i, rx_desc_mapping),
6720 sizeof(struct eth_rx_bd) * NUM_RX_BD);
6722 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_comp_ring),
6723 bnx2x_fp(bp, i, rx_comp_mapping),
6724 sizeof(struct eth_fast_path_rx_cqe) *
6728 BNX2X_FREE(bnx2x_fp(bp, i, rx_page_ring));
6729 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_sge_ring),
6730 bnx2x_fp(bp, i, rx_sge_mapping),
6731 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
6734 for_each_queue(bp, i) {
6736 /* fastpath tx rings: tx_buf tx_desc */
6737 BNX2X_FREE(bnx2x_fp(bp, i, tx_buf_ring));
6738 BNX2X_PCI_FREE(bnx2x_fp(bp, i, tx_desc_ring),
6739 bnx2x_fp(bp, i, tx_desc_mapping),
6740 sizeof(union eth_tx_bd_types) * NUM_TX_BD);
6742 /* end of fastpath */
6744 BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping,
6745 sizeof(struct host_def_status_block));
6747 BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping,
6748 sizeof(struct bnx2x_slowpath));
6751 BNX2X_PCI_FREE(bp->t1, bp->t1_mapping, 64*1024);
6752 BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, 16*1024);
6753 BNX2X_PCI_FREE(bp->timers, bp->timers_mapping, 8*1024);
6754 BNX2X_PCI_FREE(bp->qm, bp->qm_mapping, 128*1024);
6755 BNX2X_PCI_FREE(bp->cnic_sb, bp->cnic_sb_mapping,
6756 sizeof(struct host_status_block));
6758 BNX2X_PCI_FREE(bp->spq, bp->spq_mapping, BCM_PAGE_SIZE);
6760 #undef BNX2X_PCI_FREE
6764 static int bnx2x_alloc_mem(struct bnx2x *bp)
6767 #define BNX2X_PCI_ALLOC(x, y, size) \
6769 x = pci_alloc_consistent(bp->pdev, size, y); \
6771 goto alloc_mem_err; \
6772 memset(x, 0, size); \
6775 #define BNX2X_ALLOC(x, size) \
6777 x = vmalloc(size); \
6779 goto alloc_mem_err; \
6780 memset(x, 0, size); \
6787 for_each_queue(bp, i) {
6788 bnx2x_fp(bp, i, bp) = bp;
6791 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, status_blk),
6792 &bnx2x_fp(bp, i, status_blk_mapping),
6793 sizeof(struct host_status_block));
6796 for_each_queue(bp, i) {
6798 /* fastpath rx rings: rx_buf rx_desc rx_comp */
6799 BNX2X_ALLOC(bnx2x_fp(bp, i, rx_buf_ring),
6800 sizeof(struct sw_rx_bd) * NUM_RX_BD);
6801 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_desc_ring),
6802 &bnx2x_fp(bp, i, rx_desc_mapping),
6803 sizeof(struct eth_rx_bd) * NUM_RX_BD);
6805 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_comp_ring),
6806 &bnx2x_fp(bp, i, rx_comp_mapping),
6807 sizeof(struct eth_fast_path_rx_cqe) *
6811 BNX2X_ALLOC(bnx2x_fp(bp, i, rx_page_ring),
6812 sizeof(struct sw_rx_page) * NUM_RX_SGE);
6813 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_sge_ring),
6814 &bnx2x_fp(bp, i, rx_sge_mapping),
6815 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
6818 for_each_queue(bp, i) {
6820 /* fastpath tx rings: tx_buf tx_desc */
6821 BNX2X_ALLOC(bnx2x_fp(bp, i, tx_buf_ring),
6822 sizeof(struct sw_tx_bd) * NUM_TX_BD);
6823 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, tx_desc_ring),
6824 &bnx2x_fp(bp, i, tx_desc_mapping),
6825 sizeof(union eth_tx_bd_types) * NUM_TX_BD);
6827 /* end of fastpath */
6829 BNX2X_PCI_ALLOC(bp->def_status_blk, &bp->def_status_blk_mapping,
6830 sizeof(struct host_def_status_block));
6832 BNX2X_PCI_ALLOC(bp->slowpath, &bp->slowpath_mapping,
6833 sizeof(struct bnx2x_slowpath));
6836 BNX2X_PCI_ALLOC(bp->t1, &bp->t1_mapping, 64*1024);
6838 /* allocate searcher T2 table
6839 we allocate 1/4 of alloc num for T2
6840 (which is not entered into the ILT) */
6841 BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, 16*1024);
6843 /* Initialize T2 (for 1024 connections) */
6844 for (i = 0; i < 16*1024; i += 64)
6845 *(u64 *)((char *)bp->t2 + i + 56) = bp->t2_mapping + i + 64;
6847 /* Timer block array (8*MAX_CONN) phys uncached for now 1024 conns */
6848 BNX2X_PCI_ALLOC(bp->timers, &bp->timers_mapping, 8*1024);
6850 /* QM queues (128*MAX_CONN) */
6851 BNX2X_PCI_ALLOC(bp->qm, &bp->qm_mapping, 128*1024);
6853 BNX2X_PCI_ALLOC(bp->cnic_sb, &bp->cnic_sb_mapping,
6854 sizeof(struct host_status_block));
6857 /* Slow path ring */
6858 BNX2X_PCI_ALLOC(bp->spq, &bp->spq_mapping, BCM_PAGE_SIZE);
6866 #undef BNX2X_PCI_ALLOC
6870 static void bnx2x_free_tx_skbs(struct bnx2x *bp)
6874 for_each_queue(bp, i) {
6875 struct bnx2x_fastpath *fp = &bp->fp[i];
6877 u16 bd_cons = fp->tx_bd_cons;
6878 u16 sw_prod = fp->tx_pkt_prod;
6879 u16 sw_cons = fp->tx_pkt_cons;
6881 while (sw_cons != sw_prod) {
6882 bd_cons = bnx2x_free_tx_pkt(bp, fp, TX_BD(sw_cons));
6888 static void bnx2x_free_rx_skbs(struct bnx2x *bp)
6892 for_each_queue(bp, j) {
6893 struct bnx2x_fastpath *fp = &bp->fp[j];
6895 for (i = 0; i < NUM_RX_BD; i++) {
6896 struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
6897 struct sk_buff *skb = rx_buf->skb;
6902 pci_unmap_single(bp->pdev,
6903 pci_unmap_addr(rx_buf, mapping),
6904 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
6909 if (!fp->disable_tpa)
6910 bnx2x_free_tpa_pool(bp, fp, CHIP_IS_E1(bp) ?
6911 ETH_MAX_AGGREGATION_QUEUES_E1 :
6912 ETH_MAX_AGGREGATION_QUEUES_E1H);
6916 static void bnx2x_free_skbs(struct bnx2x *bp)
6918 bnx2x_free_tx_skbs(bp);
6919 bnx2x_free_rx_skbs(bp);
6922 static void bnx2x_free_msix_irqs(struct bnx2x *bp)
6926 free_irq(bp->msix_table[0].vector, bp->dev);
6927 DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
6928 bp->msix_table[0].vector);
6933 for_each_queue(bp, i) {
6934 DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq "
6935 "state %x\n", i, bp->msix_table[i + offset].vector,
6936 bnx2x_fp(bp, i, state));
6938 free_irq(bp->msix_table[i + offset].vector, &bp->fp[i]);
6942 static void bnx2x_free_irq(struct bnx2x *bp)
6944 if (bp->flags & USING_MSIX_FLAG) {
6945 bnx2x_free_msix_irqs(bp);
6946 pci_disable_msix(bp->pdev);
6947 bp->flags &= ~USING_MSIX_FLAG;
6949 } else if (bp->flags & USING_MSI_FLAG) {
6950 free_irq(bp->pdev->irq, bp->dev);
6951 pci_disable_msi(bp->pdev);
6952 bp->flags &= ~USING_MSI_FLAG;
6955 free_irq(bp->pdev->irq, bp->dev);
6958 static int bnx2x_enable_msix(struct bnx2x *bp)
6960 int i, rc, offset = 1;
6963 bp->msix_table[0].entry = igu_vec;
6964 DP(NETIF_MSG_IFUP, "msix_table[0].entry = %d (slowpath)\n", igu_vec);
6967 igu_vec = BP_L_ID(bp) + offset;
6968 bp->msix_table[1].entry = igu_vec;
6969 DP(NETIF_MSG_IFUP, "msix_table[1].entry = %d (CNIC)\n", igu_vec);
6972 for_each_queue(bp, i) {
6973 igu_vec = BP_L_ID(bp) + offset + i;
6974 bp->msix_table[i + offset].entry = igu_vec;
6975 DP(NETIF_MSG_IFUP, "msix_table[%d].entry = %d "
6976 "(fastpath #%u)\n", i + offset, igu_vec, i);
6979 rc = pci_enable_msix(bp->pdev, &bp->msix_table[0],
6980 BNX2X_NUM_QUEUES(bp) + offset);
6982 DP(NETIF_MSG_IFUP, "MSI-X is not attainable rc %d\n", rc);
6986 bp->flags |= USING_MSIX_FLAG;
6991 static int bnx2x_req_msix_irqs(struct bnx2x *bp)
6993 int i, rc, offset = 1;
6995 rc = request_irq(bp->msix_table[0].vector, bnx2x_msix_sp_int, 0,
6996 bp->dev->name, bp->dev);
6998 BNX2X_ERR("request sp irq failed\n");
7005 for_each_queue(bp, i) {
7006 struct bnx2x_fastpath *fp = &bp->fp[i];
7007 snprintf(fp->name, sizeof(fp->name), "%s-fp-%d",
7010 rc = request_irq(bp->msix_table[i + offset].vector,
7011 bnx2x_msix_fp_int, 0, fp->name, fp);
7013 BNX2X_ERR("request fp #%d irq failed rc %d\n", i, rc);
7014 bnx2x_free_msix_irqs(bp);
7018 fp->state = BNX2X_FP_STATE_IRQ;
7021 i = BNX2X_NUM_QUEUES(bp);
7022 printk(KERN_INFO PFX "%s: using MSI-X IRQs: sp %d fp[%d] %d"
7024 bp->dev->name, bp->msix_table[0].vector,
7025 0, bp->msix_table[offset].vector,
7026 i - 1, bp->msix_table[offset + i - 1].vector);
7031 static int bnx2x_enable_msi(struct bnx2x *bp)
7035 rc = pci_enable_msi(bp->pdev);
7037 DP(NETIF_MSG_IFUP, "MSI is not attainable\n");
7040 bp->flags |= USING_MSI_FLAG;
7045 static int bnx2x_req_irq(struct bnx2x *bp)
7047 unsigned long flags;
7050 if (bp->flags & USING_MSI_FLAG)
7053 flags = IRQF_SHARED;
7055 rc = request_irq(bp->pdev->irq, bnx2x_interrupt, flags,
7056 bp->dev->name, bp->dev);
7058 bnx2x_fp(bp, 0, state) = BNX2X_FP_STATE_IRQ;
7063 static void bnx2x_napi_enable(struct bnx2x *bp)
7067 for_each_queue(bp, i)
7068 napi_enable(&bnx2x_fp(bp, i, napi));
7071 static void bnx2x_napi_disable(struct bnx2x *bp)
7075 for_each_queue(bp, i)
7076 napi_disable(&bnx2x_fp(bp, i, napi));
7079 static void bnx2x_netif_start(struct bnx2x *bp)
7083 intr_sem = atomic_dec_and_test(&bp->intr_sem);
7084 smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */
7087 if (netif_running(bp->dev)) {
7088 bnx2x_napi_enable(bp);
7089 bnx2x_int_enable(bp);
7090 if (bp->state == BNX2X_STATE_OPEN)
7091 netif_tx_wake_all_queues(bp->dev);
7096 static void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
7098 bnx2x_int_disable_sync(bp, disable_hw);
7099 bnx2x_napi_disable(bp);
7100 netif_tx_disable(bp->dev);
7101 bp->dev->trans_start = jiffies; /* prevent tx timeout */
7105 * Init service functions
7109 * Sets a MAC in a CAM for a few L2 Clients for E1 chip
7111 * @param bp driver descriptor
7112 * @param set set or clear an entry (1 or 0)
7113 * @param mac pointer to a buffer containing a MAC
7114 * @param cl_bit_vec bit vector of clients to register a MAC for
7115 * @param cam_offset offset in a CAM to use
7116 * @param with_bcast set broadcast MAC as well
7118 static void bnx2x_set_mac_addr_e1_gen(struct bnx2x *bp, int set, u8 *mac,
7119 u32 cl_bit_vec, u8 cam_offset,
7122 struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
7123 int port = BP_PORT(bp);
7126 * unicasts 0-31:port0 32-63:port1
7127 * multicast 64-127:port0 128-191:port1
7129 config->hdr.length = 1 + (with_bcast ? 1 : 0);
7130 config->hdr.offset = cam_offset;
7131 config->hdr.client_id = 0xff;
7132 config->hdr.reserved1 = 0;
7135 config->config_table[0].cam_entry.msb_mac_addr =
7136 swab16(*(u16 *)&mac[0]);
7137 config->config_table[0].cam_entry.middle_mac_addr =
7138 swab16(*(u16 *)&mac[2]);
7139 config->config_table[0].cam_entry.lsb_mac_addr =
7140 swab16(*(u16 *)&mac[4]);
7141 config->config_table[0].cam_entry.flags = cpu_to_le16(port);
7143 config->config_table[0].target_table_entry.flags = 0;
7145 CAM_INVALIDATE(config->config_table[0]);
7146 config->config_table[0].target_table_entry.clients_bit_vector =
7147 cpu_to_le32(cl_bit_vec);
7148 config->config_table[0].target_table_entry.vlan_id = 0;
7150 DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x)\n",
7151 (set ? "setting" : "clearing"),
7152 config->config_table[0].cam_entry.msb_mac_addr,
7153 config->config_table[0].cam_entry.middle_mac_addr,
7154 config->config_table[0].cam_entry.lsb_mac_addr);
7158 config->config_table[1].cam_entry.msb_mac_addr =
7159 cpu_to_le16(0xffff);
7160 config->config_table[1].cam_entry.middle_mac_addr =
7161 cpu_to_le16(0xffff);
7162 config->config_table[1].cam_entry.lsb_mac_addr =
7163 cpu_to_le16(0xffff);
7164 config->config_table[1].cam_entry.flags = cpu_to_le16(port);
7166 config->config_table[1].target_table_entry.flags =
7167 TSTORM_CAM_TARGET_TABLE_ENTRY_BROADCAST;
7169 CAM_INVALIDATE(config->config_table[1]);
7170 config->config_table[1].target_table_entry.clients_bit_vector =
7171 cpu_to_le32(cl_bit_vec);
7172 config->config_table[1].target_table_entry.vlan_id = 0;
7175 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
7176 U64_HI(bnx2x_sp_mapping(bp, mac_config)),
7177 U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
7181 * Sets a MAC in a CAM for a few L2 Clients for E1H chip
7183 * @param bp driver descriptor
7184 * @param set set or clear an entry (1 or 0)
7185 * @param mac pointer to a buffer containing a MAC
7186 * @param cl_bit_vec bit vector of clients to register a MAC for
7187 * @param cam_offset offset in a CAM to use
7189 static void bnx2x_set_mac_addr_e1h_gen(struct bnx2x *bp, int set, u8 *mac,
7190 u32 cl_bit_vec, u8 cam_offset)
7192 struct mac_configuration_cmd_e1h *config =
7193 (struct mac_configuration_cmd_e1h *)bnx2x_sp(bp, mac_config);
7195 config->hdr.length = 1;
7196 config->hdr.offset = cam_offset;
7197 config->hdr.client_id = 0xff;
7198 config->hdr.reserved1 = 0;
7201 config->config_table[0].msb_mac_addr =
7202 swab16(*(u16 *)&mac[0]);
7203 config->config_table[0].middle_mac_addr =
7204 swab16(*(u16 *)&mac[2]);
7205 config->config_table[0].lsb_mac_addr =
7206 swab16(*(u16 *)&mac[4]);
7207 config->config_table[0].clients_bit_vector =
7208 cpu_to_le32(cl_bit_vec);
7209 config->config_table[0].vlan_id = 0;
7210 config->config_table[0].e1hov_id = cpu_to_le16(bp->e1hov);
7212 config->config_table[0].flags = BP_PORT(bp);
7214 config->config_table[0].flags =
7215 MAC_CONFIGURATION_ENTRY_E1H_ACTION_TYPE;
7217 DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x) E1HOV %d CLID mask %d\n",
7218 (set ? "setting" : "clearing"),
7219 config->config_table[0].msb_mac_addr,
7220 config->config_table[0].middle_mac_addr,
7221 config->config_table[0].lsb_mac_addr, bp->e1hov, cl_bit_vec);
7223 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
7224 U64_HI(bnx2x_sp_mapping(bp, mac_config)),
7225 U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
7228 static int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
7229 int *state_p, int poll)
7231 /* can take a while if any port is running */
7234 DP(NETIF_MSG_IFUP, "%s for state to become %x on IDX [%d]\n",
7235 poll ? "polling" : "waiting", state, idx);
7240 bnx2x_rx_int(bp->fp, 10);
7241 /* if index is different from 0
7242 * the reply for some commands will
7243 * be on the non default queue
7246 bnx2x_rx_int(&bp->fp[idx], 10);
7249 mb(); /* state is changed by bnx2x_sp_event() */
7250 if (*state_p == state) {
7251 #ifdef BNX2X_STOP_ON_ERROR
7252 DP(NETIF_MSG_IFUP, "exit (cnt %d)\n", 5000 - cnt);
7264 BNX2X_ERR("timeout %s for state %x on IDX [%d]\n",
7265 poll ? "polling" : "waiting", state, idx);
7266 #ifdef BNX2X_STOP_ON_ERROR
7273 static void bnx2x_set_eth_mac_addr_e1h(struct bnx2x *bp, int set)
7275 bp->set_mac_pending++;
7278 bnx2x_set_mac_addr_e1h_gen(bp, set, bp->dev->dev_addr,
7279 (1 << bp->fp->cl_id), BP_FUNC(bp));
7281 /* Wait for a completion */
7282 bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending, set ? 0 : 1);
7285 static void bnx2x_set_eth_mac_addr_e1(struct bnx2x *bp, int set)
7287 bp->set_mac_pending++;
7290 bnx2x_set_mac_addr_e1_gen(bp, set, bp->dev->dev_addr,
7291 (1 << bp->fp->cl_id), (BP_PORT(bp) ? 32 : 0),
7294 /* Wait for a completion */
7295 bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending, set ? 0 : 1);
7300 * Set iSCSI MAC(s) at the next enties in the CAM after the ETH
7301 * MAC(s). This function will wait until the ramdord completion
7304 * @param bp driver handle
7305 * @param set set or clear the CAM entry
7307 * @return 0 if cussess, -ENODEV if ramrod doesn't return.
7309 static int bnx2x_set_iscsi_eth_mac_addr(struct bnx2x *bp, int set)
7311 u32 cl_bit_vec = (1 << BCM_ISCSI_ETH_CL_ID);
7313 bp->set_mac_pending++;
7316 /* Send a SET_MAC ramrod */
7318 bnx2x_set_mac_addr_e1_gen(bp, set, bp->iscsi_mac,
7319 cl_bit_vec, (BP_PORT(bp) ? 32 : 0) + 2,
7322 /* CAM allocation for E1H
7323 * unicasts: by func number
7324 * multicast: 20+FUNC*20, 20 each
7326 bnx2x_set_mac_addr_e1h_gen(bp, set, bp->iscsi_mac,
7327 cl_bit_vec, E1H_FUNC_MAX + BP_FUNC(bp));
7329 /* Wait for a completion when setting */
7330 bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending, set ? 0 : 1);
7336 static int bnx2x_setup_leading(struct bnx2x *bp)
7340 /* reset IGU state */
7341 bnx2x_ack_sb(bp, bp->fp[0].sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
7344 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_SETUP, 0, 0, 0, 0);
7346 /* Wait for completion */
7347 rc = bnx2x_wait_ramrod(bp, BNX2X_STATE_OPEN, 0, &(bp->state), 0);
7352 static int bnx2x_setup_multi(struct bnx2x *bp, int index)
7354 struct bnx2x_fastpath *fp = &bp->fp[index];
7356 /* reset IGU state */
7357 bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
7360 fp->state = BNX2X_FP_STATE_OPENING;
7361 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CLIENT_SETUP, index, 0,
7364 /* Wait for completion */
7365 return bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_OPEN, index,
7369 static int bnx2x_poll(struct napi_struct *napi, int budget);
7371 static void bnx2x_set_num_queues_msix(struct bnx2x *bp)
7374 switch (bp->multi_mode) {
7375 case ETH_RSS_MODE_DISABLED:
7379 case ETH_RSS_MODE_REGULAR:
7381 bp->num_queues = min_t(u32, num_queues,
7382 BNX2X_MAX_QUEUES(bp));
7384 bp->num_queues = min_t(u32, num_online_cpus(),
7385 BNX2X_MAX_QUEUES(bp));
7395 static int bnx2x_set_num_queues(struct bnx2x *bp)
7403 DP(NETIF_MSG_IFUP, "set number of queues to 1\n");
7408 /* Set number of queues according to bp->multi_mode value */
7409 bnx2x_set_num_queues_msix(bp);
7411 DP(NETIF_MSG_IFUP, "set number of queues to %d\n",
7414 /* if we can't use MSI-X we only need one fp,
7415 * so try to enable MSI-X with the requested number of fp's
7416 * and fallback to MSI or legacy INTx with one fp
7418 rc = bnx2x_enable_msix(bp);
7420 /* failed to enable MSI-X */
7424 bp->dev->real_num_tx_queues = bp->num_queues;
7429 static int bnx2x_cnic_notify(struct bnx2x *bp, int cmd);
7430 static void bnx2x_setup_cnic_irq_info(struct bnx2x *bp);
7433 /* must be called with rtnl_lock */
7434 static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
7439 #ifdef BNX2X_STOP_ON_ERROR
7440 if (unlikely(bp->panic))
7444 bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;
7446 rc = bnx2x_set_num_queues(bp);
7448 if (bnx2x_alloc_mem(bp))
7451 for_each_queue(bp, i)
7452 bnx2x_fp(bp, i, disable_tpa) =
7453 ((bp->flags & TPA_ENABLE_FLAG) == 0);
7455 for_each_queue(bp, i)
7456 netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
7459 bnx2x_napi_enable(bp);
7461 if (bp->flags & USING_MSIX_FLAG) {
7462 rc = bnx2x_req_msix_irqs(bp);
7464 pci_disable_msix(bp->pdev);
7468 /* Fall to INTx if failed to enable MSI-X due to lack of
7469 memory (in bnx2x_set_num_queues()) */
7470 if ((rc != -ENOMEM) && (int_mode != INT_MODE_INTx))
7471 bnx2x_enable_msi(bp);
7473 rc = bnx2x_req_irq(bp);
7475 BNX2X_ERR("IRQ request failed rc %d, aborting\n", rc);
7476 if (bp->flags & USING_MSI_FLAG)
7477 pci_disable_msi(bp->pdev);
7480 if (bp->flags & USING_MSI_FLAG) {
7481 bp->dev->irq = bp->pdev->irq;
7482 printk(KERN_INFO PFX "%s: using MSI IRQ %d\n",
7483 bp->dev->name, bp->pdev->irq);
7487 /* Send LOAD_REQUEST command to MCP
7488 Returns the type of LOAD command:
7489 if it is the first port to be initialized
7490 common blocks should be initialized, otherwise - not
7492 if (!BP_NOMCP(bp)) {
7493 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ);
7495 BNX2X_ERR("MCP response failure, aborting\n");
7499 if (load_code == FW_MSG_CODE_DRV_LOAD_REFUSED) {
7500 rc = -EBUSY; /* other port in diagnostic mode */
7505 int port = BP_PORT(bp);
7507 DP(NETIF_MSG_IFUP, "NO MCP - load counts %d, %d, %d\n",
7508 load_count[0], load_count[1], load_count[2]);
7510 load_count[1 + port]++;
7511 DP(NETIF_MSG_IFUP, "NO MCP - new load counts %d, %d, %d\n",
7512 load_count[0], load_count[1], load_count[2]);
7513 if (load_count[0] == 1)
7514 load_code = FW_MSG_CODE_DRV_LOAD_COMMON;
7515 else if (load_count[1 + port] == 1)
7516 load_code = FW_MSG_CODE_DRV_LOAD_PORT;
7518 load_code = FW_MSG_CODE_DRV_LOAD_FUNCTION;
7521 if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
7522 (load_code == FW_MSG_CODE_DRV_LOAD_PORT))
7526 DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
7529 rc = bnx2x_init_hw(bp, load_code);
7531 BNX2X_ERR("HW init failed, aborting\n");
7535 /* Setup NIC internals and enable interrupts */
7536 bnx2x_nic_init(bp, load_code);
7538 if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) &&
7539 (bp->common.shmem2_base))
7540 SHMEM2_WR(bp, dcc_support,
7541 (SHMEM_DCC_SUPPORT_DISABLE_ENABLE_PF_TLV |
7542 SHMEM_DCC_SUPPORT_BANDWIDTH_ALLOCATION_TLV));
7544 /* Send LOAD_DONE command to MCP */
7545 if (!BP_NOMCP(bp)) {
7546 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE);
7548 BNX2X_ERR("MCP response failure, aborting\n");
7554 bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;
7556 rc = bnx2x_setup_leading(bp);
7558 BNX2X_ERR("Setup leading failed!\n");
7559 #ifndef BNX2X_STOP_ON_ERROR
7567 if (CHIP_IS_E1H(bp))
7568 if (bp->mf_config & FUNC_MF_CFG_FUNC_DISABLED) {
7569 DP(NETIF_MSG_IFUP, "mf_cfg function disabled\n");
7570 bp->flags |= MF_FUNC_DIS;
7573 if (bp->state == BNX2X_STATE_OPEN) {
7575 /* Enable Timer scan */
7576 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + BP_PORT(bp)*4, 1);
7578 for_each_nondefault_queue(bp, i) {
7579 rc = bnx2x_setup_multi(bp, i);
7589 bnx2x_set_eth_mac_addr_e1(bp, 1);
7591 bnx2x_set_eth_mac_addr_e1h(bp, 1);
7593 /* Set iSCSI L2 MAC */
7594 mutex_lock(&bp->cnic_mutex);
7595 if (bp->cnic_eth_dev.drv_state & CNIC_DRV_STATE_REGD) {
7596 bnx2x_set_iscsi_eth_mac_addr(bp, 1);
7597 bp->cnic_flags |= BNX2X_CNIC_FLAG_MAC_SET;
7599 mutex_unlock(&bp->cnic_mutex);
7604 bnx2x_initial_phy_init(bp, load_mode);
7606 /* Start fast path */
7607 switch (load_mode) {
7609 if (bp->state == BNX2X_STATE_OPEN) {
7610 /* Tx queue should be only reenabled */
7611 netif_tx_wake_all_queues(bp->dev);
7613 /* Initialize the receive filter. */
7614 bnx2x_set_rx_mode(bp->dev);
7618 netif_tx_start_all_queues(bp->dev);
7619 if (bp->state != BNX2X_STATE_OPEN)
7620 netif_tx_disable(bp->dev);
7621 /* Initialize the receive filter. */
7622 bnx2x_set_rx_mode(bp->dev);
7626 /* Initialize the receive filter. */
7627 bnx2x_set_rx_mode(bp->dev);
7628 bp->state = BNX2X_STATE_DIAG;
7636 bnx2x__link_status_update(bp);
7638 /* start the timer */
7639 mod_timer(&bp->timer, jiffies + bp->current_interval);
7642 bnx2x_setup_cnic_irq_info(bp);
7643 if (bp->state == BNX2X_STATE_OPEN)
7644 bnx2x_cnic_notify(bp, CNIC_CTL_START_CMD);
7651 /* Disable Timer scan */
7652 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + BP_PORT(bp)*4, 0);
7655 bnx2x_int_disable_sync(bp, 1);
7656 if (!BP_NOMCP(bp)) {
7657 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP);
7658 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
7661 /* Free SKBs, SGEs, TPA pool and driver internals */
7662 bnx2x_free_skbs(bp);
7663 for_each_queue(bp, i)
7664 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
7669 bnx2x_napi_disable(bp);
7670 for_each_queue(bp, i)
7671 netif_napi_del(&bnx2x_fp(bp, i, napi));
7677 static int bnx2x_stop_multi(struct bnx2x *bp, int index)
7679 struct bnx2x_fastpath *fp = &bp->fp[index];
7682 /* halt the connection */
7683 fp->state = BNX2X_FP_STATE_HALTING;
7684 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, index, 0, fp->cl_id, 0);
7686 /* Wait for completion */
7687 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, index,
7689 if (rc) /* timeout */
7692 /* delete cfc entry */
7693 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CFC_DEL, index, 0, 0, 1);
7695 /* Wait for completion */
7696 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_CLOSED, index,
7701 static int bnx2x_stop_leading(struct bnx2x *bp)
7703 __le16 dsb_sp_prod_idx;
7704 /* if the other port is handling traffic,
7705 this can take a lot of time */
7711 /* Send HALT ramrod */
7712 bp->fp[0].state = BNX2X_FP_STATE_HALTING;
7713 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, 0, 0, bp->fp->cl_id, 0);
7715 /* Wait for completion */
7716 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, 0,
7717 &(bp->fp[0].state), 1);
7718 if (rc) /* timeout */
7721 dsb_sp_prod_idx = *bp->dsb_sp_prod;
7723 /* Send PORT_DELETE ramrod */
7724 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_DEL, 0, 0, 0, 1);
7726 /* Wait for completion to arrive on default status block
7727 we are going to reset the chip anyway
7728 so there is not much to do if this times out
7730 while (dsb_sp_prod_idx == *bp->dsb_sp_prod) {
7732 DP(NETIF_MSG_IFDOWN, "timeout waiting for port del "
7733 "dsb_sp_prod 0x%x != dsb_sp_prod_idx 0x%x\n",
7734 *bp->dsb_sp_prod, dsb_sp_prod_idx);
7735 #ifdef BNX2X_STOP_ON_ERROR
7743 rmb(); /* Refresh the dsb_sp_prod */
7745 bp->state = BNX2X_STATE_CLOSING_WAIT4_UNLOAD;
7746 bp->fp[0].state = BNX2X_FP_STATE_CLOSED;
7751 static void bnx2x_reset_func(struct bnx2x *bp)
7753 int port = BP_PORT(bp);
7754 int func = BP_FUNC(bp);
7758 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
7759 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
7762 /* Disable Timer scan */
7763 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 0);
7765 * Wait for at least 10ms and up to 2 second for the timers scan to
7768 for (i = 0; i < 200; i++) {
7770 if (!REG_RD(bp, TM_REG_LIN0_SCAN_ON + port*4))
7775 base = FUNC_ILT_BASE(func);
7776 for (i = base; i < base + ILT_PER_FUNC; i++)
7777 bnx2x_ilt_wr(bp, i, 0);
7780 static void bnx2x_reset_port(struct bnx2x *bp)
7782 int port = BP_PORT(bp);
7785 REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
7787 /* Do not rcv packets to BRB */
7788 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK + port*4, 0x0);
7789 /* Do not direct rcv packets that are not for MCP to the BRB */
7790 REG_WR(bp, (port ? NIG_REG_LLH1_BRB1_NOT_MCP :
7791 NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
7794 REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, 0);
7797 /* Check for BRB port occupancy */
7798 val = REG_RD(bp, BRB1_REG_PORT_NUM_OCC_BLOCKS_0 + port*4);
7800 DP(NETIF_MSG_IFDOWN,
7801 "BRB1 is not empty %d blocks are occupied\n", val);
7803 /* TODO: Close Doorbell port? */
7806 static void bnx2x_reset_chip(struct bnx2x *bp, u32 reset_code)
7808 DP(BNX2X_MSG_MCP, "function %d reset_code %x\n",
7809 BP_FUNC(bp), reset_code);
7811 switch (reset_code) {
7812 case FW_MSG_CODE_DRV_UNLOAD_COMMON:
7813 bnx2x_reset_port(bp);
7814 bnx2x_reset_func(bp);
7815 bnx2x_reset_common(bp);
7818 case FW_MSG_CODE_DRV_UNLOAD_PORT:
7819 bnx2x_reset_port(bp);
7820 bnx2x_reset_func(bp);
7823 case FW_MSG_CODE_DRV_UNLOAD_FUNCTION:
7824 bnx2x_reset_func(bp);
7828 BNX2X_ERR("Unknown reset_code (0x%x) from MCP\n", reset_code);
7833 /* must be called with rtnl_lock */
7834 static int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
7836 int port = BP_PORT(bp);
7841 bnx2x_cnic_notify(bp, CNIC_CTL_STOP_CMD);
7843 bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
7845 /* Set "drop all" */
7846 bp->rx_mode = BNX2X_RX_MODE_NONE;
7847 bnx2x_set_storm_rx_mode(bp);
7849 /* Disable HW interrupts, NAPI and Tx */
7850 bnx2x_netif_stop(bp, 1);
7852 del_timer_sync(&bp->timer);
7853 SHMEM_WR(bp, func_mb[BP_FUNC(bp)].drv_pulse_mb,
7854 (DRV_PULSE_ALWAYS_ALIVE | bp->fw_drv_pulse_wr_seq));
7855 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
7860 /* Wait until tx fastpath tasks complete */
7861 for_each_queue(bp, i) {
7862 struct bnx2x_fastpath *fp = &bp->fp[i];
7865 while (bnx2x_has_tx_work_unload(fp)) {
7869 BNX2X_ERR("timeout waiting for queue[%d]\n",
7871 #ifdef BNX2X_STOP_ON_ERROR
7882 /* Give HW time to discard old tx messages */
7885 if (CHIP_IS_E1(bp)) {
7886 struct mac_configuration_cmd *config =
7887 bnx2x_sp(bp, mcast_config);
7889 bnx2x_set_eth_mac_addr_e1(bp, 0);
7891 for (i = 0; i < config->hdr.length; i++)
7892 CAM_INVALIDATE(config->config_table[i]);
7894 config->hdr.length = i;
7895 if (CHIP_REV_IS_SLOW(bp))
7896 config->hdr.offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
7898 config->hdr.offset = BNX2X_MAX_MULTICAST*(1 + port);
7899 config->hdr.client_id = bp->fp->cl_id;
7900 config->hdr.reserved1 = 0;
7902 bp->set_mac_pending++;
7905 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
7906 U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
7907 U64_LO(bnx2x_sp_mapping(bp, mcast_config)), 0);
7910 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
7912 bnx2x_set_eth_mac_addr_e1h(bp, 0);
7914 for (i = 0; i < MC_HASH_SIZE; i++)
7915 REG_WR(bp, MC_HASH_OFFSET(bp, i), 0);
7917 REG_WR(bp, MISC_REG_E1HMF_MODE, 0);
7920 /* Clear iSCSI L2 MAC */
7921 mutex_lock(&bp->cnic_mutex);
7922 if (bp->cnic_flags & BNX2X_CNIC_FLAG_MAC_SET) {
7923 bnx2x_set_iscsi_eth_mac_addr(bp, 0);
7924 bp->cnic_flags &= ~BNX2X_CNIC_FLAG_MAC_SET;
7926 mutex_unlock(&bp->cnic_mutex);
7929 if (unload_mode == UNLOAD_NORMAL)
7930 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
7932 else if (bp->flags & NO_WOL_FLAG)
7933 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP;
7936 u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
7937 u8 *mac_addr = bp->dev->dev_addr;
7939 /* The mac address is written to entries 1-4 to
7940 preserve entry 0 which is used by the PMF */
7941 u8 entry = (BP_E1HVN(bp) + 1)*8;
7943 val = (mac_addr[0] << 8) | mac_addr[1];
7944 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry, val);
7946 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
7947 (mac_addr[4] << 8) | mac_addr[5];
7948 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry + 4, val);
7950 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN;
7953 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
7955 /* Close multi and leading connections
7956 Completions for ramrods are collected in a synchronous way */
7957 for_each_nondefault_queue(bp, i)
7958 if (bnx2x_stop_multi(bp, i))
7961 rc = bnx2x_stop_leading(bp);
7963 BNX2X_ERR("Stop leading failed!\n");
7964 #ifdef BNX2X_STOP_ON_ERROR
7973 reset_code = bnx2x_fw_command(bp, reset_code);
7975 DP(NETIF_MSG_IFDOWN, "NO MCP - load counts %d, %d, %d\n",
7976 load_count[0], load_count[1], load_count[2]);
7978 load_count[1 + port]--;
7979 DP(NETIF_MSG_IFDOWN, "NO MCP - new load counts %d, %d, %d\n",
7980 load_count[0], load_count[1], load_count[2]);
7981 if (load_count[0] == 0)
7982 reset_code = FW_MSG_CODE_DRV_UNLOAD_COMMON;
7983 else if (load_count[1 + port] == 0)
7984 reset_code = FW_MSG_CODE_DRV_UNLOAD_PORT;
7986 reset_code = FW_MSG_CODE_DRV_UNLOAD_FUNCTION;
7989 if ((reset_code == FW_MSG_CODE_DRV_UNLOAD_COMMON) ||
7990 (reset_code == FW_MSG_CODE_DRV_UNLOAD_PORT))
7991 bnx2x__link_reset(bp);
7993 /* Reset the chip */
7994 bnx2x_reset_chip(bp, reset_code);
7996 /* Report UNLOAD_DONE to MCP */
7998 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
8002 /* Free SKBs, SGEs, TPA pool and driver internals */
8003 bnx2x_free_skbs(bp);
8004 for_each_queue(bp, i)
8005 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
8006 for_each_queue(bp, i)
8007 netif_napi_del(&bnx2x_fp(bp, i, napi));
8010 bp->state = BNX2X_STATE_CLOSED;
8012 netif_carrier_off(bp->dev);
8017 static void bnx2x_reset_task(struct work_struct *work)
8019 struct bnx2x *bp = container_of(work, struct bnx2x, reset_task);
8021 #ifdef BNX2X_STOP_ON_ERROR
8022 BNX2X_ERR("reset task called but STOP_ON_ERROR defined"
8023 " so reset not done to allow debug dump,\n"
8024 " you will need to reboot when done\n");
8030 if (!netif_running(bp->dev))
8031 goto reset_task_exit;
8033 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
8034 bnx2x_nic_load(bp, LOAD_NORMAL);
8040 /* end of nic load/unload */
8045 * Init service functions
8048 static inline u32 bnx2x_get_pretend_reg(struct bnx2x *bp, int func)
8051 case 0: return PXP2_REG_PGL_PRETEND_FUNC_F0;
8052 case 1: return PXP2_REG_PGL_PRETEND_FUNC_F1;
8053 case 2: return PXP2_REG_PGL_PRETEND_FUNC_F2;
8054 case 3: return PXP2_REG_PGL_PRETEND_FUNC_F3;
8055 case 4: return PXP2_REG_PGL_PRETEND_FUNC_F4;
8056 case 5: return PXP2_REG_PGL_PRETEND_FUNC_F5;
8057 case 6: return PXP2_REG_PGL_PRETEND_FUNC_F6;
8058 case 7: return PXP2_REG_PGL_PRETEND_FUNC_F7;
8060 BNX2X_ERR("Unsupported function index: %d\n", func);
8065 static void bnx2x_undi_int_disable_e1h(struct bnx2x *bp, int orig_func)
8067 u32 reg = bnx2x_get_pretend_reg(bp, orig_func), new_val;
8069 /* Flush all outstanding writes */
8072 /* Pretend to be function 0 */
8074 /* Flush the GRC transaction (in the chip) */
8075 new_val = REG_RD(bp, reg);
8077 BNX2X_ERR("Hmmm... Pretend register wasn't updated: (0,%d)!\n",
8082 /* From now we are in the "like-E1" mode */
8083 bnx2x_int_disable(bp);
8085 /* Flush all outstanding writes */
8088 /* Restore the original funtion settings */
8089 REG_WR(bp, reg, orig_func);
8090 new_val = REG_RD(bp, reg);
8091 if (new_val != orig_func) {
8092 BNX2X_ERR("Hmmm... Pretend register wasn't updated: (%d,%d)!\n",
8093 orig_func, new_val);
8098 static inline void bnx2x_undi_int_disable(struct bnx2x *bp, int func)
8100 if (CHIP_IS_E1H(bp))
8101 bnx2x_undi_int_disable_e1h(bp, func);
8103 bnx2x_int_disable(bp);
8106 static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
8110 /* Check if there is any driver already loaded */
8111 val = REG_RD(bp, MISC_REG_UNPREPARED);
8113 /* Check if it is the UNDI driver
8114 * UNDI driver initializes CID offset for normal bell to 0x7
8116 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
8117 val = REG_RD(bp, DORQ_REG_NORM_CID_OFST);
8119 u32 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
8121 int func = BP_FUNC(bp);
8125 /* clear the UNDI indication */
8126 REG_WR(bp, DORQ_REG_NORM_CID_OFST, 0);
8128 BNX2X_DEV_INFO("UNDI is active! reset device\n");
8130 /* try unload UNDI on port 0 */
8133 (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
8134 DRV_MSG_SEQ_NUMBER_MASK);
8135 reset_code = bnx2x_fw_command(bp, reset_code);
8137 /* if UNDI is loaded on the other port */
8138 if (reset_code != FW_MSG_CODE_DRV_UNLOAD_COMMON) {
8140 /* send "DONE" for previous unload */
8141 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
8143 /* unload UNDI on port 1 */
8146 (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
8147 DRV_MSG_SEQ_NUMBER_MASK);
8148 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
8150 bnx2x_fw_command(bp, reset_code);
8153 /* now it's safe to release the lock */
8154 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
8156 bnx2x_undi_int_disable(bp, func);
8158 /* close input traffic and wait for it */
8159 /* Do not rcv packets to BRB */
8161 (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_DRV_MASK :
8162 NIG_REG_LLH0_BRB1_DRV_MASK), 0x0);
8163 /* Do not direct rcv packets that are not for MCP to
8166 (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_NOT_MCP :
8167 NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
8170 (BP_PORT(bp) ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
8171 MISC_REG_AEU_MASK_ATTN_FUNC_0), 0);
8174 /* save NIG port swap info */
8175 swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
8176 swap_en = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
8179 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
8182 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
8184 /* take the NIG out of reset and restore swap values */
8186 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
8187 MISC_REGISTERS_RESET_REG_1_RST_NIG);
8188 REG_WR(bp, NIG_REG_PORT_SWAP, swap_val);
8189 REG_WR(bp, NIG_REG_STRAP_OVERRIDE, swap_en);
8191 /* send unload done to the MCP */
8192 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
8194 /* restore our func and fw_seq */
8197 (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
8198 DRV_MSG_SEQ_NUMBER_MASK);
8201 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
8205 static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
8207 u32 val, val2, val3, val4, id;
8210 /* Get the chip revision id and number. */
8211 /* chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3 */
8212 val = REG_RD(bp, MISC_REG_CHIP_NUM);
8213 id = ((val & 0xffff) << 16);
8214 val = REG_RD(bp, MISC_REG_CHIP_REV);
8215 id |= ((val & 0xf) << 12);
8216 val = REG_RD(bp, MISC_REG_CHIP_METAL);
8217 id |= ((val & 0xff) << 4);
8218 val = REG_RD(bp, MISC_REG_BOND_ID);
8220 bp->common.chip_id = id;
8221 bp->link_params.chip_id = bp->common.chip_id;
8222 BNX2X_DEV_INFO("chip ID is 0x%x\n", id);
8224 val = (REG_RD(bp, 0x2874) & 0x55);
8225 if ((bp->common.chip_id & 0x1) ||
8226 (CHIP_IS_E1(bp) && val) || (CHIP_IS_E1H(bp) && (val == 0x55))) {
8227 bp->flags |= ONE_PORT_FLAG;
8228 BNX2X_DEV_INFO("single port device\n");
8231 val = REG_RD(bp, MCP_REG_MCPR_NVM_CFG4);
8232 bp->common.flash_size = (NVRAM_1MB_SIZE <<
8233 (val & MCPR_NVM_CFG4_FLASH_SIZE));
8234 BNX2X_DEV_INFO("flash_size 0x%x (%d)\n",
8235 bp->common.flash_size, bp->common.flash_size);
8237 bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
8238 bp->common.shmem2_base = REG_RD(bp, MISC_REG_GENERIC_CR_0);
8239 bp->link_params.shmem_base = bp->common.shmem_base;
8240 BNX2X_DEV_INFO("shmem offset 0x%x shmem2 offset 0x%x\n",
8241 bp->common.shmem_base, bp->common.shmem2_base);
8243 if (!bp->common.shmem_base ||
8244 (bp->common.shmem_base < 0xA0000) ||
8245 (bp->common.shmem_base >= 0xC0000)) {
8246 BNX2X_DEV_INFO("MCP not active\n");
8247 bp->flags |= NO_MCP_FLAG;
8251 val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
8252 if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
8253 != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
8254 BNX2X_ERR("BAD MCP validity signature\n");
8256 bp->common.hw_config = SHMEM_RD(bp, dev_info.shared_hw_config.config);
8257 BNX2X_DEV_INFO("hw_config 0x%08x\n", bp->common.hw_config);
8259 bp->link_params.hw_led_mode = ((bp->common.hw_config &
8260 SHARED_HW_CFG_LED_MODE_MASK) >>
8261 SHARED_HW_CFG_LED_MODE_SHIFT);
8263 bp->link_params.feature_config_flags = 0;
8264 val = SHMEM_RD(bp, dev_info.shared_feature_config.config);
8265 if (val & SHARED_FEAT_CFG_OVERRIDE_PREEMPHASIS_CFG_ENABLED)
8266 bp->link_params.feature_config_flags |=
8267 FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
8269 bp->link_params.feature_config_flags &=
8270 ~FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
8272 val = SHMEM_RD(bp, dev_info.bc_rev) >> 8;
8273 bp->common.bc_ver = val;
8274 BNX2X_DEV_INFO("bc_ver %X\n", val);
8275 if (val < BNX2X_BC_VER) {
8276 /* for now only warn
8277 * later we might need to enforce this */
8278 BNX2X_ERR("This driver needs bc_ver %X but found %X,"
8279 " please upgrade BC\n", BNX2X_BC_VER, val);
8281 bp->link_params.feature_config_flags |=
8282 (val >= REQ_BC_VER_4_VRFY_OPT_MDL) ?
8283 FEATURE_CONFIG_BC_SUPPORTS_OPT_MDL_VRFY : 0;
8285 if (BP_E1HVN(bp) == 0) {
8286 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_PMC, &pmc);
8287 bp->flags |= (pmc & PCI_PM_CAP_PME_D3cold) ? 0 : NO_WOL_FLAG;
8289 /* no WOL capability for E1HVN != 0 */
8290 bp->flags |= NO_WOL_FLAG;
8292 BNX2X_DEV_INFO("%sWoL capable\n",
8293 (bp->flags & NO_WOL_FLAG) ? "not " : "");
8295 val = SHMEM_RD(bp, dev_info.shared_hw_config.part_num);
8296 val2 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[4]);
8297 val3 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[8]);
8298 val4 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[12]);
8300 printk(KERN_INFO PFX "part number %X-%X-%X-%X\n",
8301 val, val2, val3, val4);
8304 static void __devinit bnx2x_link_settings_supported(struct bnx2x *bp,
8307 int port = BP_PORT(bp);
8310 switch (switch_cfg) {
8312 BNX2X_DEV_INFO("switch_cfg 0x%x (1G)\n", switch_cfg);
8315 SERDES_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
8316 switch (ext_phy_type) {
8317 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT:
8318 BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
8321 bp->port.supported |= (SUPPORTED_10baseT_Half |
8322 SUPPORTED_10baseT_Full |
8323 SUPPORTED_100baseT_Half |
8324 SUPPORTED_100baseT_Full |
8325 SUPPORTED_1000baseT_Full |
8326 SUPPORTED_2500baseX_Full |
8331 SUPPORTED_Asym_Pause);
8334 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_BCM5482:
8335 BNX2X_DEV_INFO("ext_phy_type 0x%x (5482)\n",
8338 bp->port.supported |= (SUPPORTED_10baseT_Half |
8339 SUPPORTED_10baseT_Full |
8340 SUPPORTED_100baseT_Half |
8341 SUPPORTED_100baseT_Full |
8342 SUPPORTED_1000baseT_Full |
8347 SUPPORTED_Asym_Pause);
8351 BNX2X_ERR("NVRAM config error. "
8352 "BAD SerDes ext_phy_config 0x%x\n",
8353 bp->link_params.ext_phy_config);
8357 bp->port.phy_addr = REG_RD(bp, NIG_REG_SERDES0_CTRL_PHY_ADDR +
8359 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
8362 case SWITCH_CFG_10G:
8363 BNX2X_DEV_INFO("switch_cfg 0x%x (10G)\n", switch_cfg);
8366 XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
8367 switch (ext_phy_type) {
8368 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
8369 BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
8372 bp->port.supported |= (SUPPORTED_10baseT_Half |
8373 SUPPORTED_10baseT_Full |
8374 SUPPORTED_100baseT_Half |
8375 SUPPORTED_100baseT_Full |
8376 SUPPORTED_1000baseT_Full |
8377 SUPPORTED_2500baseX_Full |
8378 SUPPORTED_10000baseT_Full |
8383 SUPPORTED_Asym_Pause);
8386 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
8387 BNX2X_DEV_INFO("ext_phy_type 0x%x (8072)\n",
8390 bp->port.supported |= (SUPPORTED_10000baseT_Full |
8391 SUPPORTED_1000baseT_Full |
8395 SUPPORTED_Asym_Pause);
8398 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
8399 BNX2X_DEV_INFO("ext_phy_type 0x%x (8073)\n",
8402 bp->port.supported |= (SUPPORTED_10000baseT_Full |
8403 SUPPORTED_2500baseX_Full |
8404 SUPPORTED_1000baseT_Full |
8408 SUPPORTED_Asym_Pause);
8411 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
8412 BNX2X_DEV_INFO("ext_phy_type 0x%x (8705)\n",
8415 bp->port.supported |= (SUPPORTED_10000baseT_Full |
8418 SUPPORTED_Asym_Pause);
8421 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
8422 BNX2X_DEV_INFO("ext_phy_type 0x%x (8706)\n",
8425 bp->port.supported |= (SUPPORTED_10000baseT_Full |
8426 SUPPORTED_1000baseT_Full |
8429 SUPPORTED_Asym_Pause);
8432 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
8433 BNX2X_DEV_INFO("ext_phy_type 0x%x (8726)\n",
8436 bp->port.supported |= (SUPPORTED_10000baseT_Full |
8437 SUPPORTED_1000baseT_Full |
8441 SUPPORTED_Asym_Pause);
8444 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
8445 BNX2X_DEV_INFO("ext_phy_type 0x%x (8727)\n",
8448 bp->port.supported |= (SUPPORTED_10000baseT_Full |
8449 SUPPORTED_1000baseT_Full |
8453 SUPPORTED_Asym_Pause);
8456 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
8457 BNX2X_DEV_INFO("ext_phy_type 0x%x (SFX7101)\n",
8460 bp->port.supported |= (SUPPORTED_10000baseT_Full |
8464 SUPPORTED_Asym_Pause);
8467 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481:
8468 BNX2X_DEV_INFO("ext_phy_type 0x%x (BCM8481)\n",
8471 bp->port.supported |= (SUPPORTED_10baseT_Half |
8472 SUPPORTED_10baseT_Full |
8473 SUPPORTED_100baseT_Half |
8474 SUPPORTED_100baseT_Full |
8475 SUPPORTED_1000baseT_Full |
8476 SUPPORTED_10000baseT_Full |
8480 SUPPORTED_Asym_Pause);
8483 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
8484 BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
8485 bp->link_params.ext_phy_config);
8489 BNX2X_ERR("NVRAM config error. "
8490 "BAD XGXS ext_phy_config 0x%x\n",
8491 bp->link_params.ext_phy_config);
8495 bp->port.phy_addr = REG_RD(bp, NIG_REG_XGXS0_CTRL_PHY_ADDR +
8497 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
8502 BNX2X_ERR("BAD switch_cfg link_config 0x%x\n",
8503 bp->port.link_config);
8506 bp->link_params.phy_addr = bp->port.phy_addr;
8508 /* mask what we support according to speed_cap_mask */
8509 if (!(bp->link_params.speed_cap_mask &
8510 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF))
8511 bp->port.supported &= ~SUPPORTED_10baseT_Half;
8513 if (!(bp->link_params.speed_cap_mask &
8514 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL))
8515 bp->port.supported &= ~SUPPORTED_10baseT_Full;
8517 if (!(bp->link_params.speed_cap_mask &
8518 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF))
8519 bp->port.supported &= ~SUPPORTED_100baseT_Half;
8521 if (!(bp->link_params.speed_cap_mask &
8522 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL))
8523 bp->port.supported &= ~SUPPORTED_100baseT_Full;
8525 if (!(bp->link_params.speed_cap_mask &
8526 PORT_HW_CFG_SPEED_CAPABILITY_D0_1G))
8527 bp->port.supported &= ~(SUPPORTED_1000baseT_Half |
8528 SUPPORTED_1000baseT_Full);
8530 if (!(bp->link_params.speed_cap_mask &
8531 PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G))
8532 bp->port.supported &= ~SUPPORTED_2500baseX_Full;
8534 if (!(bp->link_params.speed_cap_mask &
8535 PORT_HW_CFG_SPEED_CAPABILITY_D0_10G))
8536 bp->port.supported &= ~SUPPORTED_10000baseT_Full;
8538 BNX2X_DEV_INFO("supported 0x%x\n", bp->port.supported);
8541 static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
8543 bp->link_params.req_duplex = DUPLEX_FULL;
8545 switch (bp->port.link_config & PORT_FEATURE_LINK_SPEED_MASK) {
8546 case PORT_FEATURE_LINK_SPEED_AUTO:
8547 if (bp->port.supported & SUPPORTED_Autoneg) {
8548 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
8549 bp->port.advertising = bp->port.supported;
8552 XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
8554 if ((ext_phy_type ==
8555 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705) ||
8557 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706)) {
8558 /* force 10G, no AN */
8559 bp->link_params.req_line_speed = SPEED_10000;
8560 bp->port.advertising =
8561 (ADVERTISED_10000baseT_Full |
8565 BNX2X_ERR("NVRAM config error. "
8566 "Invalid link_config 0x%x"
8567 " Autoneg not supported\n",
8568 bp->port.link_config);
8573 case PORT_FEATURE_LINK_SPEED_10M_FULL:
8574 if (bp->port.supported & SUPPORTED_10baseT_Full) {
8575 bp->link_params.req_line_speed = SPEED_10;
8576 bp->port.advertising = (ADVERTISED_10baseT_Full |
8579 BNX2X_ERR("NVRAM config error. "
8580 "Invalid link_config 0x%x"
8581 " speed_cap_mask 0x%x\n",
8582 bp->port.link_config,
8583 bp->link_params.speed_cap_mask);
8588 case PORT_FEATURE_LINK_SPEED_10M_HALF:
8589 if (bp->port.supported & SUPPORTED_10baseT_Half) {
8590 bp->link_params.req_line_speed = SPEED_10;
8591 bp->link_params.req_duplex = DUPLEX_HALF;
8592 bp->port.advertising = (ADVERTISED_10baseT_Half |
8595 BNX2X_ERR("NVRAM config error. "
8596 "Invalid link_config 0x%x"
8597 " speed_cap_mask 0x%x\n",
8598 bp->port.link_config,
8599 bp->link_params.speed_cap_mask);
8604 case PORT_FEATURE_LINK_SPEED_100M_FULL:
8605 if (bp->port.supported & SUPPORTED_100baseT_Full) {
8606 bp->link_params.req_line_speed = SPEED_100;
8607 bp->port.advertising = (ADVERTISED_100baseT_Full |
8610 BNX2X_ERR("NVRAM config error. "
8611 "Invalid link_config 0x%x"
8612 " speed_cap_mask 0x%x\n",
8613 bp->port.link_config,
8614 bp->link_params.speed_cap_mask);
8619 case PORT_FEATURE_LINK_SPEED_100M_HALF:
8620 if (bp->port.supported & SUPPORTED_100baseT_Half) {
8621 bp->link_params.req_line_speed = SPEED_100;
8622 bp->link_params.req_duplex = DUPLEX_HALF;
8623 bp->port.advertising = (ADVERTISED_100baseT_Half |
8626 BNX2X_ERR("NVRAM config error. "
8627 "Invalid link_config 0x%x"
8628 " speed_cap_mask 0x%x\n",
8629 bp->port.link_config,
8630 bp->link_params.speed_cap_mask);
8635 case PORT_FEATURE_LINK_SPEED_1G:
8636 if (bp->port.supported & SUPPORTED_1000baseT_Full) {
8637 bp->link_params.req_line_speed = SPEED_1000;
8638 bp->port.advertising = (ADVERTISED_1000baseT_Full |
8641 BNX2X_ERR("NVRAM config error. "
8642 "Invalid link_config 0x%x"
8643 " speed_cap_mask 0x%x\n",
8644 bp->port.link_config,
8645 bp->link_params.speed_cap_mask);
8650 case PORT_FEATURE_LINK_SPEED_2_5G:
8651 if (bp->port.supported & SUPPORTED_2500baseX_Full) {
8652 bp->link_params.req_line_speed = SPEED_2500;
8653 bp->port.advertising = (ADVERTISED_2500baseX_Full |
8656 BNX2X_ERR("NVRAM config error. "
8657 "Invalid link_config 0x%x"
8658 " speed_cap_mask 0x%x\n",
8659 bp->port.link_config,
8660 bp->link_params.speed_cap_mask);
8665 case PORT_FEATURE_LINK_SPEED_10G_CX4:
8666 case PORT_FEATURE_LINK_SPEED_10G_KX4:
8667 case PORT_FEATURE_LINK_SPEED_10G_KR:
8668 if (bp->port.supported & SUPPORTED_10000baseT_Full) {
8669 bp->link_params.req_line_speed = SPEED_10000;
8670 bp->port.advertising = (ADVERTISED_10000baseT_Full |
8673 BNX2X_ERR("NVRAM config error. "
8674 "Invalid link_config 0x%x"
8675 " speed_cap_mask 0x%x\n",
8676 bp->port.link_config,
8677 bp->link_params.speed_cap_mask);
8683 BNX2X_ERR("NVRAM config error. "
8684 "BAD link speed link_config 0x%x\n",
8685 bp->port.link_config);
8686 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
8687 bp->port.advertising = bp->port.supported;
8691 bp->link_params.req_flow_ctrl = (bp->port.link_config &
8692 PORT_FEATURE_FLOW_CONTROL_MASK);
8693 if ((bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO) &&
8694 !(bp->port.supported & SUPPORTED_Autoneg))
8695 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_NONE;
8697 BNX2X_DEV_INFO("req_line_speed %d req_duplex %d req_flow_ctrl 0x%x"
8698 " advertising 0x%x\n",
8699 bp->link_params.req_line_speed,
8700 bp->link_params.req_duplex,
8701 bp->link_params.req_flow_ctrl, bp->port.advertising);
8704 static void __devinit bnx2x_set_mac_buf(u8 *mac_buf, u32 mac_lo, u16 mac_hi)
8706 mac_hi = cpu_to_be16(mac_hi);
8707 mac_lo = cpu_to_be32(mac_lo);
8708 memcpy(mac_buf, &mac_hi, sizeof(mac_hi));
8709 memcpy(mac_buf + sizeof(mac_hi), &mac_lo, sizeof(mac_lo));
8712 static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp)
8714 int port = BP_PORT(bp);
8720 bp->link_params.bp = bp;
8721 bp->link_params.port = port;
8723 bp->link_params.lane_config =
8724 SHMEM_RD(bp, dev_info.port_hw_config[port].lane_config);
8725 bp->link_params.ext_phy_config =
8727 dev_info.port_hw_config[port].external_phy_config);
8728 /* BCM8727_NOC => BCM8727 no over current */
8729 if (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config) ==
8730 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727_NOC) {
8731 bp->link_params.ext_phy_config &=
8732 ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
8733 bp->link_params.ext_phy_config |=
8734 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727;
8735 bp->link_params.feature_config_flags |=
8736 FEATURE_CONFIG_BCM8727_NOC;
8739 bp->link_params.speed_cap_mask =
8741 dev_info.port_hw_config[port].speed_capability_mask);
8743 bp->port.link_config =
8744 SHMEM_RD(bp, dev_info.port_feature_config[port].link_config);
8746 /* Get the 4 lanes xgxs config rx and tx */
8747 for (i = 0; i < 2; i++) {
8749 dev_info.port_hw_config[port].xgxs_config_rx[i<<1]);
8750 bp->link_params.xgxs_config_rx[i << 1] = ((val>>16) & 0xffff);
8751 bp->link_params.xgxs_config_rx[(i << 1) + 1] = (val & 0xffff);
8754 dev_info.port_hw_config[port].xgxs_config_tx[i<<1]);
8755 bp->link_params.xgxs_config_tx[i << 1] = ((val>>16) & 0xffff);
8756 bp->link_params.xgxs_config_tx[(i << 1) + 1] = (val & 0xffff);
8759 /* If the device is capable of WoL, set the default state according
8762 config = SHMEM_RD(bp, dev_info.port_feature_config[port].config);
8763 bp->wol = (!(bp->flags & NO_WOL_FLAG) &&
8764 (config & PORT_FEATURE_WOL_ENABLED));
8766 BNX2X_DEV_INFO("lane_config 0x%08x ext_phy_config 0x%08x"
8767 " speed_cap_mask 0x%08x link_config 0x%08x\n",
8768 bp->link_params.lane_config,
8769 bp->link_params.ext_phy_config,
8770 bp->link_params.speed_cap_mask, bp->port.link_config);
8772 bp->link_params.switch_cfg |= (bp->port.link_config &
8773 PORT_FEATURE_CONNECTED_SWITCH_MASK);
8774 bnx2x_link_settings_supported(bp, bp->link_params.switch_cfg);
8776 bnx2x_link_settings_requested(bp);
8779 * If connected directly, work with the internal PHY, otherwise, work
8780 * with the external PHY
8782 ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
8783 if (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT)
8784 bp->mdio.prtad = bp->link_params.phy_addr;
8786 else if ((ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE) &&
8787 (ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN))
8789 XGXS_EXT_PHY_ADDR(bp->link_params.ext_phy_config);
8791 val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper);
8792 val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower);
8793 bnx2x_set_mac_buf(bp->dev->dev_addr, val, val2);
8794 memcpy(bp->link_params.mac_addr, bp->dev->dev_addr, ETH_ALEN);
8795 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
8798 val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].iscsi_mac_upper);
8799 val = SHMEM_RD(bp, dev_info.port_hw_config[port].iscsi_mac_lower);
8800 bnx2x_set_mac_buf(bp->iscsi_mac, val, val2);
8804 static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
8806 int func = BP_FUNC(bp);
8810 bnx2x_get_common_hwinfo(bp);
8814 if (CHIP_IS_E1H(bp)) {
8816 SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
8818 val = (SHMEM_RD(bp, mf_cfg.func_mf_config[FUNC_0].e1hov_tag) &
8819 FUNC_MF_CFG_E1HOV_TAG_MASK);
8820 if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT)
8822 BNX2X_DEV_INFO("%s function mode\n",
8823 IS_E1HMF(bp) ? "multi" : "single");
8826 val = (SHMEM_RD(bp, mf_cfg.func_mf_config[func].
8828 FUNC_MF_CFG_E1HOV_TAG_MASK);
8829 if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
8831 BNX2X_DEV_INFO("E1HOV for func %d is %d "
8833 func, bp->e1hov, bp->e1hov);
8835 BNX2X_ERR("!!! No valid E1HOV for func %d,"
8836 " aborting\n", func);
8841 BNX2X_ERR("!!! VN %d in single function mode,"
8842 " aborting\n", BP_E1HVN(bp));
8848 if (!BP_NOMCP(bp)) {
8849 bnx2x_get_port_hwinfo(bp);
8851 bp->fw_seq = (SHMEM_RD(bp, func_mb[func].drv_mb_header) &
8852 DRV_MSG_SEQ_NUMBER_MASK);
8853 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
8857 val2 = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_upper);
8858 val = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_lower);
8859 if ((val2 != FUNC_MF_CFG_UPPERMAC_DEFAULT) &&
8860 (val != FUNC_MF_CFG_LOWERMAC_DEFAULT)) {
8861 bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
8862 bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
8863 bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
8864 bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
8865 bp->dev->dev_addr[4] = (u8)(val >> 8 & 0xff);
8866 bp->dev->dev_addr[5] = (u8)(val & 0xff);
8867 memcpy(bp->link_params.mac_addr, bp->dev->dev_addr,
8869 memcpy(bp->dev->perm_addr, bp->dev->dev_addr,
8877 /* only supposed to happen on emulation/FPGA */
8878 BNX2X_ERR("warning random MAC workaround active\n");
8879 random_ether_addr(bp->dev->dev_addr);
8880 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
8886 static int __devinit bnx2x_init_bp(struct bnx2x *bp)
8888 int func = BP_FUNC(bp);
8892 /* Disable interrupt handling until HW is initialized */
8893 atomic_set(&bp->intr_sem, 1);
8894 smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */
8896 mutex_init(&bp->port.phy_mutex);
8897 mutex_init(&bp->fw_mb_mutex);
8899 mutex_init(&bp->cnic_mutex);
8902 INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task);
8903 INIT_WORK(&bp->reset_task, bnx2x_reset_task);
8905 rc = bnx2x_get_hwinfo(bp);
8907 /* need to reset chip if undi was active */
8909 bnx2x_undi_unload(bp);
8911 if (CHIP_REV_IS_FPGA(bp))
8912 printk(KERN_ERR PFX "FPGA detected\n");
8914 if (BP_NOMCP(bp) && (func == 0))
8916 "MCP disabled, must load devices in order!\n");
8918 /* Set multi queue mode */
8919 if ((multi_mode != ETH_RSS_MODE_DISABLED) &&
8920 ((int_mode == INT_MODE_INTx) || (int_mode == INT_MODE_MSI))) {
8922 "Multi disabled since int_mode requested is not MSI-X\n");
8923 multi_mode = ETH_RSS_MODE_DISABLED;
8925 bp->multi_mode = multi_mode;
8930 bp->flags &= ~TPA_ENABLE_FLAG;
8931 bp->dev->features &= ~NETIF_F_LRO;
8933 bp->flags |= TPA_ENABLE_FLAG;
8934 bp->dev->features |= NETIF_F_LRO;
8938 bp->dropless_fc = 0;
8940 bp->dropless_fc = dropless_fc;
8944 bp->tx_ring_size = MAX_TX_AVAIL;
8945 bp->rx_ring_size = MAX_RX_AVAIL;
8949 /* make sure that the numbers are in the right granularity */
8950 bp->tx_ticks = (50 / (4 * BNX2X_BTR)) * (4 * BNX2X_BTR);
8951 bp->rx_ticks = (25 / (4 * BNX2X_BTR)) * (4 * BNX2X_BTR);
8953 timer_interval = (CHIP_REV_IS_SLOW(bp) ? 5*HZ : HZ);
8954 bp->current_interval = (poll ? poll : timer_interval);
8956 init_timer(&bp->timer);
8957 bp->timer.expires = jiffies + bp->current_interval;
8958 bp->timer.data = (unsigned long) bp;
8959 bp->timer.function = bnx2x_timer;
8965 * ethtool service functions
8968 /* All ethtool functions called with rtnl_lock */
8970 static int bnx2x_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
8972 struct bnx2x *bp = netdev_priv(dev);
8974 cmd->supported = bp->port.supported;
8975 cmd->advertising = bp->port.advertising;
8977 if ((bp->state == BNX2X_STATE_OPEN) &&
8978 !(bp->flags & MF_FUNC_DIS) &&
8979 (bp->link_vars.link_up)) {
8980 cmd->speed = bp->link_vars.line_speed;
8981 cmd->duplex = bp->link_vars.duplex;
8986 ((bp->mf_config & FUNC_MF_CFG_MAX_BW_MASK) >>
8987 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
8988 if (vn_max_rate < cmd->speed)
8989 cmd->speed = vn_max_rate;
8996 if (bp->link_params.switch_cfg == SWITCH_CFG_10G) {
8998 XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
9000 switch (ext_phy_type) {
9001 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
9002 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
9003 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
9004 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
9005 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
9006 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
9007 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
9008 cmd->port = PORT_FIBRE;
9011 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
9012 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481:
9013 cmd->port = PORT_TP;
9016 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
9017 BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
9018 bp->link_params.ext_phy_config);
9022 DP(NETIF_MSG_LINK, "BAD XGXS ext_phy_config 0x%x\n",
9023 bp->link_params.ext_phy_config);
9027 cmd->port = PORT_TP;
9029 cmd->phy_address = bp->mdio.prtad;
9030 cmd->transceiver = XCVR_INTERNAL;
9032 if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
9033 cmd->autoneg = AUTONEG_ENABLE;
9035 cmd->autoneg = AUTONEG_DISABLE;
9040 DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
9041 DP_LEVEL " supported 0x%x advertising 0x%x speed %d\n"
9042 DP_LEVEL " duplex %d port %d phy_address %d transceiver %d\n"
9043 DP_LEVEL " autoneg %d maxtxpkt %d maxrxpkt %d\n",
9044 cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
9045 cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
9046 cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);
9051 static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
9053 struct bnx2x *bp = netdev_priv(dev);
9059 DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
9060 DP_LEVEL " supported 0x%x advertising 0x%x speed %d\n"
9061 DP_LEVEL " duplex %d port %d phy_address %d transceiver %d\n"
9062 DP_LEVEL " autoneg %d maxtxpkt %d maxrxpkt %d\n",
9063 cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
9064 cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
9065 cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);
9067 if (cmd->autoneg == AUTONEG_ENABLE) {
9068 if (!(bp->port.supported & SUPPORTED_Autoneg)) {
9069 DP(NETIF_MSG_LINK, "Autoneg not supported\n");
9073 /* advertise the requested speed and duplex if supported */
9074 cmd->advertising &= bp->port.supported;
9076 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
9077 bp->link_params.req_duplex = DUPLEX_FULL;
9078 bp->port.advertising |= (ADVERTISED_Autoneg |
9081 } else { /* forced speed */
9082 /* advertise the requested speed and duplex if supported */
9083 switch (cmd->speed) {
9085 if (cmd->duplex == DUPLEX_FULL) {
9086 if (!(bp->port.supported &
9087 SUPPORTED_10baseT_Full)) {
9089 "10M full not supported\n");
9093 advertising = (ADVERTISED_10baseT_Full |
9096 if (!(bp->port.supported &
9097 SUPPORTED_10baseT_Half)) {
9099 "10M half not supported\n");
9103 advertising = (ADVERTISED_10baseT_Half |
9109 if (cmd->duplex == DUPLEX_FULL) {
9110 if (!(bp->port.supported &
9111 SUPPORTED_100baseT_Full)) {
9113 "100M full not supported\n");
9117 advertising = (ADVERTISED_100baseT_Full |
9120 if (!(bp->port.supported &
9121 SUPPORTED_100baseT_Half)) {
9123 "100M half not supported\n");
9127 advertising = (ADVERTISED_100baseT_Half |
9133 if (cmd->duplex != DUPLEX_FULL) {
9134 DP(NETIF_MSG_LINK, "1G half not supported\n");
9138 if (!(bp->port.supported & SUPPORTED_1000baseT_Full)) {
9139 DP(NETIF_MSG_LINK, "1G full not supported\n");
9143 advertising = (ADVERTISED_1000baseT_Full |
9148 if (cmd->duplex != DUPLEX_FULL) {
9150 "2.5G half not supported\n");
9154 if (!(bp->port.supported & SUPPORTED_2500baseX_Full)) {
9156 "2.5G full not supported\n");
9160 advertising = (ADVERTISED_2500baseX_Full |
9165 if (cmd->duplex != DUPLEX_FULL) {
9166 DP(NETIF_MSG_LINK, "10G half not supported\n");
9170 if (!(bp->port.supported & SUPPORTED_10000baseT_Full)) {
9171 DP(NETIF_MSG_LINK, "10G full not supported\n");
9175 advertising = (ADVERTISED_10000baseT_Full |
9180 DP(NETIF_MSG_LINK, "Unsupported speed\n");
9184 bp->link_params.req_line_speed = cmd->speed;
9185 bp->link_params.req_duplex = cmd->duplex;
9186 bp->port.advertising = advertising;
9189 DP(NETIF_MSG_LINK, "req_line_speed %d\n"
9190 DP_LEVEL " req_duplex %d advertising 0x%x\n",
9191 bp->link_params.req_line_speed, bp->link_params.req_duplex,
9192 bp->port.advertising);
9194 if (netif_running(dev)) {
9195 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
9202 #define IS_E1_ONLINE(info) (((info) & RI_E1_ONLINE) == RI_E1_ONLINE)
9203 #define IS_E1H_ONLINE(info) (((info) & RI_E1H_ONLINE) == RI_E1H_ONLINE)
9205 static int bnx2x_get_regs_len(struct net_device *dev)
9207 struct bnx2x *bp = netdev_priv(dev);
9208 int regdump_len = 0;
9211 if (CHIP_IS_E1(bp)) {
9212 for (i = 0; i < REGS_COUNT; i++)
9213 if (IS_E1_ONLINE(reg_addrs[i].info))
9214 regdump_len += reg_addrs[i].size;
9216 for (i = 0; i < WREGS_COUNT_E1; i++)
9217 if (IS_E1_ONLINE(wreg_addrs_e1[i].info))
9218 regdump_len += wreg_addrs_e1[i].size *
9219 (1 + wreg_addrs_e1[i].read_regs_count);
9222 for (i = 0; i < REGS_COUNT; i++)
9223 if (IS_E1H_ONLINE(reg_addrs[i].info))
9224 regdump_len += reg_addrs[i].size;
9226 for (i = 0; i < WREGS_COUNT_E1H; i++)
9227 if (IS_E1H_ONLINE(wreg_addrs_e1h[i].info))
9228 regdump_len += wreg_addrs_e1h[i].size *
9229 (1 + wreg_addrs_e1h[i].read_regs_count);
9232 regdump_len += sizeof(struct dump_hdr);
9237 static void bnx2x_get_regs(struct net_device *dev,
9238 struct ethtool_regs *regs, void *_p)
9241 struct bnx2x *bp = netdev_priv(dev);
9242 struct dump_hdr dump_hdr = {0};
9245 memset(p, 0, regs->len);
9247 if (!netif_running(bp->dev))
9250 dump_hdr.hdr_size = (sizeof(struct dump_hdr) / 4) - 1;
9251 dump_hdr.dump_sign = dump_sign_all;
9252 dump_hdr.xstorm_waitp = REG_RD(bp, XSTORM_WAITP_ADDR);
9253 dump_hdr.tstorm_waitp = REG_RD(bp, TSTORM_WAITP_ADDR);
9254 dump_hdr.ustorm_waitp = REG_RD(bp, USTORM_WAITP_ADDR);
9255 dump_hdr.cstorm_waitp = REG_RD(bp, CSTORM_WAITP_ADDR);
9256 dump_hdr.info = CHIP_IS_E1(bp) ? RI_E1_ONLINE : RI_E1H_ONLINE;
9258 memcpy(p, &dump_hdr, sizeof(struct dump_hdr));
9259 p += dump_hdr.hdr_size + 1;
9261 if (CHIP_IS_E1(bp)) {
9262 for (i = 0; i < REGS_COUNT; i++)
9263 if (IS_E1_ONLINE(reg_addrs[i].info))
9264 for (j = 0; j < reg_addrs[i].size; j++)
9266 reg_addrs[i].addr + j*4);
9269 for (i = 0; i < REGS_COUNT; i++)
9270 if (IS_E1H_ONLINE(reg_addrs[i].info))
9271 for (j = 0; j < reg_addrs[i].size; j++)
9273 reg_addrs[i].addr + j*4);
9277 #define PHY_FW_VER_LEN 10
9279 static void bnx2x_get_drvinfo(struct net_device *dev,
9280 struct ethtool_drvinfo *info)
9282 struct bnx2x *bp = netdev_priv(dev);
9283 u8 phy_fw_ver[PHY_FW_VER_LEN];
9285 strcpy(info->driver, DRV_MODULE_NAME);
9286 strcpy(info->version, DRV_MODULE_VERSION);
9288 phy_fw_ver[0] = '\0';
9290 bnx2x_acquire_phy_lock(bp);
9291 bnx2x_get_ext_phy_fw_version(&bp->link_params,
9292 (bp->state != BNX2X_STATE_CLOSED),
9293 phy_fw_ver, PHY_FW_VER_LEN);
9294 bnx2x_release_phy_lock(bp);
9297 snprintf(info->fw_version, 32, "BC:%d.%d.%d%s%s",
9298 (bp->common.bc_ver & 0xff0000) >> 16,
9299 (bp->common.bc_ver & 0xff00) >> 8,
9300 (bp->common.bc_ver & 0xff),
9301 ((phy_fw_ver[0] != '\0') ? " PHY:" : ""), phy_fw_ver);
9302 strcpy(info->bus_info, pci_name(bp->pdev));
9303 info->n_stats = BNX2X_NUM_STATS;
9304 info->testinfo_len = BNX2X_NUM_TESTS;
9305 info->eedump_len = bp->common.flash_size;
9306 info->regdump_len = bnx2x_get_regs_len(dev);
9309 static void bnx2x_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
9311 struct bnx2x *bp = netdev_priv(dev);
9313 if (bp->flags & NO_WOL_FLAG) {
9317 wol->supported = WAKE_MAGIC;
9319 wol->wolopts = WAKE_MAGIC;
9323 memset(&wol->sopass, 0, sizeof(wol->sopass));
9326 static int bnx2x_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
9328 struct bnx2x *bp = netdev_priv(dev);
9330 if (wol->wolopts & ~WAKE_MAGIC)
9333 if (wol->wolopts & WAKE_MAGIC) {
9334 if (bp->flags & NO_WOL_FLAG)
9344 static u32 bnx2x_get_msglevel(struct net_device *dev)
9346 struct bnx2x *bp = netdev_priv(dev);
9348 return bp->msglevel;
9351 static void bnx2x_set_msglevel(struct net_device *dev, u32 level)
9353 struct bnx2x *bp = netdev_priv(dev);
9355 if (capable(CAP_NET_ADMIN))
9356 bp->msglevel = level;
9359 static int bnx2x_nway_reset(struct net_device *dev)
9361 struct bnx2x *bp = netdev_priv(dev);
9366 if (netif_running(dev)) {
9367 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
9374 static u32 bnx2x_get_link(struct net_device *dev)
9376 struct bnx2x *bp = netdev_priv(dev);
9378 if (bp->flags & MF_FUNC_DIS)
9381 return bp->link_vars.link_up;
9384 static int bnx2x_get_eeprom_len(struct net_device *dev)
9386 struct bnx2x *bp = netdev_priv(dev);
9388 return bp->common.flash_size;
9391 static int bnx2x_acquire_nvram_lock(struct bnx2x *bp)
9393 int port = BP_PORT(bp);
9397 /* adjust timeout for emulation/FPGA */
9398 count = NVRAM_TIMEOUT_COUNT;
9399 if (CHIP_REV_IS_SLOW(bp))
9402 /* request access to nvram interface */
9403 REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
9404 (MCPR_NVM_SW_ARB_ARB_REQ_SET1 << port));
9406 for (i = 0; i < count*10; i++) {
9407 val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
9408 if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))
9414 if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))) {
9415 DP(BNX2X_MSG_NVM, "cannot get access to nvram interface\n");
9422 static int bnx2x_release_nvram_lock(struct bnx2x *bp)
9424 int port = BP_PORT(bp);
9428 /* adjust timeout for emulation/FPGA */
9429 count = NVRAM_TIMEOUT_COUNT;
9430 if (CHIP_REV_IS_SLOW(bp))
9433 /* relinquish nvram interface */
9434 REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
9435 (MCPR_NVM_SW_ARB_ARB_REQ_CLR1 << port));
9437 for (i = 0; i < count*10; i++) {
9438 val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
9439 if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)))
9445 if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)) {
9446 DP(BNX2X_MSG_NVM, "cannot free access to nvram interface\n");
9453 static void bnx2x_enable_nvram_access(struct bnx2x *bp)
9457 val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
9459 /* enable both bits, even on read */
9460 REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
9461 (val | MCPR_NVM_ACCESS_ENABLE_EN |
9462 MCPR_NVM_ACCESS_ENABLE_WR_EN));
9465 static void bnx2x_disable_nvram_access(struct bnx2x *bp)
9469 val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
9471 /* disable both bits, even after read */
9472 REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
9473 (val & ~(MCPR_NVM_ACCESS_ENABLE_EN |
9474 MCPR_NVM_ACCESS_ENABLE_WR_EN)));
9477 static int bnx2x_nvram_read_dword(struct bnx2x *bp, u32 offset, __be32 *ret_val,
9483 /* build the command word */
9484 cmd_flags |= MCPR_NVM_COMMAND_DOIT;
9486 /* need to clear DONE bit separately */
9487 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);
9489 /* address of the NVRAM to read from */
9490 REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
9491 (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));
9493 /* issue a read command */
9494 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);
9496 /* adjust timeout for emulation/FPGA */
9497 count = NVRAM_TIMEOUT_COUNT;
9498 if (CHIP_REV_IS_SLOW(bp))
9501 /* wait for completion */
9504 for (i = 0; i < count; i++) {
9506 val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
9508 if (val & MCPR_NVM_COMMAND_DONE) {
9509 val = REG_RD(bp, MCP_REG_MCPR_NVM_READ);
9510 /* we read nvram data in cpu order
9511 * but ethtool sees it as an array of bytes
9512 * converting to big-endian will do the work */
9513 *ret_val = cpu_to_be32(val);
9522 static int bnx2x_nvram_read(struct bnx2x *bp, u32 offset, u8 *ret_buf,
9529 if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
9531 "Invalid parameter: offset 0x%x buf_size 0x%x\n",
9536 if (offset + buf_size > bp->common.flash_size) {
9537 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
9538 " buf_size (0x%x) > flash_size (0x%x)\n",
9539 offset, buf_size, bp->common.flash_size);
9543 /* request access to nvram interface */
9544 rc = bnx2x_acquire_nvram_lock(bp);
9548 /* enable access to nvram interface */
9549 bnx2x_enable_nvram_access(bp);
9551 /* read the first word(s) */
9552 cmd_flags = MCPR_NVM_COMMAND_FIRST;
9553 while ((buf_size > sizeof(u32)) && (rc == 0)) {
9554 rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
9555 memcpy(ret_buf, &val, 4);
9557 /* advance to the next dword */
9558 offset += sizeof(u32);
9559 ret_buf += sizeof(u32);
9560 buf_size -= sizeof(u32);
9565 cmd_flags |= MCPR_NVM_COMMAND_LAST;
9566 rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
9567 memcpy(ret_buf, &val, 4);
9570 /* disable access to nvram interface */
9571 bnx2x_disable_nvram_access(bp);
9572 bnx2x_release_nvram_lock(bp);
9577 static int bnx2x_get_eeprom(struct net_device *dev,
9578 struct ethtool_eeprom *eeprom, u8 *eebuf)
9580 struct bnx2x *bp = netdev_priv(dev);
9583 if (!netif_running(dev))
9586 DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
9587 DP_LEVEL " magic 0x%x offset 0x%x (%d) len 0x%x (%d)\n",
9588 eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
9589 eeprom->len, eeprom->len);
9591 /* parameters already validated in ethtool_get_eeprom */
9593 rc = bnx2x_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
9598 static int bnx2x_nvram_write_dword(struct bnx2x *bp, u32 offset, u32 val,
9603 /* build the command word */
9604 cmd_flags |= MCPR_NVM_COMMAND_DOIT | MCPR_NVM_COMMAND_WR;
9606 /* need to clear DONE bit separately */
9607 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);
9609 /* write the data */
9610 REG_WR(bp, MCP_REG_MCPR_NVM_WRITE, val);
9612 /* address of the NVRAM to write to */
9613 REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
9614 (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));
9616 /* issue the write command */
9617 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);
9619 /* adjust timeout for emulation/FPGA */
9620 count = NVRAM_TIMEOUT_COUNT;
9621 if (CHIP_REV_IS_SLOW(bp))
9624 /* wait for completion */
9626 for (i = 0; i < count; i++) {
9628 val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
9629 if (val & MCPR_NVM_COMMAND_DONE) {
9638 #define BYTE_OFFSET(offset) (8 * (offset & 0x03))
9640 static int bnx2x_nvram_write1(struct bnx2x *bp, u32 offset, u8 *data_buf,
9648 if (offset + buf_size > bp->common.flash_size) {
9649 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
9650 " buf_size (0x%x) > flash_size (0x%x)\n",
9651 offset, buf_size, bp->common.flash_size);
9655 /* request access to nvram interface */
9656 rc = bnx2x_acquire_nvram_lock(bp);
9660 /* enable access to nvram interface */
9661 bnx2x_enable_nvram_access(bp);
9663 cmd_flags = (MCPR_NVM_COMMAND_FIRST | MCPR_NVM_COMMAND_LAST);
9664 align_offset = (offset & ~0x03);
9665 rc = bnx2x_nvram_read_dword(bp, align_offset, &val, cmd_flags);
9668 val &= ~(0xff << BYTE_OFFSET(offset));
9669 val |= (*data_buf << BYTE_OFFSET(offset));
9671 /* nvram data is returned as an array of bytes
9672 * convert it back to cpu order */
9673 val = be32_to_cpu(val);
9675 rc = bnx2x_nvram_write_dword(bp, align_offset, val,
9679 /* disable access to nvram interface */
9680 bnx2x_disable_nvram_access(bp);
9681 bnx2x_release_nvram_lock(bp);
9686 static int bnx2x_nvram_write(struct bnx2x *bp, u32 offset, u8 *data_buf,
9694 if (buf_size == 1) /* ethtool */
9695 return bnx2x_nvram_write1(bp, offset, data_buf, buf_size);
9697 if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
9699 "Invalid parameter: offset 0x%x buf_size 0x%x\n",
9704 if (offset + buf_size > bp->common.flash_size) {
9705 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
9706 " buf_size (0x%x) > flash_size (0x%x)\n",
9707 offset, buf_size, bp->common.flash_size);
9711 /* request access to nvram interface */
9712 rc = bnx2x_acquire_nvram_lock(bp);
9716 /* enable access to nvram interface */
9717 bnx2x_enable_nvram_access(bp);
9720 cmd_flags = MCPR_NVM_COMMAND_FIRST;
9721 while ((written_so_far < buf_size) && (rc == 0)) {
9722 if (written_so_far == (buf_size - sizeof(u32)))
9723 cmd_flags |= MCPR_NVM_COMMAND_LAST;
9724 else if (((offset + 4) % NVRAM_PAGE_SIZE) == 0)
9725 cmd_flags |= MCPR_NVM_COMMAND_LAST;
9726 else if ((offset % NVRAM_PAGE_SIZE) == 0)
9727 cmd_flags |= MCPR_NVM_COMMAND_FIRST;
9729 memcpy(&val, data_buf, 4);
9731 rc = bnx2x_nvram_write_dword(bp, offset, val, cmd_flags);
9733 /* advance to the next dword */
9734 offset += sizeof(u32);
9735 data_buf += sizeof(u32);
9736 written_so_far += sizeof(u32);
9740 /* disable access to nvram interface */
9741 bnx2x_disable_nvram_access(bp);
9742 bnx2x_release_nvram_lock(bp);
9747 static int bnx2x_set_eeprom(struct net_device *dev,
9748 struct ethtool_eeprom *eeprom, u8 *eebuf)
9750 struct bnx2x *bp = netdev_priv(dev);
9751 int port = BP_PORT(bp);
9754 if (!netif_running(dev))
9757 DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
9758 DP_LEVEL " magic 0x%x offset 0x%x (%d) len 0x%x (%d)\n",
9759 eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
9760 eeprom->len, eeprom->len);
9762 /* parameters already validated in ethtool_set_eeprom */
9764 /* PHY eeprom can be accessed only by the PMF */
9765 if ((eeprom->magic >= 0x50485900) && (eeprom->magic <= 0x504859FF) &&
9769 if (eeprom->magic == 0x50485950) {
9770 /* 'PHYP' (0x50485950): prepare phy for FW upgrade */
9771 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
9773 bnx2x_acquire_phy_lock(bp);
9774 rc |= bnx2x_link_reset(&bp->link_params,
9776 if (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config) ==
9777 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101)
9778 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_0,
9779 MISC_REGISTERS_GPIO_HIGH, port);
9780 bnx2x_release_phy_lock(bp);
9781 bnx2x_link_report(bp);
9783 } else if (eeprom->magic == 0x50485952) {
9784 /* 'PHYR' (0x50485952): re-init link after FW upgrade */
9785 if (bp->state == BNX2X_STATE_OPEN) {
9786 bnx2x_acquire_phy_lock(bp);
9787 rc |= bnx2x_link_reset(&bp->link_params,
9790 rc |= bnx2x_phy_init(&bp->link_params,
9792 bnx2x_release_phy_lock(bp);
9793 bnx2x_calc_fc_adv(bp);
9795 } else if (eeprom->magic == 0x53985943) {
9796 /* 'PHYC' (0x53985943): PHY FW upgrade completed */
9797 if (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config) ==
9798 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101) {
9800 XGXS_EXT_PHY_ADDR(bp->link_params.ext_phy_config);
9802 /* DSP Remove Download Mode */
9803 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_0,
9804 MISC_REGISTERS_GPIO_LOW, port);
9806 bnx2x_acquire_phy_lock(bp);
9808 bnx2x_sfx7101_sp_sw_reset(bp, port, ext_phy_addr);
9810 /* wait 0.5 sec to allow it to run */
9812 bnx2x_ext_phy_hw_reset(bp, port);
9814 bnx2x_release_phy_lock(bp);
9817 rc = bnx2x_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
9822 static int bnx2x_get_coalesce(struct net_device *dev,
9823 struct ethtool_coalesce *coal)
9825 struct bnx2x *bp = netdev_priv(dev);
9827 memset(coal, 0, sizeof(struct ethtool_coalesce));
9829 coal->rx_coalesce_usecs = bp->rx_ticks;
9830 coal->tx_coalesce_usecs = bp->tx_ticks;
9835 #define BNX2X_MAX_COALES_TOUT (0xf0*12) /* Maximal coalescing timeout in us */
9836 static int bnx2x_set_coalesce(struct net_device *dev,
9837 struct ethtool_coalesce *coal)
9839 struct bnx2x *bp = netdev_priv(dev);
9841 bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
9842 if (bp->rx_ticks > BNX2X_MAX_COALES_TOUT)
9843 bp->rx_ticks = BNX2X_MAX_COALES_TOUT;
9845 bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
9846 if (bp->tx_ticks > BNX2X_MAX_COALES_TOUT)
9847 bp->tx_ticks = BNX2X_MAX_COALES_TOUT;
9849 if (netif_running(dev))
9850 bnx2x_update_coalesce(bp);
9855 static void bnx2x_get_ringparam(struct net_device *dev,
9856 struct ethtool_ringparam *ering)
9858 struct bnx2x *bp = netdev_priv(dev);
9860 ering->rx_max_pending = MAX_RX_AVAIL;
9861 ering->rx_mini_max_pending = 0;
9862 ering->rx_jumbo_max_pending = 0;
9864 ering->rx_pending = bp->rx_ring_size;
9865 ering->rx_mini_pending = 0;
9866 ering->rx_jumbo_pending = 0;
9868 ering->tx_max_pending = MAX_TX_AVAIL;
9869 ering->tx_pending = bp->tx_ring_size;
9872 static int bnx2x_set_ringparam(struct net_device *dev,
9873 struct ethtool_ringparam *ering)
9875 struct bnx2x *bp = netdev_priv(dev);
9878 if ((ering->rx_pending > MAX_RX_AVAIL) ||
9879 (ering->tx_pending > MAX_TX_AVAIL) ||
9880 (ering->tx_pending <= MAX_SKB_FRAGS + 4))
9883 bp->rx_ring_size = ering->rx_pending;
9884 bp->tx_ring_size = ering->tx_pending;
9886 if (netif_running(dev)) {
9887 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
9888 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
9894 static void bnx2x_get_pauseparam(struct net_device *dev,
9895 struct ethtool_pauseparam *epause)
9897 struct bnx2x *bp = netdev_priv(dev);
9899 epause->autoneg = (bp->link_params.req_flow_ctrl ==
9900 BNX2X_FLOW_CTRL_AUTO) &&
9901 (bp->link_params.req_line_speed == SPEED_AUTO_NEG);
9903 epause->rx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) ==
9904 BNX2X_FLOW_CTRL_RX);
9905 epause->tx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX) ==
9906 BNX2X_FLOW_CTRL_TX);
9908 DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
9909 DP_LEVEL " autoneg %d rx_pause %d tx_pause %d\n",
9910 epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
9913 static int bnx2x_set_pauseparam(struct net_device *dev,
9914 struct ethtool_pauseparam *epause)
9916 struct bnx2x *bp = netdev_priv(dev);
9921 DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
9922 DP_LEVEL " autoneg %d rx_pause %d tx_pause %d\n",
9923 epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
9925 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO;
9927 if (epause->rx_pause)
9928 bp->link_params.req_flow_ctrl |= BNX2X_FLOW_CTRL_RX;
9930 if (epause->tx_pause)
9931 bp->link_params.req_flow_ctrl |= BNX2X_FLOW_CTRL_TX;
9933 if (bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO)
9934 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_NONE;
9936 if (epause->autoneg) {
9937 if (!(bp->port.supported & SUPPORTED_Autoneg)) {
9938 DP(NETIF_MSG_LINK, "autoneg not supported\n");
9942 if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
9943 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO;
9947 "req_flow_ctrl 0x%x\n", bp->link_params.req_flow_ctrl);
9949 if (netif_running(dev)) {
9950 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
9957 static int bnx2x_set_flags(struct net_device *dev, u32 data)
9959 struct bnx2x *bp = netdev_priv(dev);
9963 /* TPA requires Rx CSUM offloading */
9964 if ((data & ETH_FLAG_LRO) && bp->rx_csum) {
9965 if (!(dev->features & NETIF_F_LRO)) {
9966 dev->features |= NETIF_F_LRO;
9967 bp->flags |= TPA_ENABLE_FLAG;
9971 } else if (dev->features & NETIF_F_LRO) {
9972 dev->features &= ~NETIF_F_LRO;
9973 bp->flags &= ~TPA_ENABLE_FLAG;
9977 if (changed && netif_running(dev)) {
9978 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
9979 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
9985 static u32 bnx2x_get_rx_csum(struct net_device *dev)
9987 struct bnx2x *bp = netdev_priv(dev);
9992 static int bnx2x_set_rx_csum(struct net_device *dev, u32 data)
9994 struct bnx2x *bp = netdev_priv(dev);
9999 /* Disable TPA, when Rx CSUM is disabled. Otherwise all
10000 TPA'ed packets will be discarded due to wrong TCP CSUM */
10002 u32 flags = ethtool_op_get_flags(dev);
10004 rc = bnx2x_set_flags(dev, (flags & ~ETH_FLAG_LRO));
10010 static int bnx2x_set_tso(struct net_device *dev, u32 data)
10013 dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
10014 dev->features |= NETIF_F_TSO6;
10016 dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO_ECN);
10017 dev->features &= ~NETIF_F_TSO6;
10023 static const struct {
10024 char string[ETH_GSTRING_LEN];
10025 } bnx2x_tests_str_arr[BNX2X_NUM_TESTS] = {
10026 { "register_test (offline)" },
10027 { "memory_test (offline)" },
10028 { "loopback_test (offline)" },
10029 { "nvram_test (online)" },
10030 { "interrupt_test (online)" },
10031 { "link_test (online)" },
10032 { "idle check (online)" }
10035 static int bnx2x_test_registers(struct bnx2x *bp)
10037 int idx, i, rc = -ENODEV;
10039 int port = BP_PORT(bp);
10040 static const struct {
10045 /* 0 */ { BRB1_REG_PAUSE_LOW_THRESHOLD_0, 4, 0x000003ff },
10046 { DORQ_REG_DB_ADDR0, 4, 0xffffffff },
10047 { HC_REG_AGG_INT_0, 4, 0x000003ff },
10048 { PBF_REG_MAC_IF0_ENABLE, 4, 0x00000001 },
10049 { PBF_REG_P0_INIT_CRD, 4, 0x000007ff },
10050 { PRS_REG_CID_PORT_0, 4, 0x00ffffff },
10051 { PXP2_REG_PSWRQ_CDU0_L2P, 4, 0x000fffff },
10052 { PXP2_REG_RQ_CDU0_EFIRST_MEM_ADDR, 8, 0x0003ffff },
10053 { PXP2_REG_PSWRQ_TM0_L2P, 4, 0x000fffff },
10054 { PXP2_REG_RQ_USDM0_EFIRST_MEM_ADDR, 8, 0x0003ffff },
10055 /* 10 */ { PXP2_REG_PSWRQ_TSDM0_L2P, 4, 0x000fffff },
10056 { QM_REG_CONNNUM_0, 4, 0x000fffff },
10057 { TM_REG_LIN0_MAX_ACTIVE_CID, 4, 0x0003ffff },
10058 { SRC_REG_KEYRSS0_0, 40, 0xffffffff },
10059 { SRC_REG_KEYRSS0_7, 40, 0xffffffff },
10060 { XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD00, 4, 0x00000001 },
10061 { XCM_REG_WU_DA_CNT_CMD00, 4, 0x00000003 },
10062 { XCM_REG_GLB_DEL_ACK_MAX_CNT_0, 4, 0x000000ff },
10063 { NIG_REG_LLH0_T_BIT, 4, 0x00000001 },
10064 { NIG_REG_EMAC0_IN_EN, 4, 0x00000001 },
10065 /* 20 */ { NIG_REG_BMAC0_IN_EN, 4, 0x00000001 },
10066 { NIG_REG_XCM0_OUT_EN, 4, 0x00000001 },
10067 { NIG_REG_BRB0_OUT_EN, 4, 0x00000001 },
10068 { NIG_REG_LLH0_XCM_MASK, 4, 0x00000007 },
10069 { NIG_REG_LLH0_ACPI_PAT_6_LEN, 68, 0x000000ff },
10070 { NIG_REG_LLH0_ACPI_PAT_0_CRC, 68, 0xffffffff },
10071 { NIG_REG_LLH0_DEST_MAC_0_0, 160, 0xffffffff },
10072 { NIG_REG_LLH0_DEST_IP_0_1, 160, 0xffffffff },
10073 { NIG_REG_LLH0_IPV4_IPV6_0, 160, 0x00000001 },
10074 { NIG_REG_LLH0_DEST_UDP_0, 160, 0x0000ffff },
10075 /* 30 */ { NIG_REG_LLH0_DEST_TCP_0, 160, 0x0000ffff },
10076 { NIG_REG_LLH0_VLAN_ID_0, 160, 0x00000fff },
10077 { NIG_REG_XGXS_SERDES0_MODE_SEL, 4, 0x00000001 },
10078 { NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0, 4, 0x00000001 },
10079 { NIG_REG_STATUS_INTERRUPT_PORT0, 4, 0x07ffffff },
10080 { NIG_REG_XGXS0_CTRL_EXTREMOTEMDIOST, 24, 0x00000001 },
10081 { NIG_REG_SERDES0_CTRL_PHY_ADDR, 16, 0x0000001f },
10083 { 0xffffffff, 0, 0x00000000 }
10086 if (!netif_running(bp->dev))
10089 /* Repeat the test twice:
10090 First by writing 0x00000000, second by writing 0xffffffff */
10091 for (idx = 0; idx < 2; idx++) {
10098 wr_val = 0xffffffff;
10102 for (i = 0; reg_tbl[i].offset0 != 0xffffffff; i++) {
10103 u32 offset, mask, save_val, val;
10105 offset = reg_tbl[i].offset0 + port*reg_tbl[i].offset1;
10106 mask = reg_tbl[i].mask;
10108 save_val = REG_RD(bp, offset);
10110 REG_WR(bp, offset, wr_val);
10111 val = REG_RD(bp, offset);
10113 /* Restore the original register's value */
10114 REG_WR(bp, offset, save_val);
10116 /* verify that value is as expected value */
10117 if ((val & mask) != (wr_val & mask))
10118 goto test_reg_exit;
10128 static int bnx2x_test_memory(struct bnx2x *bp)
10130 int i, j, rc = -ENODEV;
10132 static const struct {
10136 { CCM_REG_XX_DESCR_TABLE, CCM_REG_XX_DESCR_TABLE_SIZE },
10137 { CFC_REG_ACTIVITY_COUNTER, CFC_REG_ACTIVITY_COUNTER_SIZE },
10138 { CFC_REG_LINK_LIST, CFC_REG_LINK_LIST_SIZE },
10139 { DMAE_REG_CMD_MEM, DMAE_REG_CMD_MEM_SIZE },
10140 { TCM_REG_XX_DESCR_TABLE, TCM_REG_XX_DESCR_TABLE_SIZE },
10141 { UCM_REG_XX_DESCR_TABLE, UCM_REG_XX_DESCR_TABLE_SIZE },
10142 { XCM_REG_XX_DESCR_TABLE, XCM_REG_XX_DESCR_TABLE_SIZE },
10146 static const struct {
10152 { "CCM_PRTY_STS", CCM_REG_CCM_PRTY_STS, 0x3ffc0, 0 },
10153 { "CFC_PRTY_STS", CFC_REG_CFC_PRTY_STS, 0x2, 0x2 },
10154 { "DMAE_PRTY_STS", DMAE_REG_DMAE_PRTY_STS, 0, 0 },
10155 { "TCM_PRTY_STS", TCM_REG_TCM_PRTY_STS, 0x3ffc0, 0 },
10156 { "UCM_PRTY_STS", UCM_REG_UCM_PRTY_STS, 0x3ffc0, 0 },
10157 { "XCM_PRTY_STS", XCM_REG_XCM_PRTY_STS, 0x3ffc1, 0 },
10159 { NULL, 0xffffffff, 0, 0 }
10162 if (!netif_running(bp->dev))
10165 /* Go through all the memories */
10166 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++)
10167 for (j = 0; j < mem_tbl[i].size; j++)
10168 REG_RD(bp, mem_tbl[i].offset + j*4);
10170 /* Check the parity status */
10171 for (i = 0; prty_tbl[i].offset != 0xffffffff; i++) {
10172 val = REG_RD(bp, prty_tbl[i].offset);
10173 if ((CHIP_IS_E1(bp) && (val & ~(prty_tbl[i].e1_mask))) ||
10174 (CHIP_IS_E1H(bp) && (val & ~(prty_tbl[i].e1h_mask)))) {
10176 "%s is 0x%x\n", prty_tbl[i].name, val);
10177 goto test_mem_exit;
10187 static void bnx2x_wait_for_link(struct bnx2x *bp, u8 link_up)
10192 while (bnx2x_link_test(bp) && cnt--)
10196 static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode, u8 link_up)
10198 unsigned int pkt_size, num_pkts, i;
10199 struct sk_buff *skb;
10200 unsigned char *packet;
10201 struct bnx2x_fastpath *fp_rx = &bp->fp[0];
10202 struct bnx2x_fastpath *fp_tx = &bp->fp[0];
10203 u16 tx_start_idx, tx_idx;
10204 u16 rx_start_idx, rx_idx;
10205 u16 pkt_prod, bd_prod;
10206 struct sw_tx_bd *tx_buf;
10207 struct eth_tx_start_bd *tx_start_bd;
10208 struct eth_tx_parse_bd *pbd = NULL;
10209 dma_addr_t mapping;
10210 union eth_rx_cqe *cqe;
10212 struct sw_rx_bd *rx_buf;
10216 /* check the loopback mode */
10217 switch (loopback_mode) {
10218 case BNX2X_PHY_LOOPBACK:
10219 if (bp->link_params.loopback_mode != LOOPBACK_XGXS_10)
10222 case BNX2X_MAC_LOOPBACK:
10223 bp->link_params.loopback_mode = LOOPBACK_BMAC;
10224 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
10230 /* prepare the loopback packet */
10231 pkt_size = (((bp->dev->mtu < ETH_MAX_PACKET_SIZE) ?
10232 bp->dev->mtu : ETH_MAX_PACKET_SIZE) + ETH_HLEN);
10233 skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
10236 goto test_loopback_exit;
10238 packet = skb_put(skb, pkt_size);
10239 memcpy(packet, bp->dev->dev_addr, ETH_ALEN);
10240 memset(packet + ETH_ALEN, 0, ETH_ALEN);
10241 memset(packet + 2*ETH_ALEN, 0x77, (ETH_HLEN - 2*ETH_ALEN));
10242 for (i = ETH_HLEN; i < pkt_size; i++)
10243 packet[i] = (unsigned char) (i & 0xff);
10245 /* send the loopback packet */
10247 tx_start_idx = le16_to_cpu(*fp_tx->tx_cons_sb);
10248 rx_start_idx = le16_to_cpu(*fp_rx->rx_cons_sb);
10250 pkt_prod = fp_tx->tx_pkt_prod++;
10251 tx_buf = &fp_tx->tx_buf_ring[TX_BD(pkt_prod)];
10252 tx_buf->first_bd = fp_tx->tx_bd_prod;
10256 bd_prod = TX_BD(fp_tx->tx_bd_prod);
10257 tx_start_bd = &fp_tx->tx_desc_ring[bd_prod].start_bd;
10258 mapping = pci_map_single(bp->pdev, skb->data,
10259 skb_headlen(skb), PCI_DMA_TODEVICE);
10260 tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
10261 tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
10262 tx_start_bd->nbd = cpu_to_le16(2); /* start + pbd */
10263 tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb));
10264 tx_start_bd->vlan = cpu_to_le16(pkt_prod);
10265 tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
10266 tx_start_bd->general_data = ((UNICAST_ADDRESS <<
10267 ETH_TX_START_BD_ETH_ADDR_TYPE_SHIFT) | 1);
10269 /* turn on parsing and get a BD */
10270 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
10271 pbd = &fp_tx->tx_desc_ring[bd_prod].parse_bd;
10273 memset(pbd, 0, sizeof(struct eth_tx_parse_bd));
10277 fp_tx->tx_db.data.prod += 2;
10279 DOORBELL(bp, fp_tx->index, fp_tx->tx_db.raw);
10284 fp_tx->tx_bd_prod += 2; /* start + pbd */
10285 bp->dev->trans_start = jiffies;
10289 tx_idx = le16_to_cpu(*fp_tx->tx_cons_sb);
10290 if (tx_idx != tx_start_idx + num_pkts)
10291 goto test_loopback_exit;
10293 rx_idx = le16_to_cpu(*fp_rx->rx_cons_sb);
10294 if (rx_idx != rx_start_idx + num_pkts)
10295 goto test_loopback_exit;
10297 cqe = &fp_rx->rx_comp_ring[RCQ_BD(fp_rx->rx_comp_cons)];
10298 cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
10299 if (CQE_TYPE(cqe_fp_flags) || (cqe_fp_flags & ETH_RX_ERROR_FALGS))
10300 goto test_loopback_rx_exit;
10302 len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
10303 if (len != pkt_size)
10304 goto test_loopback_rx_exit;
10306 rx_buf = &fp_rx->rx_buf_ring[RX_BD(fp_rx->rx_bd_cons)];
10308 skb_reserve(skb, cqe->fast_path_cqe.placement_offset);
10309 for (i = ETH_HLEN; i < pkt_size; i++)
10310 if (*(skb->data + i) != (unsigned char) (i & 0xff))
10311 goto test_loopback_rx_exit;
10315 test_loopback_rx_exit:
10317 fp_rx->rx_bd_cons = NEXT_RX_IDX(fp_rx->rx_bd_cons);
10318 fp_rx->rx_bd_prod = NEXT_RX_IDX(fp_rx->rx_bd_prod);
10319 fp_rx->rx_comp_cons = NEXT_RCQ_IDX(fp_rx->rx_comp_cons);
10320 fp_rx->rx_comp_prod = NEXT_RCQ_IDX(fp_rx->rx_comp_prod);
10322 /* Update producers */
10323 bnx2x_update_rx_prod(bp, fp_rx, fp_rx->rx_bd_prod, fp_rx->rx_comp_prod,
10324 fp_rx->rx_sge_prod);
10326 test_loopback_exit:
10327 bp->link_params.loopback_mode = LOOPBACK_NONE;
10332 static int bnx2x_test_loopback(struct bnx2x *bp, u8 link_up)
10336 if (!netif_running(bp->dev))
10337 return BNX2X_LOOPBACK_FAILED;
10339 bnx2x_netif_stop(bp, 1);
10340 bnx2x_acquire_phy_lock(bp);
10342 res = bnx2x_run_loopback(bp, BNX2X_PHY_LOOPBACK, link_up);
10344 DP(NETIF_MSG_PROBE, " PHY loopback failed (res %d)\n", res);
10345 rc |= BNX2X_PHY_LOOPBACK_FAILED;
10348 res = bnx2x_run_loopback(bp, BNX2X_MAC_LOOPBACK, link_up);
10350 DP(NETIF_MSG_PROBE, " MAC loopback failed (res %d)\n", res);
10351 rc |= BNX2X_MAC_LOOPBACK_FAILED;
10354 bnx2x_release_phy_lock(bp);
10355 bnx2x_netif_start(bp);
10360 #define CRC32_RESIDUAL 0xdebb20e3
10362 static int bnx2x_test_nvram(struct bnx2x *bp)
10364 static const struct {
10368 { 0, 0x14 }, /* bootstrap */
10369 { 0x14, 0xec }, /* dir */
10370 { 0x100, 0x350 }, /* manuf_info */
10371 { 0x450, 0xf0 }, /* feature_info */
10372 { 0x640, 0x64 }, /* upgrade_key_info */
10374 { 0x708, 0x70 }, /* manuf_key_info */
10378 __be32 buf[0x350 / 4];
10379 u8 *data = (u8 *)buf;
10383 rc = bnx2x_nvram_read(bp, 0, data, 4);
10385 DP(NETIF_MSG_PROBE, "magic value read (rc %d)\n", rc);
10386 goto test_nvram_exit;
10389 magic = be32_to_cpu(buf[0]);
10390 if (magic != 0x669955aa) {
10391 DP(NETIF_MSG_PROBE, "magic value (0x%08x)\n", magic);
10393 goto test_nvram_exit;
10396 for (i = 0; nvram_tbl[i].size; i++) {
10398 rc = bnx2x_nvram_read(bp, nvram_tbl[i].offset, data,
10399 nvram_tbl[i].size);
10401 DP(NETIF_MSG_PROBE,
10402 "nvram_tbl[%d] read data (rc %d)\n", i, rc);
10403 goto test_nvram_exit;
10406 crc = ether_crc_le(nvram_tbl[i].size, data);
10407 if (crc != CRC32_RESIDUAL) {
10408 DP(NETIF_MSG_PROBE,
10409 "nvram_tbl[%d] crc value (0x%08x)\n", i, crc);
10411 goto test_nvram_exit;
10419 static int bnx2x_test_intr(struct bnx2x *bp)
10421 struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
10424 if (!netif_running(bp->dev))
10427 config->hdr.length = 0;
10428 if (CHIP_IS_E1(bp))
10429 config->hdr.offset = (BP_PORT(bp) ? 32 : 0);
10431 config->hdr.offset = BP_FUNC(bp);
10432 config->hdr.client_id = bp->fp->cl_id;
10433 config->hdr.reserved1 = 0;
10435 bp->set_mac_pending++;
10437 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
10438 U64_HI(bnx2x_sp_mapping(bp, mac_config)),
10439 U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
10441 for (i = 0; i < 10; i++) {
10442 if (!bp->set_mac_pending)
10445 msleep_interruptible(10);
10454 static void bnx2x_self_test(struct net_device *dev,
10455 struct ethtool_test *etest, u64 *buf)
10457 struct bnx2x *bp = netdev_priv(dev);
10459 memset(buf, 0, sizeof(u64) * BNX2X_NUM_TESTS);
10461 if (!netif_running(dev))
10464 /* offline tests are not supported in MF mode */
10466 etest->flags &= ~ETH_TEST_FL_OFFLINE;
10468 if (etest->flags & ETH_TEST_FL_OFFLINE) {
10469 int port = BP_PORT(bp);
10473 /* save current value of input enable for TX port IF */
10474 val = REG_RD(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4);
10475 /* disable input for TX port IF */
10476 REG_WR(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4, 0);
10478 link_up = (bnx2x_link_test(bp) == 0);
10479 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
10480 bnx2x_nic_load(bp, LOAD_DIAG);
10481 /* wait until link state is restored */
10482 bnx2x_wait_for_link(bp, link_up);
10484 if (bnx2x_test_registers(bp) != 0) {
10486 etest->flags |= ETH_TEST_FL_FAILED;
10488 if (bnx2x_test_memory(bp) != 0) {
10490 etest->flags |= ETH_TEST_FL_FAILED;
10492 buf[2] = bnx2x_test_loopback(bp, link_up);
10494 etest->flags |= ETH_TEST_FL_FAILED;
10496 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
10498 /* restore input for TX port IF */
10499 REG_WR(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4, val);
10501 bnx2x_nic_load(bp, LOAD_NORMAL);
10502 /* wait until link state is restored */
10503 bnx2x_wait_for_link(bp, link_up);
10505 if (bnx2x_test_nvram(bp) != 0) {
10507 etest->flags |= ETH_TEST_FL_FAILED;
10509 if (bnx2x_test_intr(bp) != 0) {
10511 etest->flags |= ETH_TEST_FL_FAILED;
10514 if (bnx2x_link_test(bp) != 0) {
10516 etest->flags |= ETH_TEST_FL_FAILED;
10519 #ifdef BNX2X_EXTRA_DEBUG
10520 bnx2x_panic_dump(bp);
10524 static const struct {
10527 u8 string[ETH_GSTRING_LEN];
10528 } bnx2x_q_stats_arr[BNX2X_NUM_Q_STATS] = {
10529 /* 1 */ { Q_STATS_OFFSET32(total_bytes_received_hi), 8, "[%d]: rx_bytes" },
10530 { Q_STATS_OFFSET32(error_bytes_received_hi),
10531 8, "[%d]: rx_error_bytes" },
10532 { Q_STATS_OFFSET32(total_unicast_packets_received_hi),
10533 8, "[%d]: rx_ucast_packets" },
10534 { Q_STATS_OFFSET32(total_multicast_packets_received_hi),
10535 8, "[%d]: rx_mcast_packets" },
10536 { Q_STATS_OFFSET32(total_broadcast_packets_received_hi),
10537 8, "[%d]: rx_bcast_packets" },
10538 { Q_STATS_OFFSET32(no_buff_discard_hi), 8, "[%d]: rx_discards" },
10539 { Q_STATS_OFFSET32(rx_err_discard_pkt),
10540 4, "[%d]: rx_phy_ip_err_discards"},
10541 { Q_STATS_OFFSET32(rx_skb_alloc_failed),
10542 4, "[%d]: rx_skb_alloc_discard" },
10543 { Q_STATS_OFFSET32(hw_csum_err), 4, "[%d]: rx_csum_offload_errors" },
10545 /* 10 */{ Q_STATS_OFFSET32(total_bytes_transmitted_hi), 8, "[%d]: tx_bytes" },
10546 { Q_STATS_OFFSET32(total_unicast_packets_transmitted_hi),
10547 8, "[%d]: tx_packets" }
10550 static const struct {
10554 #define STATS_FLAGS_PORT 1
10555 #define STATS_FLAGS_FUNC 2
10556 #define STATS_FLAGS_BOTH (STATS_FLAGS_FUNC | STATS_FLAGS_PORT)
10557 u8 string[ETH_GSTRING_LEN];
10558 } bnx2x_stats_arr[BNX2X_NUM_STATS] = {
10559 /* 1 */ { STATS_OFFSET32(total_bytes_received_hi),
10560 8, STATS_FLAGS_BOTH, "rx_bytes" },
10561 { STATS_OFFSET32(error_bytes_received_hi),
10562 8, STATS_FLAGS_BOTH, "rx_error_bytes" },
10563 { STATS_OFFSET32(total_unicast_packets_received_hi),
10564 8, STATS_FLAGS_BOTH, "rx_ucast_packets" },
10565 { STATS_OFFSET32(total_multicast_packets_received_hi),
10566 8, STATS_FLAGS_BOTH, "rx_mcast_packets" },
10567 { STATS_OFFSET32(total_broadcast_packets_received_hi),
10568 8, STATS_FLAGS_BOTH, "rx_bcast_packets" },
10569 { STATS_OFFSET32(rx_stat_dot3statsfcserrors_hi),
10570 8, STATS_FLAGS_PORT, "rx_crc_errors" },
10571 { STATS_OFFSET32(rx_stat_dot3statsalignmenterrors_hi),
10572 8, STATS_FLAGS_PORT, "rx_align_errors" },
10573 { STATS_OFFSET32(rx_stat_etherstatsundersizepkts_hi),
10574 8, STATS_FLAGS_PORT, "rx_undersize_packets" },
10575 { STATS_OFFSET32(etherstatsoverrsizepkts_hi),
10576 8, STATS_FLAGS_PORT, "rx_oversize_packets" },
10577 /* 10 */{ STATS_OFFSET32(rx_stat_etherstatsfragments_hi),
10578 8, STATS_FLAGS_PORT, "rx_fragments" },
10579 { STATS_OFFSET32(rx_stat_etherstatsjabbers_hi),
10580 8, STATS_FLAGS_PORT, "rx_jabbers" },
10581 { STATS_OFFSET32(no_buff_discard_hi),
10582 8, STATS_FLAGS_BOTH, "rx_discards" },
10583 { STATS_OFFSET32(mac_filter_discard),
10584 4, STATS_FLAGS_PORT, "rx_filtered_packets" },
10585 { STATS_OFFSET32(xxoverflow_discard),
10586 4, STATS_FLAGS_PORT, "rx_fw_discards" },
10587 { STATS_OFFSET32(brb_drop_hi),
10588 8, STATS_FLAGS_PORT, "rx_brb_discard" },
10589 { STATS_OFFSET32(brb_truncate_hi),
10590 8, STATS_FLAGS_PORT, "rx_brb_truncate" },
10591 { STATS_OFFSET32(pause_frames_received_hi),
10592 8, STATS_FLAGS_PORT, "rx_pause_frames" },
10593 { STATS_OFFSET32(rx_stat_maccontrolframesreceived_hi),
10594 8, STATS_FLAGS_PORT, "rx_mac_ctrl_frames" },
10595 { STATS_OFFSET32(nig_timer_max),
10596 4, STATS_FLAGS_PORT, "rx_constant_pause_events" },
10597 /* 20 */{ STATS_OFFSET32(rx_err_discard_pkt),
10598 4, STATS_FLAGS_BOTH, "rx_phy_ip_err_discards"},
10599 { STATS_OFFSET32(rx_skb_alloc_failed),
10600 4, STATS_FLAGS_BOTH, "rx_skb_alloc_discard" },
10601 { STATS_OFFSET32(hw_csum_err),
10602 4, STATS_FLAGS_BOTH, "rx_csum_offload_errors" },
10604 { STATS_OFFSET32(total_bytes_transmitted_hi),
10605 8, STATS_FLAGS_BOTH, "tx_bytes" },
10606 { STATS_OFFSET32(tx_stat_ifhcoutbadoctets_hi),
10607 8, STATS_FLAGS_PORT, "tx_error_bytes" },
10608 { STATS_OFFSET32(total_unicast_packets_transmitted_hi),
10609 8, STATS_FLAGS_BOTH, "tx_packets" },
10610 { STATS_OFFSET32(tx_stat_dot3statsinternalmactransmiterrors_hi),
10611 8, STATS_FLAGS_PORT, "tx_mac_errors" },
10612 { STATS_OFFSET32(rx_stat_dot3statscarriersenseerrors_hi),
10613 8, STATS_FLAGS_PORT, "tx_carrier_errors" },
10614 { STATS_OFFSET32(tx_stat_dot3statssinglecollisionframes_hi),
10615 8, STATS_FLAGS_PORT, "tx_single_collisions" },
10616 { STATS_OFFSET32(tx_stat_dot3statsmultiplecollisionframes_hi),
10617 8, STATS_FLAGS_PORT, "tx_multi_collisions" },
10618 /* 30 */{ STATS_OFFSET32(tx_stat_dot3statsdeferredtransmissions_hi),
10619 8, STATS_FLAGS_PORT, "tx_deferred" },
10620 { STATS_OFFSET32(tx_stat_dot3statsexcessivecollisions_hi),
10621 8, STATS_FLAGS_PORT, "tx_excess_collisions" },
10622 { STATS_OFFSET32(tx_stat_dot3statslatecollisions_hi),
10623 8, STATS_FLAGS_PORT, "tx_late_collisions" },
10624 { STATS_OFFSET32(tx_stat_etherstatscollisions_hi),
10625 8, STATS_FLAGS_PORT, "tx_total_collisions" },
10626 { STATS_OFFSET32(tx_stat_etherstatspkts64octets_hi),
10627 8, STATS_FLAGS_PORT, "tx_64_byte_packets" },
10628 { STATS_OFFSET32(tx_stat_etherstatspkts65octetsto127octets_hi),
10629 8, STATS_FLAGS_PORT, "tx_65_to_127_byte_packets" },
10630 { STATS_OFFSET32(tx_stat_etherstatspkts128octetsto255octets_hi),
10631 8, STATS_FLAGS_PORT, "tx_128_to_255_byte_packets" },
10632 { STATS_OFFSET32(tx_stat_etherstatspkts256octetsto511octets_hi),
10633 8, STATS_FLAGS_PORT, "tx_256_to_511_byte_packets" },
10634 { STATS_OFFSET32(tx_stat_etherstatspkts512octetsto1023octets_hi),
10635 8, STATS_FLAGS_PORT, "tx_512_to_1023_byte_packets" },
10636 { STATS_OFFSET32(etherstatspkts1024octetsto1522octets_hi),
10637 8, STATS_FLAGS_PORT, "tx_1024_to_1522_byte_packets" },
10638 /* 40 */{ STATS_OFFSET32(etherstatspktsover1522octets_hi),
10639 8, STATS_FLAGS_PORT, "tx_1523_to_9022_byte_packets" },
10640 { STATS_OFFSET32(pause_frames_sent_hi),
10641 8, STATS_FLAGS_PORT, "tx_pause_frames" }
10644 #define IS_PORT_STAT(i) \
10645 ((bnx2x_stats_arr[i].flags & STATS_FLAGS_BOTH) == STATS_FLAGS_PORT)
10646 #define IS_FUNC_STAT(i) (bnx2x_stats_arr[i].flags & STATS_FLAGS_FUNC)
10647 #define IS_E1HMF_MODE_STAT(bp) \
10648 (IS_E1HMF(bp) && !(bp->msglevel & BNX2X_MSG_STATS))
10650 static int bnx2x_get_sset_count(struct net_device *dev, int stringset)
10652 struct bnx2x *bp = netdev_priv(dev);
10655 switch(stringset) {
10657 if (is_multi(bp)) {
10658 num_stats = BNX2X_NUM_Q_STATS * bp->num_queues;
10659 if (!IS_E1HMF_MODE_STAT(bp))
10660 num_stats += BNX2X_NUM_STATS;
10662 if (IS_E1HMF_MODE_STAT(bp)) {
10664 for (i = 0; i < BNX2X_NUM_STATS; i++)
10665 if (IS_FUNC_STAT(i))
10668 num_stats = BNX2X_NUM_STATS;
10673 return BNX2X_NUM_TESTS;
10680 static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
10682 struct bnx2x *bp = netdev_priv(dev);
10685 switch (stringset) {
10687 if (is_multi(bp)) {
10689 for_each_queue(bp, i) {
10690 for (j = 0; j < BNX2X_NUM_Q_STATS; j++)
10691 sprintf(buf + (k + j)*ETH_GSTRING_LEN,
10692 bnx2x_q_stats_arr[j].string, i);
10693 k += BNX2X_NUM_Q_STATS;
10695 if (IS_E1HMF_MODE_STAT(bp))
10697 for (j = 0; j < BNX2X_NUM_STATS; j++)
10698 strcpy(buf + (k + j)*ETH_GSTRING_LEN,
10699 bnx2x_stats_arr[j].string);
10701 for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
10702 if (IS_E1HMF_MODE_STAT(bp) && IS_PORT_STAT(i))
10704 strcpy(buf + j*ETH_GSTRING_LEN,
10705 bnx2x_stats_arr[i].string);
10712 memcpy(buf, bnx2x_tests_str_arr, sizeof(bnx2x_tests_str_arr));
10717 static void bnx2x_get_ethtool_stats(struct net_device *dev,
10718 struct ethtool_stats *stats, u64 *buf)
10720 struct bnx2x *bp = netdev_priv(dev);
10721 u32 *hw_stats, *offset;
10724 if (is_multi(bp)) {
10726 for_each_queue(bp, i) {
10727 hw_stats = (u32 *)&bp->fp[i].eth_q_stats;
10728 for (j = 0; j < BNX2X_NUM_Q_STATS; j++) {
10729 if (bnx2x_q_stats_arr[j].size == 0) {
10730 /* skip this counter */
10734 offset = (hw_stats +
10735 bnx2x_q_stats_arr[j].offset);
10736 if (bnx2x_q_stats_arr[j].size == 4) {
10737 /* 4-byte counter */
10738 buf[k + j] = (u64) *offset;
10741 /* 8-byte counter */
10742 buf[k + j] = HILO_U64(*offset, *(offset + 1));
10744 k += BNX2X_NUM_Q_STATS;
10746 if (IS_E1HMF_MODE_STAT(bp))
10748 hw_stats = (u32 *)&bp->eth_stats;
10749 for (j = 0; j < BNX2X_NUM_STATS; j++) {
10750 if (bnx2x_stats_arr[j].size == 0) {
10751 /* skip this counter */
10755 offset = (hw_stats + bnx2x_stats_arr[j].offset);
10756 if (bnx2x_stats_arr[j].size == 4) {
10757 /* 4-byte counter */
10758 buf[k + j] = (u64) *offset;
10761 /* 8-byte counter */
10762 buf[k + j] = HILO_U64(*offset, *(offset + 1));
10765 hw_stats = (u32 *)&bp->eth_stats;
10766 for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
10767 if (IS_E1HMF_MODE_STAT(bp) && IS_PORT_STAT(i))
10769 if (bnx2x_stats_arr[i].size == 0) {
10770 /* skip this counter */
10775 offset = (hw_stats + bnx2x_stats_arr[i].offset);
10776 if (bnx2x_stats_arr[i].size == 4) {
10777 /* 4-byte counter */
10778 buf[j] = (u64) *offset;
10782 /* 8-byte counter */
10783 buf[j] = HILO_U64(*offset, *(offset + 1));
10789 static int bnx2x_phys_id(struct net_device *dev, u32 data)
10791 struct bnx2x *bp = netdev_priv(dev);
10794 if (!netif_running(dev))
10803 for (i = 0; i < (data * 2); i++) {
10805 bnx2x_set_led(&bp->link_params, LED_MODE_OPER,
10808 bnx2x_set_led(&bp->link_params, LED_MODE_OFF, 0);
10810 msleep_interruptible(500);
10811 if (signal_pending(current))
10815 if (bp->link_vars.link_up)
10816 bnx2x_set_led(&bp->link_params, LED_MODE_OPER,
10817 bp->link_vars.line_speed);
10822 static const struct ethtool_ops bnx2x_ethtool_ops = {
10823 .get_settings = bnx2x_get_settings,
10824 .set_settings = bnx2x_set_settings,
10825 .get_drvinfo = bnx2x_get_drvinfo,
10826 .get_regs_len = bnx2x_get_regs_len,
10827 .get_regs = bnx2x_get_regs,
10828 .get_wol = bnx2x_get_wol,
10829 .set_wol = bnx2x_set_wol,
10830 .get_msglevel = bnx2x_get_msglevel,
10831 .set_msglevel = bnx2x_set_msglevel,
10832 .nway_reset = bnx2x_nway_reset,
10833 .get_link = bnx2x_get_link,
10834 .get_eeprom_len = bnx2x_get_eeprom_len,
10835 .get_eeprom = bnx2x_get_eeprom,
10836 .set_eeprom = bnx2x_set_eeprom,
10837 .get_coalesce = bnx2x_get_coalesce,
10838 .set_coalesce = bnx2x_set_coalesce,
10839 .get_ringparam = bnx2x_get_ringparam,
10840 .set_ringparam = bnx2x_set_ringparam,
10841 .get_pauseparam = bnx2x_get_pauseparam,
10842 .set_pauseparam = bnx2x_set_pauseparam,
10843 .get_rx_csum = bnx2x_get_rx_csum,
10844 .set_rx_csum = bnx2x_set_rx_csum,
10845 .get_tx_csum = ethtool_op_get_tx_csum,
10846 .set_tx_csum = ethtool_op_set_tx_hw_csum,
10847 .set_flags = bnx2x_set_flags,
10848 .get_flags = ethtool_op_get_flags,
10849 .get_sg = ethtool_op_get_sg,
10850 .set_sg = ethtool_op_set_sg,
10851 .get_tso = ethtool_op_get_tso,
10852 .set_tso = bnx2x_set_tso,
10853 .self_test = bnx2x_self_test,
10854 .get_sset_count = bnx2x_get_sset_count,
10855 .get_strings = bnx2x_get_strings,
10856 .phys_id = bnx2x_phys_id,
10857 .get_ethtool_stats = bnx2x_get_ethtool_stats,
10860 /* end of ethtool_ops */
10862 /****************************************************************************
10863 * General service functions
10864 ****************************************************************************/
10866 static int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
10870 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);
10874 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
10875 ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
10876 PCI_PM_CTRL_PME_STATUS));
10878 if (pmcsr & PCI_PM_CTRL_STATE_MASK)
10879 /* delay required during transition out of D3hot */
10884 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
10888 pmcsr |= PCI_PM_CTRL_PME_ENABLE;
10890 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
10893 /* No more memory access after this point until
10894 * device is brought back to D0.
10904 static inline int bnx2x_has_rx_work(struct bnx2x_fastpath *fp)
10908 /* Tell compiler that status block fields can change */
10910 rx_cons_sb = le16_to_cpu(*fp->rx_cons_sb);
10911 if ((rx_cons_sb & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
10913 return (fp->rx_comp_cons != rx_cons_sb);
10917 * net_device service functions
10920 static int bnx2x_poll(struct napi_struct *napi, int budget)
10923 struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
10925 struct bnx2x *bp = fp->bp;
10928 #ifdef BNX2X_STOP_ON_ERROR
10929 if (unlikely(bp->panic)) {
10930 napi_complete(napi);
10935 if (bnx2x_has_tx_work(fp))
10938 if (bnx2x_has_rx_work(fp)) {
10939 work_done += bnx2x_rx_int(fp, budget - work_done);
10941 /* must not complete if we consumed full budget */
10942 if (work_done >= budget)
10946 /* Fall out from the NAPI loop if needed */
10947 if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
10948 bnx2x_update_fpsb_idx(fp);
10949 /* bnx2x_has_rx_work() reads the status block, thus we need
10950 * to ensure that status block indices have been actually read
10951 * (bnx2x_update_fpsb_idx) prior to this check
10952 * (bnx2x_has_rx_work) so that we won't write the "newer"
10953 * value of the status block to IGU (if there was a DMA right
10954 * after bnx2x_has_rx_work and if there is no rmb, the memory
10955 * reading (bnx2x_update_fpsb_idx) may be postponed to right
10956 * before bnx2x_ack_sb). In this case there will never be
10957 * another interrupt until there is another update of the
10958 * status block, while there is still unhandled work.
10962 if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
10963 napi_complete(napi);
10964 /* Re-enable interrupts */
10965 bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID,
10966 le16_to_cpu(fp->fp_c_idx),
10968 bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID,
10969 le16_to_cpu(fp->fp_u_idx),
10970 IGU_INT_ENABLE, 1);
10980 /* we split the first BD into headers and data BDs
10981 * to ease the pain of our fellow microcode engineers
10982 * we use one mapping for both BDs
10983 * So far this has only been observed to happen
10984 * in Other Operating Systems(TM)
10986 static noinline u16 bnx2x_tx_split(struct bnx2x *bp,
10987 struct bnx2x_fastpath *fp,
10988 struct sw_tx_bd *tx_buf,
10989 struct eth_tx_start_bd **tx_bd, u16 hlen,
10990 u16 bd_prod, int nbd)
10992 struct eth_tx_start_bd *h_tx_bd = *tx_bd;
10993 struct eth_tx_bd *d_tx_bd;
10994 dma_addr_t mapping;
10995 int old_len = le16_to_cpu(h_tx_bd->nbytes);
10997 /* first fix first BD */
10998 h_tx_bd->nbd = cpu_to_le16(nbd);
10999 h_tx_bd->nbytes = cpu_to_le16(hlen);
11001 DP(NETIF_MSG_TX_QUEUED, "TSO split header size is %d "
11002 "(%x:%x) nbd %d\n", h_tx_bd->nbytes, h_tx_bd->addr_hi,
11003 h_tx_bd->addr_lo, h_tx_bd->nbd);
11005 /* now get a new data BD
11006 * (after the pbd) and fill it */
11007 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
11008 d_tx_bd = &fp->tx_desc_ring[bd_prod].reg_bd;
11010 mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi),
11011 le32_to_cpu(h_tx_bd->addr_lo)) + hlen;
11013 d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
11014 d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
11015 d_tx_bd->nbytes = cpu_to_le16(old_len - hlen);
11017 /* this marks the BD as one that has no individual mapping */
11018 tx_buf->flags |= BNX2X_TSO_SPLIT_BD;
11020 DP(NETIF_MSG_TX_QUEUED,
11021 "TSO split data size is %d (%x:%x)\n",
11022 d_tx_bd->nbytes, d_tx_bd->addr_hi, d_tx_bd->addr_lo);
11025 *tx_bd = (struct eth_tx_start_bd *)d_tx_bd;
11030 static inline u16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
11033 csum = (u16) ~csum_fold(csum_sub(csum,
11034 csum_partial(t_header - fix, fix, 0)));
11037 csum = (u16) ~csum_fold(csum_add(csum,
11038 csum_partial(t_header, -fix, 0)));
11040 return swab16(csum);
11043 static inline u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
11047 if (skb->ip_summed != CHECKSUM_PARTIAL)
11051 if (skb->protocol == htons(ETH_P_IPV6)) {
11053 if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
11054 rc |= XMIT_CSUM_TCP;
11058 if (ip_hdr(skb)->protocol == IPPROTO_TCP)
11059 rc |= XMIT_CSUM_TCP;
11063 if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
11064 rc |= (XMIT_GSO_V4 | XMIT_CSUM_V4 | XMIT_CSUM_TCP);
11066 else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
11067 rc |= (XMIT_GSO_V6 | XMIT_CSUM_TCP | XMIT_CSUM_V6);
11072 #if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
11073 /* check if packet requires linearization (packet is too fragmented)
11074 no need to check fragmentation if page size > 8K (there will be no
11075 violation to FW restrictions) */
11076 static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
11081 int first_bd_sz = 0;
11083 /* 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */
11084 if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - 3)) {
11086 if (xmit_type & XMIT_GSO) {
11087 unsigned short lso_mss = skb_shinfo(skb)->gso_size;
11088 /* Check if LSO packet needs to be copied:
11089 3 = 1 (for headers BD) + 2 (for PBD and last BD) */
11090 int wnd_size = MAX_FETCH_BD - 3;
11091 /* Number of windows to check */
11092 int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size;
11097 /* Headers length */
11098 hlen = (int)(skb_transport_header(skb) - skb->data) +
11101 /* Amount of data (w/o headers) on linear part of SKB*/
11102 first_bd_sz = skb_headlen(skb) - hlen;
11104 wnd_sum = first_bd_sz;
11106 /* Calculate the first sum - it's special */
11107 for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++)
11109 skb_shinfo(skb)->frags[frag_idx].size;
11111 /* If there was data on linear skb data - check it */
11112 if (first_bd_sz > 0) {
11113 if (unlikely(wnd_sum < lso_mss)) {
11118 wnd_sum -= first_bd_sz;
11121 /* Others are easier: run through the frag list and
11122 check all windows */
11123 for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) {
11125 skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1].size;
11127 if (unlikely(wnd_sum < lso_mss)) {
11132 skb_shinfo(skb)->frags[wnd_idx].size;
11135 /* in non-LSO too fragmented packet should always
11142 if (unlikely(to_copy))
11143 DP(NETIF_MSG_TX_QUEUED,
11144 "Linearization IS REQUIRED for %s packet. "
11145 "num_frags %d hlen %d first_bd_sz %d\n",
11146 (xmit_type & XMIT_GSO) ? "LSO" : "non-LSO",
11147 skb_shinfo(skb)->nr_frags, hlen, first_bd_sz);
11153 /* called with netif_tx_lock
11154 * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
11155 * netif_wake_queue()
11157 static netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
11159 struct bnx2x *bp = netdev_priv(dev);
11160 struct bnx2x_fastpath *fp;
11161 struct netdev_queue *txq;
11162 struct sw_tx_bd *tx_buf;
11163 struct eth_tx_start_bd *tx_start_bd;
11164 struct eth_tx_bd *tx_data_bd, *total_pkt_bd = NULL;
11165 struct eth_tx_parse_bd *pbd = NULL;
11166 u16 pkt_prod, bd_prod;
11168 dma_addr_t mapping;
11169 u32 xmit_type = bnx2x_xmit_type(bp, skb);
11172 __le16 pkt_size = 0;
11174 #ifdef BNX2X_STOP_ON_ERROR
11175 if (unlikely(bp->panic))
11176 return NETDEV_TX_BUSY;
11179 fp_index = skb_get_queue_mapping(skb);
11180 txq = netdev_get_tx_queue(dev, fp_index);
11182 fp = &bp->fp[fp_index];
11184 if (unlikely(bnx2x_tx_avail(fp) < (skb_shinfo(skb)->nr_frags + 3))) {
11185 fp->eth_q_stats.driver_xoff++;
11186 netif_tx_stop_queue(txq);
11187 BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
11188 return NETDEV_TX_BUSY;
11191 DP(NETIF_MSG_TX_QUEUED, "SKB: summed %x protocol %x protocol(%x,%x)"
11192 " gso type %x xmit_type %x\n",
11193 skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
11194 ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type);
11196 #if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
11197 /* First, check if we need to linearize the skb (due to FW
11198 restrictions). No need to check fragmentation if page size > 8K
11199 (there will be no violation to FW restrictions) */
11200 if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) {
11201 /* Statistics of linearization */
11203 if (skb_linearize(skb) != 0) {
11204 DP(NETIF_MSG_TX_QUEUED, "SKB linearization failed - "
11205 "silently dropping this SKB\n");
11206 dev_kfree_skb_any(skb);
11207 return NETDEV_TX_OK;
11213 Please read carefully. First we use one BD which we mark as start,
11214 then we have a parsing info BD (used for TSO or xsum),
11215 and only then we have the rest of the TSO BDs.
11216 (don't forget to mark the last one as last,
11217 and to unmap only AFTER you write to the BD ...)
11218 And above all, all pdb sizes are in words - NOT DWORDS!
11221 pkt_prod = fp->tx_pkt_prod++;
11222 bd_prod = TX_BD(fp->tx_bd_prod);
11224 /* get a tx_buf and first BD */
11225 tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)];
11226 tx_start_bd = &fp->tx_desc_ring[bd_prod].start_bd;
11228 tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
11229 tx_start_bd->general_data = (UNICAST_ADDRESS <<
11230 ETH_TX_START_BD_ETH_ADDR_TYPE_SHIFT);
11232 tx_start_bd->general_data |= (1 << ETH_TX_START_BD_HDR_NBDS_SHIFT);
11234 /* remember the first BD of the packet */
11235 tx_buf->first_bd = fp->tx_bd_prod;
11239 DP(NETIF_MSG_TX_QUEUED,
11240 "sending pkt %u @%p next_idx %u bd %u @%p\n",
11241 pkt_prod, tx_buf, fp->tx_pkt_prod, bd_prod, tx_start_bd);
11244 if ((bp->vlgrp != NULL) && vlan_tx_tag_present(skb) &&
11245 (bp->flags & HW_VLAN_TX_FLAG)) {
11246 tx_start_bd->vlan = cpu_to_le16(vlan_tx_tag_get(skb));
11247 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_VLAN_TAG;
11250 tx_start_bd->vlan = cpu_to_le16(pkt_prod);
11252 /* turn on parsing and get a BD */
11253 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
11254 pbd = &fp->tx_desc_ring[bd_prod].parse_bd;
11256 memset(pbd, 0, sizeof(struct eth_tx_parse_bd));
11258 if (xmit_type & XMIT_CSUM) {
11259 hlen = (skb_network_header(skb) - skb->data) / 2;
11261 /* for now NS flag is not used in Linux */
11263 (hlen | ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
11264 ETH_TX_PARSE_BD_LLC_SNAP_EN_SHIFT));
11266 pbd->ip_hlen = (skb_transport_header(skb) -
11267 skb_network_header(skb)) / 2;
11269 hlen += pbd->ip_hlen + tcp_hdrlen(skb) / 2;
11271 pbd->total_hlen = cpu_to_le16(hlen);
11274 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_L4_CSUM;
11276 if (xmit_type & XMIT_CSUM_V4)
11277 tx_start_bd->bd_flags.as_bitfield |=
11278 ETH_TX_BD_FLAGS_IP_CSUM;
11280 tx_start_bd->bd_flags.as_bitfield |=
11281 ETH_TX_BD_FLAGS_IPV6;
11283 if (xmit_type & XMIT_CSUM_TCP) {
11284 pbd->tcp_pseudo_csum = swab16(tcp_hdr(skb)->check);
11287 s8 fix = SKB_CS_OFF(skb); /* signed! */
11289 pbd->global_data |= ETH_TX_PARSE_BD_UDP_CS_FLG;
11291 DP(NETIF_MSG_TX_QUEUED,
11292 "hlen %d fix %d csum before fix %x\n",
11293 le16_to_cpu(pbd->total_hlen), fix, SKB_CS(skb));
11295 /* HW bug: fixup the CSUM */
11296 pbd->tcp_pseudo_csum =
11297 bnx2x_csum_fix(skb_transport_header(skb),
11300 DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n",
11301 pbd->tcp_pseudo_csum);
11305 mapping = pci_map_single(bp->pdev, skb->data,
11306 skb_headlen(skb), PCI_DMA_TODEVICE);
11308 tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
11309 tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
11310 nbd = skb_shinfo(skb)->nr_frags + 2; /* start_bd + pbd + frags */
11311 tx_start_bd->nbd = cpu_to_le16(nbd);
11312 tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb));
11313 pkt_size = tx_start_bd->nbytes;
11315 DP(NETIF_MSG_TX_QUEUED, "first bd @%p addr (%x:%x) nbd %d"
11316 " nbytes %d flags %x vlan %x\n",
11317 tx_start_bd, tx_start_bd->addr_hi, tx_start_bd->addr_lo,
11318 le16_to_cpu(tx_start_bd->nbd), le16_to_cpu(tx_start_bd->nbytes),
11319 tx_start_bd->bd_flags.as_bitfield, le16_to_cpu(tx_start_bd->vlan));
11321 if (xmit_type & XMIT_GSO) {
11323 DP(NETIF_MSG_TX_QUEUED,
11324 "TSO packet len %d hlen %d total len %d tso size %d\n",
11325 skb->len, hlen, skb_headlen(skb),
11326 skb_shinfo(skb)->gso_size);
11328 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;
11330 if (unlikely(skb_headlen(skb) > hlen))
11331 bd_prod = bnx2x_tx_split(bp, fp, tx_buf, &tx_start_bd,
11332 hlen, bd_prod, ++nbd);
11334 pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
11335 pbd->tcp_send_seq = swab32(tcp_hdr(skb)->seq);
11336 pbd->tcp_flags = pbd_tcp_flags(skb);
11338 if (xmit_type & XMIT_GSO_V4) {
11339 pbd->ip_id = swab16(ip_hdr(skb)->id);
11340 pbd->tcp_pseudo_csum =
11341 swab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
11342 ip_hdr(skb)->daddr,
11343 0, IPPROTO_TCP, 0));
11346 pbd->tcp_pseudo_csum =
11347 swab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
11348 &ipv6_hdr(skb)->daddr,
11349 0, IPPROTO_TCP, 0));
11351 pbd->global_data |= ETH_TX_PARSE_BD_PSEUDO_CS_WITHOUT_LEN;
11353 tx_data_bd = (struct eth_tx_bd *)tx_start_bd;
11355 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
11356 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
11358 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
11359 tx_data_bd = &fp->tx_desc_ring[bd_prod].reg_bd;
11360 if (total_pkt_bd == NULL)
11361 total_pkt_bd = &fp->tx_desc_ring[bd_prod].reg_bd;
11363 mapping = pci_map_page(bp->pdev, frag->page, frag->page_offset,
11364 frag->size, PCI_DMA_TODEVICE);
11366 tx_data_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
11367 tx_data_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
11368 tx_data_bd->nbytes = cpu_to_le16(frag->size);
11369 le16_add_cpu(&pkt_size, frag->size);
11371 DP(NETIF_MSG_TX_QUEUED,
11372 "frag %d bd @%p addr (%x:%x) nbytes %d\n",
11373 i, tx_data_bd, tx_data_bd->addr_hi, tx_data_bd->addr_lo,
11374 le16_to_cpu(tx_data_bd->nbytes));
11377 DP(NETIF_MSG_TX_QUEUED, "last bd @%p\n", tx_data_bd);
11379 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
11381 /* now send a tx doorbell, counting the next BD
11382 * if the packet contains or ends with it
11384 if (TX_BD_POFF(bd_prod) < nbd)
11387 if (total_pkt_bd != NULL)
11388 total_pkt_bd->total_pkt_bytes = pkt_size;
11391 DP(NETIF_MSG_TX_QUEUED,
11392 "PBD @%p ip_data %x ip_hlen %u ip_id %u lso_mss %u"
11393 " tcp_flags %x xsum %x seq %u hlen %u\n",
11394 pbd, pbd->global_data, pbd->ip_hlen, pbd->ip_id,
11395 pbd->lso_mss, pbd->tcp_flags, pbd->tcp_pseudo_csum,
11396 pbd->tcp_send_seq, le16_to_cpu(pbd->total_hlen));
11398 DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d bd %u\n", nbd, bd_prod);
11401 * Make sure that the BD data is updated before updating the producer
11402 * since FW might read the BD right after the producer is updated.
11403 * This is only applicable for weak-ordered memory model archs such
11404 * as IA-64. The following barrier is also mandatory since FW will
11405 * assumes packets must have BDs.
11409 fp->tx_db.data.prod += nbd;
11411 DOORBELL(bp, fp->index, fp->tx_db.raw);
11415 fp->tx_bd_prod += nbd;
11417 if (unlikely(bnx2x_tx_avail(fp) < MAX_SKB_FRAGS + 3)) {
11418 netif_tx_stop_queue(txq);
11419 /* We want bnx2x_tx_int to "see" the updated tx_bd_prod
11420 if we put Tx into XOFF state. */
11422 fp->eth_q_stats.driver_xoff++;
11423 if (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3)
11424 netif_tx_wake_queue(txq);
11428 return NETDEV_TX_OK;
11431 /* called with rtnl_lock */
11432 static int bnx2x_open(struct net_device *dev)
11434 struct bnx2x *bp = netdev_priv(dev);
11436 netif_carrier_off(dev);
11438 bnx2x_set_power_state(bp, PCI_D0);
11440 return bnx2x_nic_load(bp, LOAD_OPEN);
11443 /* called with rtnl_lock */
11444 static int bnx2x_close(struct net_device *dev)
11446 struct bnx2x *bp = netdev_priv(dev);
11448 /* Unload the driver, release IRQs */
11449 bnx2x_nic_unload(bp, UNLOAD_CLOSE);
11450 if (atomic_read(&bp->pdev->enable_cnt) == 1)
11451 if (!CHIP_REV_IS_SLOW(bp))
11452 bnx2x_set_power_state(bp, PCI_D3hot);
11457 /* called with netif_tx_lock from dev_mcast.c */
11458 static void bnx2x_set_rx_mode(struct net_device *dev)
11460 struct bnx2x *bp = netdev_priv(dev);
11461 u32 rx_mode = BNX2X_RX_MODE_NORMAL;
11462 int port = BP_PORT(bp);
11464 if (bp->state != BNX2X_STATE_OPEN) {
11465 DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
11469 DP(NETIF_MSG_IFUP, "dev->flags = %x\n", dev->flags);
11471 if (dev->flags & IFF_PROMISC)
11472 rx_mode = BNX2X_RX_MODE_PROMISC;
11474 else if ((dev->flags & IFF_ALLMULTI) ||
11475 ((dev->mc_count > BNX2X_MAX_MULTICAST) && CHIP_IS_E1(bp)))
11476 rx_mode = BNX2X_RX_MODE_ALLMULTI;
11478 else { /* some multicasts */
11479 if (CHIP_IS_E1(bp)) {
11480 int i, old, offset;
11481 struct dev_mc_list *mclist;
11482 struct mac_configuration_cmd *config =
11483 bnx2x_sp(bp, mcast_config);
11485 for (i = 0, mclist = dev->mc_list;
11486 mclist && (i < dev->mc_count);
11487 i++, mclist = mclist->next) {
11489 config->config_table[i].
11490 cam_entry.msb_mac_addr =
11491 swab16(*(u16 *)&mclist->dmi_addr[0]);
11492 config->config_table[i].
11493 cam_entry.middle_mac_addr =
11494 swab16(*(u16 *)&mclist->dmi_addr[2]);
11495 config->config_table[i].
11496 cam_entry.lsb_mac_addr =
11497 swab16(*(u16 *)&mclist->dmi_addr[4]);
11498 config->config_table[i].cam_entry.flags =
11500 config->config_table[i].
11501 target_table_entry.flags = 0;
11502 config->config_table[i].target_table_entry.
11503 clients_bit_vector =
11504 cpu_to_le32(1 << BP_L_ID(bp));
11505 config->config_table[i].
11506 target_table_entry.vlan_id = 0;
11509 "setting MCAST[%d] (%04x:%04x:%04x)\n", i,
11510 config->config_table[i].
11511 cam_entry.msb_mac_addr,
11512 config->config_table[i].
11513 cam_entry.middle_mac_addr,
11514 config->config_table[i].
11515 cam_entry.lsb_mac_addr);
11517 old = config->hdr.length;
11519 for (; i < old; i++) {
11520 if (CAM_IS_INVALID(config->
11521 config_table[i])) {
11522 /* already invalidated */
11526 CAM_INVALIDATE(config->
11531 if (CHIP_REV_IS_SLOW(bp))
11532 offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
11534 offset = BNX2X_MAX_MULTICAST*(1 + port);
11536 config->hdr.length = i;
11537 config->hdr.offset = offset;
11538 config->hdr.client_id = bp->fp->cl_id;
11539 config->hdr.reserved1 = 0;
11541 bp->set_mac_pending++;
11544 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
11545 U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
11546 U64_LO(bnx2x_sp_mapping(bp, mcast_config)),
11549 /* Accept one or more multicasts */
11550 struct dev_mc_list *mclist;
11551 u32 mc_filter[MC_HASH_SIZE];
11552 u32 crc, bit, regidx;
11555 memset(mc_filter, 0, 4 * MC_HASH_SIZE);
11557 for (i = 0, mclist = dev->mc_list;
11558 mclist && (i < dev->mc_count);
11559 i++, mclist = mclist->next) {
11561 DP(NETIF_MSG_IFUP, "Adding mcast MAC: %pM\n",
11564 crc = crc32c_le(0, mclist->dmi_addr, ETH_ALEN);
11565 bit = (crc >> 24) & 0xff;
11568 mc_filter[regidx] |= (1 << bit);
11571 for (i = 0; i < MC_HASH_SIZE; i++)
11572 REG_WR(bp, MC_HASH_OFFSET(bp, i),
11577 bp->rx_mode = rx_mode;
11578 bnx2x_set_storm_rx_mode(bp);
11581 /* called with rtnl_lock */
11582 static int bnx2x_change_mac_addr(struct net_device *dev, void *p)
11584 struct sockaddr *addr = p;
11585 struct bnx2x *bp = netdev_priv(dev);
11587 if (!is_valid_ether_addr((u8 *)(addr->sa_data)))
11590 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
11591 if (netif_running(dev)) {
11592 if (CHIP_IS_E1(bp))
11593 bnx2x_set_eth_mac_addr_e1(bp, 1);
11595 bnx2x_set_eth_mac_addr_e1h(bp, 1);
11601 /* called with rtnl_lock */
11602 static int bnx2x_mdio_read(struct net_device *netdev, int prtad,
11603 int devad, u16 addr)
11605 struct bnx2x *bp = netdev_priv(netdev);
11608 u32 phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
11610 DP(NETIF_MSG_LINK, "mdio_read: prtad 0x%x, devad 0x%x, addr 0x%x\n",
11611 prtad, devad, addr);
11613 if (prtad != bp->mdio.prtad) {
11614 DP(NETIF_MSG_LINK, "prtad missmatch (cmd:0x%x != bp:0x%x)\n",
11615 prtad, bp->mdio.prtad);
11619 /* The HW expects different devad if CL22 is used */
11620 devad = (devad == MDIO_DEVAD_NONE) ? DEFAULT_PHY_DEV_ADDR : devad;
11622 bnx2x_acquire_phy_lock(bp);
11623 rc = bnx2x_cl45_read(bp, BP_PORT(bp), phy_type, prtad,
11624 devad, addr, &value);
11625 bnx2x_release_phy_lock(bp);
11626 DP(NETIF_MSG_LINK, "mdio_read_val 0x%x rc = 0x%x\n", value, rc);
11633 /* called with rtnl_lock */
11634 static int bnx2x_mdio_write(struct net_device *netdev, int prtad, int devad,
11635 u16 addr, u16 value)
11637 struct bnx2x *bp = netdev_priv(netdev);
11638 u32 ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
11641 DP(NETIF_MSG_LINK, "mdio_write: prtad 0x%x, devad 0x%x, addr 0x%x,"
11642 " value 0x%x\n", prtad, devad, addr, value);
11644 if (prtad != bp->mdio.prtad) {
11645 DP(NETIF_MSG_LINK, "prtad missmatch (cmd:0x%x != bp:0x%x)\n",
11646 prtad, bp->mdio.prtad);
11650 /* The HW expects different devad if CL22 is used */
11651 devad = (devad == MDIO_DEVAD_NONE) ? DEFAULT_PHY_DEV_ADDR : devad;
11653 bnx2x_acquire_phy_lock(bp);
11654 rc = bnx2x_cl45_write(bp, BP_PORT(bp), ext_phy_type, prtad,
11655 devad, addr, value);
11656 bnx2x_release_phy_lock(bp);
11660 /* called with rtnl_lock */
11661 static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
11663 struct bnx2x *bp = netdev_priv(dev);
11664 struct mii_ioctl_data *mdio = if_mii(ifr);
11666 DP(NETIF_MSG_LINK, "ioctl: phy id 0x%x, reg 0x%x, val_in 0x%x\n",
11667 mdio->phy_id, mdio->reg_num, mdio->val_in);
11669 if (!netif_running(dev))
11672 return mdio_mii_ioctl(&bp->mdio, mdio, cmd);
11675 /* called with rtnl_lock */
11676 static int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
11678 struct bnx2x *bp = netdev_priv(dev);
11681 if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
11682 ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE))
11685 /* This does not race with packet allocation
11686 * because the actual alloc size is
11687 * only updated as part of load
11689 dev->mtu = new_mtu;
11691 if (netif_running(dev)) {
11692 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
11693 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
11699 static void bnx2x_tx_timeout(struct net_device *dev)
11701 struct bnx2x *bp = netdev_priv(dev);
11703 #ifdef BNX2X_STOP_ON_ERROR
11707 /* This allows the netif to be shutdown gracefully before resetting */
11708 schedule_work(&bp->reset_task);
11712 /* called with rtnl_lock */
11713 static void bnx2x_vlan_rx_register(struct net_device *dev,
11714 struct vlan_group *vlgrp)
11716 struct bnx2x *bp = netdev_priv(dev);
11720 /* Set flags according to the required capabilities */
11721 bp->flags &= ~(HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG);
11723 if (dev->features & NETIF_F_HW_VLAN_TX)
11724 bp->flags |= HW_VLAN_TX_FLAG;
11726 if (dev->features & NETIF_F_HW_VLAN_RX)
11727 bp->flags |= HW_VLAN_RX_FLAG;
11729 if (netif_running(dev))
11730 bnx2x_set_client_config(bp);
11735 #if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
11736 static void poll_bnx2x(struct net_device *dev)
11738 struct bnx2x *bp = netdev_priv(dev);
11740 disable_irq(bp->pdev->irq);
11741 bnx2x_interrupt(bp->pdev->irq, dev);
11742 enable_irq(bp->pdev->irq);
11746 static const struct net_device_ops bnx2x_netdev_ops = {
11747 .ndo_open = bnx2x_open,
11748 .ndo_stop = bnx2x_close,
11749 .ndo_start_xmit = bnx2x_start_xmit,
11750 .ndo_set_multicast_list = bnx2x_set_rx_mode,
11751 .ndo_set_mac_address = bnx2x_change_mac_addr,
11752 .ndo_validate_addr = eth_validate_addr,
11753 .ndo_do_ioctl = bnx2x_ioctl,
11754 .ndo_change_mtu = bnx2x_change_mtu,
11755 .ndo_tx_timeout = bnx2x_tx_timeout,
11757 .ndo_vlan_rx_register = bnx2x_vlan_rx_register,
11759 #if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
11760 .ndo_poll_controller = poll_bnx2x,
11764 static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
11765 struct net_device *dev)
11770 SET_NETDEV_DEV(dev, &pdev->dev);
11771 bp = netdev_priv(dev);
11776 bp->func = PCI_FUNC(pdev->devfn);
11778 rc = pci_enable_device(pdev);
11780 printk(KERN_ERR PFX "Cannot enable PCI device, aborting\n");
11784 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
11785 printk(KERN_ERR PFX "Cannot find PCI device base address,"
11788 goto err_out_disable;
11791 if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
11792 printk(KERN_ERR PFX "Cannot find second PCI device"
11793 " base address, aborting\n");
11795 goto err_out_disable;
11798 if (atomic_read(&pdev->enable_cnt) == 1) {
11799 rc = pci_request_regions(pdev, DRV_MODULE_NAME);
11801 printk(KERN_ERR PFX "Cannot obtain PCI resources,"
11803 goto err_out_disable;
11806 pci_set_master(pdev);
11807 pci_save_state(pdev);
11810 bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
11811 if (bp->pm_cap == 0) {
11812 printk(KERN_ERR PFX "Cannot find power management"
11813 " capability, aborting\n");
11815 goto err_out_release;
11818 bp->pcie_cap = pci_find_capability(pdev, PCI_CAP_ID_EXP);
11819 if (bp->pcie_cap == 0) {
11820 printk(KERN_ERR PFX "Cannot find PCI Express capability,"
11823 goto err_out_release;
11826 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) == 0) {
11827 bp->flags |= USING_DAC_FLAG;
11828 if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)) != 0) {
11829 printk(KERN_ERR PFX "pci_set_consistent_dma_mask"
11830 " failed, aborting\n");
11832 goto err_out_release;
11835 } else if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) {
11836 printk(KERN_ERR PFX "System does not support DMA,"
11839 goto err_out_release;
11842 dev->mem_start = pci_resource_start(pdev, 0);
11843 dev->base_addr = dev->mem_start;
11844 dev->mem_end = pci_resource_end(pdev, 0);
11846 dev->irq = pdev->irq;
11848 bp->regview = pci_ioremap_bar(pdev, 0);
11849 if (!bp->regview) {
11850 printk(KERN_ERR PFX "Cannot map register space, aborting\n");
11852 goto err_out_release;
11855 bp->doorbells = ioremap_nocache(pci_resource_start(pdev, 2),
11856 min_t(u64, BNX2X_DB_SIZE,
11857 pci_resource_len(pdev, 2)));
11858 if (!bp->doorbells) {
11859 printk(KERN_ERR PFX "Cannot map doorbell space, aborting\n");
11861 goto err_out_unmap;
11864 bnx2x_set_power_state(bp, PCI_D0);
11866 /* clean indirect addresses */
11867 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
11868 PCICFG_VENDOR_ID_OFFSET);
11869 REG_WR(bp, PXP2_REG_PGL_ADDR_88_F0 + BP_PORT(bp)*16, 0);
11870 REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F0 + BP_PORT(bp)*16, 0);
11871 REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0 + BP_PORT(bp)*16, 0);
11872 REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0 + BP_PORT(bp)*16, 0);
11874 dev->watchdog_timeo = TX_TIMEOUT;
11876 dev->netdev_ops = &bnx2x_netdev_ops;
11877 dev->ethtool_ops = &bnx2x_ethtool_ops;
11878 dev->features |= NETIF_F_SG;
11879 dev->features |= NETIF_F_HW_CSUM;
11880 if (bp->flags & USING_DAC_FLAG)
11881 dev->features |= NETIF_F_HIGHDMA;
11882 dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
11883 dev->features |= NETIF_F_TSO6;
11885 dev->features |= (NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX);
11886 bp->flags |= (HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG);
11888 dev->vlan_features |= NETIF_F_SG;
11889 dev->vlan_features |= NETIF_F_HW_CSUM;
11890 if (bp->flags & USING_DAC_FLAG)
11891 dev->vlan_features |= NETIF_F_HIGHDMA;
11892 dev->vlan_features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
11893 dev->vlan_features |= NETIF_F_TSO6;
11896 /* get_port_hwinfo() will set prtad and mmds properly */
11897 bp->mdio.prtad = MDIO_PRTAD_NONE;
11899 bp->mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22;
11900 bp->mdio.dev = dev;
11901 bp->mdio.mdio_read = bnx2x_mdio_read;
11902 bp->mdio.mdio_write = bnx2x_mdio_write;
11908 iounmap(bp->regview);
11909 bp->regview = NULL;
11911 if (bp->doorbells) {
11912 iounmap(bp->doorbells);
11913 bp->doorbells = NULL;
11917 if (atomic_read(&pdev->enable_cnt) == 1)
11918 pci_release_regions(pdev);
11921 pci_disable_device(pdev);
11922 pci_set_drvdata(pdev, NULL);
11928 static void __devinit bnx2x_get_pcie_width_speed(struct bnx2x *bp,
11929 int *width, int *speed)
11931 u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);
11933 *width = (val & PCICFG_LINK_WIDTH) >> PCICFG_LINK_WIDTH_SHIFT;
11935 /* return value of 1=2.5GHz 2=5GHz */
11936 *speed = (val & PCICFG_LINK_SPEED) >> PCICFG_LINK_SPEED_SHIFT;
11939 static int __devinit bnx2x_check_firmware(struct bnx2x *bp)
11941 const struct firmware *firmware = bp->firmware;
11942 struct bnx2x_fw_file_hdr *fw_hdr;
11943 struct bnx2x_fw_file_section *sections;
11944 u32 offset, len, num_ops;
11949 if (firmware->size < sizeof(struct bnx2x_fw_file_hdr))
11952 fw_hdr = (struct bnx2x_fw_file_hdr *)firmware->data;
11953 sections = (struct bnx2x_fw_file_section *)fw_hdr;
11955 /* Make sure none of the offsets and sizes make us read beyond
11956 * the end of the firmware data */
11957 for (i = 0; i < sizeof(*fw_hdr) / sizeof(*sections); i++) {
11958 offset = be32_to_cpu(sections[i].offset);
11959 len = be32_to_cpu(sections[i].len);
11960 if (offset + len > firmware->size) {
11961 printk(KERN_ERR PFX "Section %d length is out of "
11967 /* Likewise for the init_ops offsets */
11968 offset = be32_to_cpu(fw_hdr->init_ops_offsets.offset);
11969 ops_offsets = (u16 *)(firmware->data + offset);
11970 num_ops = be32_to_cpu(fw_hdr->init_ops.len) / sizeof(struct raw_op);
11972 for (i = 0; i < be32_to_cpu(fw_hdr->init_ops_offsets.len) / 2; i++) {
11973 if (be16_to_cpu(ops_offsets[i]) > num_ops) {
11974 printk(KERN_ERR PFX "Section offset %d is out of "
11980 /* Check FW version */
11981 offset = be32_to_cpu(fw_hdr->fw_version.offset);
11982 fw_ver = firmware->data + offset;
11983 if ((fw_ver[0] != BCM_5710_FW_MAJOR_VERSION) ||
11984 (fw_ver[1] != BCM_5710_FW_MINOR_VERSION) ||
11985 (fw_ver[2] != BCM_5710_FW_REVISION_VERSION) ||
11986 (fw_ver[3] != BCM_5710_FW_ENGINEERING_VERSION)) {
11987 printk(KERN_ERR PFX "Bad FW version:%d.%d.%d.%d."
11988 " Should be %d.%d.%d.%d\n",
11989 fw_ver[0], fw_ver[1], fw_ver[2],
11990 fw_ver[3], BCM_5710_FW_MAJOR_VERSION,
11991 BCM_5710_FW_MINOR_VERSION,
11992 BCM_5710_FW_REVISION_VERSION,
11993 BCM_5710_FW_ENGINEERING_VERSION);
12000 static inline void be32_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
12002 const __be32 *source = (const __be32 *)_source;
12003 u32 *target = (u32 *)_target;
12006 for (i = 0; i < n/4; i++)
12007 target[i] = be32_to_cpu(source[i]);
12011 Ops array is stored in the following format:
12012 {op(8bit), offset(24bit, big endian), data(32bit, big endian)}
12014 static inline void bnx2x_prep_ops(const u8 *_source, u8 *_target, u32 n)
12016 const __be32 *source = (const __be32 *)_source;
12017 struct raw_op *target = (struct raw_op *)_target;
12020 for (i = 0, j = 0; i < n/8; i++, j += 2) {
12021 tmp = be32_to_cpu(source[j]);
12022 target[i].op = (tmp >> 24) & 0xff;
12023 target[i].offset = tmp & 0xffffff;
12024 target[i].raw_data = be32_to_cpu(source[j+1]);
12028 static inline void be16_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
12030 const __be16 *source = (const __be16 *)_source;
12031 u16 *target = (u16 *)_target;
12034 for (i = 0; i < n/2; i++)
12035 target[i] = be16_to_cpu(source[i]);
12038 #define BNX2X_ALLOC_AND_SET(arr, lbl, func) \
12040 u32 len = be32_to_cpu(fw_hdr->arr.len); \
12041 bp->arr = kmalloc(len, GFP_KERNEL); \
12043 printk(KERN_ERR PFX "Failed to allocate %d bytes " \
12044 "for "#arr"\n", len); \
12047 func(bp->firmware->data + be32_to_cpu(fw_hdr->arr.offset), \
12048 (u8 *)bp->arr, len); \
12051 static int __devinit bnx2x_init_firmware(struct bnx2x *bp, struct device *dev)
12053 const char *fw_file_name;
12054 struct bnx2x_fw_file_hdr *fw_hdr;
12057 if (CHIP_IS_E1(bp))
12058 fw_file_name = FW_FILE_NAME_E1;
12060 fw_file_name = FW_FILE_NAME_E1H;
12062 printk(KERN_INFO PFX "Loading %s\n", fw_file_name);
12064 rc = request_firmware(&bp->firmware, fw_file_name, dev);
12066 printk(KERN_ERR PFX "Can't load firmware file %s\n",
12068 goto request_firmware_exit;
12071 rc = bnx2x_check_firmware(bp);
12073 printk(KERN_ERR PFX "Corrupt firmware file %s\n", fw_file_name);
12074 goto request_firmware_exit;
12077 fw_hdr = (struct bnx2x_fw_file_hdr *)bp->firmware->data;
12079 /* Initialize the pointers to the init arrays */
12081 BNX2X_ALLOC_AND_SET(init_data, request_firmware_exit, be32_to_cpu_n);
12084 BNX2X_ALLOC_AND_SET(init_ops, init_ops_alloc_err, bnx2x_prep_ops);
12087 BNX2X_ALLOC_AND_SET(init_ops_offsets, init_offsets_alloc_err,
12090 /* STORMs firmware */
12091 INIT_TSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
12092 be32_to_cpu(fw_hdr->tsem_int_table_data.offset);
12093 INIT_TSEM_PRAM_DATA(bp) = bp->firmware->data +
12094 be32_to_cpu(fw_hdr->tsem_pram_data.offset);
12095 INIT_USEM_INT_TABLE_DATA(bp) = bp->firmware->data +
12096 be32_to_cpu(fw_hdr->usem_int_table_data.offset);
12097 INIT_USEM_PRAM_DATA(bp) = bp->firmware->data +
12098 be32_to_cpu(fw_hdr->usem_pram_data.offset);
12099 INIT_XSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
12100 be32_to_cpu(fw_hdr->xsem_int_table_data.offset);
12101 INIT_XSEM_PRAM_DATA(bp) = bp->firmware->data +
12102 be32_to_cpu(fw_hdr->xsem_pram_data.offset);
12103 INIT_CSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
12104 be32_to_cpu(fw_hdr->csem_int_table_data.offset);
12105 INIT_CSEM_PRAM_DATA(bp) = bp->firmware->data +
12106 be32_to_cpu(fw_hdr->csem_pram_data.offset);
12110 init_offsets_alloc_err:
12111 kfree(bp->init_ops);
12112 init_ops_alloc_err:
12113 kfree(bp->init_data);
12114 request_firmware_exit:
12115 release_firmware(bp->firmware);
12121 static int __devinit bnx2x_init_one(struct pci_dev *pdev,
12122 const struct pci_device_id *ent)
12124 struct net_device *dev = NULL;
12126 int pcie_width, pcie_speed;
12129 /* dev zeroed in init_etherdev */
12130 dev = alloc_etherdev_mq(sizeof(*bp), MAX_CONTEXT);
12132 printk(KERN_ERR PFX "Cannot allocate net device\n");
12136 bp = netdev_priv(dev);
12137 bp->msglevel = debug;
12139 pci_set_drvdata(pdev, dev);
12141 rc = bnx2x_init_dev(pdev, dev);
12147 rc = bnx2x_init_bp(bp);
12149 goto init_one_exit;
12151 /* Set init arrays */
12152 rc = bnx2x_init_firmware(bp, &pdev->dev);
12154 printk(KERN_ERR PFX "Error loading firmware\n");
12155 goto init_one_exit;
12158 rc = register_netdev(dev);
12160 dev_err(&pdev->dev, "Cannot register net device\n");
12161 goto init_one_exit;
12164 bnx2x_get_pcie_width_speed(bp, &pcie_width, &pcie_speed);
12165 printk(KERN_INFO "%s: %s (%c%d) PCI-E x%d %s found at mem %lx,"
12166 " IRQ %d, ", dev->name, board_info[ent->driver_data].name,
12167 (CHIP_REV(bp) >> 12) + 'A', (CHIP_METAL(bp) >> 4),
12168 pcie_width, (pcie_speed == 2) ? "5GHz (Gen2)" : "2.5GHz",
12169 dev->base_addr, bp->pdev->irq);
12170 printk(KERN_CONT "node addr %pM\n", dev->dev_addr);
12176 iounmap(bp->regview);
12179 iounmap(bp->doorbells);
12183 if (atomic_read(&pdev->enable_cnt) == 1)
12184 pci_release_regions(pdev);
12186 pci_disable_device(pdev);
12187 pci_set_drvdata(pdev, NULL);
12192 static void __devexit bnx2x_remove_one(struct pci_dev *pdev)
12194 struct net_device *dev = pci_get_drvdata(pdev);
12198 printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
12201 bp = netdev_priv(dev);
12203 unregister_netdev(dev);
12205 kfree(bp->init_ops_offsets);
12206 kfree(bp->init_ops);
12207 kfree(bp->init_data);
12208 release_firmware(bp->firmware);
12211 iounmap(bp->regview);
12214 iounmap(bp->doorbells);
12218 if (atomic_read(&pdev->enable_cnt) == 1)
12219 pci_release_regions(pdev);
12221 pci_disable_device(pdev);
12222 pci_set_drvdata(pdev, NULL);
12225 static int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
12227 struct net_device *dev = pci_get_drvdata(pdev);
12231 printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
12234 bp = netdev_priv(dev);
12238 pci_save_state(pdev);
12240 if (!netif_running(dev)) {
12245 netif_device_detach(dev);
12247 bnx2x_nic_unload(bp, UNLOAD_CLOSE);
12249 bnx2x_set_power_state(bp, pci_choose_state(pdev, state));
12256 static int bnx2x_resume(struct pci_dev *pdev)
12258 struct net_device *dev = pci_get_drvdata(pdev);
12263 printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
12266 bp = netdev_priv(dev);
12270 pci_restore_state(pdev);
12272 if (!netif_running(dev)) {
12277 bnx2x_set_power_state(bp, PCI_D0);
12278 netif_device_attach(dev);
12280 rc = bnx2x_nic_load(bp, LOAD_OPEN);
12287 static int bnx2x_eeh_nic_unload(struct bnx2x *bp)
12291 bp->state = BNX2X_STATE_ERROR;
12293 bp->rx_mode = BNX2X_RX_MODE_NONE;
12295 bnx2x_netif_stop(bp, 0);
12297 del_timer_sync(&bp->timer);
12298 bp->stats_state = STATS_STATE_DISABLED;
12299 DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");
12302 bnx2x_free_irq(bp);
12304 if (CHIP_IS_E1(bp)) {
12305 struct mac_configuration_cmd *config =
12306 bnx2x_sp(bp, mcast_config);
12308 for (i = 0; i < config->hdr.length; i++)
12309 CAM_INVALIDATE(config->config_table[i]);
12312 /* Free SKBs, SGEs, TPA pool and driver internals */
12313 bnx2x_free_skbs(bp);
12314 for_each_queue(bp, i)
12315 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
12316 for_each_queue(bp, i)
12317 netif_napi_del(&bnx2x_fp(bp, i, napi));
12318 bnx2x_free_mem(bp);
12320 bp->state = BNX2X_STATE_CLOSED;
12322 netif_carrier_off(bp->dev);
12327 static void bnx2x_eeh_recover(struct bnx2x *bp)
12331 mutex_init(&bp->port.phy_mutex);
12333 bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
12334 bp->link_params.shmem_base = bp->common.shmem_base;
12335 BNX2X_DEV_INFO("shmem offset is 0x%x\n", bp->common.shmem_base);
12337 if (!bp->common.shmem_base ||
12338 (bp->common.shmem_base < 0xA0000) ||
12339 (bp->common.shmem_base >= 0xC0000)) {
12340 BNX2X_DEV_INFO("MCP not active\n");
12341 bp->flags |= NO_MCP_FLAG;
12345 val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
12346 if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
12347 != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
12348 BNX2X_ERR("BAD MCP validity signature\n");
12350 if (!BP_NOMCP(bp)) {
12351 bp->fw_seq = (SHMEM_RD(bp, func_mb[BP_FUNC(bp)].drv_mb_header)
12352 & DRV_MSG_SEQ_NUMBER_MASK);
12353 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
12358 * bnx2x_io_error_detected - called when PCI error is detected
12359 * @pdev: Pointer to PCI device
12360 * @state: The current pci connection state
12362 * This function is called after a PCI bus error affecting
12363 * this device has been detected.
12365 static pci_ers_result_t bnx2x_io_error_detected(struct pci_dev *pdev,
12366 pci_channel_state_t state)
12368 struct net_device *dev = pci_get_drvdata(pdev);
12369 struct bnx2x *bp = netdev_priv(dev);
12373 netif_device_detach(dev);
12375 if (state == pci_channel_io_perm_failure) {
12377 return PCI_ERS_RESULT_DISCONNECT;
12380 if (netif_running(dev))
12381 bnx2x_eeh_nic_unload(bp);
12383 pci_disable_device(pdev);
12387 /* Request a slot reset */
12388 return PCI_ERS_RESULT_NEED_RESET;
12392 * bnx2x_io_slot_reset - called after the PCI bus has been reset
12393 * @pdev: Pointer to PCI device
12395 * Restart the card from scratch, as if from a cold-boot.
12397 static pci_ers_result_t bnx2x_io_slot_reset(struct pci_dev *pdev)
12399 struct net_device *dev = pci_get_drvdata(pdev);
12400 struct bnx2x *bp = netdev_priv(dev);
12404 if (pci_enable_device(pdev)) {
12405 dev_err(&pdev->dev,
12406 "Cannot re-enable PCI device after reset\n");
12408 return PCI_ERS_RESULT_DISCONNECT;
12411 pci_set_master(pdev);
12412 pci_restore_state(pdev);
12414 if (netif_running(dev))
12415 bnx2x_set_power_state(bp, PCI_D0);
12419 return PCI_ERS_RESULT_RECOVERED;
12423 * bnx2x_io_resume - called when traffic can start flowing again
12424 * @pdev: Pointer to PCI device
12426 * This callback is called when the error recovery driver tells us that
12427 * its OK to resume normal operation.
12429 static void bnx2x_io_resume(struct pci_dev *pdev)
12431 struct net_device *dev = pci_get_drvdata(pdev);
12432 struct bnx2x *bp = netdev_priv(dev);
12436 bnx2x_eeh_recover(bp);
12438 if (netif_running(dev))
12439 bnx2x_nic_load(bp, LOAD_NORMAL);
12441 netif_device_attach(dev);
12446 static struct pci_error_handlers bnx2x_err_handler = {
12447 .error_detected = bnx2x_io_error_detected,
12448 .slot_reset = bnx2x_io_slot_reset,
12449 .resume = bnx2x_io_resume,
12452 static struct pci_driver bnx2x_pci_driver = {
12453 .name = DRV_MODULE_NAME,
12454 .id_table = bnx2x_pci_tbl,
12455 .probe = bnx2x_init_one,
12456 .remove = __devexit_p(bnx2x_remove_one),
12457 .suspend = bnx2x_suspend,
12458 .resume = bnx2x_resume,
12459 .err_handler = &bnx2x_err_handler,
12462 static int __init bnx2x_init(void)
12466 printk(KERN_INFO "%s", version);
12468 bnx2x_wq = create_singlethread_workqueue("bnx2x");
12469 if (bnx2x_wq == NULL) {
12470 printk(KERN_ERR PFX "Cannot create workqueue\n");
12474 ret = pci_register_driver(&bnx2x_pci_driver);
12476 printk(KERN_ERR PFX "Cannot register driver\n");
12477 destroy_workqueue(bnx2x_wq);
12482 static void __exit bnx2x_cleanup(void)
12484 pci_unregister_driver(&bnx2x_pci_driver);
12486 destroy_workqueue(bnx2x_wq);
12489 module_init(bnx2x_init);
12490 module_exit(bnx2x_cleanup);
12494 /* count denotes the number of new completions we have seen */
12495 static void bnx2x_cnic_sp_post(struct bnx2x *bp, int count)
12497 struct eth_spe *spe;
12499 #ifdef BNX2X_STOP_ON_ERROR
12500 if (unlikely(bp->panic))
12504 spin_lock_bh(&bp->spq_lock);
12505 bp->cnic_spq_pending -= count;
12507 for (; bp->cnic_spq_pending < bp->cnic_eth_dev.max_kwqe_pending;
12508 bp->cnic_spq_pending++) {
12510 if (!bp->cnic_kwq_pending)
12513 spe = bnx2x_sp_get_next(bp);
12514 *spe = *bp->cnic_kwq_cons;
12516 bp->cnic_kwq_pending--;
12518 DP(NETIF_MSG_TIMER, "pending on SPQ %d, on KWQ %d count %d\n",
12519 bp->cnic_spq_pending, bp->cnic_kwq_pending, count);
12521 if (bp->cnic_kwq_cons == bp->cnic_kwq_last)
12522 bp->cnic_kwq_cons = bp->cnic_kwq;
12524 bp->cnic_kwq_cons++;
12526 bnx2x_sp_prod_update(bp);
12527 spin_unlock_bh(&bp->spq_lock);
12530 static int bnx2x_cnic_sp_queue(struct net_device *dev,
12531 struct kwqe_16 *kwqes[], u32 count)
12533 struct bnx2x *bp = netdev_priv(dev);
12536 #ifdef BNX2X_STOP_ON_ERROR
12537 if (unlikely(bp->panic))
12541 spin_lock_bh(&bp->spq_lock);
12543 for (i = 0; i < count; i++) {
12544 struct eth_spe *spe = (struct eth_spe *)kwqes[i];
12546 if (bp->cnic_kwq_pending == MAX_SP_DESC_CNT)
12549 *bp->cnic_kwq_prod = *spe;
12551 bp->cnic_kwq_pending++;
12553 DP(NETIF_MSG_TIMER, "L5 SPQE %x %x %x:%x pos %d\n",
12554 spe->hdr.conn_and_cmd_data, spe->hdr.type,
12555 spe->data.mac_config_addr.hi,
12556 spe->data.mac_config_addr.lo,
12557 bp->cnic_kwq_pending);
12559 if (bp->cnic_kwq_prod == bp->cnic_kwq_last)
12560 bp->cnic_kwq_prod = bp->cnic_kwq;
12562 bp->cnic_kwq_prod++;
12565 spin_unlock_bh(&bp->spq_lock);
12567 if (bp->cnic_spq_pending < bp->cnic_eth_dev.max_kwqe_pending)
12568 bnx2x_cnic_sp_post(bp, 0);
12573 static int bnx2x_cnic_ctl_send(struct bnx2x *bp, struct cnic_ctl_info *ctl)
12575 struct cnic_ops *c_ops;
12578 mutex_lock(&bp->cnic_mutex);
12579 c_ops = bp->cnic_ops;
12581 rc = c_ops->cnic_ctl(bp->cnic_data, ctl);
12582 mutex_unlock(&bp->cnic_mutex);
12587 static int bnx2x_cnic_ctl_send_bh(struct bnx2x *bp, struct cnic_ctl_info *ctl)
12589 struct cnic_ops *c_ops;
12593 c_ops = rcu_dereference(bp->cnic_ops);
12595 rc = c_ops->cnic_ctl(bp->cnic_data, ctl);
12602 * for commands that have no data
12604 static int bnx2x_cnic_notify(struct bnx2x *bp, int cmd)
12606 struct cnic_ctl_info ctl = {0};
12610 return bnx2x_cnic_ctl_send(bp, &ctl);
12613 static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid)
12615 struct cnic_ctl_info ctl;
12617 /* first we tell CNIC and only then we count this as a completion */
12618 ctl.cmd = CNIC_CTL_COMPLETION_CMD;
12619 ctl.data.comp.cid = cid;
12621 bnx2x_cnic_ctl_send_bh(bp, &ctl);
12622 bnx2x_cnic_sp_post(bp, 1);
12625 static int bnx2x_drv_ctl(struct net_device *dev, struct drv_ctl_info *ctl)
12627 struct bnx2x *bp = netdev_priv(dev);
12630 switch (ctl->cmd) {
12631 case DRV_CTL_CTXTBL_WR_CMD: {
12632 u32 index = ctl->data.io.offset;
12633 dma_addr_t addr = ctl->data.io.dma_addr;
12635 bnx2x_ilt_wr(bp, index, addr);
12639 case DRV_CTL_COMPLETION_CMD: {
12640 int count = ctl->data.comp.comp_count;
12642 bnx2x_cnic_sp_post(bp, count);
12646 /* rtnl_lock is held. */
12647 case DRV_CTL_START_L2_CMD: {
12648 u32 cli = ctl->data.ring.client_id;
12650 bp->rx_mode_cl_mask |= (1 << cli);
12651 bnx2x_set_storm_rx_mode(bp);
12655 /* rtnl_lock is held. */
12656 case DRV_CTL_STOP_L2_CMD: {
12657 u32 cli = ctl->data.ring.client_id;
12659 bp->rx_mode_cl_mask &= ~(1 << cli);
12660 bnx2x_set_storm_rx_mode(bp);
12665 BNX2X_ERR("unknown command %x\n", ctl->cmd);
12672 static void bnx2x_setup_cnic_irq_info(struct bnx2x *bp)
12674 struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
12676 if (bp->flags & USING_MSIX_FLAG) {
12677 cp->drv_state |= CNIC_DRV_STATE_USING_MSIX;
12678 cp->irq_arr[0].irq_flags |= CNIC_IRQ_FL_MSIX;
12679 cp->irq_arr[0].vector = bp->msix_table[1].vector;
12681 cp->drv_state &= ~CNIC_DRV_STATE_USING_MSIX;
12682 cp->irq_arr[0].irq_flags &= ~CNIC_IRQ_FL_MSIX;
12684 cp->irq_arr[0].status_blk = bp->cnic_sb;
12685 cp->irq_arr[0].status_blk_num = CNIC_SB_ID(bp);
12686 cp->irq_arr[1].status_blk = bp->def_status_blk;
12687 cp->irq_arr[1].status_blk_num = DEF_SB_ID;
12692 static int bnx2x_register_cnic(struct net_device *dev, struct cnic_ops *ops,
12695 struct bnx2x *bp = netdev_priv(dev);
12696 struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
12701 if (atomic_read(&bp->intr_sem) != 0)
12704 bp->cnic_kwq = kzalloc(PAGE_SIZE, GFP_KERNEL);
12708 bp->cnic_kwq_cons = bp->cnic_kwq;
12709 bp->cnic_kwq_prod = bp->cnic_kwq;
12710 bp->cnic_kwq_last = bp->cnic_kwq + MAX_SP_DESC_CNT;
12712 bp->cnic_spq_pending = 0;
12713 bp->cnic_kwq_pending = 0;
12715 bp->cnic_data = data;
12718 cp->drv_state = CNIC_DRV_STATE_REGD;
12720 bnx2x_init_sb(bp, bp->cnic_sb, bp->cnic_sb_mapping, CNIC_SB_ID(bp));
12722 bnx2x_setup_cnic_irq_info(bp);
12723 bnx2x_set_iscsi_eth_mac_addr(bp, 1);
12724 bp->cnic_flags |= BNX2X_CNIC_FLAG_MAC_SET;
12725 rcu_assign_pointer(bp->cnic_ops, ops);
12730 static int bnx2x_unregister_cnic(struct net_device *dev)
12732 struct bnx2x *bp = netdev_priv(dev);
12733 struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
12735 mutex_lock(&bp->cnic_mutex);
12736 if (bp->cnic_flags & BNX2X_CNIC_FLAG_MAC_SET) {
12737 bp->cnic_flags &= ~BNX2X_CNIC_FLAG_MAC_SET;
12738 bnx2x_set_iscsi_eth_mac_addr(bp, 0);
12741 rcu_assign_pointer(bp->cnic_ops, NULL);
12742 mutex_unlock(&bp->cnic_mutex);
12744 kfree(bp->cnic_kwq);
12745 bp->cnic_kwq = NULL;
12750 struct cnic_eth_dev *bnx2x_cnic_probe(struct net_device *dev)
12752 struct bnx2x *bp = netdev_priv(dev);
12753 struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
12755 cp->drv_owner = THIS_MODULE;
12756 cp->chip_id = CHIP_ID(bp);
12757 cp->pdev = bp->pdev;
12758 cp->io_base = bp->regview;
12759 cp->io_base2 = bp->doorbells;
12760 cp->max_kwqe_pending = 8;
12761 cp->ctx_blk_size = CNIC_CTX_PER_ILT * sizeof(union cdu_context);
12762 cp->ctx_tbl_offset = FUNC_ILT_BASE(BP_FUNC(bp)) + 1;
12763 cp->ctx_tbl_len = CNIC_ILT_LINES;
12764 cp->starting_cid = BCM_CNIC_CID_START;
12765 cp->drv_submit_kwqes_16 = bnx2x_cnic_sp_queue;
12766 cp->drv_ctl = bnx2x_drv_ctl;
12767 cp->drv_register_cnic = bnx2x_register_cnic;
12768 cp->drv_unregister_cnic = bnx2x_unregister_cnic;
12772 EXPORT_SYMBOL(bnx2x_cnic_probe);
12774 #endif /* BCM_CNIC */