#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/dma-mapping.h>
-#include <asm/bitops.h>
+#include <linux/bitops.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <linux/delay.h>
#include "bnx2_fw.h"
#include "bnx2_fw2.h"
-#define FW_BUF_SIZE 0x8000
+#define FW_BUF_SIZE 0x10000
#define DRV_MODULE_NAME "bnx2"
#define PFX DRV_MODULE_NAME ": "
-#define DRV_MODULE_VERSION "1.6.6"
-#define DRV_MODULE_RELDATE "October 2, 2007"
+#define DRV_MODULE_VERSION "1.7.0"
+#define DRV_MODULE_RELDATE "December 11, 2007"
#define RUN_AT(x) (jiffies + (x))
bp->stats_blk = NULL;
}
if (bp->tx_desc_ring) {
- pci_free_consistent(bp->pdev,
- sizeof(struct tx_bd) * TX_DESC_CNT,
+ pci_free_consistent(bp->pdev, TXBD_RING_SIZE,
bp->tx_desc_ring, bp->tx_desc_mapping);
bp->tx_desc_ring = NULL;
}
bp->tx_buf_ring = NULL;
for (i = 0; i < bp->rx_max_ring; i++) {
if (bp->rx_desc_ring[i])
- pci_free_consistent(bp->pdev,
- sizeof(struct rx_bd) * RX_DESC_CNT,
+ pci_free_consistent(bp->pdev, RXBD_RING_SIZE,
bp->rx_desc_ring[i],
bp->rx_desc_mapping[i]);
bp->rx_desc_ring[i] = NULL;
}
vfree(bp->rx_buf_ring);
bp->rx_buf_ring = NULL;
+ for (i = 0; i < bp->rx_max_pg_ring; i++) {
+ if (bp->rx_pg_desc_ring[i])
+ pci_free_consistent(bp->pdev, RXBD_RING_SIZE,
+ bp->rx_pg_desc_ring[i],
+ bp->rx_pg_desc_mapping[i]);
+ bp->rx_pg_desc_ring[i] = NULL;
+ }
+ vfree(bp->rx_pg_ring);
+ bp->rx_pg_ring = NULL;
}
static int
bnx2_alloc_mem(struct bnx2 *bp)
{
int i, status_blk_size;
- bp->tx_buf_ring = kzalloc(sizeof(struct sw_bd) * TX_DESC_CNT,
- GFP_KERNEL);
+ bp->tx_buf_ring = kzalloc(SW_TXBD_RING_SIZE, GFP_KERNEL);
if (bp->tx_buf_ring == NULL)
return -ENOMEM;
- bp->tx_desc_ring = pci_alloc_consistent(bp->pdev,
- sizeof(struct tx_bd) *
- TX_DESC_CNT,
+ bp->tx_desc_ring = pci_alloc_consistent(bp->pdev, TXBD_RING_SIZE,
&bp->tx_desc_mapping);
if (bp->tx_desc_ring == NULL)
goto alloc_mem_err;
- bp->rx_buf_ring = vmalloc(sizeof(struct sw_bd) * RX_DESC_CNT *
- bp->rx_max_ring);
+ bp->rx_buf_ring = vmalloc(SW_RXBD_RING_SIZE * bp->rx_max_ring);
if (bp->rx_buf_ring == NULL)
goto alloc_mem_err;
- memset(bp->rx_buf_ring, 0, sizeof(struct sw_bd) * RX_DESC_CNT *
- bp->rx_max_ring);
+ memset(bp->rx_buf_ring, 0, SW_RXBD_RING_SIZE * bp->rx_max_ring);
for (i = 0; i < bp->rx_max_ring; i++) {
bp->rx_desc_ring[i] =
- pci_alloc_consistent(bp->pdev,
- sizeof(struct rx_bd) * RX_DESC_CNT,
+ pci_alloc_consistent(bp->pdev, RXBD_RING_SIZE,
&bp->rx_desc_mapping[i]);
if (bp->rx_desc_ring[i] == NULL)
goto alloc_mem_err;
}
+ if (bp->rx_pg_ring_size) {
+ bp->rx_pg_ring = vmalloc(SW_RXPG_RING_SIZE *
+ bp->rx_max_pg_ring);
+ if (bp->rx_pg_ring == NULL)
+ goto alloc_mem_err;
+
+ memset(bp->rx_pg_ring, 0, SW_RXPG_RING_SIZE *
+ bp->rx_max_pg_ring);
+ }
+
+ for (i = 0; i < bp->rx_max_pg_ring; i++) {
+ bp->rx_pg_desc_ring[i] =
+ pci_alloc_consistent(bp->pdev, RXBD_RING_SIZE,
+ &bp->rx_pg_desc_mapping[i]);
+ if (bp->rx_pg_desc_ring[i] == NULL)
+ goto alloc_mem_err;
+ }
+
/* Combine status and statistics blocks into one allocation. */
status_blk_size = L1_CACHE_ALIGN(sizeof(struct status_block));
bp->status_stats_size = status_blk_size +
vcid_addr += (i << PHY_CTX_SHIFT);
pcid_addr += (i << PHY_CTX_SHIFT);
- REG_WR(bp, BNX2_CTX_VIRT_ADDR, 0x00);
+ REG_WR(bp, BNX2_CTX_VIRT_ADDR, vcid_addr);
REG_WR(bp, BNX2_CTX_PAGE_TBL, pcid_addr);
/* Zero out the context. */
for (offset = 0; offset < PHY_CTX_SIZE; offset += 4)
- CTX_WR(bp, 0x00, offset, 0);
-
- REG_WR(bp, BNX2_CTX_VIRT_ADDR, vcid_addr);
- REG_WR(bp, BNX2_CTX_PAGE_TBL, pcid_addr);
+ CTX_WR(bp, vcid_addr, offset, 0);
}
}
}
}
static inline int
+bnx2_alloc_rx_page(struct bnx2 *bp, u16 index)
+{
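+ /* Allocate one page for the rx page ring, which holds the non-linear
+  * portion of jumbo frames, and point the rx_bd at it.  GFP_ATOMIC
+  * because this can run from the NAPI receive path.
+  */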
+ dma_addr_t mapping;
+ struct sw_pg *rx_pg = &bp->rx_pg_ring[index];
+ struct rx_bd *rxbd =
+ &bp->rx_pg_desc_ring[RX_RING(index)][RX_IDX(index)];
+ struct page *page = alloc_page(GFP_ATOMIC);
+
+ if (!page)
+ return -ENOMEM;
+ mapping = pci_map_page(bp->pdev, page, 0, PAGE_SIZE,
+ PCI_DMA_FROMDEVICE);
+ rx_pg->page = page;
+ pci_unmap_addr_set(rx_pg, mapping, mapping);
+ rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
+ rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;
+ return 0;
+}
+
+static void
+bnx2_free_rx_page(struct bnx2 *bp, u16 index)
+{
+ struct sw_pg *rx_pg = &bp->rx_pg_ring[index];
+ struct page *page = rx_pg->page;
+
+ if (!page)
+ return;
+
+ pci_unmap_page(bp->pdev, pci_unmap_addr(rx_pg, mapping), PAGE_SIZE,
+ PCI_DMA_FROMDEVICE);
+
+ __free_page(page);
+ rx_pg->page = NULL;
+}
+
+static inline int
bnx2_alloc_rx_skb(struct bnx2 *bp, u16 index)
{
struct sk_buff *skb;
}
+static inline u16
+bnx2_get_hw_tx_cons(struct bnx2 *bp)
+{
+ u16 cons;
+
+ cons = bp->status_blk->status_tx_quick_consumer_index0;
+
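+ /* The last entry of each ring page is a pointer to the next page
+  * rather than a real descriptor, so skip over it when the hardware
+  * index lands there.
+  */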
+ if (unlikely((cons & MAX_TX_DESC_CNT) == MAX_TX_DESC_CNT))
+ cons++;
+ return cons;
+}
+
static void
bnx2_tx_int(struct bnx2 *bp)
{
- struct status_block *sblk = bp->status_blk;
u16 hw_cons, sw_cons, sw_ring_cons;
int tx_free_bd = 0;
- hw_cons = bp->hw_tx_cons = sblk->status_tx_quick_consumer_index0;
- if ((hw_cons & MAX_TX_DESC_CNT) == MAX_TX_DESC_CNT) {
- hw_cons++;
- }
+ hw_cons = bnx2_get_hw_tx_cons(bp);
sw_cons = bp->tx_cons;
while (sw_cons != hw_cons) {
dev_kfree_skb(skb);
- hw_cons = bp->hw_tx_cons =
- sblk->status_tx_quick_consumer_index0;
-
- if ((hw_cons & MAX_TX_DESC_CNT) == MAX_TX_DESC_CNT) {
- hw_cons++;
- }
+ hw_cons = bnx2_get_hw_tx_cons(bp);
}
+ bp->hw_tx_cons = hw_cons;
bp->tx_cons = sw_cons;
/* Need to make the tx_cons update visible to bnx2_start_xmit()
* before checking for netif_queue_stopped(). Without the
}
}
+static void
+bnx2_reuse_rx_skb_pages(struct bnx2 *bp, struct sk_buff *skb, int count)
+{
+ struct sw_pg *cons_rx_pg, *prod_rx_pg;
+ struct rx_bd *cons_bd, *prod_bd;
+ dma_addr_t mapping;
+ int i;
+ u16 hw_prod = bp->rx_pg_prod, prod;
+ u16 cons = bp->rx_pg_cons;
+
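+ /* Recycle up to @count pages from the consumer back to the producer
+  * without allocating new ones; used when an allocation fails partway
+  * through a frame.
+  */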
+ for (i = 0; i < count; i++) {
+ prod = RX_PG_RING_IDX(hw_prod);
+
+ prod_rx_pg = &bp->rx_pg_ring[prod];
+ cons_rx_pg = &bp->rx_pg_ring[cons];
+ cons_bd = &bp->rx_pg_desc_ring[RX_RING(cons)][RX_IDX(cons)];
+ prod_bd = &bp->rx_pg_desc_ring[RX_RING(prod)][RX_IDX(prod)];
+
+ if (i == 0 && skb) {
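+ /* Reclaim the page most recently attached to the partially
+  * built skb, hand it back to the ring at the consumer slot,
+  * and drop the skb itself.
+  */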
+ struct page *page;
+ struct skb_shared_info *shinfo;
+
+ shinfo = skb_shinfo(skb);
+ shinfo->nr_frags--;
+ page = shinfo->frags[shinfo->nr_frags].page;
+ shinfo->frags[shinfo->nr_frags].page = NULL;
+ mapping = pci_map_page(bp->pdev, page, 0, PAGE_SIZE,
+ PCI_DMA_FROMDEVICE);
+ cons_rx_pg->page = page;
+ pci_unmap_addr_set(cons_rx_pg, mapping, mapping);
+ dev_kfree_skb(skb);
+ }
+ if (prod != cons) {
+ prod_rx_pg->page = cons_rx_pg->page;
+ cons_rx_pg->page = NULL;
+ pci_unmap_addr_set(prod_rx_pg, mapping,
+ pci_unmap_addr(cons_rx_pg, mapping));
+
+ prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
+ prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;
+ }
+ cons = RX_PG_RING_IDX(NEXT_RX_BD(cons));
+ hw_prod = NEXT_RX_BD(hw_prod);
+ }
+ bp->rx_pg_prod = hw_prod;
+ bp->rx_pg_cons = cons;
+}
+
static inline void
bnx2_reuse_rx_skb(struct bnx2 *bp, struct sk_buff *skb,
u16 cons, u16 prod)
}
static int
+bnx2_rx_skb(struct bnx2 *bp, struct sk_buff *skb, unsigned int len,
+ unsigned int hdr_len, dma_addr_t dma_addr, u32 ring_idx)
+{
+ int err;
+ u16 prod = ring_idx & 0xffff;
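+ /* ring_idx packs the consumer index in the upper 16 bits and the
+  * producer index in the lower 16 bits (see bnx2_rx_int()).
+  */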
+
+ err = bnx2_alloc_rx_skb(bp, prod);
+ if (unlikely(err)) {
+ bnx2_reuse_rx_skb(bp, skb, (u16) (ring_idx >> 16), prod);
+ if (hdr_len) {
+ unsigned int raw_len = len + 4;
+ int pages = PAGE_ALIGN(raw_len - hdr_len) >> PAGE_SHIFT;
+
+ bnx2_reuse_rx_skb_pages(bp, NULL, pages);
+ }
+ return err;
+ }
+
+ skb_reserve(skb, bp->rx_offset);
+ pci_unmap_single(bp->pdev, dma_addr, bp->rx_buf_use_size,
+ PCI_DMA_FROMDEVICE);
+
+ if (hdr_len == 0) {
+ skb_put(skb, len);
+ return 0;
+ } else {
+ unsigned int i, frag_len, frag_size, pages;
+ struct sw_pg *rx_pg;
+ u16 pg_cons = bp->rx_pg_cons;
+ u16 pg_prod = bp->rx_pg_prod;
+
+ frag_size = len + 4 - hdr_len;
+ pages = PAGE_ALIGN(frag_size) >> PAGE_SHIFT;
+ skb_put(skb, hdr_len);
+
+ for (i = 0; i < pages; i++) {
+ frag_len = min(frag_size, (unsigned int) PAGE_SIZE);
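+ /* Anything still left at this point is frame CRC: skip it,
+  * trim the CRC bytes that spilled into the previous fragment,
+  * and recycle the unused pages.
+  */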
+ if (unlikely(frag_len <= 4)) {
+ unsigned int tail = 4 - frag_len;
+
+ bp->rx_pg_cons = pg_cons;
+ bp->rx_pg_prod = pg_prod;
+ bnx2_reuse_rx_skb_pages(bp, NULL, pages - i);
+ skb->len -= tail;
+ if (i == 0) {
+ skb->tail -= tail;
+ } else {
+ skb_frag_t *frag =
+ &skb_shinfo(skb)->frags[i - 1];
+ frag->size -= tail;
+ skb->data_len -= tail;
+ skb->truesize -= tail;
+ }
+ return 0;
+ }
+ rx_pg = &bp->rx_pg_ring[pg_cons];
+
+ pci_unmap_page(bp->pdev, pci_unmap_addr(rx_pg, mapping),
+ PAGE_SIZE, PCI_DMA_FROMDEVICE);
+
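+ /* The last page also carries the 4-byte frame CRC, which is
+  * not part of the packet data.
+  */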
+ if (i == pages - 1)
+ frag_len -= 4;
+
+ skb_fill_page_desc(skb, i, rx_pg->page, 0, frag_len);
+ rx_pg->page = NULL;
+
+ err = bnx2_alloc_rx_page(bp, RX_PG_RING_IDX(pg_prod));
+ if (unlikely(err)) {
+ bp->rx_pg_cons = pg_cons;
+ bp->rx_pg_prod = pg_prod;
+ bnx2_reuse_rx_skb_pages(bp, skb, pages - i);
+ return err;
+ }
+
+ frag_size -= frag_len;
+ skb->data_len += frag_len;
+ skb->truesize += frag_len;
+ skb->len += frag_len;
+
+ pg_prod = NEXT_RX_BD(pg_prod);
+ pg_cons = RX_PG_RING_IDX(NEXT_RX_BD(pg_cons));
+ }
+ bp->rx_pg_prod = pg_prod;
+ bp->rx_pg_cons = pg_cons;
+ }
+ return 0;
+}
+
+static inline u16
+bnx2_get_hw_rx_cons(struct bnx2 *bp)
+{
+ u16 cons = bp->status_blk->status_rx_quick_consumer_index0;
+
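+ /* As with the tx ring, skip the next-page pointer entry at the end
+  * of each ring page.
+  */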
+ if (unlikely((cons & MAX_RX_DESC_CNT) == MAX_RX_DESC_CNT))
+ cons++;
+ return cons;
+}
+
+static int
bnx2_rx_int(struct bnx2 *bp, int budget)
{
- struct status_block *sblk = bp->status_blk;
u16 hw_cons, sw_cons, sw_ring_cons, sw_prod, sw_ring_prod;
struct l2_fhdr *rx_hdr;
- int rx_pkt = 0;
+ int rx_pkt = 0, pg_ring_used = 0;
- hw_cons = bp->hw_rx_cons = sblk->status_rx_quick_consumer_index0;
- if ((hw_cons & MAX_RX_DESC_CNT) == MAX_RX_DESC_CNT) {
- hw_cons++;
- }
+ hw_cons = bnx2_get_hw_rx_cons(bp);
sw_cons = bp->rx_cons;
sw_prod = bp->rx_prod;
*/
rmb();
while (sw_cons != hw_cons) {
- unsigned int len;
+ unsigned int len, hdr_len;
u32 status;
struct sw_bd *rx_buf;
struct sk_buff *skb;
bp->rx_offset + RX_COPY_THRESH, PCI_DMA_FROMDEVICE);
rx_hdr = (struct l2_fhdr *) skb->data;
- len = rx_hdr->l2_fhdr_pkt_len - 4;
+ len = rx_hdr->l2_fhdr_pkt_len;
if ((status = rx_hdr->l2_fhdr_status) &
(L2_FHDR_ERRORS_BAD_CRC |
L2_FHDR_ERRORS_TOO_SHORT |
L2_FHDR_ERRORS_GIANT_FRAME)) {
- goto reuse_rx;
+ bnx2_reuse_rx_skb(bp, skb, sw_ring_cons, sw_ring_prod);
+ goto next_rx;
+ }
+ hdr_len = 0;
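+ /* A nonzero hdr_len selects the page-ring path: either the hardware
+  * split off the header and reported its length (reusing the
+  * l2_fhdr_ip_xsum field), or the frame is larger than the jumbo
+  * threshold and only the first rx_jumbo_thresh bytes are linear.
+  */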
+ if (status & L2_FHDR_STATUS_SPLIT) {
+ hdr_len = rx_hdr->l2_fhdr_ip_xsum;
+ pg_ring_used = 1;
+ } else if (len > bp->rx_jumbo_thresh) {
+ hdr_len = bp->rx_jumbo_thresh;
+ pg_ring_used = 1;
}
- /* Since we don't have a jumbo ring, copy small packets
- * if mtu > 1500
- */
- if ((bp->dev->mtu > 1500) && (len <= RX_COPY_THRESH)) {
+ len -= 4;
+
+ if (len <= bp->rx_copy_thresh) {
struct sk_buff *new_skb;
new_skb = netdev_alloc_skb(bp->dev, len + 2);
- if (new_skb == NULL)
- goto reuse_rx;
+ if (new_skb == NULL) {
+ bnx2_reuse_rx_skb(bp, skb, sw_ring_cons,
+ sw_ring_prod);
+ goto next_rx;
+ }
/* aligned copy */
skb_copy_from_linear_data_offset(skb, bp->rx_offset - 2,
sw_ring_cons, sw_ring_prod);
skb = new_skb;
- }
- else if (bnx2_alloc_rx_skb(bp, sw_ring_prod) == 0) {
- pci_unmap_single(bp->pdev, dma_addr,
- bp->rx_buf_use_size, PCI_DMA_FROMDEVICE);
-
- skb_reserve(skb, bp->rx_offset);
- skb_put(skb, len);
- }
- else {
-reuse_rx:
- bnx2_reuse_rx_skb(bp, skb,
- sw_ring_cons, sw_ring_prod);
+ } else if (unlikely(bnx2_rx_skb(bp, skb, len, hdr_len, dma_addr,
+ (sw_ring_cons << 16) | sw_ring_prod)))
goto next_rx;
- }
skb->protocol = eth_type_trans(skb, bp->dev);
/* Refresh hw_cons to see if there is new work */
if (sw_cons == hw_cons) {
- hw_cons = bp->hw_rx_cons =
- sblk->status_rx_quick_consumer_index0;
- if ((hw_cons & MAX_RX_DESC_CNT) == MAX_RX_DESC_CNT)
- hw_cons++;
+ hw_cons = bnx2_get_hw_rx_cons(bp);
rmb();
}
}
bp->rx_cons = sw_cons;
bp->rx_prod = sw_prod;
+ if (pg_ring_used)
+ REG_WR16(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_PG_BDIDX,
+ bp->rx_pg_prod);
+
REG_WR16(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BDIDX, sw_prod);
REG_WR(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BSEQ, bp->rx_prod_bseq);
{
struct status_block *sblk = bp->status_blk;
- if ((sblk->status_rx_quick_consumer_index0 != bp->hw_rx_cons) ||
- (sblk->status_tx_quick_consumer_index0 != bp->hw_tx_cons))
+ if ((bnx2_get_hw_rx_cons(bp) != bp->rx_cons) ||
+ (bnx2_get_hw_tx_cons(bp) != bp->hw_tx_cons))
return 1;
if ((sblk->status_attn_bits & STATUS_ATTN_EVENTS) !=
return 0;
}
-static int
-bnx2_poll(struct napi_struct *napi, int budget)
+static int bnx2_poll_work(struct bnx2 *bp, int work_done, int budget)
{
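+ /* Service any attention events, then completed tx and rx work, up to
+  * the remaining budget; returns the cumulative rx packet count.
+  */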
- struct bnx2 *bp = container_of(napi, struct bnx2, napi);
- struct net_device *dev = bp->dev;
struct status_block *sblk = bp->status_blk;
u32 status_attn_bits = sblk->status_attn_bits;
u32 status_attn_bits_ack = sblk->status_attn_bits_ack;
- int work_done = 0;
if ((status_attn_bits & STATUS_ATTN_EVENTS) !=
(status_attn_bits_ack & STATUS_ATTN_EVENTS)) {
REG_RD(bp, BNX2_HC_COMMAND);
}
- if (bp->status_blk->status_tx_quick_consumer_index0 != bp->hw_tx_cons)
+ if (bnx2_get_hw_tx_cons(bp) != bp->hw_tx_cons)
bnx2_tx_int(bp);
- if (bp->status_blk->status_rx_quick_consumer_index0 != bp->hw_rx_cons)
- work_done = bnx2_rx_int(bp, budget);
+ if (bnx2_get_hw_rx_cons(bp) != bp->rx_cons)
+ work_done += bnx2_rx_int(bp, budget - work_done);
- bp->last_status_idx = bp->status_blk->status_idx;
- rmb();
+ return work_done;
+}
+
+static int bnx2_poll(struct napi_struct *napi, int budget)
+{
+ struct bnx2 *bp = container_of(napi, struct bnx2, napi);
+ int work_done = 0;
+ struct status_block *sblk = bp->status_blk;
+
+ while (1) {
+ work_done = bnx2_poll_work(bp, work_done, budget);
- if (!bnx2_has_work(bp)) {
- netif_rx_complete(dev, napi);
- if (likely(bp->flags & USING_MSI_FLAG)) {
+ if (unlikely(work_done >= budget))
+ break;
+
+ /* bp->last_status_idx is used below to tell the hw how
+ * much work has been processed, so we must read it before
+ * checking for more work.
+ */
+ bp->last_status_idx = sblk->status_idx;
+ rmb();
+ if (likely(!bnx2_has_work(bp))) {
+ netif_rx_complete(bp->dev, napi);
+ if (likely(bp->flags & USING_MSI_FLAG)) {
+ REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
+ BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
+ bp->last_status_idx);
+ break;
+ }
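+ /* For INTx, ack with the interrupt masked first, then unmasked;
+  * the two writes appear intended to avoid a spurious interrupt
+  * while the new status index is latched.
+  */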
REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
+ BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
bp->last_status_idx);
- return 0;
- }
- REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
- BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
- BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
- bp->last_status_idx);
- REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
- BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
- bp->last_status_idx);
+ REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
+ BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
+ bp->last_status_idx);
+ break;
+ }
}
return work_done;
{
struct cpu_reg cpu_reg;
struct fw_info *fw;
- int rc;
- void *text;
+ int rc, rv2p_len;
+ void *text, *rv2p;
/* Initialize the RV2P processor. */
text = vmalloc(FW_BUF_SIZE);
if (!text)
return -ENOMEM;
- rc = zlib_inflate_blob(text, FW_BUF_SIZE, bnx2_rv2p_proc1, sizeof(bnx2_rv2p_proc1));
+ if (CHIP_NUM(bp) == CHIP_NUM_5709) {
+ rv2p = bnx2_xi_rv2p_proc1;
+ rv2p_len = sizeof(bnx2_xi_rv2p_proc1);
+ } else {
+ rv2p = bnx2_rv2p_proc1;
+ rv2p_len = sizeof(bnx2_rv2p_proc1);
+ }
+ rc = zlib_inflate_blob(text, FW_BUF_SIZE, rv2p, rv2p_len);
if (rc < 0)
goto init_cpu_err;
load_rv2p_fw(bp, text, rc /* == len */, RV2P_PROC1);
- rc = zlib_inflate_blob(text, FW_BUF_SIZE, bnx2_rv2p_proc2, sizeof(bnx2_rv2p_proc2));
+ if (CHIP_NUM(bp) == CHIP_NUM_5709) {
+ rv2p = bnx2_xi_rv2p_proc2;
+ rv2p_len = sizeof(bnx2_xi_rv2p_proc2);
+ } else {
+ rv2p = bnx2_rv2p_proc2;
+ rv2p_len = sizeof(bnx2_rv2p_proc2);
+ }
+ rc = zlib_inflate_blob(text, FW_BUF_SIZE, rv2p, rv2p_len);
if (rc < 0)
goto init_cpu_err;
cpu_reg.spad_base = BNX2_CP_SCRATCH;
cpu_reg.mips_view_base = 0x8000000;
- if (CHIP_NUM(bp) == CHIP_NUM_5709) {
+ if (CHIP_NUM(bp) == CHIP_NUM_5709)
fw = &bnx2_cp_fw_09;
+ else
+ fw = &bnx2_cp_fw_06;
+
+ fw->text = text;
+ rc = load_cpu_fw(bp, &cpu_reg, fw);
- fw->text = text;
- rc = load_cpu_fw(bp, &cpu_reg, fw);
- if (rc)
- goto init_cpu_err;
- }
init_cpu_err:
vfree(text);
return rc;
autoneg = bp->autoneg;
advertising = bp->advertising;
- bp->autoneg = AUTONEG_SPEED;
- bp->advertising = ADVERTISED_10baseT_Half |
- ADVERTISED_10baseT_Full |
- ADVERTISED_100baseT_Half |
- ADVERTISED_100baseT_Full |
- ADVERTISED_Autoneg;
+ if (bp->phy_port == PORT_TP) {
+ bp->autoneg = AUTONEG_SPEED;
+ bp->advertising = ADVERTISED_10baseT_Half |
+ ADVERTISED_10baseT_Full |
+ ADVERTISED_100baseT_Half |
+ ADVERTISED_100baseT_Full |
+ ADVERTISED_Autoneg;
+ }
- bnx2_setup_copper_phy(bp);
+ spin_lock_bh(&bp->phy_lock);
+ bnx2_setup_phy(bp, bp->phy_port);
+ spin_unlock_bh(&bp->phy_lock);
bp->autoneg = autoneg;
bp->advertising = advertising;
/* Enable port mode. */
val &= ~BNX2_EMAC_MODE_PORT;
- val |= BNX2_EMAC_MODE_PORT_MII |
- BNX2_EMAC_MODE_MPKT_RCVD |
+ val |= BNX2_EMAC_MODE_MPKT_RCVD |
BNX2_EMAC_MODE_ACPI_RCVD |
BNX2_EMAC_MODE_MPKT;
+ if (bp->phy_port == PORT_TP)
+ val |= BNX2_EMAC_MODE_PORT_MII;
+ else {
+ val |= BNX2_EMAC_MODE_PORT_GMII;
+ if (bp->line_speed == SPEED_2500)
+ val |= BNX2_EMAC_MODE_25G_MODE;
+ }
REG_WR(bp, BNX2_EMAC_MODE, val);
}
static void
-bnx2_init_rx_ring(struct bnx2 *bp)
+bnx2_init_rxbd_rings(struct rx_bd *rx_ring[], dma_addr_t dma[], u32 buf_size,
+ int num_rings)
{
- struct rx_bd *rxbd;
int i;
- u16 prod, ring_prod;
- u32 val;
-
- /* 8 for CRC and VLAN */
- bp->rx_buf_use_size = bp->dev->mtu + ETH_HLEN + bp->rx_offset + 8;
- /* hw alignment */
- bp->rx_buf_size = bp->rx_buf_use_size + BNX2_RX_ALIGN;
-
- ring_prod = prod = bp->rx_prod = 0;
- bp->rx_cons = 0;
- bp->hw_rx_cons = 0;
- bp->rx_prod_bseq = 0;
+ struct rx_bd *rxbd;
- for (i = 0; i < bp->rx_max_ring; i++) {
+ for (i = 0; i < num_rings; i++) {
int j;
- rxbd = &bp->rx_desc_ring[i][0];
+ rxbd = &rx_ring[i][0];
for (j = 0; j < MAX_RX_DESC_CNT; j++, rxbd++) {
- rxbd->rx_bd_len = bp->rx_buf_use_size;
+ rxbd->rx_bd_len = buf_size;
rxbd->rx_bd_flags = RX_BD_FLAGS_START | RX_BD_FLAGS_END;
}
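+ /* The loop leaves rxbd at the last entry of the page; chain it to
+  * the next ring page, wrapping to the first page at the end.
+  */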
- if (i == (bp->rx_max_ring - 1))
+ if (i == (num_rings - 1))
j = 0;
else
j = i + 1;
- rxbd->rx_bd_haddr_hi = (u64) bp->rx_desc_mapping[j] >> 32;
- rxbd->rx_bd_haddr_lo = (u64) bp->rx_desc_mapping[j] &
- 0xffffffff;
+ rxbd->rx_bd_haddr_hi = (u64) dma[j] >> 32;
+ rxbd->rx_bd_haddr_lo = (u64) dma[j] & 0xffffffff;
+ }
+}
+
+static void
+bnx2_init_rx_ring(struct bnx2 *bp)
+{
+ int i;
+ u16 prod, ring_prod;
+ u32 val, rx_cid_addr = GET_CID_ADDR(RX_CID);
+
+ bp->rx_prod = 0;
+ bp->rx_cons = 0;
+ bp->rx_prod_bseq = 0;
+ bp->rx_pg_prod = 0;
+ bp->rx_pg_cons = 0;
+
+ bnx2_init_rxbd_rings(bp->rx_desc_ring, bp->rx_desc_mapping,
+ bp->rx_buf_use_size, bp->rx_max_ring);
+
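+ /* Default to no page ring; overridden below when jumbo pages are
+  * in use.
+  */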
+ CTX_WR(bp, rx_cid_addr, BNX2_L2CTX_PG_BUF_SIZE, 0);
+ if (bp->rx_pg_ring_size) {
+ bnx2_init_rxbd_rings(bp->rx_pg_desc_ring,
+ bp->rx_pg_desc_mapping,
+ PAGE_SIZE, bp->rx_max_pg_ring);
+ val = (bp->rx_buf_use_size << 16) | PAGE_SIZE;
+ CTX_WR(bp, rx_cid_addr, BNX2_L2CTX_PG_BUF_SIZE, val);
+ CTX_WR(bp, rx_cid_addr, BNX2_L2CTX_RBDC_KEY,
+ BNX2_L2CTX_RBDC_JUMBO_KEY);
+
+ val = (u64) bp->rx_pg_desc_mapping[0] >> 32;
+ CTX_WR(bp, rx_cid_addr, BNX2_L2CTX_NX_PG_BDHADDR_HI, val);
+
+ val = (u64) bp->rx_pg_desc_mapping[0] & 0xffffffff;
+ CTX_WR(bp, rx_cid_addr, BNX2_L2CTX_NX_PG_BDHADDR_LO, val);
+
+ if (CHIP_NUM(bp) == CHIP_NUM_5709)
+ REG_WR(bp, BNX2_MQ_MAP_L2_3, BNX2_MQ_MAP_L2_3_DEFAULT);
}
val = BNX2_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE;
val |= BNX2_L2CTX_CTX_TYPE_SIZE_L2;
val |= 0x02 << 8;
- CTX_WR(bp, GET_CID_ADDR(RX_CID), BNX2_L2CTX_CTX_TYPE, val);
+ CTX_WR(bp, rx_cid_addr, BNX2_L2CTX_CTX_TYPE, val);
val = (u64) bp->rx_desc_mapping[0] >> 32;
- CTX_WR(bp, GET_CID_ADDR(RX_CID), BNX2_L2CTX_NX_BDHADDR_HI, val);
+ CTX_WR(bp, rx_cid_addr, BNX2_L2CTX_NX_BDHADDR_HI, val);
val = (u64) bp->rx_desc_mapping[0] & 0xffffffff;
- CTX_WR(bp, GET_CID_ADDR(RX_CID), BNX2_L2CTX_NX_BDHADDR_LO, val);
+ CTX_WR(bp, rx_cid_addr, BNX2_L2CTX_NX_BDHADDR_LO, val);
+ ring_prod = prod = bp->rx_pg_prod;
+ for (i = 0; i < bp->rx_pg_ring_size; i++) {
+ if (bnx2_alloc_rx_page(bp, ring_prod) < 0)
+ break;
+ prod = NEXT_RX_BD(prod);
+ ring_prod = RX_PG_RING_IDX(prod);
+ }
+ bp->rx_pg_prod = prod;
+
+ ring_prod = prod = bp->rx_prod;
for (i = 0; i < bp->rx_ring_size; i++) {
if (bnx2_alloc_rx_skb(bp, ring_prod) < 0) {
break;
}
bp->rx_prod = prod;
+ REG_WR16(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_PG_BDIDX, bp->rx_pg_prod);
REG_WR16(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BDIDX, prod);
REG_WR(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BSEQ, bp->rx_prod_bseq);
}
-static void
-bnx2_set_rx_ring_size(struct bnx2 *bp, u32 size)
+static u32 bnx2_find_max_ring(u32 ring_size, u32 max_size)
{
- u32 num_rings, max;
+ u32 max, num_rings = 1;
- bp->rx_ring_size = size;
- num_rings = 1;
- while (size > MAX_RX_DESC_CNT) {
- size -= MAX_RX_DESC_CNT;
+ while (ring_size > MAX_RX_DESC_CNT) {
+ ring_size -= MAX_RX_DESC_CNT;
num_rings++;
}
/* round to next power of 2 */
- max = MAX_RX_RINGS;
+ max = max_size;
while ((max & num_rings) == 0)
max >>= 1;
if (num_rings != max)
max <<= 1;
- bp->rx_max_ring = max;
+ return max;
+}
+
+static void
+bnx2_set_rx_ring_size(struct bnx2 *bp, u32 size)
+{
+ u32 rx_size, rx_space, jumbo_size;
+
+ /* 8 for CRC and VLAN */
+ rx_size = bp->dev->mtu + ETH_HLEN + bp->rx_offset + 8;
+
+ rx_space = SKB_DATA_ALIGN(rx_size + BNX2_RX_ALIGN) + NET_SKB_PAD +
+ sizeof(struct skb_shared_info);
+
+ bp->rx_copy_thresh = RX_COPY_THRESH;
+ bp->rx_pg_ring_size = 0;
+ bp->rx_max_pg_ring = 0;
+ bp->rx_max_pg_ring_idx = 0;
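+ /* If a full-MTU linear buffer (plus skb overhead) no longer fits in
+  * one page, receive just the header into a small linear buffer and
+  * the rest of the frame into pages.  The "mtu - 40" below appears to
+  * estimate the largest TCP payload, 40 being the minimal TCP/IPv4
+  * header size.
+  */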
+ if (rx_space > PAGE_SIZE) {
+ int pages = PAGE_ALIGN(bp->dev->mtu - 40) >> PAGE_SHIFT;
+
+ jumbo_size = size * pages;
+ if (jumbo_size > MAX_TOTAL_RX_PG_DESC_CNT)
+ jumbo_size = MAX_TOTAL_RX_PG_DESC_CNT;
+
+ bp->rx_pg_ring_size = jumbo_size;
+ bp->rx_max_pg_ring = bnx2_find_max_ring(jumbo_size,
+ MAX_RX_PG_RINGS);
+ bp->rx_max_pg_ring_idx = (bp->rx_max_pg_ring * RX_DESC_CNT) - 1;
+ rx_size = RX_COPY_THRESH + bp->rx_offset;
+ bp->rx_copy_thresh = 0;
+ }
+
+ bp->rx_buf_use_size = rx_size;
+ /* hw alignment */
+ bp->rx_buf_size = bp->rx_buf_use_size + BNX2_RX_ALIGN;
+ bp->rx_jumbo_thresh = rx_size - bp->rx_offset;
+ bp->rx_ring_size = size;
+ bp->rx_max_ring = bnx2_find_max_ring(size, MAX_RX_RINGS);
bp->rx_max_ring_idx = (bp->rx_max_ring * RX_DESC_CNT) - 1;
}
dev_kfree_skb(skb);
}
+ for (i = 0; i < bp->rx_max_pg_ring_idx; i++)
+ bnx2_free_rx_page(bp, i);
}
static void
else
return -EINVAL;
- pkt_size = 1514;
+ pkt_size = min(bp->dev->mtu + ETH_HLEN, bp->rx_jumbo_thresh - 4);
skb = netdev_alloc_skb(bp->dev, pkt_size);
if (!skb)
return -ENOMEM;
REG_RD(bp, BNX2_HC_COMMAND);
udelay(5);
- rx_start_idx = bp->status_blk->status_rx_quick_consumer_index0;
+ rx_start_idx = bnx2_get_hw_rx_cons(bp);
num_pkts = 0;
pci_unmap_single(bp->pdev, map, pkt_size, PCI_DMA_TODEVICE);
dev_kfree_skb(skb);
- if (bp->status_blk->status_tx_quick_consumer_index0 != bp->tx_prod) {
+ if (bnx2_get_hw_tx_cons(bp) != bp->tx_prod)
goto loopback_test_done;
- }
- rx_idx = bp->status_blk->status_rx_quick_consumer_index0;
+ rx_idx = bnx2_get_hw_rx_cons(bp);
if (rx_idx != rx_start_idx + num_pkts) {
goto loopback_test_done;
}
bnx2_request_irq(struct bnx2 *bp)
{
struct net_device *dev = bp->dev;
- int rc = 0;
-
- if (bp->flags & USING_MSI_FLAG) {
- irq_handler_t fn = bnx2_msi;
-
- if (bp->flags & ONE_SHOT_MSI_FLAG)
- fn = bnx2_msi_1shot;
+ unsigned long flags;
+ struct bnx2_irq *irq = &bp->irq_tbl[0];
+ int rc;
- rc = request_irq(bp->pdev->irq, fn, 0, dev->name, dev);
- } else
- rc = request_irq(bp->pdev->irq, bnx2_interrupt,
- IRQF_SHARED, dev->name, dev);
+ if (bp->flags & USING_MSI_FLAG)
+ flags = 0;
+ else
+ flags = IRQF_SHARED;
+ rc = request_irq(irq->vector, irq->handler, flags, dev->name, dev);
return rc;
}
{
struct net_device *dev = bp->dev;
+ free_irq(bp->irq_tbl[0].vector, dev);
if (bp->flags & USING_MSI_FLAG) {
- free_irq(bp->pdev->irq, dev);
pci_disable_msi(bp->pdev);
bp->flags &= ~(USING_MSI_FLAG | ONE_SHOT_MSI_FLAG);
- } else
- free_irq(bp->pdev->irq, dev);
+ }
+}
+
+static void
+bnx2_setup_int_mode(struct bnx2 *bp, int dis_msi)
+{
+ bp->irq_tbl[0].handler = bnx2_interrupt;
+ strcpy(bp->irq_tbl[0].name, bp->dev->name);
+
+ if ((bp->flags & MSI_CAP_FLAG) && !dis_msi) {
+ if (pci_enable_msi(bp->pdev) == 0) {
+ bp->flags |= USING_MSI_FLAG;
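+ /* The 5709 supports one-shot MSI: the chip masks the
+  * interrupt automatically after each message, so the
+  * handler does not have to.
+  */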
+ if (CHIP_NUM(bp) == CHIP_NUM_5709) {
+ bp->flags |= ONE_SHOT_MSI_FLAG;
+ bp->irq_tbl[0].handler = bnx2_msi_1shot;
+ } else
+ bp->irq_tbl[0].handler = bnx2_msi;
+ }
+ }
+
+ bp->irq_tbl[0].vector = bp->pdev->irq;
}
/* Called with rtnl_lock */
if (rc)
return rc;
+ bnx2_setup_int_mode(bp, disable_msi);
napi_enable(&bp->napi);
-
- if ((bp->flags & MSI_CAP_FLAG) && !disable_msi) {
- if (pci_enable_msi(bp->pdev) == 0) {
- bp->flags |= USING_MSI_FLAG;
- if (CHIP_NUM(bp) == CHIP_NUM_5709)
- bp->flags |= ONE_SHOT_MSI_FLAG;
- }
- }
rc = bnx2_request_irq(bp);
if (rc) {
bnx2_disable_int(bp);
bnx2_free_irq(bp);
+ bnx2_setup_int_mode(bp, 1);
+
rc = bnx2_init_nic(bp);
if (!rc)
ering->rx_max_pending = MAX_TOTAL_RX_DESC_CNT;
ering->rx_mini_max_pending = 0;
- ering->rx_jumbo_max_pending = 0;
+ ering->rx_jumbo_max_pending = MAX_TOTAL_RX_PG_DESC_CNT;
ering->rx_pending = bp->rx_ring_size;
ering->rx_mini_pending = 0;
- ering->rx_jumbo_pending = 0;
+ ering->rx_jumbo_pending = bp->rx_pg_ring_size;
ering->tx_max_pending = MAX_TX_DESC_CNT;
ering->tx_pending = bp->tx_ring_size;
}
static int
-bnx2_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
+bnx2_change_ring_size(struct bnx2 *bp, u32 rx, u32 tx)
{
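+ /* New ring sizes only take effect after the rings and context are
+  * rebuilt, so reset the chip and reallocate when the device is up.
+  */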
- struct bnx2 *bp = netdev_priv(dev);
-
- if ((ering->rx_pending > MAX_TOTAL_RX_DESC_CNT) ||
- (ering->tx_pending > MAX_TX_DESC_CNT) ||
- (ering->tx_pending <= MAX_SKB_FRAGS)) {
-
- return -EINVAL;
- }
if (netif_running(bp->dev)) {
bnx2_netif_stop(bp);
bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
bnx2_free_mem(bp);
}
- bnx2_set_rx_ring_size(bp, ering->rx_pending);
- bp->tx_ring_size = ering->tx_pending;
+ bnx2_set_rx_ring_size(bp, rx);
+ bp->tx_ring_size = tx;
if (netif_running(bp->dev)) {
int rc;
bnx2_init_nic(bp);
bnx2_netif_start(bp);
}
-
return 0;
}
+static int
+bnx2_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
+{
+ struct bnx2 *bp = netdev_priv(dev);
+ int rc;
+
+ if ((ering->rx_pending > MAX_TOTAL_RX_DESC_CNT) ||
+ (ering->tx_pending > MAX_TX_DESC_CNT) ||
+ (ering->tx_pending <= MAX_SKB_FRAGS)) {
+
+ return -EINVAL;
+ }
+ rc = bnx2_change_ring_size(bp, ering->rx_pending, ering->tx_pending);
+ return rc;
+}
+
static void
bnx2_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
{
return -EINVAL;
dev->mtu = new_mtu;
- if (netif_running(dev)) {
- bnx2_netif_stop(bp);
-
- bnx2_init_nic(bp);
-
- bnx2_netif_start(bp);
- }
- return 0;
+ return bnx2_change_ring_size(bp, bp->rx_ring_size, bp->tx_ring_size);
}
#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
/* enable device (incl. PCI PM wakeup), and bus-mastering */
rc = pci_enable_device(pdev);
if (rc) {
- dev_err(&pdev->dev, "Cannot enable PCI device, aborting.");
+ dev_err(&pdev->dev, "Cannot enable PCI device, aborting.\n");
goto err_out;
}
if (i != 2)
bp->fw_version[j++] = '.';
}
- if (REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_FEATURE) &
- BNX2_PORT_FEATURE_ASF_ENABLED) {
+ reg = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_FEATURE);
+ if (reg & BNX2_PORT_FEATURE_WOL_ENABLED)
+ bp->wol = 1;
+
+ if (reg & BNX2_PORT_FEATURE_ASF_ENABLED) {
bp->flags |= ASF_ENABLE_FLAG;
for (i = 0; i < 30; i++) {
bp->mac_addr[4] = (u8) (reg >> 8);
bp->mac_addr[5] = (u8) reg;
+ bp->rx_offset = sizeof(struct l2_fhdr) + 2;
+
bp->tx_ring_size = MAX_TX_DESC_CNT;
bnx2_set_rx_ring_size(bp, 255);
bp->rx_csum = 1;
- bp->rx_offset = sizeof(struct l2_fhdr) + 2;
-
bp->tx_quick_cons_trip_int = 20;
bp->tx_quick_cons_trip = 20;
bp->tx_ticks_int = 80;
bp->phy_port = PORT_TP;
if (bp->phy_flags & PHY_SERDES_FLAG) {
bp->phy_port = PORT_FIBRE;
- bp->flags |= NO_WOL_FLAG;
+ reg = REG_RD_IND(bp, bp->shmem_base +
+ BNX2_SHARED_HW_CFG_CONFIG);
+ if (!(reg & BNX2_SHARED_HW_CFG_GIG_LINK_ON_VAUX)) {
+ bp->flags |= NO_WOL_FLAG;
+ bp->wol = 0;
+ }
if (CHIP_NUM(bp) != CHIP_NUM_5706) {
bp->phy_addr = 2;
- reg = REG_RD_IND(bp, bp->shmem_base +
- BNX2_SHARED_HW_CFG_CONFIG);
if (reg & BNX2_SHARED_HW_CFG_PHY_2_5G)
bp->phy_flags |= PHY_2_5G_CAPABLE_FLAG;
}
} else if (CHIP_NUM(bp) == CHIP_NUM_5706 ||
CHIP_NUM(bp) == CHIP_NUM_5708)
bp->phy_flags |= PHY_CRC_FIX_FLAG;
- else if (CHIP_ID(bp) == CHIP_ID_5709_A0 ||
- CHIP_ID(bp) == CHIP_ID_5709_A1)
+ else if (CHIP_NUM(bp) == CHIP_NUM_5709 &&
+ (CHIP_REV(bp) == CHIP_REV_Ax ||
+ CHIP_REV(bp) == CHIP_REV_Bx))
bp->phy_flags |= PHY_DIS_EARLY_DAC_FLAG;
if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
(CHIP_ID(bp) == CHIP_ID_5708_B0) ||
- (CHIP_ID(bp) == CHIP_ID_5708_B1))
+ (CHIP_ID(bp) == CHIP_ID_5708_B1)) {
bp->flags |= NO_WOL_FLAG;
+ bp->wol = 0;
+ }
if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
bp->tx_quick_cons_trip_int =