#define DRV_MODULE_NAME "bnx2"
#define PFX DRV_MODULE_NAME ": "
-#define DRV_MODULE_VERSION "1.7.0"
-#define DRV_MODULE_RELDATE "December 11, 2007"
+#define DRV_MODULE_VERSION "1.7.1"
+#define DRV_MODULE_RELDATE "December 19, 2007"
#define RUN_AT(x) (jiffies + (x))
MODULE_DEVICE_TABLE(pci, bnx2_pci_tbl);
-static inline u32 bnx2_tx_avail(struct bnx2 *bp)
+static inline u32 bnx2_tx_avail(struct bnx2 *bp, struct bnx2_napi *bnapi)
{
u32 diff;
/* The ring uses 256 indices for 255 entries, one of them
* needs to be skipped.
*/
- diff = bp->tx_prod - bp->tx_cons;
+ diff = bp->tx_prod - bnapi->tx_cons;
if (unlikely(diff >= TX_DESC_CNT)) {
diff &= 0xffff;
if (diff == TX_DESC_CNT)
diff = MAX_TX_DESC_CNT;
}
return (bp->tx_ring_size - diff);
}
static void
bnx2_disable_int(struct bnx2 *bp)
{
- REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
- BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
+ int i;
+ struct bnx2_napi *bnapi;
+
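+ /* Mask the interrupt on every vector; bnapi->int_num selects which
+ * vector's interrupt state the INT_ACK_CMD write applies to.
+ */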
+ for (i = 0; i < bp->irq_nvecs; i++) {
+ bnapi = &bp->bnx2_napi[i];
+ REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
+ BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
+ }
REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD);
}
static void
bnx2_enable_int(struct bnx2 *bp)
{
- REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
- BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
- BNX2_PCICFG_INT_ACK_CMD_MASK_INT | bp->last_status_idx);
+ int i;
+ struct bnx2_napi *bnapi;
- REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
- BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID | bp->last_status_idx);
+ for (i = 0; i < bp->irq_nvecs; i++) {
+ bnapi = &bp->bnx2_napi[i];
+ REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
+ BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
+ BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
+ bnapi->last_status_idx);
+
+ REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
+ BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
+ bnapi->last_status_idx);
+ }
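+
+ /* Trigger a coalescing pass so that any events that occurred while
+ * interrupts were masked generate an interrupt right away.
+ */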
REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
}
static void
bnx2_disable_int_sync(struct bnx2 *bp)
{
+ int i;
+
atomic_inc(&bp->intr_sem);
bnx2_disable_int(bp);
- synchronize_irq(bp->pdev->irq);
+ for (i = 0; i < bp->irq_nvecs; i++)
+ synchronize_irq(bp->irq_tbl[i].vector);
+}
+
+static void
+bnx2_napi_disable(struct bnx2 *bp)
+{
+ int i;
+
+ for (i = 0; i < bp->irq_nvecs; i++)
+ napi_disable(&bp->bnx2_napi[i].napi);
+}
+
+static void
+bnx2_napi_enable(struct bnx2 *bp)
+{
+ int i;
+
+ for (i = 0; i < bp->irq_nvecs; i++)
+ napi_enable(&bp->bnx2_napi[i].napi);
}
static void
bnx2_netif_stop(struct bnx2 *bp)
{
bnx2_disable_int_sync(bp);
if (netif_running(bp->dev)) {
- napi_disable(&bp->napi);
+ bnx2_napi_disable(bp);
netif_tx_disable(bp->dev);
bp->dev->trans_start = jiffies; /* prevent tx timeout */
}
}

static void
bnx2_netif_start(struct bnx2 *bp)
{
if (atomic_dec_and_test(&bp->intr_sem)) {
if (netif_running(bp->dev)) {
netif_wake_queue(bp->dev);
- napi_enable(&bp->napi);
+ bnx2_napi_enable(bp);
bnx2_enable_int(bp);
}
}
}
/* Combine status and statistics blocks into one allocation. */
status_blk_size = L1_CACHE_ALIGN(sizeof(struct status_block));
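+ /* With MSI-X the chip writes a separate status block for each
+ * hardware vector, spaced BNX2_SBLK_MSIX_ALIGN_SIZE bytes apart,
+ * so size the allocation to cover all of them.
+ */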
+ if (bp->flags & MSIX_CAP_FLAG)
+ status_blk_size = L1_CACHE_ALIGN(BNX2_MAX_MSIX_HW_VEC *
+ BNX2_SBLK_MSIX_ALIGN_SIZE);
bp->status_stats_size = status_blk_size +
sizeof(struct statistics_block);
memset(bp->status_blk, 0, bp->status_stats_size);
+ bp->bnx2_napi[0].status_blk = bp->status_blk;
+ if (bp->flags & MSIX_CAP_FLAG) {
+ for (i = 1; i < BNX2_MAX_MSIX_VEC; i++) {
+ struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
+
+ bnapi->status_blk_msix = (void *)
+ ((unsigned long) bp->status_blk +
+ BNX2_SBLK_MSIX_ALIGN_SIZE * i);
+ bnapi->int_num = i << 24;
+ }
+ }
+
bp->stats_blk = (void *) ((unsigned long) bp->status_blk +
status_blk_size);
}
static inline int
-bnx2_alloc_rx_skb(struct bnx2 *bp, u16 index)
+bnx2_alloc_rx_skb(struct bnx2 *bp, struct bnx2_napi *bnapi, u16 index)
{
struct sk_buff *skb;
struct sw_bd *rx_buf = &bp->rx_buf_ring[index];
rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;
- bp->rx_prod_bseq += bp->rx_buf_use_size;
+ bnapi->rx_prod_bseq += bp->rx_buf_use_size;
return 0;
}
static int
-bnx2_phy_event_is_set(struct bnx2 *bp, u32 event)
+bnx2_phy_event_is_set(struct bnx2 *bp, struct bnx2_napi *bnapi, u32 event)
{
- struct status_block *sblk = bp->status_blk;
+ struct status_block *sblk = bnapi->status_blk;
u32 new_link_state, old_link_state;
int is_set = 1;
}
static void
-bnx2_phy_int(struct bnx2 *bp)
+bnx2_phy_int(struct bnx2 *bp, struct bnx2_napi *bnapi)
{
- if (bnx2_phy_event_is_set(bp, STATUS_ATTN_BITS_LINK_STATE)) {
+ if (bnx2_phy_event_is_set(bp, bnapi, STATUS_ATTN_BITS_LINK_STATE)) {
spin_lock(&bp->phy_lock);
bnx2_set_link(bp);
spin_unlock(&bp->phy_lock);
}
- if (bnx2_phy_event_is_set(bp, STATUS_ATTN_BITS_TIMER_ABORT))
+ if (bnx2_phy_event_is_set(bp, bnapi, STATUS_ATTN_BITS_TIMER_ABORT))
bnx2_set_remote_link(bp);
}
-static void
-bnx2_tx_int(struct bnx2 *bp)
+static inline u16
+bnx2_get_hw_tx_cons(struct bnx2_napi *bnapi)
+{
+ u16 cons;
+
+ if (bnapi->int_num == 0)
+ cons = bnapi->status_blk->status_tx_quick_consumer_index0;
+ else
+ cons = bnapi->status_blk_msix->status_tx_quick_consumer_index;
+
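+ /* The last BD of each ring page is a next-page pointer that the
+ * hardware skips, so step past that index here.
+ */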
+ if (unlikely((cons & MAX_TX_DESC_CNT) == MAX_TX_DESC_CNT))
+ cons++;
+ return cons;
+}
+
+static int
+bnx2_tx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
{
- struct status_block *sblk = bp->status_blk;
u16 hw_cons, sw_cons, sw_ring_cons;
- int tx_free_bd = 0;
+ int tx_pkt = 0;
- hw_cons = bp->hw_tx_cons = sblk->status_tx_quick_consumer_index0;
- if ((hw_cons & MAX_TX_DESC_CNT) == MAX_TX_DESC_CNT) {
- hw_cons++;
- }
- sw_cons = bp->tx_cons;
+ hw_cons = bnx2_get_hw_tx_cons(bnapi);
+ sw_cons = bnapi->tx_cons;
while (sw_cons != hw_cons) {
struct sw_bd *tx_buf;
sw_cons = NEXT_TX_BD(sw_cons);
- tx_free_bd += last + 1;
-
dev_kfree_skb(skb);
+ tx_pkt++;
+ if (tx_pkt == budget)
+ break;
- hw_cons = bp->hw_tx_cons =
- sblk->status_tx_quick_consumer_index0;
-
- if ((hw_cons & MAX_TX_DESC_CNT) == MAX_TX_DESC_CNT) {
- hw_cons++;
- }
+ hw_cons = bnx2_get_hw_tx_cons(bnapi);
}
- bp->tx_cons = sw_cons;
+ bnapi->hw_tx_cons = hw_cons;
+ bnapi->tx_cons = sw_cons;
/* Need to make the tx_cons update visible to bnx2_start_xmit()
* before checking for netif_queue_stopped(). Without the
* memory barrier, there is a small possibility that bnx2_start_xmit()
smp_mb();
if (unlikely(netif_queue_stopped(bp->dev)) &&
- (bnx2_tx_avail(bp) > bp->tx_wake_thresh)) {
+ (bnx2_tx_avail(bp, bnapi) > bp->tx_wake_thresh)) {
netif_tx_lock(bp->dev);
if ((netif_queue_stopped(bp->dev)) &&
- (bnx2_tx_avail(bp) > bp->tx_wake_thresh))
+ (bnx2_tx_avail(bp, bnapi) > bp->tx_wake_thresh))
netif_wake_queue(bp->dev);
netif_tx_unlock(bp->dev);
}
+ return tx_pkt;
}
static void
-bnx2_reuse_rx_skb_pages(struct bnx2 *bp, struct sk_buff *skb, int count)
+bnx2_reuse_rx_skb_pages(struct bnx2 *bp, struct bnx2_napi *bnapi,
+ struct sk_buff *skb, int count)
{
struct sw_pg *cons_rx_pg, *prod_rx_pg;
struct rx_bd *cons_bd, *prod_bd;
dma_addr_t mapping;
int i;
- u16 hw_prod = bp->rx_pg_prod, prod;
- u16 cons = bp->rx_pg_cons;
+ u16 hw_prod = bnapi->rx_pg_prod, prod;
+ u16 cons = bnapi->rx_pg_cons;
for (i = 0; i < count; i++) {
prod = RX_PG_RING_IDX(hw_prod);
cons = RX_PG_RING_IDX(NEXT_RX_BD(cons));
hw_prod = NEXT_RX_BD(hw_prod);
}
- bp->rx_pg_prod = hw_prod;
- bp->rx_pg_cons = cons;
+ bnapi->rx_pg_prod = hw_prod;
+ bnapi->rx_pg_cons = cons;
}
static inline void
-bnx2_reuse_rx_skb(struct bnx2 *bp, struct sk_buff *skb,
+bnx2_reuse_rx_skb(struct bnx2 *bp, struct bnx2_napi *bnapi, struct sk_buff *skb,
u16 cons, u16 prod)
{
struct sw_bd *cons_rx_buf, *prod_rx_buf;
pci_unmap_addr(cons_rx_buf, mapping),
bp->rx_offset + RX_COPY_THRESH, PCI_DMA_FROMDEVICE);
- bp->rx_prod_bseq += bp->rx_buf_use_size;
+ bnapi->rx_prod_bseq += bp->rx_buf_use_size;
prod_rx_buf->skb = skb;
}
static int
-bnx2_rx_skb(struct bnx2 *bp, struct sk_buff *skb, unsigned int len,
- unsigned int hdr_len, dma_addr_t dma_addr, u32 ring_idx)
+bnx2_rx_skb(struct bnx2 *bp, struct bnx2_napi *bnapi, struct sk_buff *skb,
+ unsigned int len, unsigned int hdr_len, dma_addr_t dma_addr,
+ u32 ring_idx)
{
int err;
u16 prod = ring_idx & 0xffff;
- err = bnx2_alloc_rx_skb(bp, prod);
+ err = bnx2_alloc_rx_skb(bp, bnapi, prod);
if (unlikely(err)) {
- bnx2_reuse_rx_skb(bp, skb, (u16) (ring_idx >> 16), prod);
+ bnx2_reuse_rx_skb(bp, bnapi, skb, (u16) (ring_idx >> 16), prod);
if (hdr_len) {
unsigned int raw_len = len + 4;
int pages = PAGE_ALIGN(raw_len - hdr_len) >> PAGE_SHIFT;
- bnx2_reuse_rx_skb_pages(bp, NULL, pages);
+ bnx2_reuse_rx_skb_pages(bp, bnapi, NULL, pages);
}
return err;
}
} else {
unsigned int i, frag_len, frag_size, pages;
struct sw_pg *rx_pg;
- u16 pg_cons = bp->rx_pg_cons;
- u16 pg_prod = bp->rx_pg_prod;
+ u16 pg_cons = bnapi->rx_pg_cons;
+ u16 pg_prod = bnapi->rx_pg_prod;
frag_size = len + 4 - hdr_len;
pages = PAGE_ALIGN(frag_size) >> PAGE_SHIFT;
if (unlikely(frag_len <= 4)) {
unsigned int tail = 4 - frag_len;
- bp->rx_pg_cons = pg_cons;
- bp->rx_pg_prod = pg_prod;
- bnx2_reuse_rx_skb_pages(bp, NULL, pages - i);
+ bnapi->rx_pg_cons = pg_cons;
+ bnapi->rx_pg_prod = pg_prod;
+ bnx2_reuse_rx_skb_pages(bp, bnapi, NULL,
+ pages - i);
skb->len -= tail;
if (i == 0) {
skb->tail -= tail;
err = bnx2_alloc_rx_page(bp, RX_PG_RING_IDX(pg_prod));
if (unlikely(err)) {
- bp->rx_pg_cons = pg_cons;
- bp->rx_pg_prod = pg_prod;
- bnx2_reuse_rx_skb_pages(bp, skb, pages - i);
+ bnapi->rx_pg_cons = pg_cons;
+ bnapi->rx_pg_prod = pg_prod;
+ bnx2_reuse_rx_skb_pages(bp, bnapi, skb,
+ pages - i);
return err;
}
pg_prod = NEXT_RX_BD(pg_prod);
pg_cons = RX_PG_RING_IDX(NEXT_RX_BD(pg_cons));
}
- bp->rx_pg_prod = pg_prod;
- bp->rx_pg_cons = pg_cons;
+ bnapi->rx_pg_prod = pg_prod;
+ bnapi->rx_pg_cons = pg_cons;
}
return 0;
}
static inline u16
-bnx2_get_hw_rx_cons(struct bnx2 *bp)
+bnx2_get_hw_rx_cons(struct bnx2_napi *bnapi)
{
- u16 cons = bp->status_blk->status_rx_quick_consumer_index0;
+ u16 cons = bnapi->status_blk->status_rx_quick_consumer_index0;
if (unlikely((cons & MAX_RX_DESC_CNT) == MAX_RX_DESC_CNT))
cons++;
return cons;
}
static int
-bnx2_rx_int(struct bnx2 *bp, int budget)
+bnx2_rx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
{
u16 hw_cons, sw_cons, sw_ring_cons, sw_prod, sw_ring_prod;
struct l2_fhdr *rx_hdr;
int rx_pkt = 0, pg_ring_used = 0;
- hw_cons = bnx2_get_hw_rx_cons(bp);
- sw_cons = bp->rx_cons;
- sw_prod = bp->rx_prod;
+ hw_cons = bnx2_get_hw_rx_cons(bnapi);
+ sw_cons = bnapi->rx_cons;
+ sw_prod = bnapi->rx_prod;
/* Memory barrier necessary as speculative reads of the rx
* buffer can be ahead of the index in the status block
L2_FHDR_ERRORS_TOO_SHORT |
L2_FHDR_ERRORS_GIANT_FRAME)) {
- bnx2_reuse_rx_skb(bp, skb, sw_ring_cons, sw_ring_prod);
+ bnx2_reuse_rx_skb(bp, bnapi, skb, sw_ring_cons,
+ sw_ring_prod);
goto next_rx;
}
hdr_len = 0;
new_skb = netdev_alloc_skb(bp->dev, len + 2);
if (new_skb == NULL) {
- bnx2_reuse_rx_skb(bp, skb, sw_ring_cons,
+ bnx2_reuse_rx_skb(bp, bnapi, skb, sw_ring_cons,
sw_ring_prod);
goto next_rx;
}
skb_reserve(new_skb, 2);
skb_put(new_skb, len);
- bnx2_reuse_rx_skb(bp, skb,
+ bnx2_reuse_rx_skb(bp, bnapi, skb,
sw_ring_cons, sw_ring_prod);
skb = new_skb;
- } else if (unlikely(bnx2_rx_skb(bp, skb, len, hdr_len, dma_addr,
- (sw_ring_cons << 16) | sw_ring_prod)))
+ } else if (unlikely(bnx2_rx_skb(bp, bnapi, skb, len, hdr_len,
+ dma_addr, (sw_ring_cons << 16) | sw_ring_prod)))
goto next_rx;
skb->protocol = eth_type_trans(skb, bp->dev);
}
#ifdef BCM_VLAN
- if ((status & L2_FHDR_STATUS_L2_VLAN_TAG) && (bp->vlgrp != 0)) {
+ if ((status & L2_FHDR_STATUS_L2_VLAN_TAG) && bp->vlgrp) {
vlan_hwaccel_receive_skb(skb, bp->vlgrp,
rx_hdr->l2_fhdr_vlan_tag);
}
/* Refresh hw_cons to see if there is new work */
if (sw_cons == hw_cons) {
- hw_cons = bnx2_get_hw_rx_cons(bp);
+ hw_cons = bnx2_get_hw_rx_cons(bnapi);
rmb();
}
}
- bp->rx_cons = sw_cons;
- bp->rx_prod = sw_prod;
+ bnapi->rx_cons = sw_cons;
+ bnapi->rx_prod = sw_prod;
if (pg_ring_used)
REG_WR16(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_PG_BDIDX,
- bp->rx_pg_prod);
+ bnapi->rx_pg_prod);
REG_WR16(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BDIDX, sw_prod);
- REG_WR(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BSEQ, bp->rx_prod_bseq);
+ REG_WR(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BSEQ, bnapi->rx_prod_bseq);
mmiowb();
static irqreturn_t
bnx2_msi(int irq, void *dev_instance)
{
struct net_device *dev = dev_instance;
struct bnx2 *bp = netdev_priv(dev);
+ struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
- prefetch(bp->status_blk);
+ prefetch(bnapi->status_blk);
REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
if (unlikely(atomic_read(&bp->intr_sem) != 0))
return IRQ_HANDLED;
- netif_rx_schedule(dev, &bp->napi);
+ netif_rx_schedule(dev, &bnapi->napi);
return IRQ_HANDLED;
}
static irqreturn_t
bnx2_msi_1shot(int irq, void *dev_instance)
{
struct net_device *dev = dev_instance;
struct bnx2 *bp = netdev_priv(dev);
+ struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
- prefetch(bp->status_blk);
+ prefetch(bnapi->status_blk);
/* Return here if interrupt is disabled. */
if (unlikely(atomic_read(&bp->intr_sem) != 0))
return IRQ_HANDLED;
- netif_rx_schedule(dev, &bp->napi);
+ netif_rx_schedule(dev, &bnapi->napi);
return IRQ_HANDLED;
}
static irqreturn_t
bnx2_interrupt(int irq, void *dev_instance)
{
struct net_device *dev = dev_instance;
struct bnx2 *bp = netdev_priv(dev);
- struct status_block *sblk = bp->status_blk;
+ struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
+ struct status_block *sblk = bnapi->status_blk;
/* When using INTx, it is possible for the interrupt to arrive
* at the CPU before the status block posted prior to the
* When using MSI, the MSI message will always complete after
* the status block write.
*/
- if ((sblk->status_idx == bp->last_status_idx) &&
+ if ((sblk->status_idx == bnapi->last_status_idx) &&
(REG_RD(bp, BNX2_PCICFG_MISC_STATUS) &
BNX2_PCICFG_MISC_STATUS_INTA_VALUE))
return IRQ_NONE;
if (unlikely(atomic_read(&bp->intr_sem) != 0))
return IRQ_HANDLED;
- if (netif_rx_schedule_prep(dev, &bp->napi)) {
- bp->last_status_idx = sblk->status_idx;
- __netif_rx_schedule(dev, &bp->napi);
+ if (netif_rx_schedule_prep(dev, &bnapi->napi)) {
+ bnapi->last_status_idx = sblk->status_idx;
+ __netif_rx_schedule(dev, &bnapi->napi);
}
return IRQ_HANDLED;
}
+static irqreturn_t
+bnx2_tx_msix(int irq, void *dev_instance)
+{
+ struct net_device *dev = dev_instance;
+ struct bnx2 *bp = netdev_priv(dev);
+ struct bnx2_napi *bnapi = &bp->bnx2_napi[BNX2_TX_VEC];
+
+ prefetch(bnapi->status_blk_msix);
+
+ /* Return here if interrupt is disabled. */
+ if (unlikely(atomic_read(&bp->intr_sem) != 0))
+ return IRQ_HANDLED;
+
+ netif_rx_schedule(dev, &bnapi->napi);
+ return IRQ_HANDLED;
+}
+
#define STATUS_ATTN_EVENTS (STATUS_ATTN_BITS_LINK_STATE | \
STATUS_ATTN_BITS_TIMER_ABORT)
static inline int
-bnx2_has_work(struct bnx2 *bp)
+bnx2_has_work(struct bnx2_napi *bnapi)
{
+ struct bnx2 *bp = bnapi->bp;
struct status_block *sblk = bp->status_blk;
- if ((bnx2_get_hw_rx_cons(bp) != bp->rx_cons) ||
- (sblk->status_tx_quick_consumer_index0 != bp->hw_tx_cons))
+ if ((bnx2_get_hw_rx_cons(bnapi) != bnapi->rx_cons) ||
+ (bnx2_get_hw_tx_cons(bnapi) != bnapi->hw_tx_cons))
return 1;
if ((sblk->status_attn_bits & STATUS_ATTN_EVENTS) !=
(sblk->status_attn_bits_ack & STATUS_ATTN_EVENTS))
return 1;

return 0;
}
-static int bnx2_poll_work(struct bnx2 *bp, int work_done, int budget)
+static int bnx2_tx_poll(struct napi_struct *napi, int budget)
{
- struct status_block *sblk = bp->status_blk;
+ struct bnx2_napi *bnapi = container_of(napi, struct bnx2_napi, napi);
+ struct bnx2 *bp = bnapi->bp;
+ int work_done = 0;
+ struct status_block_msix *sblk = bnapi->status_blk_msix;
+
+ do {
+ work_done += bnx2_tx_int(bp, bnapi, budget - work_done);
+ if (unlikely(work_done >= budget))
+ return work_done;
+
+ bnapi->last_status_idx = sblk->status_idx;
+ rmb();
+ } while (bnx2_get_hw_tx_cons(bnapi) != bnapi->hw_tx_cons);
+
+ netif_rx_complete(bp->dev, napi);
+ REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
+ BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
+ bnapi->last_status_idx);
+ return work_done;
+}
+
+static int bnx2_poll_work(struct bnx2 *bp, struct bnx2_napi *bnapi,
+ int work_done, int budget)
+{
+ struct status_block *sblk = bnapi->status_blk;
u32 status_attn_bits = sblk->status_attn_bits;
u32 status_attn_bits_ack = sblk->status_attn_bits_ack;
if ((status_attn_bits & STATUS_ATTN_EVENTS) !=
(status_attn_bits_ack & STATUS_ATTN_EVENTS)) {
- bnx2_phy_int(bp);
+ bnx2_phy_int(bp, bnapi);
/* This is needed to take care of transient status
* during link changes.
REG_RD(bp, BNX2_HC_COMMAND);
}
- if (sblk->status_tx_quick_consumer_index0 != bp->hw_tx_cons)
- bnx2_tx_int(bp);
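+ /* A budget of 0 never matches tx_pkt++ in bnx2_tx_int(), so tx
+ * reclaim is not bounded here; only rx work counts against the
+ * NAPI budget.
+ */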
+ if (bnx2_get_hw_tx_cons(bnapi) != bnapi->hw_tx_cons)
+ bnx2_tx_int(bp, bnapi, 0);
- if (bnx2_get_hw_rx_cons(bp) != bp->rx_cons)
- work_done += bnx2_rx_int(bp, budget - work_done);
+ if (bnx2_get_hw_rx_cons(bnapi) != bnapi->rx_cons)
+ work_done += bnx2_rx_int(bp, bnapi, budget - work_done);
return work_done;
}
static int bnx2_poll(struct napi_struct *napi, int budget)
{
- struct bnx2 *bp = container_of(napi, struct bnx2, napi);
+ struct bnx2_napi *bnapi = container_of(napi, struct bnx2_napi, napi);
+ struct bnx2 *bp = bnapi->bp;
int work_done = 0;
- struct status_block *sblk = bp->status_blk;
+ struct status_block *sblk = bnapi->status_blk;
while (1) {
- work_done = bnx2_poll_work(bp, work_done, budget);
+ work_done = bnx2_poll_work(bp, bnapi, work_done, budget);
if (unlikely(work_done >= budget))
break;
- /* bp->last_status_idx is used below to tell the hw how
+ /* bnapi->last_status_idx is used below to tell the hw how
* much work has been processed, so we must read it before
* checking for more work.
*/
- bp->last_status_idx = sblk->status_idx;
+ bnapi->last_status_idx = sblk->status_idx;
rmb();
- if (likely(!bnx2_has_work(bp))) {
+ if (likely(!bnx2_has_work(bnapi))) {
netif_rx_complete(bp->dev, napi);
- if (likely(bp->flags & USING_MSI_FLAG)) {
+ if (likely(bp->flags & USING_MSI_OR_MSIX_FLAG)) {
REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
- bp->last_status_idx);
+ bnapi->last_status_idx);
break;
}
REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
- bp->last_status_idx);
+ bnapi->last_status_idx);
REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
- bp->last_status_idx);
+ bnapi->last_status_idx);
break;
}
}

return work_done;
}
}
+static void
+bnx2_setup_msix_tbl(struct bnx2 *bp)
+{
+ REG_WR(bp, BNX2_PCI_GRC_WINDOW_ADDR, BNX2_PCI_GRC_WINDOW_ADDR_SEP_WIN);
+
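+ /* Map the MSI-X vector table and pending-bit array through two
+ * separate GRC windows.
+ */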
+ REG_WR(bp, BNX2_PCI_GRC_WINDOW2_ADDR, BNX2_MSIX_TABLE_ADDR);
+ REG_WR(bp, BNX2_PCI_GRC_WINDOW3_ADDR, BNX2_MSIX_PBA_ADDR);
+}
+
static int
bnx2_reset_chip(struct bnx2 *bp, u32 reset_code)
{
rc = bnx2_alloc_bad_rbuf(bp);
}
+ if (bp->flags & USING_MSIX_FLAG)
+ bnx2_setup_msix_tbl(bp);
+
return rc;
}
bnx2_init_chip(struct bnx2 *bp)
{
u32 val;
- int rc;
+ int rc, i;
/* Make sure the interrupt is not active. */
REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
val |= BNX2_EMAC_RX_MTU_SIZE_JUMBO_ENA;
REG_WR(bp, BNX2_EMAC_RX_MTU_SIZE, val);
- bp->last_status_idx = 0;
+ for (i = 0; i < BNX2_MAX_MSIX_VEC; i++)
+ bp->bnx2_napi[i].last_status_idx = 0;
+
bp->rx_mode = BNX2_EMAC_RX_MODE_SORT_MODE;
/* Set up how to generate a link change interrupt. */
BNX2_HC_CONFIG_COLLECT_STATS;
}
+ if (bp->flags & USING_MSIX_FLAG) {
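+ /* Host coalescing setup for MSI-X: the tx vector (vector 1) gets
+ * its own status block mode and tx coalescing parameters.
+ */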
+ REG_WR(bp, BNX2_HC_MSIX_BIT_VECTOR,
+ BNX2_HC_MSIX_BIT_VECTOR_VAL);
+
+ REG_WR(bp, BNX2_HC_SB_CONFIG_1,
+ BNX2_HC_SB_CONFIG_1_TX_TMR_MODE |
+ BNX2_HC_SB_CONFIG_1_ONE_SHOT);
+
+ REG_WR(bp, BNX2_HC_TX_QUICK_CONS_TRIP_1,
+ (bp->tx_quick_cons_trip_int << 16) |
+ bp->tx_quick_cons_trip);
+
+ REG_WR(bp, BNX2_HC_TX_TICKS_1,
+ (bp->tx_ticks_int << 16) | bp->tx_ticks);
+
+ val |= BNX2_HC_CONFIG_SB_ADDR_INC_128B;
+ }
+
if (bp->flags & ONE_SHOT_MSI_FLAG)
val |= BNX2_HC_CONFIG_ONE_SHOT;
}
static void
+bnx2_clear_ring_states(struct bnx2 *bp)
+{
+ struct bnx2_napi *bnapi;
+ int i;
+
+ for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) {
+ bnapi = &bp->bnx2_napi[i];
+
+ bnapi->tx_cons = 0;
+ bnapi->hw_tx_cons = 0;
+ bnapi->rx_prod_bseq = 0;
+ bnapi->rx_prod = 0;
+ bnapi->rx_cons = 0;
+ bnapi->rx_pg_prod = 0;
+ bnapi->rx_pg_cons = 0;
+ }
+}
+
+static void
bnx2_init_tx_context(struct bnx2 *bp, u32 cid)
{
u32 val, offset0, offset1, offset2, offset3;
bnx2_init_tx_ring(struct bnx2 *bp)
{
struct tx_bd *txbd;
- u32 cid;
+ u32 cid = TX_CID;
+ struct bnx2_napi *bnapi;
+
+ bp->tx_vec = 0;
+ if (bp->flags & USING_MSIX_FLAG) {
+ cid = TX_TSS_CID;
+ bp->tx_vec = BNX2_TX_VEC;
+ REG_WR(bp, BNX2_TSCH_TSS_CFG, BNX2_TX_INT_NUM |
+ (TX_TSS_CID << 7));
+ }
+ bnapi = &bp->bnx2_napi[bp->tx_vec];
bp->tx_wake_thresh = bp->tx_ring_size / 2;
txbd->tx_bd_haddr_lo = (u64) bp->tx_desc_mapping & 0xffffffff;
bp->tx_prod = 0;
- bp->tx_cons = 0;
- bp->hw_tx_cons = 0;
bp->tx_prod_bseq = 0;
- cid = TX_CID;
bp->tx_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BIDX;
bp->tx_bseq_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BSEQ;
int i;
u16 prod, ring_prod;
u32 val, rx_cid_addr = GET_CID_ADDR(RX_CID);
-
- bp->rx_prod = 0;
- bp->rx_cons = 0;
- bp->rx_prod_bseq = 0;
- bp->rx_pg_prod = 0;
- bp->rx_pg_cons = 0;
+ struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
bnx2_init_rxbd_rings(bp->rx_desc_ring, bp->rx_desc_mapping,
bp->rx_buf_use_size, bp->rx_max_ring);
val = (u64) bp->rx_desc_mapping[0] & 0xffffffff;
CTX_WR(bp, rx_cid_addr, BNX2_L2CTX_NX_BDHADDR_LO, val);
- ring_prod = prod = bp->rx_pg_prod;
+ ring_prod = prod = bnapi->rx_pg_prod;
for (i = 0; i < bp->rx_pg_ring_size; i++) {
if (bnx2_alloc_rx_page(bp, ring_prod) < 0)
break;
prod = NEXT_RX_BD(prod);
ring_prod = RX_PG_RING_IDX(prod);
}
- bp->rx_pg_prod = prod;
+ bnapi->rx_pg_prod = prod;
- ring_prod = prod = bp->rx_prod;
+ ring_prod = prod = bnapi->rx_prod;
for (i = 0; i < bp->rx_ring_size; i++) {
- if (bnx2_alloc_rx_skb(bp, ring_prod) < 0) {
+ if (bnx2_alloc_rx_skb(bp, bnapi, ring_prod) < 0) {
break;
}
prod = NEXT_RX_BD(prod);
ring_prod = RX_RING_IDX(prod);
}
- bp->rx_prod = prod;
+ bnapi->rx_prod = prod;
- REG_WR16(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_PG_BDIDX, bp->rx_pg_prod);
+ REG_WR16(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_PG_BDIDX,
+ bnapi->rx_pg_prod);
REG_WR16(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BDIDX, prod);
- REG_WR(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BSEQ, bp->rx_prod_bseq);
+ REG_WR(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BSEQ, bnapi->rx_prod_bseq);
}
static u32 bnx2_find_max_ring(u32 ring_size, u32 max_size)
if ((rc = bnx2_init_chip(bp)) != 0)
return rc;
+ bnx2_clear_ring_states(bp);
bnx2_init_tx_ring(bp);
bnx2_init_rx_ring(bp);
return 0;
struct sw_bd *rx_buf;
struct l2_fhdr *rx_hdr;
int ret = -ENODEV;
+ struct bnx2_napi *bnapi = &bp->bnx2_napi[0], *tx_napi;
+
+ tx_napi = bnapi;
+ if (bp->flags & USING_MSIX_FLAG)
+ tx_napi = &bp->bnx2_napi[BNX2_TX_VEC];
if (loopback_mode == BNX2_MAC_LOOPBACK) {
bp->loopback = MAC_LOOPBACK;
REG_RD(bp, BNX2_HC_COMMAND);
udelay(5);
- rx_start_idx = bp->status_blk->status_rx_quick_consumer_index0;
+ rx_start_idx = bnx2_get_hw_rx_cons(bnapi);
num_pkts = 0;
pci_unmap_single(bp->pdev, map, pkt_size, PCI_DMA_TODEVICE);
dev_kfree_skb(skb);
- if (bp->status_blk->status_tx_quick_consumer_index0 != bp->tx_prod) {
+ if (bnx2_get_hw_tx_cons(tx_napi) != bp->tx_prod)
goto loopback_test_done;
- }
- rx_idx = bp->status_blk->status_rx_quick_consumer_index0;
+ rx_idx = bnx2_get_hw_rx_cons(bnapi);
if (rx_idx != rx_start_idx + num_pkts) {
goto loopback_test_done;
}
bnx2_request_irq(struct bnx2 *bp)
{
struct net_device *dev = bp->dev;
- int rc = 0;
-
- if (bp->flags & USING_MSI_FLAG) {
- irq_handler_t fn = bnx2_msi;
+ unsigned long flags;
+ struct bnx2_irq *irq;
+ int rc = 0, i;
- if (bp->flags & ONE_SHOT_MSI_FLAG)
- fn = bnx2_msi_1shot;
+ if (bp->flags & USING_MSI_OR_MSIX_FLAG)
+ flags = 0;
+ else
+ flags = IRQF_SHARED;
- rc = request_irq(bp->pdev->irq, fn, 0, dev->name, dev);
- } else
- rc = request_irq(bp->pdev->irq, bnx2_interrupt,
- IRQF_SHARED, dev->name, dev);
+ for (i = 0; i < bp->irq_nvecs; i++) {
+ irq = &bp->irq_tbl[i];
+ rc = request_irq(irq->vector, irq->handler, flags, irq->name,
+ dev);
+ if (rc)
+ break;
+ irq->requested = 1;
+ }
return rc;
}
bnx2_free_irq(struct bnx2 *bp)
{
struct net_device *dev = bp->dev;
+ struct bnx2_irq *irq;
+ int i;
- if (bp->flags & USING_MSI_FLAG) {
- free_irq(bp->pdev->irq, dev);
+ for (i = 0; i < bp->irq_nvecs; i++) {
+ irq = &bp->irq_tbl[i];
+ if (irq->requested)
+ free_irq(irq->vector, dev);
+ irq->requested = 0;
+ }
+ if (bp->flags & USING_MSI_FLAG)
pci_disable_msi(bp->pdev);
- bp->flags &= ~(USING_MSI_FLAG | ONE_SHOT_MSI_FLAG);
- } else
- free_irq(bp->pdev->irq, dev);
+ else if (bp->flags & USING_MSIX_FLAG)
+ pci_disable_msix(bp->pdev);
+
+ bp->flags &= ~(USING_MSI_OR_MSIX_FLAG | ONE_SHOT_MSI_FLAG);
+}
+
+static void
+bnx2_enable_msix(struct bnx2 *bp)
+{
+ int i, rc;
+ struct msix_entry msix_ent[BNX2_MAX_MSIX_VEC];
+
+ bnx2_setup_msix_tbl(bp);
+ REG_WR(bp, BNX2_PCI_MSIX_CONTROL, BNX2_MAX_MSIX_HW_VEC - 1);
+ REG_WR(bp, BNX2_PCI_MSIX_TBL_OFF_BIR, BNX2_PCI_GRC_WINDOW2_BASE);
+ REG_WR(bp, BNX2_PCI_MSIX_PBA_OFF_BIT, BNX2_PCI_GRC_WINDOW3_BASE);
+
+ for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) {
+ msix_ent[i].entry = i;
+ msix_ent[i].vector = 0;
+ }
+
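+ /* If we cannot get all BNX2_MAX_MSIX_VEC vectors, fall back to the
+ * INTx/MSI setup already installed by bnx2_setup_int_mode().
+ */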
+ rc = pci_enable_msix(bp->pdev, msix_ent, BNX2_MAX_MSIX_VEC);
+ if (rc != 0)
+ return;
+
+ bp->irq_tbl[BNX2_BASE_VEC].handler = bnx2_msi_1shot;
+ bp->irq_tbl[BNX2_TX_VEC].handler = bnx2_tx_msix;
+
+ strcpy(bp->irq_tbl[BNX2_BASE_VEC].name, bp->dev->name);
+ strcat(bp->irq_tbl[BNX2_BASE_VEC].name, "-base");
+ strcpy(bp->irq_tbl[BNX2_TX_VEC].name, bp->dev->name);
+ strcat(bp->irq_tbl[BNX2_TX_VEC].name, "-tx");
+
+ bp->irq_nvecs = BNX2_MAX_MSIX_VEC;
+ bp->flags |= USING_MSIX_FLAG | ONE_SHOT_MSI_FLAG;
+ for (i = 0; i < BNX2_MAX_MSIX_VEC; i++)
+ bp->irq_tbl[i].vector = msix_ent[i].vector;
+}
+
+static void
+bnx2_setup_int_mode(struct bnx2 *bp, int dis_msi)
+{
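+ /* Default to INTx; upgraded to MSI-X or MSI below when available. */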
+ bp->irq_tbl[0].handler = bnx2_interrupt;
+ strcpy(bp->irq_tbl[0].name, bp->dev->name);
+ bp->irq_nvecs = 1;
+ bp->irq_tbl[0].vector = bp->pdev->irq;
+
+ if ((bp->flags & MSIX_CAP_FLAG) && !dis_msi)
+ bnx2_enable_msix(bp);
+
+ if ((bp->flags & MSI_CAP_FLAG) && !dis_msi &&
+ !(bp->flags & USING_MSIX_FLAG)) {
+ if (pci_enable_msi(bp->pdev) == 0) {
+ bp->flags |= USING_MSI_FLAG;
+ if (CHIP_NUM(bp) == CHIP_NUM_5709) {
+ bp->flags |= ONE_SHOT_MSI_FLAG;
+ bp->irq_tbl[0].handler = bnx2_msi_1shot;
+ } else
+ bp->irq_tbl[0].handler = bnx2_msi;
+
+ bp->irq_tbl[0].vector = bp->pdev->irq;
+ }
+ }
}
/* Called with rtnl_lock */
if (rc)
return rc;
- napi_enable(&bp->napi);
-
- if ((bp->flags & MSI_CAP_FLAG) && !disable_msi) {
- if (pci_enable_msi(bp->pdev) == 0) {
- bp->flags |= USING_MSI_FLAG;
- if (CHIP_NUM(bp) == CHIP_NUM_5709)
- bp->flags |= ONE_SHOT_MSI_FLAG;
- }
- }
+ bnx2_setup_int_mode(bp, disable_msi);
+ bnx2_napi_enable(bp);
rc = bnx2_request_irq(bp);
if (rc) {
- napi_disable(&bp->napi);
+ bnx2_napi_disable(bp);
bnx2_free_mem(bp);
return rc;
}
rc = bnx2_init_nic(bp);
if (rc) {
- napi_disable(&bp->napi);
+ bnx2_napi_disable(bp);
bnx2_free_irq(bp);
bnx2_free_skbs(bp);
bnx2_free_mem(bp);
bnx2_disable_int(bp);
bnx2_free_irq(bp);
+ bnx2_setup_int_mode(bp, 1);
+
rc = bnx2_init_nic(bp);
if (!rc)
rc = bnx2_request_irq(bp);
if (rc) {
- napi_disable(&bp->napi);
+ bnx2_napi_disable(bp);
bnx2_free_skbs(bp);
bnx2_free_mem(bp);
del_timer_sync(&bp->timer);
bnx2_enable_int(bp);
}
}
- if (bp->flags & USING_MSI_FLAG) {
+ if (bp->flags & USING_MSI_FLAG)
printk(KERN_INFO PFX "%s: using MSI\n", dev->name);
- }
+ else if (bp->flags & USING_MSIX_FLAG)
+ printk(KERN_INFO PFX "%s: using MSIX\n", dev->name);
netif_start_queue(dev);
u32 len, vlan_tag_flags, last_frag, mss;
u16 prod, ring_prod;
int i;
+ struct bnx2_napi *bnapi = &bp->bnx2_napi[bp->tx_vec];
- if (unlikely(bnx2_tx_avail(bp) < (skb_shinfo(skb)->nr_frags + 1))) {
+ if (unlikely(bnx2_tx_avail(bp, bnapi) <
+ (skb_shinfo(skb)->nr_frags + 1))) {
netif_stop_queue(dev);
printk(KERN_ERR PFX "%s: BUG! Tx ring full when queue awake!\n",
dev->name);
vlan_tag_flags |= TX_BD_FLAGS_TCP_UDP_CKSUM;
}
- if (bp->vlgrp != 0 && vlan_tx_tag_present(skb)) {
+ if (bp->vlgrp && vlan_tx_tag_present(skb)) {
vlan_tag_flags |=
(TX_BD_FLAGS_VLAN_TAG | (vlan_tx_tag_get(skb) << 16));
}
bp->tx_prod = prod;
dev->trans_start = jiffies;
- if (unlikely(bnx2_tx_avail(bp) <= MAX_SKB_FRAGS)) {
+ if (unlikely(bnx2_tx_avail(bp, bnapi) <= MAX_SKB_FRAGS)) {
netif_stop_queue(dev);
- if (bnx2_tx_avail(bp) > bp->tx_wake_thresh)
+ if (bnx2_tx_avail(bp, bnapi) > bp->tx_wake_thresh)
netif_wake_queue(dev);
}
msleep(1);
bnx2_disable_int_sync(bp);
- napi_disable(&bp->napi);
+ bnx2_napi_disable(bp);
del_timer_sync(&bp->timer);
if (bp->flags & NO_WOL_FLAG)
reset_code = BNX2_DRV_MSG_CODE_UNLOAD_LNK_DN;
}
}
+ if (CHIP_NUM(bp) == CHIP_NUM_5709 && CHIP_REV(bp) != CHIP_REV_Ax) {
+ if (pci_find_capability(pdev, PCI_CAP_ID_MSIX))
+ bp->flags |= MSIX_CAP_FLAG;
+ }
+
if (CHIP_ID(bp) != CHIP_ID_5706_A0 && CHIP_ID(bp) != CHIP_ID_5706_A1) {
if (pci_find_capability(pdev, PCI_CAP_ID_MSI))
bp->flags |= MSI_CAP_FLAG;
return str;
}
+static void __devinit
+bnx2_init_napi(struct bnx2 *bp)
+{
+ int i;
+ struct bnx2_napi *bnapi;
+
+ for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) {
+ bnapi = &bp->bnx2_napi[i];
+ bnapi->bp = bp;
+ }
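+
+ /* Only the base (rx) vector and the tx vector get NAPI handlers. */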
+ netif_napi_add(bp->dev, &bp->bnx2_napi[0].napi, bnx2_poll, 64);
+ netif_napi_add(bp->dev, &bp->bnx2_napi[BNX2_TX_VEC].napi, bnx2_tx_poll,
+ 64);
+}
+
static int __devinit
bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
dev->ethtool_ops = &bnx2_ethtool_ops;
bp = netdev_priv(dev);
- netif_napi_add(dev, &bp->napi, bnx2_poll, 64);
+ bnx2_init_napi(bp);
#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
dev->poll_controller = poll_bnx2;