#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
+#include <linux/mdio.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/delay.h>
#include "s2io.h"
#include "s2io-regs.h"
-#define DRV_VERSION "2.0.26.20"
+#define DRV_VERSION "2.0.26.25"
/* S2io Driver name & version. */
static char s2io_driver_name[] = "Neterion";
#define LINK_IS_UP(val64) (!(val64 & (ADAPTER_STATUS_RMAC_REMOTE_FAULT | \
ADAPTER_STATUS_RMAC_LOCAL_FAULT)))
-#define TASKLET_IN_USE test_and_set_bit(0, (&sp->tasklet_status))
-#define PANIC 1
-#define LOW 2
-static inline int rx_buffer_level(struct s2io_nic * sp, int rxb_size, int ring)
-{
- struct mac_info *mac_control;
-
- mac_control = &sp->mac_control;
- if (rxb_size <= rxd_count[sp->rxd_mode])
- return PANIC;
- else if ((mac_control->rings[ring].pkt_cnt - rxb_size) > 16)
- return LOW;
- return 0;
-}
static inline int is_s2io_card_up(const struct s2io_nic * sp)
{
sp->def_mac_addr[offset].mac_addr[1] = (u8) (mac_addr >> 32);
sp->def_mac_addr[offset].mac_addr[0] = (u8) (mac_addr >> 40);
}
+
/* Add the vlan */
static void s2io_vlan_rx_register(struct net_device *dev,
- struct vlan_group *grp)
+ struct vlan_group *grp)
{
int i;
- struct s2io_nic *nic = dev->priv;
+ struct s2io_nic *nic = netdev_priv(dev);
unsigned long flags[MAX_TX_FIFOS];
struct mac_info *mac_control = &nic->mac_control;
struct config_param *config = &nic->config;
flags[i]);
}
-/* A flag indicating whether 'RX_PA_CFG_STRIP_VLAN_TAG' bit is set or not */
-static int vlan_strip_flag;
-
/* Unregister the vlan */
-static void s2io_vlan_rx_kill_vid(struct net_device *dev, unsigned long vid)
+static void s2io_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
{
int i;
- struct s2io_nic *nic = dev->priv;
+ struct s2io_nic *nic = netdev_priv(dev);
unsigned long flags[MAX_TX_FIFOS];
struct mac_info *mac_control = &nic->mac_control;
struct config_param *config = &nic->config;
/* netqueue manipulation helper functions */
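+/*
+ * netif_tx_{stop,start,wake}_all_queues() handle both single- and
+ * multi-queue devices, so the helpers below only track the private
+ * per-fifo queue_state for the non-multiq case.
+ */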
static inline void s2io_stop_all_tx_queue(struct s2io_nic *sp)
{
- int i;
-#ifdef CONFIG_NETDEVICES_MULTIQUEUE
- if (sp->config.multiq) {
- for (i = 0; i < sp->config.tx_fifo_num; i++)
- netif_stop_subqueue(sp->dev, i);
- } else
-#endif
- {
+ if (!sp->config.multiq) {
+ int i;
+
for (i = 0; i < sp->config.tx_fifo_num; i++)
sp->mac_control.fifos[i].queue_state = FIFO_QUEUE_STOP;
- netif_stop_queue(sp->dev);
}
+ netif_tx_stop_all_queues(sp->dev);
}
static inline void s2io_stop_tx_queue(struct s2io_nic *sp, int fifo_no)
{
-#ifdef CONFIG_NETDEVICES_MULTIQUEUE
- if (sp->config.multiq)
- netif_stop_subqueue(sp->dev, fifo_no);
- else
-#endif
- {
+ if (!sp->config.multiq)
sp->mac_control.fifos[fifo_no].queue_state =
FIFO_QUEUE_STOP;
- netif_stop_queue(sp->dev);
- }
+
+ netif_tx_stop_all_queues(sp->dev);
}
static inline void s2io_start_all_tx_queue(struct s2io_nic *sp)
{
- int i;
-#ifdef CONFIG_NETDEVICES_MULTIQUEUE
- if (sp->config.multiq) {
- for (i = 0; i < sp->config.tx_fifo_num; i++)
- netif_start_subqueue(sp->dev, i);
- } else
-#endif
- {
+ if (!sp->config.multiq) {
+ int i;
+
for (i = 0; i < sp->config.tx_fifo_num; i++)
sp->mac_control.fifos[i].queue_state = FIFO_QUEUE_START;
- netif_start_queue(sp->dev);
}
+ netif_tx_start_all_queues(sp->dev);
}
static inline void s2io_start_tx_queue(struct s2io_nic *sp, int fifo_no)
{
-#ifdef CONFIG_NETDEVICES_MULTIQUEUE
- if (sp->config.multiq)
- netif_start_subqueue(sp->dev, fifo_no);
- else
-#endif
- {
+ if (!sp->config.multiq)
sp->mac_control.fifos[fifo_no].queue_state =
FIFO_QUEUE_START;
- netif_start_queue(sp->dev);
- }
+
+ netif_tx_start_all_queues(sp->dev);
}
static inline void s2io_wake_all_tx_queue(struct s2io_nic *sp)
{
- int i;
-#ifdef CONFIG_NETDEVICES_MULTIQUEUE
- if (sp->config.multiq) {
- for (i = 0; i < sp->config.tx_fifo_num; i++)
- netif_wake_subqueue(sp->dev, i);
- } else
-#endif
- {
+ if (!sp->config.multiq) {
+ int i;
+
for (i = 0; i < sp->config.tx_fifo_num; i++)
sp->mac_control.fifos[i].queue_state = FIFO_QUEUE_START;
- netif_wake_queue(sp->dev);
}
+ netif_tx_wake_all_queues(sp->dev);
}
static inline void s2io_wake_tx_queue(
struct fifo_info *fifo, int cnt, u8 multiq)
{
-#ifdef CONFIG_NETDEVICES_MULTIQUEUE
if (multiq) {
if (cnt && __netif_subqueue_stopped(fifo->dev, fifo->fifo_no))
netif_wake_subqueue(fifo->dev, fifo->fifo_no);
- } else
-#endif
- if (cnt && (fifo->queue_state == FIFO_QUEUE_STOP)) {
+ } else if (cnt && (fifo->queue_state == FIFO_QUEUE_STOP)) {
if (netif_queue_stopped(fifo->dev)) {
fifo->queue_state = FIFO_QUEUE_START;
netif_wake_queue(fifo->dev);
config->rx_cfg[i].num_rxd - 1;
mac_control->rings[i].nic = nic;
mac_control->rings[i].ring_no = i;
+ mac_control->rings[i].lro = lro_enable;
blk_cnt = config->rx_cfg[i].num_rxd /
(rxd_count[nic->rxd_mode] + 1);
struct pci_dev *tdev = NULL;
while ((tdev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, tdev)) != NULL) {
if (tdev->vendor == NEC_VENID && tdev->device == NEC_DEVID) {
- if (tdev->bus == s2io_pdev->bus->parent)
+ if (tdev->bus == s2io_pdev->bus->parent) {
pci_dev_put(tdev);
return 1;
+ }
}
}
return 0;
TTI_DATA1_MEM_TX_URNG_B(0x10) |
TTI_DATA1_MEM_TX_URNG_C(0x30) |
TTI_DATA1_MEM_TX_TIMER_AC_EN;
-
- if (use_continuous_tx_intrs && (link == LINK_UP))
- val64 |= TTI_DATA1_MEM_TX_TIMER_CI_EN;
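+	/* Enable continuous Tx interrupts only for fifo 0, and only
+	 * while the link is up.
+	 */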
+	if ((i == 0) && use_continuous_tx_intrs && (link == LINK_UP))
+		val64 |= TTI_DATA1_MEM_TX_TIMER_CI_EN;
writeq(val64, &bar0->tti_data1_mem);
- val64 = TTI_DATA2_MEM_TX_UFC_A(0x10) |
- TTI_DATA2_MEM_TX_UFC_B(0x20) |
- TTI_DATA2_MEM_TX_UFC_C(0x40) |
- TTI_DATA2_MEM_TX_UFC_D(0x80);
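+	/* Larger utilization-range frame counts under MSI-X batch more
+	 * Tx completions per interrupt.
+	 */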
+ if (nic->config.intr_type == MSI_X) {
+ val64 = TTI_DATA2_MEM_TX_UFC_A(0x10) |
+ TTI_DATA2_MEM_TX_UFC_B(0x100) |
+ TTI_DATA2_MEM_TX_UFC_C(0x200) |
+ TTI_DATA2_MEM_TX_UFC_D(0x300);
+ } else {
+ if ((nic->config.tx_steering_type ==
+ TX_DEFAULT_STEERING) &&
+ (config->tx_fifo_num > 1) &&
+ (i >= nic->udp_fifo_idx) &&
+ (i < (nic->udp_fifo_idx +
+ nic->total_udp_fifos)))
+ val64 = TTI_DATA2_MEM_TX_UFC_A(0x50) |
+ TTI_DATA2_MEM_TX_UFC_B(0x80) |
+ TTI_DATA2_MEM_TX_UFC_C(0x100) |
+ TTI_DATA2_MEM_TX_UFC_D(0x120);
+ else
+ val64 = TTI_DATA2_MEM_TX_UFC_A(0x10) |
+ TTI_DATA2_MEM_TX_UFC_B(0x20) |
+ TTI_DATA2_MEM_TX_UFC_C(0x40) |
+ TTI_DATA2_MEM_TX_UFC_D(0x80);
+ }
writeq(val64, &bar0->tti_data2_mem);
writeq(val64, &bar0->tx_fifo_partition_0);
/* Filling the Rx round robin registers as per the
- * number of Rings and steering based on QoS.
- */
+ * number of Rings and steering based on QoS with
+ * equal priority.
+ */
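+	/* Each byte of rx_w_round_robin_0..4 names the ring served in
+	 * that arbitration slot; rts_qos_steering then maps the eight
+	 * priority queues onto the configured rings.
+	 */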
switch (config->rx_ring_num) {
case 1:
+ val64 = 0x0;
+ writeq(val64, &bar0->rx_w_round_robin_0);
+ writeq(val64, &bar0->rx_w_round_robin_1);
+ writeq(val64, &bar0->rx_w_round_robin_2);
+ writeq(val64, &bar0->rx_w_round_robin_3);
+ writeq(val64, &bar0->rx_w_round_robin_4);
+
val64 = 0x8080808080808080ULL;
writeq(val64, &bar0->rts_qos_steering);
break;
case 2:
- val64 = 0x0000010000010000ULL;
+ val64 = 0x0001000100010001ULL;
writeq(val64, &bar0->rx_w_round_robin_0);
- val64 = 0x0100000100000100ULL;
writeq(val64, &bar0->rx_w_round_robin_1);
- val64 = 0x0001000001000001ULL;
writeq(val64, &bar0->rx_w_round_robin_2);
- val64 = 0x0000010000010000ULL;
writeq(val64, &bar0->rx_w_round_robin_3);
- val64 = 0x0100000000000000ULL;
+ val64 = 0x0001000100000000ULL;
writeq(val64, &bar0->rx_w_round_robin_4);
val64 = 0x8080808040404040ULL;
writeq(val64, &bar0->rts_qos_steering);
break;
case 3:
- val64 = 0x0001000102000001ULL;
+ val64 = 0x0001020001020001ULL;
writeq(val64, &bar0->rx_w_round_robin_0);
- val64 = 0x0001020000010001ULL;
+ val64 = 0x0200010200010200ULL;
writeq(val64, &bar0->rx_w_round_robin_1);
- val64 = 0x0200000100010200ULL;
+ val64 = 0x0102000102000102ULL;
writeq(val64, &bar0->rx_w_round_robin_2);
- val64 = 0x0001000102000001ULL;
+ val64 = 0x0001020001020001ULL;
writeq(val64, &bar0->rx_w_round_robin_3);
- val64 = 0x0001020000000000ULL;
+ val64 = 0x0200010200000000ULL;
writeq(val64, &bar0->rx_w_round_robin_4);
val64 = 0x8080804040402020ULL;
writeq(val64, &bar0->rts_qos_steering);
break;
case 4:
- val64 = 0x0001020300010200ULL;
+ val64 = 0x0001020300010203ULL;
writeq(val64, &bar0->rx_w_round_robin_0);
- val64 = 0x0100000102030001ULL;
writeq(val64, &bar0->rx_w_round_robin_1);
- val64 = 0x0200010000010203ULL;
writeq(val64, &bar0->rx_w_round_robin_2);
- val64 = 0x0001020001000001ULL;
writeq(val64, &bar0->rx_w_round_robin_3);
- val64 = 0x0203000100000000ULL;
+ val64 = 0x0001020300000000ULL;
writeq(val64, &bar0->rx_w_round_robin_4);
val64 = 0x8080404020201010ULL;
writeq(val64, &bar0->rts_qos_steering);
break;
case 5:
- val64 = 0x0001000203000102ULL;
+ val64 = 0x0001020304000102ULL;
writeq(val64, &bar0->rx_w_round_robin_0);
- val64 = 0x0001020001030004ULL;
+ val64 = 0x0304000102030400ULL;
writeq(val64, &bar0->rx_w_round_robin_1);
- val64 = 0x0001000203000102ULL;
+ val64 = 0x0102030400010203ULL;
writeq(val64, &bar0->rx_w_round_robin_2);
- val64 = 0x0001020001030004ULL;
+ val64 = 0x0400010203040001ULL;
writeq(val64, &bar0->rx_w_round_robin_3);
- val64 = 0x0001000000000000ULL;
+ val64 = 0x0203040000000000ULL;
writeq(val64, &bar0->rx_w_round_robin_4);
val64 = 0x8080404020201008ULL;
writeq(val64, &bar0->rts_qos_steering);
break;
case 6:
- val64 = 0x0001020304000102ULL;
+ val64 = 0x0001020304050001ULL;
writeq(val64, &bar0->rx_w_round_robin_0);
- val64 = 0x0304050001020001ULL;
+ val64 = 0x0203040500010203ULL;
writeq(val64, &bar0->rx_w_round_robin_1);
- val64 = 0x0203000100000102ULL;
+ val64 = 0x0405000102030405ULL;
writeq(val64, &bar0->rx_w_round_robin_2);
- val64 = 0x0304000102030405ULL;
+ val64 = 0x0001020304050001ULL;
writeq(val64, &bar0->rx_w_round_robin_3);
- val64 = 0x0001000200000000ULL;
+ val64 = 0x0203040500000000ULL;
writeq(val64, &bar0->rx_w_round_robin_4);
val64 = 0x8080404020100804ULL;
writeq(val64, &bar0->rts_qos_steering);
break;
case 7:
- val64 = 0x0001020001020300ULL;
+ val64 = 0x0001020304050600ULL;
writeq(val64, &bar0->rx_w_round_robin_0);
- val64 = 0x0102030400010203ULL;
+ val64 = 0x0102030405060001ULL;
writeq(val64, &bar0->rx_w_round_robin_1);
- val64 = 0x0405060001020001ULL;
+ val64 = 0x0203040506000102ULL;
writeq(val64, &bar0->rx_w_round_robin_2);
- val64 = 0x0304050000010200ULL;
+ val64 = 0x0304050600010203ULL;
writeq(val64, &bar0->rx_w_round_robin_3);
- val64 = 0x0102030000000000ULL;
+ val64 = 0x0405060000000000ULL;
writeq(val64, &bar0->rx_w_round_robin_4);
val64 = 0x8080402010080402ULL;
writeq(val64, &bar0->rts_qos_steering);
break;
case 8:
- val64 = 0x0001020300040105ULL;
+ val64 = 0x0001020304050607ULL;
writeq(val64, &bar0->rx_w_round_robin_0);
- val64 = 0x0200030106000204ULL;
writeq(val64, &bar0->rx_w_round_robin_1);
- val64 = 0x0103000502010007ULL;
writeq(val64, &bar0->rx_w_round_robin_2);
- val64 = 0x0304010002060500ULL;
writeq(val64, &bar0->rx_w_round_robin_3);
- val64 = 0x0103020400000000ULL;
+ val64 = 0x0001020300000000ULL;
writeq(val64, &bar0->rx_w_round_robin_4);
val64 = 0x8040201008040201ULL;
* by then we return error.
*/
time = 0;
- while (TRUE) {
+ while (true) {
val64 = readq(&bar0->rti_command_mem);
if (!(val64 & RTI_CMD_MEM_STROBE_NEW_CMD))
break;
static int s2io_link_fault_indication(struct s2io_nic *nic)
{
- if (nic->config.intr_type != INTA)
- return MAC_RMAC_ERR_TIMER;
if (nic->device_type == XFRAME_II_DEVICE)
return LINK_UP_DOWN_INTERRUPT;
else
{
struct XENA_dev_config __iomem *bar0 = nic->bar0;
register u64 gen_int_mask = 0;
+ u64 interruptible;
+ writeq(DISABLE_ALL_INTRS, &bar0->general_int_mask);
if (mask & TX_DMA_INTR) {
gen_int_mask |= TXDMA_INT_M;
gen_int_mask |= RXMAC_INT_M;
do_s2io_write_bits(MAC_INT_STATUS_RMAC_INT, flag,
&bar0->mac_int_mask);
- do_s2io_write_bits(RMAC_RX_BUFF_OVRN | RMAC_RX_SM_ERR |
+ interruptible = RMAC_RX_BUFF_OVRN | RMAC_RX_SM_ERR |
RMAC_UNUSED_INT | RMAC_SINGLE_ECC_ERR |
- RMAC_DOUBLE_ECC_ERR |
- RMAC_LINK_STATE_CHANGE_INT,
+ RMAC_DOUBLE_ECC_ERR;
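+		/* Xframe I reports link faults through the RMAC error
+		 * path; Xframe II has a dedicated link up/down interrupt.
+		 */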
+ if (s2io_link_fault_indication(nic) == MAC_RMAC_ERR_TIMER)
+ interruptible |= RMAC_LINK_STATE_CHANGE_INT;
+ do_s2io_write_bits(interruptible,
flag, &bar0->mac_rmac_err_mask);
}
herc = (sp->device_type == XFRAME_II_DEVICE);
- if (flag == FALSE) {
+ if (flag == false) {
if ((!herc && (sp->pdev->revision >= 4)) || herc) {
if (!(val64 & ADAPTER_STATUS_RMAC_PCC_IDLE))
ret = 1;
val64 = readq(&bar0->rx_pa_cfg);
val64 &= ~RX_PA_CFG_STRIP_VLAN_TAG;
writeq(val64, &bar0->rx_pa_cfg);
- vlan_strip_flag = 0;
+ nic->vlan_strip_flag = 0;
}
/*
/**
* fill_rx_buffers - Allocates the Rx side skbs
- * @nic: device private variable
- * @ring_no: ring number
+ * @ring: per ring structure
+ * @from_card_up: If this is true, we will map the buffer to get
+ * the dma address for buf0 and buf1 to give it to the card.
+ * Else we will sync the already mapped buffer to give it to the card.
* Description:
* The function allocates Rx side skbs and puts the physical
* address of these buffers into the RxD buffer pointers, so that the NIC
* Return Value:
* SUCCESS on success or an appropriate -ve value on failure.
*/
-
-static int fill_rx_buffers(struct s2io_nic *nic, int ring_no)
+static int fill_rx_buffers(struct s2io_nic *nic, struct ring_info *ring,
+ int from_card_up)
{
- struct net_device *dev = nic->dev;
struct sk_buff *skb;
struct RxD_t *rxdp;
- int off, off1, size, block_no, block_no1;
+ int off, size, block_no, block_no1;
u32 alloc_tab = 0;
u32 alloc_cnt;
- struct mac_info *mac_control;
- struct config_param *config;
u64 tmp;
struct buffAdd *ba;
- unsigned long flags;
struct RxD_t *first_rxdp = NULL;
u64 Buffer0_ptr = 0, Buffer1_ptr = 0;
+ int rxd_index = 0;
struct RxD1 *rxdp1;
struct RxD3 *rxdp3;
- struct swStat *stats = &nic->mac_control.stats_info->sw_stat;
+ struct swStat *stats = &ring->nic->mac_control.stats_info->sw_stat;
- mac_control = &nic->mac_control;
- config = &nic->config;
- alloc_cnt = mac_control->rings[ring_no].pkt_cnt -
- atomic_read(&nic->rx_bufs_left[ring_no]);
+ alloc_cnt = ring->pkt_cnt - ring->rx_bufs_left;
- block_no1 = mac_control->rings[ring_no].rx_curr_get_info.block_index;
- off1 = mac_control->rings[ring_no].rx_curr_get_info.offset;
+ block_no1 = ring->rx_curr_get_info.block_index;
while (alloc_tab < alloc_cnt) {
- block_no = mac_control->rings[ring_no].rx_curr_put_info.
- block_index;
- off = mac_control->rings[ring_no].rx_curr_put_info.offset;
+ block_no = ring->rx_curr_put_info.block_index;
- rxdp = mac_control->rings[ring_no].
- rx_blocks[block_no].rxds[off].virt_addr;
+ off = ring->rx_curr_put_info.offset;
- if ((block_no == block_no1) && (off == off1) &&
- (rxdp->Host_Control)) {
+ rxdp = ring->rx_blocks[block_no].rxds[off].virt_addr;
+
+ rxd_index = off + 1;
+ if (block_no)
+ rxd_index += (block_no * ring->rxd_count);
+
+ if ((block_no == block_no1) &&
+ (off == ring->rx_curr_get_info.offset) &&
+ (rxdp->Host_Control)) {
DBG_PRINT(INTR_DBG, "%s: Get and Put",
- dev->name);
+ ring->dev->name);
DBG_PRINT(INTR_DBG, " info equated\n");
goto end;
}
- if (off && (off == rxd_count[nic->rxd_mode])) {
- mac_control->rings[ring_no].rx_curr_put_info.
- block_index++;
- if (mac_control->rings[ring_no].rx_curr_put_info.
- block_index == mac_control->rings[ring_no].
- block_count)
- mac_control->rings[ring_no].rx_curr_put_info.
- block_index = 0;
- block_no = mac_control->rings[ring_no].
- rx_curr_put_info.block_index;
- if (off == rxd_count[nic->rxd_mode])
- off = 0;
- mac_control->rings[ring_no].rx_curr_put_info.
- offset = off;
- rxdp = mac_control->rings[ring_no].
- rx_blocks[block_no].block_virt_addr;
+ if (off && (off == ring->rxd_count)) {
+ ring->rx_curr_put_info.block_index++;
+ if (ring->rx_curr_put_info.block_index ==
+ ring->block_count)
+ ring->rx_curr_put_info.block_index = 0;
+ block_no = ring->rx_curr_put_info.block_index;
+ off = 0;
+ ring->rx_curr_put_info.offset = off;
+ rxdp = ring->rx_blocks[block_no].block_virt_addr;
DBG_PRINT(INTR_DBG, "%s: Next block at: %p\n",
- dev->name, rxdp);
- }
- if(!napi) {
- spin_lock_irqsave(&nic->put_lock, flags);
- mac_control->rings[ring_no].put_pos =
- (block_no * (rxd_count[nic->rxd_mode] + 1)) + off;
- spin_unlock_irqrestore(&nic->put_lock, flags);
- } else {
- mac_control->rings[ring_no].put_pos =
- (block_no * (rxd_count[nic->rxd_mode] + 1)) + off;
+ ring->dev->name, rxdp);
}
+
if ((rxdp->Control_1 & RXD_OWN_XENA) &&
- ((nic->rxd_mode == RXD_MODE_3B) &&
+ ((ring->rxd_mode == RXD_MODE_3B) &&
(rxdp->Control_2 & s2BIT(0)))) {
- mac_control->rings[ring_no].rx_curr_put_info.
- offset = off;
+ ring->rx_curr_put_info.offset = off;
goto end;
}
/* calculate size of skb based on ring mode */
- size = dev->mtu + HEADER_ETHERNET_II_802_3_SIZE +
+ size = ring->mtu + HEADER_ETHERNET_II_802_3_SIZE +
HEADER_802_2_SIZE + HEADER_SNAP_SIZE;
- if (nic->rxd_mode == RXD_MODE_1)
+ if (ring->rxd_mode == RXD_MODE_1)
size += NET_IP_ALIGN;
else
- size = dev->mtu + ALIGN_SIZE + BUF0_LEN + 4;
+ size = ring->mtu + ALIGN_SIZE + BUF0_LEN + 4;
/* allocate skb */
skb = dev_alloc_skb(size);
if(!skb) {
- DBG_PRINT(INFO_DBG, "%s: Out of ", dev->name);
+ DBG_PRINT(INFO_DBG, "%s: Out of ", ring->dev->name);
DBG_PRINT(INFO_DBG, "memory to allocate SKBs\n");
if (first_rxdp) {
wmb();
first_rxdp->Control_1 |= RXD_OWN_XENA;
}
- nic->mac_control.stats_info->sw_stat. \
- mem_alloc_fail_cnt++;
+ stats->mem_alloc_fail_cnt++;
+
return -ENOMEM ;
}
- nic->mac_control.stats_info->sw_stat.mem_allocated
- += skb->truesize;
- if (nic->rxd_mode == RXD_MODE_1) {
+ stats->mem_allocated += skb->truesize;
+
+ if (ring->rxd_mode == RXD_MODE_1) {
/* 1 buffer mode - normal operation mode */
rxdp1 = (struct RxD1*)rxdp;
memset(rxdp, 0, sizeof(struct RxD1));
skb_reserve(skb, NET_IP_ALIGN);
rxdp1->Buffer0_ptr = pci_map_single
- (nic->pdev, skb->data, size - NET_IP_ALIGN,
+ (ring->pdev, skb->data, size - NET_IP_ALIGN,
PCI_DMA_FROMDEVICE);
- if( (rxdp1->Buffer0_ptr == 0) ||
- (rxdp1->Buffer0_ptr ==
- DMA_ERROR_CODE))
+ if (pci_dma_mapping_error(nic->pdev,
+ rxdp1->Buffer0_ptr))
goto pci_map_failed;
rxdp->Control_2 =
SET_BUFFER0_SIZE_1(size - NET_IP_ALIGN);
-
- } else if (nic->rxd_mode == RXD_MODE_3B) {
+ rxdp->Host_Control = (unsigned long) (skb);
+ } else if (ring->rxd_mode == RXD_MODE_3B) {
/*
* 2 buffer mode -
* 2 buffer mode provides 128
rxdp3->Buffer0_ptr = Buffer0_ptr;
rxdp3->Buffer1_ptr = Buffer1_ptr;
- ba = &mac_control->rings[ring_no].ba[block_no][off];
+ ba = &ring->ba[block_no][off];
skb_reserve(skb, BUF0_LEN);
tmp = (u64)(unsigned long) skb->data;
tmp += ALIGN_SIZE;
skb->data = (void *) (unsigned long)tmp;
skb_reset_tail_pointer(skb);
- if (!(rxdp3->Buffer0_ptr))
+ if (from_card_up) {
rxdp3->Buffer0_ptr =
- pci_map_single(nic->pdev, ba->ba_0, BUF0_LEN,
- PCI_DMA_FROMDEVICE);
- else
- pci_dma_sync_single_for_device(nic->pdev,
+ pci_map_single(ring->pdev, ba->ba_0,
+ BUF0_LEN, PCI_DMA_FROMDEVICE);
+ if (pci_dma_mapping_error(nic->pdev,
+ rxdp3->Buffer0_ptr))
+ goto pci_map_failed;
+ } else
+ pci_dma_sync_single_for_device(ring->pdev,
(dma_addr_t) rxdp3->Buffer0_ptr,
BUF0_LEN, PCI_DMA_FROMDEVICE);
- if( (rxdp3->Buffer0_ptr == 0) ||
- (rxdp3->Buffer0_ptr == DMA_ERROR_CODE))
- goto pci_map_failed;
rxdp->Control_2 = SET_BUFFER0_SIZE_3(BUF0_LEN);
- if (nic->rxd_mode == RXD_MODE_3B) {
+ if (ring->rxd_mode == RXD_MODE_3B) {
/* Two buffer mode */
/*
* L4 payload
*/
rxdp3->Buffer2_ptr = pci_map_single
- (nic->pdev, skb->data, dev->mtu + 4,
+ (ring->pdev, skb->data, ring->mtu + 4,
PCI_DMA_FROMDEVICE);
- if( (rxdp3->Buffer2_ptr == 0) ||
- (rxdp3->Buffer2_ptr == DMA_ERROR_CODE))
+ if (pci_dma_mapping_error(nic->pdev,
+ rxdp3->Buffer2_ptr))
goto pci_map_failed;
- rxdp3->Buffer1_ptr =
- pci_map_single(nic->pdev,
+ if (from_card_up) {
+ rxdp3->Buffer1_ptr =
+ pci_map_single(ring->pdev,
ba->ba_1, BUF1_LEN,
PCI_DMA_FROMDEVICE);
- if( (rxdp3->Buffer1_ptr == 0) ||
- (rxdp3->Buffer1_ptr == DMA_ERROR_CODE)) {
- pci_unmap_single
- (nic->pdev,
- (dma_addr_t)rxdp3->Buffer2_ptr,
- dev->mtu + 4,
- PCI_DMA_FROMDEVICE);
- goto pci_map_failed;
+
+ if (pci_dma_mapping_error(nic->pdev,
+ rxdp3->Buffer1_ptr)) {
+					pci_unmap_single
+						(ring->pdev,
+						 (dma_addr_t)
+						 rxdp3->Buffer2_ptr,
+						 ring->mtu + 4,
+						 PCI_DMA_FROMDEVICE);
+ goto pci_map_failed;
+ }
}
rxdp->Control_2 |= SET_BUFFER1_SIZE_3(1);
rxdp->Control_2 |= SET_BUFFER2_SIZE_3
- (dev->mtu + 4);
+ (ring->mtu + 4);
}
rxdp->Control_2 |= s2BIT(0);
+ rxdp->Host_Control = (unsigned long) (skb);
}
- rxdp->Host_Control = (unsigned long) (skb);
if (alloc_tab & ((1 << rxsync_frequency) - 1))
rxdp->Control_1 |= RXD_OWN_XENA;
off++;
- if (off == (rxd_count[nic->rxd_mode] + 1))
+ if (off == (ring->rxd_count + 1))
off = 0;
- mac_control->rings[ring_no].rx_curr_put_info.offset = off;
+ ring->rx_curr_put_info.offset = off;
rxdp->Control_2 |= SET_RXD_MARKER;
if (!(alloc_tab & ((1 << rxsync_frequency) - 1))) {
}
first_rxdp = rxdp;
}
- atomic_inc(&nic->rx_bufs_left[ring_no]);
+ ring->rx_bufs_left += 1;
alloc_tab++;
}
}
sp->mac_control.stats_info->sw_stat.mem_freed += skb->truesize;
dev_kfree_skb(skb);
- atomic_dec(&sp->rx_bufs_left[ring_no]);
+ mac_control->rings[ring_no].rx_bufs_left -= 1;
}
}
mac_control->rings[i].rx_curr_get_info.block_index = 0;
mac_control->rings[i].rx_curr_put_info.offset = 0;
mac_control->rings[i].rx_curr_get_info.offset = 0;
- atomic_set(&sp->rx_bufs_left[i], 0);
+ mac_control->rings[i].rx_bufs_left = 0;
DBG_PRINT(INIT_DBG, "%s:Freed 0x%x Rx Buffers on ring%d\n",
dev->name, buf_cnt, i);
}
}
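+
+/* Replenish the Rx buffers of one ring; an allocation failure is only
+ * logged, since the ring can be refilled on a later interrupt.
+ */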
+static int s2io_chk_rx_buffers(struct s2io_nic *nic, struct ring_info *ring)
+{
+ if (fill_rx_buffers(nic, ring, 0) == -ENOMEM) {
+ DBG_PRINT(INFO_DBG, "%s:Out of memory", ring->dev->name);
+ DBG_PRINT(INFO_DBG, " in Rx Intr!!\n");
+ }
+ return 0;
+}
+
/**
- * s2io_poll - Rx interrupt handler for NAPI support
+ * s2io_poll_msix - Rx interrupt handler for NAPI support (MSI-X)
* @napi : pointer to the napi structure.
- * 0 on success and 1 if there are No Rx packets to be processed.
+ * The number of packets processed during one pass of the poll.
*/
-static int s2io_poll(struct napi_struct *napi, int budget)
+static int s2io_poll_msix(struct napi_struct *napi, int budget)
{
- struct s2io_nic *nic = container_of(napi, struct s2io_nic, napi);
- struct net_device *dev = nic->dev;
- int pkt_cnt = 0, org_pkts_to_process;
- struct mac_info *mac_control;
+ struct ring_info *ring = container_of(napi, struct ring_info, napi);
+ struct net_device *dev = ring->dev;
struct config_param *config;
+ struct mac_info *mac_control;
+ int pkts_processed = 0;
+ u8 __iomem *addr = NULL;
+ u8 val8 = 0;
+ struct s2io_nic *nic = netdev_priv(dev);
struct XENA_dev_config __iomem *bar0 = nic->bar0;
- int i;
+ int budget_org = budget;
- mac_control = &nic->mac_control;
config = &nic->config;
+ mac_control = &nic->mac_control;
- nic->pkts_to_process = budget;
- org_pkts_to_process = nic->pkts_to_process;
+ if (unlikely(!is_s2io_card_up(nic)))
+ return 0;
- writeq(S2IO_MINUS_ONE, &bar0->rx_traffic_int);
- readl(&bar0->rx_traffic_int);
+ pkts_processed = rx_intr_handler(ring, budget);
+ s2io_chk_rx_buffers(nic, ring);
- for (i = 0; i < config->rx_ring_num; i++) {
- rx_intr_handler(&mac_control->rings[i]);
- pkt_cnt = org_pkts_to_process - nic->pkts_to_process;
- if (!nic->pkts_to_process) {
- /* Quota for the current iteration has been met */
- goto no_rx;
- }
+ if (pkts_processed < budget_org) {
+ napi_complete(napi);
+		/* Re-enable MSI-X Rx vector */
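+		/*
+		 * Byte (7 - ring_no) of xmsi_mask_reg carries this ring's
+		 * vector mask in bit 6; ring 0 shares its byte with the
+		 * alarm vector (bit 7), hence the different mask value.
+		 */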
+ addr = (u8 __iomem *)&bar0->xmsi_mask_reg;
+ addr += 7 - ring->ring_no;
+ val8 = (ring->ring_no == 0) ? 0x3f : 0xbf;
+ writeb(val8, addr);
+ val8 = readb(addr);
}
+ return pkts_processed;
+}
+static int s2io_poll_inta(struct napi_struct *napi, int budget)
+{
+ struct s2io_nic *nic = container_of(napi, struct s2io_nic, napi);
+ struct ring_info *ring;
+ struct config_param *config;
+ struct mac_info *mac_control;
+ int pkts_processed = 0;
+ int ring_pkts_processed, i;
+ struct XENA_dev_config __iomem *bar0 = nic->bar0;
+ int budget_org = budget;
- netif_rx_complete(dev, napi);
+ config = &nic->config;
+ mac_control = &nic->mac_control;
- for (i = 0; i < config->rx_ring_num; i++) {
- if (fill_rx_buffers(nic, i) == -ENOMEM) {
- DBG_PRINT(INFO_DBG, "%s:Out of memory", dev->name);
- DBG_PRINT(INFO_DBG, " in Rx Poll!!\n");
- break;
- }
- }
- /* Re enable the Rx interrupts. */
- writeq(0x0, &bar0->rx_traffic_mask);
- readl(&bar0->rx_traffic_mask);
- return pkt_cnt;
+ if (unlikely(!is_s2io_card_up(nic)))
+ return 0;
-no_rx:
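+	/* Serve the rings in order, spending the napi budget as we go */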
for (i = 0; i < config->rx_ring_num; i++) {
- if (fill_rx_buffers(nic, i) == -ENOMEM) {
- DBG_PRINT(INFO_DBG, "%s:Out of memory", dev->name);
- DBG_PRINT(INFO_DBG, " in Rx Poll!!\n");
+ ring = &mac_control->rings[i];
+ ring_pkts_processed = rx_intr_handler(ring, budget);
+ s2io_chk_rx_buffers(nic, ring);
+ pkts_processed += ring_pkts_processed;
+ budget -= ring_pkts_processed;
+ if (budget <= 0)
break;
- }
}
- return pkt_cnt;
+ if (pkts_processed < budget_org) {
+ napi_complete(napi);
+		/* Re-enable Rx interrupts for all rings */
+ writeq(0, &bar0->rx_traffic_mask);
+ readl(&bar0->rx_traffic_mask);
+ }
+ return pkts_processed;
}
#ifdef CONFIG_NET_POLL_CONTROLLER
*/
static void s2io_netpoll(struct net_device *dev)
{
- struct s2io_nic *nic = dev->priv;
+ struct s2io_nic *nic = netdev_priv(dev);
struct mac_info *mac_control;
struct config_param *config;
struct XENA_dev_config __iomem *bar0 = nic->bar0;
/* check for received packet and indicate up to network */
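+	/* budget 0: not a napi poll, so no packet quota applies */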
for (i = 0; i < config->rx_ring_num; i++)
- rx_intr_handler(&mac_control->rings[i]);
+ rx_intr_handler(&mac_control->rings[i], 0);
for (i = 0; i < config->rx_ring_num; i++) {
- if (fill_rx_buffers(nic, i) == -ENOMEM) {
+ if (fill_rx_buffers(nic, &mac_control->rings[i], 0) ==
+ -ENOMEM) {
DBG_PRINT(INFO_DBG, "%s:Out of memory", dev->name);
DBG_PRINT(INFO_DBG, " in Rx Netpoll!!\n");
break;
/**
* rx_intr_handler - Rx interrupt handler
- * @nic: device private variable.
+ * @ring_data: per ring structure.
+ * @budget: budget for napi processing.
* Description:
* If the interrupt is because of a received frame or if the
* receive ring contains fresh as yet un-processed frames,this function is
* stopped and sends the skb to the OSM's Rx handler and then increments
* the offset.
* Return Value:
- * NONE.
+ * No. of napi packets processed.
*/
-static void rx_intr_handler(struct ring_info *ring_data)
+static int rx_intr_handler(struct ring_info *ring_data, int budget)
{
- struct s2io_nic *nic = ring_data->nic;
- struct net_device *dev = (struct net_device *) nic->dev;
- int get_block, put_block, put_offset;
+ int get_block, put_block;
struct rx_curr_get_info get_info, put_info;
struct RxD_t *rxdp;
struct sk_buff *skb;
- int pkt_cnt = 0;
+ int pkt_cnt = 0, napi_pkts = 0;
int i;
struct RxD1* rxdp1;
struct RxD3* rxdp3;
- spin_lock(&nic->rx_lock);
-
get_info = ring_data->rx_curr_get_info;
get_block = get_info.block_index;
memcpy(&put_info, &ring_data->rx_curr_put_info, sizeof(put_info));
put_block = put_info.block_index;
rxdp = ring_data->rx_blocks[get_block].rxds[get_info.offset].virt_addr;
- if (!napi) {
- spin_lock(&nic->put_lock);
- put_offset = ring_data->put_pos;
- spin_unlock(&nic->put_lock);
- } else
- put_offset = ring_data->put_pos;
while (RXD_IS_UP2DT(rxdp)) {
/*
*/
if ((get_block == put_block) &&
(get_info.offset + 1) == put_info.offset) {
- DBG_PRINT(INTR_DBG, "%s: Ring Full\n",dev->name);
+ DBG_PRINT(INTR_DBG, "%s: Ring Full\n",
+ ring_data->dev->name);
break;
}
skb = (struct sk_buff *) ((unsigned long)rxdp->Host_Control);
if (skb == NULL) {
DBG_PRINT(ERR_DBG, "%s: The skb is ",
- dev->name);
+ ring_data->dev->name);
DBG_PRINT(ERR_DBG, "Null in Rx Intr\n");
- spin_unlock(&nic->rx_lock);
- return;
+ return 0;
}
- if (nic->rxd_mode == RXD_MODE_1) {
+ if (ring_data->rxd_mode == RXD_MODE_1) {
rxdp1 = (struct RxD1*)rxdp;
- pci_unmap_single(nic->pdev, (dma_addr_t)
+ pci_unmap_single(ring_data->pdev, (dma_addr_t)
rxdp1->Buffer0_ptr,
- dev->mtu +
+ ring_data->mtu +
HEADER_ETHERNET_II_802_3_SIZE +
HEADER_802_2_SIZE +
HEADER_SNAP_SIZE,
PCI_DMA_FROMDEVICE);
- } else if (nic->rxd_mode == RXD_MODE_3B) {
+ } else if (ring_data->rxd_mode == RXD_MODE_3B) {
rxdp3 = (struct RxD3*)rxdp;
- pci_dma_sync_single_for_cpu(nic->pdev, (dma_addr_t)
+ pci_dma_sync_single_for_cpu(ring_data->pdev, (dma_addr_t)
rxdp3->Buffer0_ptr,
BUF0_LEN, PCI_DMA_FROMDEVICE);
- pci_unmap_single(nic->pdev, (dma_addr_t)
+ pci_unmap_single(ring_data->pdev, (dma_addr_t)
rxdp3->Buffer2_ptr,
- dev->mtu + 4,
+ ring_data->mtu + 4,
PCI_DMA_FROMDEVICE);
}
prefetch(skb->data);
ring_data->rx_curr_get_info.offset = get_info.offset;
rxdp = ring_data->rx_blocks[get_block].
rxds[get_info.offset].virt_addr;
- if (get_info.offset == rxd_count[nic->rxd_mode]) {
+ if (get_info.offset == rxd_count[ring_data->rxd_mode]) {
get_info.offset = 0;
ring_data->rx_curr_get_info.offset = get_info.offset;
get_block++;
rxdp = ring_data->rx_blocks[get_block].block_virt_addr;
}
- nic->pkts_to_process -= 1;
- if ((napi) && (!nic->pkts_to_process))
- break;
+ if (ring_data->nic->config.napi) {
+ budget--;
+ napi_pkts++;
+ if (!budget)
+ break;
+ }
pkt_cnt++;
if ((indicate_max_pkts) && (pkt_cnt > indicate_max_pkts))
break;
}
- if (nic->lro) {
+ if (ring_data->lro) {
/* Clear all LRO sessions before exiting */
for (i=0; i<MAX_LRO_SESSIONS; i++) {
- struct lro *lro = &nic->lro0_n[i];
+ struct lro *lro = &ring_data->lro0_n[i];
if (lro->in_use) {
- update_L3L4_header(nic, lro);
+ update_L3L4_header(ring_data->nic, lro);
queue_rx_frame(lro->parent, lro->vlan_tag);
clear_lro_session(lro);
}
}
}
-
- spin_unlock(&nic->rx_lock);
+	return napi_pkts;
}
/**
if (skb == NULL) {
spin_unlock_irqrestore(&fifo_data->tx_lock, flags);
DBG_PRINT(ERR_DBG, "%s: Null skb ",
- __FUNCTION__);
+ __func__);
DBG_PRINT(ERR_DBG, "in Tx Free Intr\n");
return;
}
pkt_cnt++;
/* Updating the statistics block */
- nic->stats.tx_bytes += skb->len;
+ nic->dev->stats.tx_bytes += skb->len;
nic->mac_control.stats_info->sw_stat.mem_freed += skb->truesize;
dev_kfree_skb_irq(skb);
static void s2io_mdio_write(u32 mmd_type, u64 addr, u16 value, struct net_device *dev)
{
u64 val64 = 0x0;
- struct s2io_nic *sp = dev->priv;
+ struct s2io_nic *sp = netdev_priv(dev);
struct XENA_dev_config __iomem *bar0 = sp->bar0;
-//address transaction
+/* address transaction */
{
u64 val64 = 0x0;
u64 rval64 = 0x0;
- struct s2io_nic *sp = dev->priv;
+ struct s2io_nic *sp = netdev_priv(dev);
struct XENA_dev_config __iomem *bar0 = sp->bar0;
/* address transaction */
u64 val64 = 0x0;
u64 addr = 0x0;
- struct s2io_nic *sp = dev->priv;
+ struct s2io_nic *sp = netdev_priv(dev);
struct stat_block *stat_info = sp->mac_control.stats_info;
/* Check the communication with the MDIO slave */
- addr = 0x0000;
+ addr = MDIO_CTRL1;
val64 = 0x0;
- val64 = s2io_mdio_read(MDIO_MMD_PMA_DEV_ADDR, addr, dev);
+ val64 = s2io_mdio_read(MDIO_MMD_PMAPMD, addr, dev);
if((val64 == 0xFFFF) || (val64 == 0x0000))
{
DBG_PRINT(ERR_DBG, "ERR: MDIO slave access failed - "
return;
}
- /* Check for the expecte value of 2040 at PMA address 0x0000 */
- if(val64 != 0x2040)
+ /* Check for the expected value of control reg 1 */
+ if(val64 != MDIO_CTRL1_SPEED10G)
{
DBG_PRINT(ERR_DBG, "Incorrect value at PMA address 0x0000 - ");
- DBG_PRINT(ERR_DBG, "Returned: %llx- Expected: 0x2040\n",
- (unsigned long long)val64);
+ DBG_PRINT(ERR_DBG, "Returned: %llx- Expected: 0x%x\n",
+ (unsigned long long)val64, MDIO_CTRL1_SPEED10G);
return;
}
/* Loading the DOM register to MDIO register */
addr = 0xA100;
- s2io_mdio_write(MDIO_MMD_PMA_DEV_ADDR, addr, val16, dev);
- val64 = s2io_mdio_read(MDIO_MMD_PMA_DEV_ADDR, addr, dev);
+ s2io_mdio_write(MDIO_MMD_PMAPMD, addr, val16, dev);
+ val64 = s2io_mdio_read(MDIO_MMD_PMAPMD, addr, dev);
/* Reading the Alarm flags */
addr = 0xA070;
val64 = 0x0;
- val64 = s2io_mdio_read(MDIO_MMD_PMA_DEV_ADDR, addr, dev);
+ val64 = s2io_mdio_read(MDIO_MMD_PMAPMD, addr, dev);
flag = CHECKBIT(val64, 0x7);
type = 1;
/* Reading the Warning flags */
addr = 0xA074;
val64 = 0x0;
- val64 = s2io_mdio_read(MDIO_MMD_PMA_DEV_ADDR, addr, dev);
+ val64 = s2io_mdio_read(MDIO_MMD_PMAPMD, addr, dev);
if(CHECKBIT(val64, 0x7))
stat_info->xpak_stat.warn_transceiver_temp_high++;
unsigned long long mem_alloc_cnt, mem_free_cnt, watchdog_cnt;
DBG_PRINT(INIT_DBG,"%s - Resetting XFrame card %s\n",
- __FUNCTION__, sp->dev->name);
+ __func__, sp->dev->name);
/* Back up the PCI-X CMD reg, dont want to lose MMRBC, OST settings */
pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER, &(pci_cmd));
}
if (check_pci_device_id(val16) == (u16)PCI_ANY_ID) {
- DBG_PRINT(ERR_DBG,"%s SW_Reset failed!\n", __FUNCTION__);
+ DBG_PRINT(ERR_DBG,"%s SW_Reset failed!\n", __func__);
}
pci_write_config_word(sp->pdev, PCIX_COMMAND_REGISTER, pci_cmd);
writeq(val64, &bar0->pcc_err_reg);
}
- sp->device_enabled_once = FALSE;
+ sp->device_enabled_once = false;
}
/**
{
struct XENA_dev_config __iomem *bar0 = nic->bar0;
u64 val64;
- int i;
+ int i, msix_index;
+
+ if (nic->device_type == XFRAME_I_DEVICE)
+ return;
for (i=0; i < MAX_REQUESTED_MSI_X; i++) {
+		msix_index = i ? ((i - 1) * 8 + 1) : 0;
writeq(nic->msix_info[i].addr, &bar0->xmsi_address);
writeq(nic->msix_info[i].data, &bar0->xmsi_data);
- val64 = (s2BIT(7) | s2BIT(15) | vBIT(i, 26, 6));
+ val64 = (s2BIT(7) | s2BIT(15) | vBIT(msix_index, 26, 6));
writeq(val64, &bar0->xmsi_access);
- if (wait_for_msix_trans(nic, i)) {
- DBG_PRINT(ERR_DBG, "failed in %s\n", __FUNCTION__);
+ if (wait_for_msix_trans(nic, msix_index)) {
+ DBG_PRINT(ERR_DBG, "failed in %s\n", __func__);
continue;
}
}
{
struct XENA_dev_config __iomem *bar0 = nic->bar0;
u64 val64, addr, data;
- int i;
+ int i, msix_index;
+
+ if (nic->device_type == XFRAME_I_DEVICE)
+ return;
/* Store and display */
for (i=0; i < MAX_REQUESTED_MSI_X; i++) {
- val64 = (s2BIT(15) | vBIT(i, 26, 6));
+		msix_index = i ? ((i - 1) * 8 + 1) : 0;
+ val64 = (s2BIT(15) | vBIT(msix_index, 26, 6));
writeq(val64, &bar0->xmsi_access);
- if (wait_for_msix_trans(nic, i)) {
- DBG_PRINT(ERR_DBG, "failed in %s\n", __FUNCTION__);
+ if (wait_for_msix_trans(nic, msix_index)) {
+ DBG_PRINT(ERR_DBG, "failed in %s\n", __func__);
continue;
}
addr = readq(&bar0->xmsi_address);
static int s2io_enable_msi_x(struct s2io_nic *nic)
{
struct XENA_dev_config __iomem *bar0 = nic->bar0;
- u64 tx_mat, rx_mat;
+ u64 rx_mat;
u16 msi_control; /* Temp variable */
int ret, i, j, msix_indx = 1;
- nic->entries = kcalloc(MAX_REQUESTED_MSI_X, sizeof(struct msix_entry),
+ nic->entries = kmalloc(nic->num_entries * sizeof(struct msix_entry),
GFP_KERNEL);
if (!nic->entries) {
DBG_PRINT(INFO_DBG, "%s: Memory allocation failed\n", \
- __FUNCTION__);
+ __func__);
nic->mac_control.stats_info->sw_stat.mem_alloc_fail_cnt++;
return -ENOMEM;
}
nic->mac_control.stats_info->sw_stat.mem_allocated
- += (MAX_REQUESTED_MSI_X * sizeof(struct msix_entry));
+ += (nic->num_entries * sizeof(struct msix_entry));
+
+ memset(nic->entries, 0, nic->num_entries * sizeof(struct msix_entry));
nic->s2io_entries =
- kcalloc(MAX_REQUESTED_MSI_X, sizeof(struct s2io_msix_entry),
+ kmalloc(nic->num_entries * sizeof(struct s2io_msix_entry),
GFP_KERNEL);
if (!nic->s2io_entries) {
DBG_PRINT(INFO_DBG, "%s: Memory allocation failed\n",
- __FUNCTION__);
+ __func__);
nic->mac_control.stats_info->sw_stat.mem_alloc_fail_cnt++;
kfree(nic->entries);
nic->mac_control.stats_info->sw_stat.mem_freed
- += (MAX_REQUESTED_MSI_X * sizeof(struct msix_entry));
+ += (nic->num_entries * sizeof(struct msix_entry));
return -ENOMEM;
}
nic->mac_control.stats_info->sw_stat.mem_allocated
- += (MAX_REQUESTED_MSI_X * sizeof(struct s2io_msix_entry));
-
- for (i=0; i< MAX_REQUESTED_MSI_X; i++) {
- nic->entries[i].entry = i;
- nic->s2io_entries[i].entry = i;
+ += (nic->num_entries * sizeof(struct s2io_msix_entry));
+ memset(nic->s2io_entries, 0,
+ nic->num_entries * sizeof(struct s2io_msix_entry));
+
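+	/* Entry 0 is the alarm vector, shared by all Tx fifos */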
+ nic->entries[0].entry = 0;
+ nic->s2io_entries[0].entry = 0;
+ nic->s2io_entries[0].in_use = MSIX_FLG;
+ nic->s2io_entries[0].type = MSIX_ALARM_TYPE;
+ nic->s2io_entries[0].arg = &nic->mac_control.fifos;
+
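+	/* Ring vectors occupy MSI-X table entries 1, 9, 17, ... */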
+ for (i = 1; i < nic->num_entries; i++) {
+ nic->entries[i].entry = ((i - 1) * 8) + 1;
+ nic->s2io_entries[i].entry = ((i - 1) * 8) + 1;
nic->s2io_entries[i].arg = NULL;
nic->s2io_entries[i].in_use = 0;
}
- tx_mat = readq(&bar0->tx_mat0_n[0]);
- for (i=0; i<nic->config.tx_fifo_num; i++, msix_indx++) {
- tx_mat |= TX_MAT_SET(i, msix_indx);
- nic->s2io_entries[msix_indx].arg = &nic->mac_control.fifos[i];
- nic->s2io_entries[msix_indx].type = MSIX_FIFO_TYPE;
- nic->s2io_entries[msix_indx].in_use = MSIX_FLG;
- }
- writeq(tx_mat, &bar0->tx_mat0_n[0]);
-
rx_mat = readq(&bar0->rx_mat);
- for (j = 0; j < nic->config.rx_ring_num; j++, msix_indx++) {
+ for (j = 0; j < nic->config.rx_ring_num; j++) {
rx_mat |= RX_MAT_SET(j, msix_indx);
- nic->s2io_entries[msix_indx].arg
- = &nic->mac_control.rings[j];
- nic->s2io_entries[msix_indx].type = MSIX_RING_TYPE;
- nic->s2io_entries[msix_indx].in_use = MSIX_FLG;
+ nic->s2io_entries[j+1].arg = &nic->mac_control.rings[j];
+ nic->s2io_entries[j+1].type = MSIX_RING_TYPE;
+ nic->s2io_entries[j+1].in_use = MSIX_FLG;
+ msix_indx += 8;
}
writeq(rx_mat, &bar0->rx_mat);
+ readq(&bar0->rx_mat);
- nic->avail_msix_vectors = 0;
- ret = pci_enable_msix(nic->pdev, nic->entries, MAX_REQUESTED_MSI_X);
+ ret = pci_enable_msix(nic->pdev, nic->entries, nic->num_entries);
/* We fail init if error or we get less vectors than min required */
- if (ret >= (nic->config.tx_fifo_num + nic->config.rx_ring_num + 1)) {
- nic->avail_msix_vectors = ret;
- ret = pci_enable_msix(nic->pdev, nic->entries, ret);
- }
if (ret) {
- DBG_PRINT(ERR_DBG, "%s: Enabling MSIX failed\n", nic->dev->name);
+ DBG_PRINT(ERR_DBG, "s2io: Enabling MSI-X failed\n");
kfree(nic->entries);
nic->mac_control.stats_info->sw_stat.mem_freed
- += (MAX_REQUESTED_MSI_X * sizeof(struct msix_entry));
+ += (nic->num_entries * sizeof(struct msix_entry));
kfree(nic->s2io_entries);
nic->mac_control.stats_info->sw_stat.mem_freed
- += (MAX_REQUESTED_MSI_X * sizeof(struct s2io_msix_entry));
+ += (nic->num_entries * sizeof(struct s2io_msix_entry));
nic->entries = NULL;
nic->s2io_entries = NULL;
- nic->avail_msix_vectors = 0;
return -ENOMEM;
}
- if (!nic->avail_msix_vectors)
- nic->avail_msix_vectors = MAX_REQUESTED_MSI_X;
/*
* To enable MSI-X, MSI also needs to be enabled, due to a bug
int i;
u16 msi_control;
- for (i = 0; i < MAX_REQUESTED_MSI_X; i++) {
+ for (i = 0; i < sp->num_entries; i++) {
if (sp->s2io_entries[i].in_use ==
MSIX_REGISTERED_SUCCESS) {
int vector = sp->entries[i].vector;
static int s2io_open(struct net_device *dev)
{
- struct s2io_nic *sp = dev->priv;
+ struct s2io_nic *sp = netdev_priv(dev);
int err = 0;
/*
netif_carrier_off(dev);
sp->last_link_state = 0;
- if (sp->config.intr_type == MSI_X) {
- int ret = s2io_enable_msi_x(sp);
-
- if (!ret) {
- ret = s2io_test_msi(sp);
- /* rollback MSI-X, will re-enable during add_isr() */
- remove_msix_isr(sp);
- }
- if (ret) {
-
- DBG_PRINT(ERR_DBG,
- "%s: MSI-X requested but failed to enable\n",
- dev->name);
- sp->config.intr_type = INTA;
- }
- }
-
- /* NAPI doesn't work well with MSI(X) */
- if (sp->config.intr_type != INTA) {
- if(sp->config.napi)
- sp->config.napi = 0;
- }
-
/* Initialize H/W and enable interrupts */
err = s2io_card_up(sp);
if (err) {
if (sp->entries) {
kfree(sp->entries);
sp->mac_control.stats_info->sw_stat.mem_freed
- += (MAX_REQUESTED_MSI_X * sizeof(struct msix_entry));
+ += (sp->num_entries * sizeof(struct msix_entry));
}
if (sp->s2io_entries) {
kfree(sp->s2io_entries);
sp->mac_control.stats_info->sw_stat.mem_freed
- += (MAX_REQUESTED_MSI_X * sizeof(struct s2io_msix_entry));
+ += (sp->num_entries * sizeof(struct s2io_msix_entry));
}
}
return err;
static int s2io_close(struct net_device *dev)
{
- struct s2io_nic *sp = dev->priv;
+ struct s2io_nic *sp = netdev_priv(dev);
struct config_param *config = &sp->config;
u64 tmp64;
int offset;
do_s2io_delete_unicast_mc(sp, tmp64);
}
- /* Reset card, kill tasklet and free Tx and Rx buffers. */
s2io_card_down(sp);
return 0;
static int s2io_xmit(struct sk_buff *skb, struct net_device *dev)
{
- struct s2io_nic *sp = dev->priv;
+ struct s2io_nic *sp = netdev_priv(dev);
u16 frg_cnt, frg_len, i, queue, queue_len, put_off, get_off;
register u64 val64;
struct TxD *txdp;
return NETDEV_TX_LOCKED;
}
-#ifdef CONFIG_NETDEVICES_MULTIQUEUE
if (sp->config.multiq) {
if (__netif_subqueue_stopped(dev, fifo->fifo_no)) {
spin_unlock_irqrestore(&fifo->tx_lock, flags);
return NETDEV_TX_BUSY;
}
- } else
-#endif
- if (unlikely(fifo->queue_state == FIFO_QUEUE_STOP)) {
+ } else if (unlikely(fifo->queue_state == FIFO_QUEUE_STOP)) {
if (netif_queue_stopped(dev)) {
spin_unlock_irqrestore(&fifo->tx_lock, flags);
return NETDEV_TX_BUSY;
txdp->Buffer_Pointer = pci_map_single(sp->pdev,
fifo->ufo_in_band_v,
sizeof(u64), PCI_DMA_TODEVICE);
- if((txdp->Buffer_Pointer == 0) ||
- (txdp->Buffer_Pointer == DMA_ERROR_CODE))
+ if (pci_dma_mapping_error(sp->pdev, txdp->Buffer_Pointer))
goto pci_map_failed;
txdp++;
}
txdp->Buffer_Pointer = pci_map_single
(sp->pdev, skb->data, frg_len, PCI_DMA_TODEVICE);
- if((txdp->Buffer_Pointer == 0) ||
- (txdp->Buffer_Pointer == DMA_ERROR_CODE))
+ if (pci_dma_mapping_error(sp->pdev, txdp->Buffer_Pointer))
goto pci_map_failed;
txdp->Host_Control = (unsigned long) skb;
s2io_stop_tx_queue(sp, fifo->fifo_no);
}
mac_control->stats_info->sw_stat.mem_allocated += skb->truesize;
- dev->trans_start = jiffies;
spin_unlock_irqrestore(&fifo->tx_lock, flags);
if (sp->config.intr_type == MSI_X)
mod_timer(&sp->alarm_timer, jiffies + HZ / 2);
}
-static int s2io_chk_rx_buffers(struct s2io_nic *sp, int rng_n)
-{
- int rxb_size, level;
-
- if (!sp->lro) {
- rxb_size = atomic_read(&sp->rx_bufs_left[rng_n]);
- level = rx_buffer_level(sp, rxb_size, rng_n);
-
- if ((level == PANIC) && (!TASKLET_IN_USE)) {
- int ret;
- DBG_PRINT(INTR_DBG, "%s: Rx BD hit ", __FUNCTION__);
- DBG_PRINT(INTR_DBG, "PANIC levels\n");
- if ((ret = fill_rx_buffers(sp, rng_n)) == -ENOMEM) {
- DBG_PRINT(INFO_DBG, "Out of memory in %s",
- __FUNCTION__);
- clear_bit(0, (&sp->tasklet_status));
- return -1;
- }
- clear_bit(0, (&sp->tasklet_status));
- } else if (level == LOW)
- tasklet_schedule(&sp->task);
-
- } else if (fill_rx_buffers(sp, rng_n) == -ENOMEM) {
- DBG_PRINT(INFO_DBG, "%s:Out of memory", sp->dev->name);
- DBG_PRINT(INFO_DBG, " in Rx Intr!!\n");
- }
- return 0;
-}
-
static irqreturn_t s2io_msix_ring_handle(int irq, void *dev_id)
{
struct ring_info *ring = (struct ring_info *)dev_id;
struct s2io_nic *sp = ring->nic;
+ struct XENA_dev_config __iomem *bar0 = sp->bar0;
- if (!is_s2io_card_up(sp))
+ if (unlikely(!is_s2io_card_up(sp)))
return IRQ_HANDLED;
- rx_intr_handler(ring);
- s2io_chk_rx_buffers(sp, ring->ring_no);
+ if (sp->config.napi) {
+ u8 __iomem *addr = NULL;
+ u8 val8 = 0;
+
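+		/* Mask this ring's vector until the napi poll re-enables it */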
+ addr = (u8 __iomem *)&bar0->xmsi_mask_reg;
+ addr += (7 - ring->ring_no);
+ val8 = (ring->ring_no == 0) ? 0x7f : 0xff;
+ writeb(val8, addr);
+ val8 = readb(addr);
+ napi_schedule(&ring->napi);
+ } else {
+ rx_intr_handler(ring, 0);
+ s2io_chk_rx_buffers(sp, ring);
+ }
return IRQ_HANDLED;
}
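+
+/* A single MSI-X vector now carries Tx completions and alarms for all
+ * fifos; the handler claims the interrupt only if this adapter raised it.
+ */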
static irqreturn_t s2io_msix_fifo_handle(int irq, void *dev_id)
{
- struct fifo_info *fifo = (struct fifo_info *)dev_id;
- struct s2io_nic *sp = fifo->nic;
+ int i;
+ struct fifo_info *fifos = (struct fifo_info *)dev_id;
+ struct s2io_nic *sp = fifos->nic;
+ struct XENA_dev_config __iomem *bar0 = sp->bar0;
+ struct config_param *config = &sp->config;
+ u64 reason;
- if (!is_s2io_card_up(sp))
+ if (unlikely(!is_s2io_card_up(sp)))
+ return IRQ_NONE;
+
+ reason = readq(&bar0->general_int_status);
+ if (unlikely(reason == S2IO_MINUS_ONE))
+ /* Nothing much can be done. Get out */
return IRQ_HANDLED;
- tx_intr_handler(fifo);
- return IRQ_HANDLED;
+ if (reason & (GEN_INTR_TXPIC | GEN_INTR_TXTRAFFIC)) {
+ writeq(S2IO_MINUS_ONE, &bar0->general_int_mask);
+
+ if (reason & GEN_INTR_TXPIC)
+ s2io_txpic_intr_handle(sp);
+
+ if (reason & GEN_INTR_TXTRAFFIC)
+ writeq(S2IO_MINUS_ONE, &bar0->tx_traffic_int);
+
+ for (i = 0; i < config->tx_fifo_num; i++)
+ tx_intr_handler(&fifos[i]);
+
+ writeq(sp->general_int_mask, &bar0->general_int_mask);
+ readl(&bar0->general_int_status);
+ return IRQ_HANDLED;
+ }
+ /* The interrupt was not raised by us */
+ return IRQ_NONE;
}
+
static void s2io_txpic_intr_handle(struct s2io_nic *sp)
{
struct XENA_dev_config __iomem *bar0 = sp->bar0;
static void s2io_handle_errors(void * dev_id)
{
struct net_device *dev = (struct net_device *) dev_id;
- struct s2io_nic *sp = dev->priv;
+ struct s2io_nic *sp = netdev_priv(dev);
struct XENA_dev_config __iomem *bar0 = sp->bar0;
u64 temp64 = 0,val64=0;
int i = 0;
static irqreturn_t s2io_isr(int irq, void *dev_id)
{
struct net_device *dev = (struct net_device *) dev_id;
- struct s2io_nic *sp = dev->priv;
+ struct s2io_nic *sp = netdev_priv(dev);
struct XENA_dev_config __iomem *bar0 = sp->bar0;
int i;
u64 reason = 0;
if (config->napi) {
if (reason & GEN_INTR_RXTRAFFIC) {
- if (likely(netif_rx_schedule_prep(dev,
- &sp->napi))) {
- __netif_rx_schedule(dev, &sp->napi);
- writeq(S2IO_MINUS_ONE,
- &bar0->rx_traffic_mask);
- } else
- writeq(S2IO_MINUS_ONE,
- &bar0->rx_traffic_int);
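+				/* Schedule the poll, mask further Rx
+				 * interrupts and ack this one; the poll
+				 * re-enables them when done.
+				 */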
+ napi_schedule(&sp->napi);
+ writeq(S2IO_MINUS_ONE, &bar0->rx_traffic_mask);
+ writeq(S2IO_MINUS_ONE, &bar0->rx_traffic_int);
+ readl(&bar0->rx_traffic_int);
}
} else {
/*
writeq(S2IO_MINUS_ONE, &bar0->rx_traffic_int);
for (i = 0; i < config->rx_ring_num; i++)
- rx_intr_handler(&mac_control->rings[i]);
+ rx_intr_handler(&mac_control->rings[i], 0);
}
/*
*/
if (!config->napi) {
for (i = 0; i < config->rx_ring_num; i++)
- s2io_chk_rx_buffers(sp, i);
+ s2io_chk_rx_buffers(sp, &mac_control->rings[i]);
}
writeq(sp->general_int_mask, &bar0->general_int_mask);
readl(&bar0->general_int_status);
static struct net_device_stats *s2io_get_stats(struct net_device *dev)
{
- struct s2io_nic *sp = dev->priv;
+ struct s2io_nic *sp = netdev_priv(dev);
struct mac_info *mac_control;
struct config_param *config;
+ int i;
mac_control = &sp->mac_control;
/* Configure Stats for immediate updt */
s2io_updt_stats(sp);
+ /* Using sp->stats as a staging area, because reset (due to mtu
+ change, for example) will clear some hardware counters */
+ dev->stats.tx_packets +=
+ le32_to_cpu(mac_control->stats_info->tmac_frms) -
+ sp->stats.tx_packets;
sp->stats.tx_packets =
le32_to_cpu(mac_control->stats_info->tmac_frms);
+ dev->stats.tx_errors +=
+ le32_to_cpu(mac_control->stats_info->tmac_any_err_frms) -
+ sp->stats.tx_errors;
sp->stats.tx_errors =
le32_to_cpu(mac_control->stats_info->tmac_any_err_frms);
+ dev->stats.rx_errors +=
+ le64_to_cpu(mac_control->stats_info->rmac_drop_frms) -
+ sp->stats.rx_errors;
sp->stats.rx_errors =
le64_to_cpu(mac_control->stats_info->rmac_drop_frms);
+	dev->stats.multicast +=
+ le32_to_cpu(mac_control->stats_info->rmac_vld_mcst_frms) -
+ sp->stats.multicast;
sp->stats.multicast =
le32_to_cpu(mac_control->stats_info->rmac_vld_mcst_frms);
+	dev->stats.rx_length_errors +=
+ le64_to_cpu(mac_control->stats_info->rmac_long_frms) -
+ sp->stats.rx_length_errors;
sp->stats.rx_length_errors =
le64_to_cpu(mac_control->stats_info->rmac_long_frms);
- return (&sp->stats);
+ /* collect per-ring rx_packets and rx_bytes */
+ dev->stats.rx_packets = dev->stats.rx_bytes = 0;
+ for (i = 0; i < config->rx_ring_num; i++) {
+ dev->stats.rx_packets += mac_control->rings[i].rx_packets;
+ dev->stats.rx_bytes += mac_control->rings[i].rx_bytes;
+ }
+
+ return (&dev->stats);
}
/**
{
int i, j, prev_cnt;
struct dev_mc_list *mclist;
- struct s2io_nic *sp = dev->priv;
+ struct s2io_nic *sp = netdev_priv(dev);
struct XENA_dev_config __iomem *bar0 = sp->bar0;
u64 val64 = 0, multi_mac = 0x010203040506ULL, mask =
0xfeffffffffffULL;
val64 = readq(&bar0->rx_pa_cfg);
val64 &= ~RX_PA_CFG_STRIP_VLAN_TAG;
writeq(val64, &bar0->rx_pa_cfg);
- vlan_strip_flag = 0;
+ sp->vlan_strip_flag = 0;
}
val64 = readq(&bar0->mac_cfg);
val64 = readq(&bar0->rx_pa_cfg);
val64 |= RX_PA_CFG_STRIP_VLAN_TAG;
writeq(val64, &bar0->rx_pa_cfg);
- vlan_strip_flag = 1;
+ sp->vlan_strip_flag = 1;
}
val64 = readq(&bar0->mac_cfg);
/* read from CAM unicast & multicast addresses and store it in
* def_mac_addr structure
*/
-void do_s2io_store_unicast_mc(struct s2io_nic *sp)
+static void do_s2io_store_unicast_mc(struct s2io_nic *sp)
{
int offset;
u64 mac_addr = 0x0;
static int do_s2io_prog_unicast(struct net_device *dev, u8 *addr)
{
- struct s2io_nic *sp = dev->priv;
+ struct s2io_nic *sp = netdev_priv(dev);
register u64 mac_addr = 0, perm_addr = 0;
int i;
u64 tmp64;
static int s2io_ethtool_sset(struct net_device *dev,
struct ethtool_cmd *info)
{
- struct s2io_nic *sp = dev->priv;
+ struct s2io_nic *sp = netdev_priv(dev);
if ((info->autoneg == AUTONEG_ENABLE) ||
(info->speed != SPEED_10000) || (info->duplex != DUPLEX_FULL))
return -EINVAL;
static int s2io_ethtool_gset(struct net_device *dev, struct ethtool_cmd *info)
{
- struct s2io_nic *sp = dev->priv;
+ struct s2io_nic *sp = netdev_priv(dev);
info->supported = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE);
info->advertising = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE);
info->port = PORT_FIBRE;
static void s2io_ethtool_gdrvinfo(struct net_device *dev,
struct ethtool_drvinfo *info)
{
- struct s2io_nic *sp = dev->priv;
+ struct s2io_nic *sp = netdev_priv(dev);
strncpy(info->driver, s2io_driver_name, sizeof(info->driver));
strncpy(info->version, s2io_driver_version, sizeof(info->version));
int i;
u64 reg;
u8 *reg_space = (u8 *) space;
- struct s2io_nic *sp = dev->priv;
+ struct s2io_nic *sp = netdev_priv(dev);
regs->len = XENA_REG_SPACE;
regs->version = sp->pdev->subsystem_device;
static int s2io_ethtool_idnic(struct net_device *dev, u32 data)
{
u64 val64 = 0, last_gpio_ctrl_val;
- struct s2io_nic *sp = dev->priv;
+ struct s2io_nic *sp = netdev_priv(dev);
struct XENA_dev_config __iomem *bar0 = sp->bar0;
u16 subid;
static void s2io_ethtool_gringparam(struct net_device *dev,
struct ethtool_ringparam *ering)
{
- struct s2io_nic *sp = dev->priv;
+ struct s2io_nic *sp = netdev_priv(dev);
int i,tx_desc_count=0,rx_desc_count=0;
if (sp->rxd_mode == RXD_MODE_1)
struct ethtool_pauseparam *ep)
{
u64 val64;
- struct s2io_nic *sp = dev->priv;
+ struct s2io_nic *sp = netdev_priv(dev);
struct XENA_dev_config __iomem *bar0 = sp->bar0;
val64 = readq(&bar0->rmac_pause_cfg);
if (val64 & RMAC_PAUSE_GEN_ENABLE)
- ep->tx_pause = TRUE;
+ ep->tx_pause = true;
if (val64 & RMAC_PAUSE_RX_ENABLE)
- ep->rx_pause = TRUE;
- ep->autoneg = FALSE;
+ ep->rx_pause = true;
+ ep->autoneg = false;
}
/**
struct ethtool_pauseparam *ep)
{
u64 val64;
- struct s2io_nic *sp = dev->priv;
+ struct s2io_nic *sp = netdev_priv(dev);
struct XENA_dev_config __iomem *bar0 = sp->bar0;
val64 = readq(&bar0->rmac_pause_cfg);
{
u32 i, valid;
u64 data;
- struct s2io_nic *sp = dev->priv;
+ struct s2io_nic *sp = netdev_priv(dev);
eeprom->magic = sp->pdev->vendor | (sp->pdev->device << 16);
{
int len = eeprom->len, cnt = 0;
u64 valid = 0, data;
- struct s2io_nic *sp = dev->priv;
+ struct s2io_nic *sp = netdev_priv(dev);
if (eeprom->magic != (sp->pdev->vendor | (sp->pdev->device << 16))) {
DBG_PRINT(ERR_DBG,
struct ethtool_test *ethtest,
uint64_t * data)
{
- struct s2io_nic *sp = dev->priv;
+ struct s2io_nic *sp = netdev_priv(dev);
int orig_state = netif_running(sp->dev);
if (ethtest->flags == ETH_TEST_FL_OFFLINE) {
u64 * tmp_stats)
{
int i = 0, k;
- struct s2io_nic *sp = dev->priv;
+ struct s2io_nic *sp = netdev_priv(dev);
struct stat_block *stat_info = sp->mac_control.stats_info;
s2io_updt_stats(sp);
static u32 s2io_ethtool_get_rx_csum(struct net_device * dev)
{
- struct s2io_nic *sp = dev->priv;
+ struct s2io_nic *sp = netdev_priv(dev);
return (sp->rx_csum);
}
static int s2io_ethtool_set_rx_csum(struct net_device *dev, u32 data)
{
- struct s2io_nic *sp = dev->priv;
+ struct s2io_nic *sp = netdev_priv(dev);
if (data)
sp->rx_csum = 1;
static int s2io_get_sset_count(struct net_device *dev, int sset)
{
- struct s2io_nic *sp = dev->priv;
+ struct s2io_nic *sp = netdev_priv(dev);
switch (sset) {
case ETH_SS_TEST:
u32 stringset, u8 * data)
{
int stat_size = 0;
- struct s2io_nic *sp = dev->priv;
+ struct s2io_nic *sp = netdev_priv(dev);
switch (stringset) {
case ETH_SS_TEST:
static int s2io_change_mtu(struct net_device *dev, int new_mtu)
{
- struct s2io_nic *sp = dev->priv;
+ struct s2io_nic *sp = netdev_priv(dev);
int ret = 0;
if ((new_mtu < MIN_MTU) || (new_mtu > S2IO_JUMBO_SIZE)) {
ret = s2io_card_up(sp);
if (ret) {
DBG_PRINT(ERR_DBG, "%s: Device bring up failed\n",
- __FUNCTION__);
+ __func__);
return ret;
}
s2io_wake_all_tx_queue(sp);
}
/**
- * s2io_tasklet - Bottom half of the ISR.
- * @dev_adr : address of the device structure in dma_addr_t format.
- * Description:
- * This is the tasklet or the bottom half of the ISR. This is
- * an extension of the ISR which is scheduled by the scheduler to be run
- * when the load on the CPU is low. All low priority tasks of the ISR can
- * be pushed into the tasklet. For now the tasklet is used only to
- * replenish the Rx buffers in the Rx buffer descriptors.
- * Return value:
- * void.
- */
-
-static void s2io_tasklet(unsigned long dev_addr)
-{
- struct net_device *dev = (struct net_device *) dev_addr;
- struct s2io_nic *sp = dev->priv;
- int i, ret;
- struct mac_info *mac_control;
- struct config_param *config;
-
- mac_control = &sp->mac_control;
- config = &sp->config;
-
- if (!TASKLET_IN_USE) {
- for (i = 0; i < config->rx_ring_num; i++) {
- ret = fill_rx_buffers(sp, i);
- if (ret == -ENOMEM) {
- DBG_PRINT(INFO_DBG, "%s: Out of ",
- dev->name);
- DBG_PRINT(INFO_DBG, "memory in tasklet\n");
- break;
- } else if (ret == -EFILL) {
- DBG_PRINT(INFO_DBG,
- "%s: Rx Ring %d is full\n",
- dev->name, i);
- break;
- }
- }
- clear_bit(0, (&sp->tasklet_status));
- }
-}
-
-/**
- * s2io_set_link - Set the LInk status
- * @data: long pointer to device private structue
+ * s2io_set_link - Set the Link status
+ * @data: long pointer to device private structure
* Description: Sets the link status for the adapter
val64 |= ADAPTER_LED_ON;
writeq(val64, &bar0->adapter_control);
}
- nic->device_enabled_once = TRUE;
+ nic->device_enabled_once = true;
} else {
DBG_PRINT(ERR_DBG, "%s: Error: ", dev->name);
DBG_PRINT(ERR_DBG, "device is not Quiescent\n");
pci_map_single( sp->pdev, (*skb)->data,
size - NET_IP_ALIGN,
PCI_DMA_FROMDEVICE);
- if( (rxdp1->Buffer0_ptr == 0) ||
- (rxdp1->Buffer0_ptr == DMA_ERROR_CODE)) {
+ if (pci_dma_mapping_error(sp->pdev, rxdp1->Buffer0_ptr))
goto memalloc_failed;
- }
rxdp->Host_Control = (unsigned long) (*skb);
}
} else if ((sp->rxd_mode == RXD_MODE_3B) && (rxdp->Host_Control == 0)) {
pci_map_single(sp->pdev, (*skb)->data,
dev->mtu + 4,
PCI_DMA_FROMDEVICE);
- if( (rxdp3->Buffer2_ptr == 0) ||
- (rxdp3->Buffer2_ptr == DMA_ERROR_CODE)) {
+ if (pci_dma_mapping_error(sp->pdev, rxdp3->Buffer2_ptr))
goto memalloc_failed;
- }
rxdp3->Buffer0_ptr = *temp0 =
pci_map_single( sp->pdev, ba->ba_0, BUF0_LEN,
PCI_DMA_FROMDEVICE);
- if( (rxdp3->Buffer0_ptr == 0) ||
- (rxdp3->Buffer0_ptr == DMA_ERROR_CODE)) {
+ if (pci_dma_mapping_error(sp->pdev,
+ rxdp3->Buffer0_ptr)) {
pci_unmap_single (sp->pdev,
(dma_addr_t)rxdp3->Buffer2_ptr,
dev->mtu + 4, PCI_DMA_FROMDEVICE);
rxdp3->Buffer1_ptr = *temp1 =
pci_map_single(sp->pdev, ba->ba_1, BUF1_LEN,
PCI_DMA_FROMDEVICE);
- if( (rxdp3->Buffer1_ptr == 0) ||
- (rxdp3->Buffer1_ptr == DMA_ERROR_CODE)) {
+ if (pci_dma_mapping_error(sp->pdev,
+ rxdp3->Buffer1_ptr)) {
pci_unmap_single (sp->pdev,
(dma_addr_t)rxdp3->Buffer0_ptr,
BUF0_LEN, PCI_DMA_FROMDEVICE);
&skb,(u64 *)&temp0_64,
(u64 *)&temp1_64,
(u64 *)&temp2_64,
- size) == ENOMEM) {
+ size) == -ENOMEM) {
return 0;
}
/* After proper initialization of H/W, register ISR */
if (sp->config.intr_type == MSI_X) {
- int i, msix_tx_cnt=0,msix_rx_cnt=0;
-
- for (i=1; (sp->s2io_entries[i].in_use == MSIX_FLG); i++) {
- if (sp->s2io_entries[i].type == MSIX_FIFO_TYPE) {
- sprintf(sp->desc[i], "%s:MSI-X-%d-TX",
+ int i, msix_rx_cnt = 0;
+
+ for (i = 0; i < sp->num_entries; i++) {
+ if (sp->s2io_entries[i].in_use == MSIX_FLG) {
+ if (sp->s2io_entries[i].type ==
+ MSIX_RING_TYPE) {
+ sprintf(sp->desc[i], "%s:MSI-X-%d-RX",
+ dev->name, i);
+ err = request_irq(sp->entries[i].vector,
+ s2io_msix_ring_handle, 0,
+ sp->desc[i],
+ sp->s2io_entries[i].arg);
+ } else if (sp->s2io_entries[i].type ==
+ MSIX_ALARM_TYPE) {
+ sprintf(sp->desc[i], "%s:MSI-X-%d-TX",
dev->name, i);
- err = request_irq(sp->entries[i].vector,
- s2io_msix_fifo_handle, 0, sp->desc[i],
- sp->s2io_entries[i].arg);
- /* If either data or addr is zero print it */
- if(!(sp->msix_info[i].addr &&
- sp->msix_info[i].data)) {
- DBG_PRINT(ERR_DBG, "%s @ Addr:0x%llx "
- "Data:0x%llx\n",sp->desc[i],
- (unsigned long long)
- sp->msix_info[i].addr,
- (unsigned long long)
- sp->msix_info[i].data);
- } else {
- msix_tx_cnt++;
+ err = request_irq(sp->entries[i].vector,
+ s2io_msix_fifo_handle, 0,
+ sp->desc[i],
+ sp->s2io_entries[i].arg);
+
}
- } else {
- sprintf(sp->desc[i], "%s:MSI-X-%d-RX",
- dev->name, i);
- err = request_irq(sp->entries[i].vector,
- s2io_msix_ring_handle, 0, sp->desc[i],
- sp->s2io_entries[i].arg);
- /* If either data or addr is zero print it */
- if(!(sp->msix_info[i].addr &&
+ /* if either data or addr is zero print it. */
+ if (!(sp->msix_info[i].addr &&
sp->msix_info[i].data)) {
- DBG_PRINT(ERR_DBG, "%s @ Addr:0x%llx "
- "Data:0x%llx\n",sp->desc[i],
+ DBG_PRINT(ERR_DBG,
+ "%s @Addr:0x%llx Data:0x%llx\n",
+ sp->desc[i],
(unsigned long long)
sp->msix_info[i].addr,
(unsigned long long)
- sp->msix_info[i].data);
- } else {
+ ntohl(sp->msix_info[i].data));
+ } else
msix_rx_cnt++;
+ if (err) {
+ remove_msix_isr(sp);
+
+ DBG_PRINT(ERR_DBG,
+ "%s:MSI-X-%d registration "
+ "failed\n", dev->name, i);
+
+ DBG_PRINT(ERR_DBG,
+ "%s: Defaulting to INTA\n",
+ dev->name);
+ sp->config.intr_type = INTA;
+ break;
}
+ sp->s2io_entries[i].in_use =
+ MSIX_REGISTERED_SUCCESS;
}
- if (err) {
- remove_msix_isr(sp);
- DBG_PRINT(ERR_DBG,"%s:MSI-X-%d registration "
- "failed\n", dev->name, i);
- DBG_PRINT(ERR_DBG, "%s: defaulting to INTA\n",
- dev->name);
- sp->config.intr_type = INTA;
- break;
- }
- sp->s2io_entries[i].in_use = MSIX_REGISTERED_SUCCESS;
}
if (!err) {
- printk(KERN_INFO "MSI-X-TX %d entries enabled\n",
- msix_tx_cnt);
printk(KERN_INFO "MSI-X-RX %d entries enabled\n",
- msix_rx_cnt);
+ --msix_rx_cnt);
+ DBG_PRINT(INFO_DBG, "MSI-X-TX entries enabled"
+ " through alarm vector\n");
}
}
if (sp->config.intr_type == INTA) {
{
int cnt = 0;
struct XENA_dev_config __iomem *bar0 = sp->bar0;
- unsigned long flags;
register u64 val64 = 0;
struct config_param *config;
config = &sp->config;
clear_bit(__S2IO_STATE_CARD_UP, &sp->state);
/* Disable napi */
- if (config->napi)
- napi_disable(&sp->napi);
+	if (sp->config.napi) {
+		int off = 0;
+
+		if (config->intr_type == MSI_X) {
+			for (; off < sp->config.rx_ring_num; off++)
+				napi_disable(&sp->mac_control.rings[off].napi);
+		} else
+			napi_disable(&sp->napi);
+	}
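+	/*
+	 * Note: s2io_card_up() mirrors this teardown, enabling one NAPI
+	 * context per rx ring (&mac_control->rings[i].napi) under MSI-X,
+	 * or the single &sp->napi context under INTA.
+	 */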
/* disable Tx and Rx traffic on the NIC */
if (do_io)
s2io_rem_isr(sp);
- /* Kill tasklet. */
- tasklet_kill(&sp->task);
+ /* stop the tx queue, indicate link down */
+ s2io_link(sp, LINK_DOWN);
/* Check if the device is Quiescent and then Reset the NIC */
while(do_io) {
free_tx_buffers(sp);
/* Free all Rx buffers */
- spin_lock_irqsave(&sp->rx_lock, flags);
free_rx_buffers(sp);
- spin_unlock_irqrestore(&sp->rx_lock, flags);
clear_bit(__S2IO_STATE_LINK_TASK, &(sp->state));
}
config = &sp->config;
for (i = 0; i < config->rx_ring_num; i++) {
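+		/*
+		 * Each ring now carries its own mtu and refill state, so
+		 * fill_rx_buffers() is handed the ring_info itself rather
+		 * than a bare ring index.
+		 */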
- if ((ret = fill_rx_buffers(sp, i))) {
+ mac_control->rings[i].mtu = dev->mtu;
+ ret = fill_rx_buffers(sp, &mac_control->rings[i], 1);
+ if (ret) {
DBG_PRINT(ERR_DBG, "%s: Out of memory in Open\n",
dev->name);
s2io_reset(sp);
return -ENOMEM;
}
DBG_PRINT(INFO_DBG, "Buf in ring:%d is %d:\n", i,
- atomic_read(&sp->rx_bufs_left[i]));
+ mac_control->rings[i].rx_bufs_left);
}
/* Initialise napi */
- if (config->napi)
- napi_enable(&sp->napi);
+ if (config->napi) {
+ if (config->intr_type == MSI_X) {
+ for (i = 0; i < sp->config.rx_ring_num; i++)
+ napi_enable(&sp->mac_control.rings[i].napi);
+ } else {
+ napi_enable(&sp->napi);
+ }
+ }
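+	/* NAPI contexts are armed before the NIC's interrupts are enabled below */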
/* Maintain the state prior to the open */
if (sp->promisc_flg)
S2IO_TIMER_CONF(sp->alarm_timer, s2io_alarm_handle, sp, (HZ/2));
- /* Enable tasklet for the device */
- tasklet_init(&sp->task, s2io_tasklet, (unsigned long) dev);
+ set_bit(__S2IO_STATE_CARD_UP, &sp->state);
/* Enable select interrupts */
en_dis_err_alarms(sp, ENA_ALL_INTRS, ENABLE_INTRS);
- if (sp->config.intr_type != INTA)
- en_dis_able_nic_intrs(sp, ENA_ALL_INTRS, DISABLE_INTRS);
- else {
+ if (sp->config.intr_type != INTA) {
+ interruptible = TX_TRAFFIC_INTR | TX_PIC_INTR;
+ en_dis_able_nic_intrs(sp, interruptible, ENABLE_INTRS);
+ } else {
interruptible = TX_TRAFFIC_INTR | RX_TRAFFIC_INTR;
interruptible |= TX_PIC_INTR;
en_dis_able_nic_intrs(sp, interruptible, ENABLE_INTRS);
}
- set_bit(__S2IO_STATE_CARD_UP, &sp->state);
return 0;
}
static void s2io_tx_watchdog(struct net_device *dev)
{
- struct s2io_nic *sp = dev->priv;
+ struct s2io_nic *sp = netdev_priv(dev);
if (netif_carrier_ok(dev)) {
sp->mac_control.stats_info->sw_stat.watchdog_timer_cnt++;
static int rx_osm_handler(struct ring_info *ring_data, struct RxD_t * rxdp)
{
struct s2io_nic *sp = ring_data->nic;
- struct net_device *dev = (struct net_device *) sp->dev;
+ struct net_device *dev = (struct net_device *) ring_data->dev;
struct sk_buff *skb = (struct sk_buff *)
((unsigned long) rxdp->Host_Control);
int ring_no = ring_data->ring_no;
u16 l3_csum, l4_csum;
unsigned long long err = rxdp->Control_1 & RXD_T_CODE;
- struct lro *lro;
+ struct lro *uninitialized_var(lro);
u8 err_mask;
skb->dev = dev;
if (err_mask != 0x5) {
DBG_PRINT(ERR_DBG, "%s: Rx error Value: 0x%x\n",
dev->name, err_mask);
- sp->stats.rx_crc_errors++;
+ dev->stats.rx_crc_errors++;
sp->mac_control.stats_info->sw_stat.mem_freed
+= skb->truesize;
dev_kfree_skb(skb);
- atomic_dec(&sp->rx_bufs_left[ring_no]);
+ ring_data->rx_bufs_left -= 1;
rxdp->Host_Control = 0;
return 0;
}
}
/* Updating statistics */
- sp->stats.rx_packets++;
+ ring_data->rx_packets++;
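+	/* rx counters now live in ring_info rather than in sp->stats */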
rxdp->Host_Control = 0;
if (sp->rxd_mode == RXD_MODE_1) {
int len = RXD_GET_BUFFER0_SIZE_1(rxdp->Control_2);
- sp->stats.rx_bytes += len;
+ ring_data->rx_bytes += len;
skb_put(skb, len);
} else if (sp->rxd_mode == RXD_MODE_3B) {
unsigned char *buff = skb_push(skb, buf0_len);
struct buffAdd *ba = &ring_data->ba[get_block][get_off];
- sp->stats.rx_bytes += buf0_len + buf2_len;
+ ring_data->rx_bytes += buf0_len + buf2_len;
memcpy(buff, ba->ba_0, buf0_len);
skb_put(skb, buf2_len);
}
- if ((rxdp->Control_1 & TCP_OR_UDP_FRAME) && ((!sp->lro) ||
- (sp->lro && (!(rxdp->Control_1 & RXD_FRAME_IP_FRAG)))) &&
+ if ((rxdp->Control_1 & TCP_OR_UDP_FRAME) && ((!ring_data->lro) ||
+ (ring_data->lro && (!(rxdp->Control_1 & RXD_FRAME_IP_FRAG)))) &&
(sp->rx_csum)) {
l3_csum = RXD_GET_L3_CKSUM(rxdp->Control_1);
l4_csum = RXD_GET_L4_CKSUM(rxdp->Control_1);
* a flag in the RxD.
*/
skb->ip_summed = CHECKSUM_UNNECESSARY;
- if (sp->lro) {
+ if (ring_data->lro) {
u32 tcp_len;
u8 *tcp;
int ret = 0;
- ret = s2io_club_tcp_session(skb->data, &tcp,
- &tcp_len, &lro,
- rxdp, sp);
+ ret = s2io_club_tcp_session(ring_data,
+ skb->data, &tcp, &tcp_len, &lro,
+ rxdp, sp);
switch (ret) {
case 3: /* Begin anew */
lro->parent = skb;
default:
DBG_PRINT(ERR_DBG,
"%s: Samadhana!!\n",
- __FUNCTION__);
+ __func__);
BUG();
}
}
sp->mac_control.stats_info->sw_stat.mem_freed += skb->truesize;
send_up:
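+	/* tag the skb with the rx queue (ring) it arrived on */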
+ skb_record_rx_queue(skb, ring_no);
queue_rx_frame(skb, RXD_GET_VLAN_TAG(rxdp->Control_2));
- dev->last_rx = jiffies;
aggregate:
- atomic_dec(&sp->rx_bufs_left[ring_no]);
+ sp->mac_control.rings[ring_no].rx_bufs_left -= 1;
return SUCCESS;
}
DBG_PRINT(ERR_DBG, "tx fifos\n");
}
-#ifndef CONFIG_NETDEVICES_MULTIQUEUE
- if (multiq) {
- DBG_PRINT(ERR_DBG, "s2io: Multiqueue support not enabled\n");
- multiq = 0;
- }
-#endif
if (multiq)
*dev_multiq = multiq;
tx_steering_type = NO_STEERING;
}
- if ( rx_ring_num > 8) {
- DBG_PRINT(ERR_DBG, "s2io: Requested number of Rx rings not "
+ if (rx_ring_num > MAX_RX_RINGS) {
+ DBG_PRINT(ERR_DBG, "s2io: Requested number of rx rings not "
"supported\n");
- DBG_PRINT(ERR_DBG, "s2io: Default to 8 Rx rings\n");
- rx_ring_num = 8;
+ DBG_PRINT(ERR_DBG, "s2io: Default to %d rx rings\n",
+ MAX_RX_RINGS);
+ rx_ring_num = MAX_RX_RINGS;
}
- if (*dev_intr_type != INTA)
- napi = 0;
if ((*dev_intr_type != INTA) && (*dev_intr_type != MSI_X)) {
DBG_PRINT(ERR_DBG, "s2io: Wrong intr_type requested. "
S2IO_BIT_RESET);
}
+static const struct net_device_ops s2io_netdev_ops = {
+ .ndo_open = s2io_open,
+ .ndo_stop = s2io_close,
+ .ndo_get_stats = s2io_get_stats,
+ .ndo_start_xmit = s2io_xmit,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_set_multicast_list = s2io_set_multicast,
+ .ndo_do_ioctl = s2io_ioctl,
+ .ndo_set_mac_address = s2io_set_mac_addr,
+ .ndo_change_mtu = s2io_change_mtu,
+ .ndo_vlan_rx_register = s2io_vlan_rx_register,
+ .ndo_vlan_rx_kill_vid = s2io_vlan_rx_kill_vid,
+ .ndo_tx_timeout = s2io_tx_watchdog,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ .ndo_poll_controller = s2io_netpoll,
+#endif
+};
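+/*
+ * The ops table replaces the individual function pointers previously
+ * assigned on struct net_device; hooking it up is now a single
+ * assignment in s2io_init_nic():
+ *
+ *	dev->netdev_ops = &s2io_netdev_ops;
+ */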
+
/**
* s2io_init_nic - Initialization of the adapter .
* @pdev : structure containing the PCI related information of the device.
struct s2io_nic *sp;
struct net_device *dev;
int i, j, ret;
- int dma_flag = FALSE;
+ int dma_flag = false;
u32 mac_up, mac_down;
u64 val64 = 0, tmp64 = 0;
struct XENA_dev_config __iomem *bar0 = NULL;
int mode;
u8 dev_intr_type = intr_type;
u8 dev_multiq = 0;
- DECLARE_MAC_BUF(mac);
ret = s2io_verify_parm(pdev, &dev_intr_type, &dev_multiq);
if (ret)
return ret;
}
- if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
+ if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
DBG_PRINT(INIT_DBG, "s2io_init_nic: Using 64bit DMA\n");
- dma_flag = TRUE;
+ dma_flag = true;
if (pci_set_consistent_dma_mask
- (pdev, DMA_64BIT_MASK)) {
+ (pdev, DMA_BIT_MASK(64))) {
DBG_PRINT(ERR_DBG,
"Unable to obtain 64bit DMA for \
consistent allocations\n");
pci_disable_device(pdev);
return -ENOMEM;
}
- } else if (!pci_set_dma_mask(pdev, DMA_32BIT_MASK)) {
+ } else if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) {
DBG_PRINT(INIT_DBG, "s2io_init_nic: Using 32bit DMA\n");
} else {
pci_disable_device(pdev);
return -ENOMEM;
}
if ((ret = pci_request_regions(pdev, s2io_driver_name))) {
- DBG_PRINT(ERR_DBG, "%s: Request Regions failed - %x \n", __FUNCTION__, ret);
+ DBG_PRINT(ERR_DBG, "%s: Request Regions failed - %x \n", __func__, ret);
pci_disable_device(pdev);
return -ENODEV;
}
-#ifdef CONFIG_NETDEVICES_MULTIQUEUE
if (dev_multiq)
dev = alloc_etherdev_mq(sizeof(struct s2io_nic), tx_fifo_num);
else
-#endif
- dev = alloc_etherdev(sizeof(struct s2io_nic));
+ dev = alloc_etherdev(sizeof(struct s2io_nic));
if (dev == NULL) {
DBG_PRINT(ERR_DBG, "Device allocation failed\n");
pci_disable_device(pdev);
SET_NETDEV_DEV(dev, &pdev->dev);
/* Private member variable initialized to s2io NIC structure */
- sp = dev->priv;
+ sp = netdev_priv(dev);
memset(sp, 0, sizeof(struct s2io_nic));
sp->dev = dev;
sp->pdev = pdev;
sp->high_dma_flag = dma_flag;
- sp->device_enabled_once = FALSE;
+ sp->device_enabled_once = false;
if (rx_ring_mode == 1)
sp->rxd_mode = RXD_MODE_1;
if (rx_ring_mode == 2)
/* Rx side parameters. */
config->rx_ring_num = rx_ring_num;
- for (i = 0; i < MAX_RX_RINGS; i++) {
+ for (i = 0; i < config->rx_ring_num; i++) {
config->rx_cfg[i].num_rxd = rx_ring_sz[i] *
(rxd_count[sp->rxd_mode] + 1);
config->rx_cfg[i].ring_priority = i;
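+		/*
+		 * Make each ring self-describing so the rx fast path and
+		 * refill code can work from the ring_info alone instead of
+		 * reaching back into struct s2io_nic.
+		 */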
+ mac_control->rings[i].rx_bufs_left = 0;
+ mac_control->rings[i].rxd_mode = sp->rxd_mode;
+ mac_control->rings[i].rxd_count = rxd_count[sp->rxd_mode];
+ mac_control->rings[i].pdev = sp->pdev;
+ mac_control->rings[i].dev = sp->dev;
}
for (i = 0; i < rx_ring_num; i++) {
mac_control->mc_pause_threshold_q4q7 = mc_pause_threshold_q4q7;
- /* Initialize Ring buffer parameters. */
- for (i = 0; i < config->rx_ring_num; i++)
- atomic_set(&sp->rx_bufs_left[i], 0);
-
/* initialize the shared memory used by the NIC and the host */
if (init_shared_mem(sp)) {
DBG_PRINT(ERR_DBG, "%s: Memory allocation failed\n",
goto mem_alloc_failed;
}
- sp->bar0 = ioremap(pci_resource_start(pdev, 0),
- pci_resource_len(pdev, 0));
+ sp->bar0 = pci_ioremap_bar(pdev, 0);
if (!sp->bar0) {
DBG_PRINT(ERR_DBG, "%s: Neterion: cannot remap io mem1\n",
dev->name);
goto bar0_remap_failed;
}
- sp->bar1 = ioremap(pci_resource_start(pdev, 2),
- pci_resource_len(pdev, 2));
+ sp->bar1 = pci_ioremap_bar(pdev, 2);
if (!sp->bar1) {
DBG_PRINT(ERR_DBG, "%s: Neterion: cannot remap io mem2\n",
dev->name);
}
/* Driver entry points */
- dev->open = &s2io_open;
- dev->stop = &s2io_close;
- dev->hard_start_xmit = &s2io_xmit;
- dev->get_stats = &s2io_get_stats;
- dev->set_multicast_list = &s2io_set_multicast;
- dev->do_ioctl = &s2io_ioctl;
- dev->set_mac_address = &s2io_set_mac_addr;
- dev->change_mtu = &s2io_change_mtu;
+ dev->netdev_ops = &s2io_netdev_ops;
SET_ETHTOOL_OPS(dev, &netdev_ethtool_ops);
dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
- dev->vlan_rx_register = s2io_vlan_rx_register;
- dev->vlan_rx_kill_vid = (void *)s2io_vlan_rx_kill_vid;
-
- /*
- * will use eth_mac_addr() for dev->set_mac_address
- * mac address will be set every time dev->open() is called
- */
- netif_napi_add(dev, &sp->napi, s2io_poll, 32);
-
-#ifdef CONFIG_NET_POLL_CONTROLLER
- dev->poll_controller = s2io_netpoll;
-#endif
dev->features |= NETIF_F_SG | NETIF_F_IP_CSUM;
- if (sp->high_dma_flag == TRUE)
+	if (sp->high_dma_flag)
dev->features |= NETIF_F_HIGHDMA;
dev->features |= NETIF_F_TSO;
dev->features |= NETIF_F_TSO6;
dev->features |= NETIF_F_UFO;
dev->features |= NETIF_F_HW_CSUM;
}
-#ifdef CONFIG_NETDEVICES_MULTIQUEUE
- if (config->multiq)
- dev->features |= NETIF_F_MULTI_QUEUE;
-#endif
- dev->tx_timeout = &s2io_tx_watchdog;
dev->watchdog_timeo = WATCH_DOG_TIMEOUT;
INIT_WORK(&sp->rst_timer_task, s2io_restart_nic);
INIT_WORK(&sp->set_link_task, s2io_set_link);
if (sp->device_type & XFRAME_II_DEVICE) {
mode = s2io_verify_pci_mode(sp);
if (mode < 0) {
- DBG_PRINT(ERR_DBG, "%s: ", __FUNCTION__);
+ DBG_PRINT(ERR_DBG, "%s: ", __func__);
DBG_PRINT(ERR_DBG, " Unsupported PCI bus mode\n");
ret = -EBADSLT;
goto set_swap_failed;
}
}
+ if (sp->config.intr_type == MSI_X) {
+ sp->num_entries = config->rx_ring_num + 1;
+ ret = s2io_enable_msi_x(sp);
+
+ if (!ret) {
+ ret = s2io_test_msi(sp);
+ /* rollback MSI-X, will re-enable during add_isr() */
+ remove_msix_isr(sp);
+ }
+ if (ret) {
+ DBG_PRINT(ERR_DBG,
+ "s2io: MSI-X requested but failed to enable\n");
+ sp->config.intr_type = INTA;
+ }
+ }
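+	/*
+	 * Probe-time policy (sketch): enable MSI-X, verify delivery with
+	 * s2io_test_msi(), then tear the test ISRs down again; the
+	 * permanent handlers are registered later, during add_isr(). Any
+	 * failure along the way falls back to INTA.
+	 */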
+
+ if (config->intr_type == MSI_X) {
+		for (i = 0; i < config->rx_ring_num; i++)
+ netif_napi_add(dev, &mac_control->rings[i].napi,
+ s2io_poll_msix, 64);
+ } else {
+ netif_napi_add(dev, &sp->napi, s2io_poll_inta, 64);
+ }
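+	/*
+	 * NAPI registration sketch: one context per rx ring
+	 * (s2io_poll_msix) under MSI-X, or a single device-wide context
+	 * (s2io_poll_inta) otherwise; both poll with a budget of 64.
+	 */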
+
/* Not needed for Herc */
if (sp->device_type & XFRAME_I_DEVICE) {
/*
/* store mac addresses from CAM to s2io_nic structure */
do_s2io_store_unicast_mc(sp);
+ /* Configure MSIX vector for number of rings configured plus one */
+ if ((sp->device_type == XFRAME_II_DEVICE) &&
+ (config->intr_type == MSI_X))
+ sp->num_entries = config->rx_ring_num + 1;
+
/* Store the values of the MSIX table in the s2io_nic structure */
store_xmsi_data(sp);
/* reset Nic and bring it to known state */
s2io_reset(sp);
/*
- * Initialize the tasklet status and link state flags
+ * Initialize link state flags
* and the card state parameter
*/
- sp->tasklet_status = 0;
sp->state = 0;
/* Initialize spinlocks */
for (i = 0; i < sp->config.tx_fifo_num; i++)
spin_lock_init(&mac_control->fifos[i].tx_lock);
- if (!napi)
- spin_lock_init(&sp->put_lock);
- spin_lock_init(&sp->rx_lock);
-
/*
* SXE-002: Configure link and activity LED to init state
* on driver load.
sp->product_name, pdev->revision);
DBG_PRINT(ERR_DBG, "%s: Driver version %s\n", dev->name,
s2io_driver_version);
- DBG_PRINT(ERR_DBG, "%s: MAC ADDR: %s\n",
- dev->name, print_mac(mac, dev->dev_addr));
+ DBG_PRINT(ERR_DBG, "%s: MAC ADDR: %pM\n", dev->name, dev->dev_addr);
DBG_PRINT(ERR_DBG, "SERIAL NUMBER: %s\n", sp->serial_num);
if (sp->device_type & XFRAME_II_DEVICE) {
mode = s2io_print_pci_mode(sp);
break;
}
- if (napi)
+ switch (sp->config.napi) {
+ case 0:
+ DBG_PRINT(ERR_DBG, "%s: NAPI disabled\n", dev->name);
+ break;
+ case 1:
DBG_PRINT(ERR_DBG, "%s: NAPI enabled\n", dev->name);
+ break;
+ }
DBG_PRINT(ERR_DBG, "%s: Using %d Tx fifo(s)\n", dev->name,
sp->config.tx_fifo_num);
+ DBG_PRINT(ERR_DBG, "%s: Using %d Rx ring(s)\n", dev->name,
+ sp->config.rx_ring_num);
+
switch(sp->config.intr_type) {
case INTA:
DBG_PRINT(ERR_DBG, "%s: Interrupt type INTA\n", dev->name);
break;
}
if (sp->config.multiq) {
- for (i = 0; i < sp->config.tx_fifo_num; i++)
- mac_control->fifos[i].multiq = config->multiq;
+ for (i = 0; i < sp->config.tx_fifo_num; i++)
+ mac_control->fifos[i].multiq = config->multiq;
DBG_PRINT(ERR_DBG, "%s: Multiqueue support enabled\n",
dev->name);
} else
/* Initialize device name */
sprintf(sp->name, "%s Neterion %s", dev->name, sp->product_name);
+ if (vlan_tag_strip)
+ sp->vlan_strip_flag = 1;
+ else
+ sp->vlan_strip_flag = 0;
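+	/*
+	 * The strip flag is per adapter now (sp->vlan_strip_flag); the LRO
+	 * and rx-queueing paths below consult it instead of a module-wide
+	 * flag.
+	 */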
+
/*
* Make Link state as off at this point, when the Link change
* interrupt comes the state will be automatically changed to
flush_scheduled_work();
- sp = dev->priv;
+ sp = netdev_priv(dev);
unregister_netdev(dev);
free_shared_mem(sp);
if (!(rxdp->Control_1 & RXD_FRAME_PROTO_TCP)) {
DBG_PRINT(INIT_DBG,"%s: Non-TCP frames not supported for LRO\n",
- __FUNCTION__);
+ __func__);
return -1;
}
* If vlan stripping is disabled and the frame is VLAN tagged,
* shift the offset by the VLAN header size bytes.
*/
- if ((!vlan_strip_flag) &&
+ if ((!sp->vlan_strip_flag) &&
(rxdp->Control_1 & RXD_FRAME_VLAN_TAG))
ip_off += HEADER_VLAN_SIZE;
} else {
static int check_for_socket_match(struct lro *lro, struct iphdr *ip,
struct tcphdr *tcp)
{
- DBG_PRINT(INFO_DBG,"%s: Been here...\n", __FUNCTION__);
+ DBG_PRINT(INFO_DBG,"%s: Been here...\n", __func__);
if ((lro->iph->saddr != ip->saddr) || (lro->iph->daddr != ip->daddr) ||
(lro->tcph->source != tcp->source) || (lro->tcph->dest != tcp->dest))
return -1;
static void initiate_new_session(struct lro *lro, u8 *l2h,
struct iphdr *ip, struct tcphdr *tcp, u32 tcp_pyld_len, u16 vlan_tag)
{
- DBG_PRINT(INFO_DBG,"%s: Been here...\n", __FUNCTION__);
+ DBG_PRINT(INFO_DBG,"%s: Been here...\n", __func__);
lro->l2h = l2h;
lro->iph = ip;
lro->tcph = tcp;
struct tcphdr *tcp = lro->tcph;
__sum16 nchk;
struct stat_block *statinfo = sp->mac_control.stats_info;
- DBG_PRINT(INFO_DBG,"%s: Been here...\n", __FUNCTION__);
+ DBG_PRINT(INFO_DBG,"%s: Been here...\n", __func__);
/* Update L3 header */
ip->tot_len = htons(lro->total_len);
static void aggregate_new_rx(struct lro *lro, struct iphdr *ip,
struct tcphdr *tcp, u32 l4_pyld)
{
- DBG_PRINT(INFO_DBG,"%s: Been here...\n", __FUNCTION__);
+ DBG_PRINT(INFO_DBG,"%s: Been here...\n", __func__);
lro->total_len += l4_pyld;
lro->frags_len += l4_pyld;
lro->tcp_next_seq += l4_pyld;
{
u8 *ptr;
- DBG_PRINT(INFO_DBG,"%s: Been here...\n", __FUNCTION__);
+ DBG_PRINT(INFO_DBG,"%s: Been here...\n", __func__);
if (!tcp_pyld_len) {
/* Runt frame or a pure ack */
}
static int
-s2io_club_tcp_session(u8 *buffer, u8 **tcp, u32 *tcp_len, struct lro **lro,
- struct RxD_t *rxdp, struct s2io_nic *sp)
+s2io_club_tcp_session(struct ring_info *ring_data, u8 *buffer, u8 **tcp,
+ u32 *tcp_len, struct lro **lro, struct RxD_t *rxdp,
+ struct s2io_nic *sp)
{
struct iphdr *ip;
struct tcphdr *tcph;
tcph = (struct tcphdr *)*tcp;
*tcp_len = get_l4_pyld_length(ip, tcph);
for (i=0; i<MAX_LRO_SESSIONS; i++) {
- struct lro *l_lro = &sp->lro0_n[i];
+ struct lro *l_lro = &ring_data->lro0_n[i];
if (l_lro->in_use) {
if (check_for_socket_match(l_lro, ip, tcph))
continue;
if ((*lro)->tcp_next_seq != ntohl(tcph->seq)) {
DBG_PRINT(INFO_DBG, "%s:Out of order. expected "
- "0x%x, actual 0x%x\n", __FUNCTION__,
+ "0x%x, actual 0x%x\n", __func__,
(*lro)->tcp_next_seq,
ntohl(tcph->seq));
}
for (i=0; i<MAX_LRO_SESSIONS; i++) {
- struct lro *l_lro = &sp->lro0_n[i];
+ struct lro *l_lro = &ring_data->lro0_n[i];
if (!(l_lro->in_use)) {
*lro = l_lro;
ret = 3; /* Begin anew */
if (ret == 0) { /* sessions exceeded */
DBG_PRINT(INFO_DBG,"%s:All LRO sessions already in use\n",
- __FUNCTION__);
+ __func__);
*lro = NULL;
return ret;
}
break;
default:
DBG_PRINT(ERR_DBG,"%s:Dont know, can't say!!\n",
- __FUNCTION__);
+ __func__);
break;
}
static void queue_rx_frame(struct sk_buff *skb, u16 vlan_tag)
{
struct net_device *dev = skb->dev;
- struct s2io_nic *sp = dev->priv;
+ struct s2io_nic *sp = netdev_priv(dev);
skb->protocol = eth_type_trans(skb, dev);
if (sp->vlgrp && vlan_tag
- && (vlan_strip_flag)) {
+ && (sp->vlan_strip_flag)) {
/* Queueing the vlan frame to the upper layer */
if (sp->config.napi)
vlan_hwaccel_receive_skb(skb, sp->vlgrp, vlan_tag);
pci_channel_state_t state)
{
struct net_device *netdev = pci_get_drvdata(pdev);
- struct s2io_nic *sp = netdev->priv;
+ struct s2io_nic *sp = netdev_priv(netdev);
netif_device_detach(netdev);
static pci_ers_result_t s2io_io_slot_reset(struct pci_dev *pdev)
{
struct net_device *netdev = pci_get_drvdata(pdev);
- struct s2io_nic *sp = netdev->priv;
+ struct s2io_nic *sp = netdev_priv(netdev);
if (pci_enable_device(pdev)) {
printk(KERN_ERR "s2io: "
static void s2io_io_resume(struct pci_dev *pdev)
{
struct net_device *netdev = pci_get_drvdata(pdev);
- struct s2io_nic *sp = netdev->priv;
+ struct s2io_nic *sp = netdev_priv(netdev);
if (netif_running(netdev)) {
if (s2io_card_up(sp)) {
}
netif_device_attach(netdev);
- netif_wake_queue(netdev);
+ netif_tx_wake_all_queues(netdev);
}