*
* TODO
* rx_copybreak/alignment
- * Scatter gather
* More testing
*
* The changes are (c) Copyright 2004, Red Hat Inc. <alan@lxorguk.ukuu.org.uk>
#define DMA_LENGTH_MIN 0
#define DMA_LENGTH_MAX 7
-#define DMA_LENGTH_DEF 0
+#define DMA_LENGTH_DEF 6
/* DMA_length[] is used for controlling the DMA length
0: 8 DWORDs
*/
VELOCITY_PARAM(IP_byte_align, "Enable IP header dword aligned");
-#define TX_CSUM_DEF 1
-/* txcsum_offload[] is used for setting the checksum offload ability of NIC.
- (We only support RX checksum offload now)
- 0: disable csum_offload[checksum offload
- 1: enable checksum offload. (Default)
-*/
-VELOCITY_PARAM(txcsum_offload, "Enable transmit packet checksum offload");
-
#define FLOW_CNTL_DEF 1
#define FLOW_CNTL_MIN 1
#define FLOW_CNTL_MAX 5
*/
VELOCITY_PARAM(wol_opts, "Wake On Lan options");
-#define INT_WORKS_DEF 20
-#define INT_WORKS_MIN 10
-#define INT_WORKS_MAX 64
-
-VELOCITY_PARAM(int_works, "Number of packets per interrupt services");
-
static int rx_copybreak = 200;
module_param(rx_copybreak, int, 0644);
MODULE_PARM_DESC(rx_copybreak, "Copy breakpoint for copy-only-tiny-frames");
* Describe the PCI device identifiers that we support in this
* device driver. Used for hotplug autoloading.
*/
-static const struct pci_device_id velocity_id_table[] __devinitdata = {
+static DEFINE_PCI_DEVICE_TABLE(velocity_id_table) = {
{ PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_612X) },
{ }
};
velocity_set_int_opt(&opts->numrx, RxDescriptors[index], RX_DESC_MIN, RX_DESC_MAX, RX_DESC_DEF, "RxDescriptors", devname);
velocity_set_int_opt(&opts->numtx, TxDescriptors[index], TX_DESC_MIN, TX_DESC_MAX, TX_DESC_DEF, "TxDescriptors", devname);
- velocity_set_bool_opt(&opts->flags, txcsum_offload[index], TX_CSUM_DEF, VELOCITY_FLAGS_TX_CSUM, "txcsum_offload", devname);
velocity_set_int_opt(&opts->flow_cntl, flow_control[index], FLOW_CNTL_MIN, FLOW_CNTL_MAX, FLOW_CNTL_DEF, "flow_control", devname);
velocity_set_bool_opt(&opts->flags, IP_byte_align[index], IP_ALIG_DEF, VELOCITY_FLAGS_IP_ALIGN, "IP_byte_align", devname);
velocity_set_bool_opt(&opts->flags, ValPktLen[index], VAL_PKT_LEN_DEF, VELOCITY_FLAGS_VAL_PKT_LEN, "ValPktLen", devname);
velocity_set_int_opt((int *) &opts->spd_dpx, speed_duplex[index], MED_LNK_MIN, MED_LNK_MAX, MED_LNK_DEF, "Media link mode", devname);
velocity_set_int_opt((int *) &opts->wol_opts, wol_opts[index], WOL_OPT_MIN, WOL_OPT_MAX, WOL_OPT_DEF, "Wake On Lan options", devname);
- velocity_set_int_opt((int *) &opts->int_works, int_works[index], INT_WORKS_MIN, INT_WORKS_MAX, INT_WORKS_DEF, "Interrupt service works", devname);
opts->numrx = (opts->numrx & ~3);
}
u32 status = 0;
u16 ANAR;
- if (!MII_REG_BITS_IS_ON(BMSR_LNK, MII_REG_BMSR, regs))
+ if (!MII_REG_BITS_IS_ON(BMSR_LSTATUS, MII_BMSR, regs))
status |= VELOCITY_LINK_FAIL;
- if (MII_REG_BITS_IS_ON(G1000CR_1000FD, MII_REG_G1000CR, regs))
+ if (MII_REG_BITS_IS_ON(ADVERTISE_1000FULL, MII_CTRL1000, regs))
status |= VELOCITY_SPEED_1000 | VELOCITY_DUPLEX_FULL;
- else if (MII_REG_BITS_IS_ON(G1000CR_1000, MII_REG_G1000CR, regs))
+ else if (MII_REG_BITS_IS_ON(ADVERTISE_1000HALF, MII_CTRL1000, regs))
status |= (VELOCITY_SPEED_1000);
else {
- velocity_mii_read(regs, MII_REG_ANAR, &ANAR);
- if (ANAR & ANAR_TXFD)
+ velocity_mii_read(regs, MII_ADVERTISE, &ANAR);
+ if (ANAR & ADVERTISE_100FULL)
status |= (VELOCITY_SPEED_100 | VELOCITY_DUPLEX_FULL);
- else if (ANAR & ANAR_TX)
+ else if (ANAR & ADVERTISE_100HALF)
status |= VELOCITY_SPEED_100;
- else if (ANAR & ANAR_10FD)
+ else if (ANAR & ADVERTISE_10FULL)
status |= (VELOCITY_SPEED_10 | VELOCITY_DUPLEX_FULL);
else
status |= (VELOCITY_SPEED_10);
}
- if (MII_REG_BITS_IS_ON(BMCR_AUTO, MII_REG_BMCR, regs)) {
- velocity_mii_read(regs, MII_REG_ANAR, &ANAR);
- if ((ANAR & (ANAR_TXFD | ANAR_TX | ANAR_10FD | ANAR_10))
- == (ANAR_TXFD | ANAR_TX | ANAR_10FD | ANAR_10)) {
- if (MII_REG_BITS_IS_ON(G1000CR_1000 | G1000CR_1000FD, MII_REG_G1000CR, regs))
+ if (MII_REG_BITS_IS_ON(BMCR_ANENABLE, MII_BMCR, regs)) {
+ velocity_mii_read(regs, MII_ADVERTISE, &ANAR);
+ if ((ANAR & (ADVERTISE_100FULL | ADVERTISE_100HALF | ADVERTISE_10FULL | ADVERTISE_10HALF))
+ == (ADVERTISE_100FULL | ADVERTISE_100HALF | ADVERTISE_10FULL | ADVERTISE_10HALF)) {
+ if (MII_REG_BITS_IS_ON(ADVERTISE_1000HALF | ADVERTISE_1000FULL, MII_CTRL1000, regs))
status |= VELOCITY_AUTONEG_ENABLE;
}
}
/* Enable or Disable PAUSE in ANAR */
switch (vptr->options.flow_cntl) {
case FLOW_CNTL_TX:
- MII_REG_BITS_OFF(ANAR_PAUSE, MII_REG_ANAR, vptr->mac_regs);
- MII_REG_BITS_ON(ANAR_ASMDIR, MII_REG_ANAR, vptr->mac_regs);
+ MII_REG_BITS_OFF(ADVERTISE_PAUSE_CAP, MII_ADVERTISE, vptr->mac_regs);
+ MII_REG_BITS_ON(ADVERTISE_PAUSE_ASYM, MII_ADVERTISE, vptr->mac_regs);
break;
case FLOW_CNTL_RX:
- MII_REG_BITS_ON(ANAR_PAUSE, MII_REG_ANAR, vptr->mac_regs);
- MII_REG_BITS_ON(ANAR_ASMDIR, MII_REG_ANAR, vptr->mac_regs);
+ MII_REG_BITS_ON(ADVERTISE_PAUSE_CAP, MII_ADVERTISE, vptr->mac_regs);
+ MII_REG_BITS_ON(ADVERTISE_PAUSE_ASYM, MII_ADVERTISE, vptr->mac_regs);
break;
case FLOW_CNTL_TX_RX:
- MII_REG_BITS_ON(ANAR_PAUSE, MII_REG_ANAR, vptr->mac_regs);
- MII_REG_BITS_ON(ANAR_ASMDIR, MII_REG_ANAR, vptr->mac_regs);
+ MII_REG_BITS_ON(ADVERTISE_PAUSE_CAP, MII_ADVERTISE, vptr->mac_regs);
+ MII_REG_BITS_OFF(ADVERTISE_PAUSE_ASYM, MII_ADVERTISE, vptr->mac_regs);
break;
case FLOW_CNTL_DISABLE:
- MII_REG_BITS_OFF(ANAR_PAUSE, MII_REG_ANAR, vptr->mac_regs);
- MII_REG_BITS_OFF(ANAR_ASMDIR, MII_REG_ANAR, vptr->mac_regs);
+ MII_REG_BITS_OFF(ADVERTISE_PAUSE_CAP, MII_ADVERTISE, vptr->mac_regs);
+ MII_REG_BITS_OFF(ADVERTISE_PAUSE_ASYM, MII_ADVERTISE, vptr->mac_regs);
break;
default:
break;
*/
static void mii_set_auto_on(struct velocity_info *vptr)
{
- if (MII_REG_BITS_IS_ON(BMCR_AUTO, MII_REG_BMCR, vptr->mac_regs))
- MII_REG_BITS_ON(BMCR_REAUTO, MII_REG_BMCR, vptr->mac_regs);
+ if (MII_REG_BITS_IS_ON(BMCR_ANENABLE, MII_BMCR, vptr->mac_regs))
+ MII_REG_BITS_ON(BMCR_ANRESTART, MII_BMCR, vptr->mac_regs);
else
- MII_REG_BITS_ON(BMCR_AUTO, MII_REG_BMCR, vptr->mac_regs);
+ MII_REG_BITS_ON(BMCR_ANENABLE, MII_BMCR, vptr->mac_regs);
}
static u32 check_connection_type(struct mac_regs __iomem *regs)
else
status |= VELOCITY_SPEED_100;
- if (MII_REG_BITS_IS_ON(BMCR_AUTO, MII_REG_BMCR, regs)) {
- velocity_mii_read(regs, MII_REG_ANAR, &ANAR);
- if ((ANAR & (ANAR_TXFD | ANAR_TX | ANAR_10FD | ANAR_10))
- == (ANAR_TXFD | ANAR_TX | ANAR_10FD | ANAR_10)) {
- if (MII_REG_BITS_IS_ON(G1000CR_1000 | G1000CR_1000FD, MII_REG_G1000CR, regs))
+ if (MII_REG_BITS_IS_ON(BMCR_ANENABLE, MII_BMCR, regs)) {
+ velocity_mii_read(regs, MII_ADVERTISE, &ANAR);
+ if ((ANAR & (ADVERTISE_100FULL | ADVERTISE_100HALF | ADVERTISE_10FULL | ADVERTISE_10HALF))
+ == (ADVERTISE_100FULL | ADVERTISE_100HALF | ADVERTISE_10FULL | ADVERTISE_10HALF)) {
+ if (MII_REG_BITS_IS_ON(ADVERTISE_1000HALF | ADVERTISE_1000FULL, MII_CTRL1000, regs))
status |= VELOCITY_AUTONEG_ENABLE;
}
}
/*
Check if new status is consistent with current status
- if (((mii_status & curr_status) & VELOCITY_AUTONEG_ENABLE)
- || (mii_status==curr_status)) {
+ if (((mii_status & curr_status) & VELOCITY_AUTONEG_ENABLE) ||
+ (mii_status==curr_status)) {
vptr->mii_status=mii_check_media_mode(vptr->mac_regs);
vptr->mii_status=check_connection_type(vptr->mac_regs);
VELOCITY_PRT(MSG_LEVEL_INFO, "Velocity link no change\n");
*/
if (PHYID_GET_PHY_ID(vptr->phy_id) == PHYID_CICADA_CS8201)
- MII_REG_BITS_ON(AUXCR_MDPPS, MII_REG_AUXCR, vptr->mac_regs);
+ MII_REG_BITS_ON(AUXCR_MDPPS, MII_NCONFIG, vptr->mac_regs);
/*
* If connection type is AUTO
/* clear force MAC mode bit */
BYTE_REG_BITS_OFF(CHIPGCR_FCMODE, &regs->CHIPGCR);
/* set duplex mode of MAC according to duplex mode of MII */
- MII_REG_BITS_ON(ANAR_TXFD | ANAR_TX | ANAR_10FD | ANAR_10, MII_REG_ANAR, vptr->mac_regs);
- MII_REG_BITS_ON(G1000CR_1000FD | G1000CR_1000, MII_REG_G1000CR, vptr->mac_regs);
- MII_REG_BITS_ON(BMCR_SPEED1G, MII_REG_BMCR, vptr->mac_regs);
+ MII_REG_BITS_ON(ADVERTISE_100FULL | ADVERTISE_100HALF | ADVERTISE_10FULL | ADVERTISE_10HALF, MII_ADVERTISE, vptr->mac_regs);
+ MII_REG_BITS_ON(ADVERTISE_1000FULL | ADVERTISE_1000HALF, MII_CTRL1000, vptr->mac_regs);
+ MII_REG_BITS_ON(BMCR_SPEED1000, MII_BMCR, vptr->mac_regs);
/* enable AUTO-NEGO mode */
mii_set_auto_on(vptr);
BYTE_REG_BITS_ON(TCR_TB2BDIS, &regs->TCR);
}
- MII_REG_BITS_OFF(G1000CR_1000FD | G1000CR_1000, MII_REG_G1000CR, vptr->mac_regs);
+ MII_REG_BITS_OFF(ADVERTISE_1000FULL | ADVERTISE_1000HALF, MII_CTRL1000, vptr->mac_regs);
if (!(mii_status & VELOCITY_DUPLEX_FULL) && (mii_status & VELOCITY_SPEED_10))
BYTE_REG_BITS_OFF(TESTCFG_HBDIS, &regs->TESTCFG);
else
BYTE_REG_BITS_ON(TESTCFG_HBDIS, &regs->TESTCFG);
- /* MII_REG_BITS_OFF(BMCR_SPEED1G, MII_REG_BMCR, vptr->mac_regs); */
- velocity_mii_read(vptr->mac_regs, MII_REG_ANAR, &ANAR);
- ANAR &= (~(ANAR_TXFD | ANAR_TX | ANAR_10FD | ANAR_10));
+ /* MII_REG_BITS_OFF(BMCR_SPEED1000, MII_BMCR, vptr->mac_regs); */
+ velocity_mii_read(vptr->mac_regs, MII_ADVERTISE, &ANAR);
+ ANAR &= (~(ADVERTISE_100FULL | ADVERTISE_100HALF | ADVERTISE_10FULL | ADVERTISE_10HALF));
if (mii_status & VELOCITY_SPEED_100) {
if (mii_status & VELOCITY_DUPLEX_FULL)
- ANAR |= ANAR_TXFD;
+ ANAR |= ADVERTISE_100FULL;
else
- ANAR |= ANAR_TX;
+ ANAR |= ADVERTISE_100HALF;
} else {
if (mii_status & VELOCITY_DUPLEX_FULL)
- ANAR |= ANAR_10FD;
+ ANAR |= ADVERTISE_10FULL;
else
- ANAR |= ANAR_10;
+ ANAR |= ADVERTISE_10HALF;
}
- velocity_mii_write(vptr->mac_regs, MII_REG_ANAR, ANAR);
+ velocity_mii_write(vptr->mac_regs, MII_ADVERTISE, ANAR);
/* enable AUTO-NEGO mode */
mii_set_auto_on(vptr);
- /* MII_REG_BITS_ON(BMCR_AUTO, MII_REG_BMCR, vptr->mac_regs); */
+ /* MII_REG_BITS_ON(BMCR_ANENABLE, MII_BMCR, vptr->mac_regs); */
}
/* vptr->mii_status=mii_check_media_mode(vptr->mac_regs); */
/* vptr->mii_status=check_connection_type(vptr->mac_regs); */
struct mac_regs __iomem *regs = vptr->mac_regs;
u8 rx_mode;
int i;
- struct dev_mc_list *mclist;
+ struct netdev_hw_addr *ha;
if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */
writel(0xffffffff, &regs->MARCAM[0]);
writel(0xffffffff, &regs->MARCAM[4]);
rx_mode = (RCR_AM | RCR_AB | RCR_PROM);
- } else if ((dev->mc_count > vptr->multicast_limit)
- || (dev->flags & IFF_ALLMULTI)) {
+ } else if ((netdev_mc_count(dev) > vptr->multicast_limit) ||
+ (dev->flags & IFF_ALLMULTI)) {
writel(0xffffffff, &regs->MARCAM[0]);
writel(0xffffffff, &regs->MARCAM[4]);
rx_mode = (RCR_AM | RCR_AB);
int offset = MCAM_SIZE - vptr->multicast_limit;
mac_get_cam_mask(regs, vptr->mCAMmask);
- for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count; i++, mclist = mclist->next) {
- mac_set_cam(regs, i + offset, mclist->dmi_addr);
+ i = 0;
+ netdev_for_each_mc_addr(ha, dev) {
+ mac_set_cam(regs, i + offset, ha->addr);
vptr->mCAMmask[(offset + i) / 8] |= 1 << ((offset + i) & 7);
+ i++;
}
mac_set_cam_mask(regs, vptr->mCAMmask);
/*
* Reset to hardware default
*/
- MII_REG_BITS_OFF((ANAR_ASMDIR | ANAR_PAUSE), MII_REG_ANAR, vptr->mac_regs);
+ MII_REG_BITS_OFF((ADVERTISE_PAUSE_ASYM | ADVERTISE_PAUSE_CAP), MII_ADVERTISE, vptr->mac_regs);
/*
* Turn on the ECHODIS bit in NWay-forced full mode and turn it
* off in NWay-forced half mode, for the NWay-forced vs.
* legacy-forced issue.
*/
if (vptr->mii_status & VELOCITY_DUPLEX_FULL)
- MII_REG_BITS_ON(TCSR_ECHODIS, MII_REG_TCSR, vptr->mac_regs);
+ MII_REG_BITS_ON(TCSR_ECHODIS, MII_SREVISION, vptr->mac_regs);
else
- MII_REG_BITS_OFF(TCSR_ECHODIS, MII_REG_TCSR, vptr->mac_regs);
+ MII_REG_BITS_OFF(TCSR_ECHODIS, MII_SREVISION, vptr->mac_regs);
/*
* Turn on Link/Activity LED enable bit for CIS8201
*/
- MII_REG_BITS_ON(PLED_LALBE, MII_REG_PLED, vptr->mac_regs);
+ MII_REG_BITS_ON(PLED_LALBE, MII_TPISTATUS, vptr->mac_regs);
break;
case PHYID_VT3216_32BIT:
case PHYID_VT3216_64BIT:
/*
* Reset to hardware default
*/
- MII_REG_BITS_ON((ANAR_ASMDIR | ANAR_PAUSE), MII_REG_ANAR, vptr->mac_regs);
+ MII_REG_BITS_ON((ADVERTISE_PAUSE_ASYM | ADVERTISE_PAUSE_CAP), MII_ADVERTISE, vptr->mac_regs);
/*
* Turn on the ECHODIS bit in NWay-forced full mode and turn it
* off in NWay-forced half mode, for the NWay-forced vs.
* legacy-forced issue.
*/
if (vptr->mii_status & VELOCITY_DUPLEX_FULL)
- MII_REG_BITS_ON(TCSR_ECHODIS, MII_REG_TCSR, vptr->mac_regs);
+ MII_REG_BITS_ON(TCSR_ECHODIS, MII_SREVISION, vptr->mac_regs);
else
- MII_REG_BITS_OFF(TCSR_ECHODIS, MII_REG_TCSR, vptr->mac_regs);
+ MII_REG_BITS_OFF(TCSR_ECHODIS, MII_SREVISION, vptr->mac_regs);
break;
case PHYID_MARVELL_1000:
/*
* Reset to hardware default
*/
- MII_REG_BITS_ON((ANAR_ASMDIR | ANAR_PAUSE), MII_REG_ANAR, vptr->mac_regs);
+ MII_REG_BITS_ON((ADVERTISE_PAUSE_ASYM | ADVERTISE_PAUSE_CAP), MII_ADVERTISE, vptr->mac_regs);
break;
default:
;
}
- velocity_mii_read(vptr->mac_regs, MII_REG_BMCR, &BMCR);
- if (BMCR & BMCR_ISO) {
- BMCR &= ~BMCR_ISO;
- velocity_mii_write(vptr->mac_regs, MII_REG_BMCR, BMCR);
+ velocity_mii_read(vptr->mac_regs, MII_BMCR, &BMCR);
+ if (BMCR & BMCR_ISOLATE) {
+ BMCR &= ~BMCR_ISOLATE;
+ velocity_mii_write(vptr->mac_regs, MII_BMCR, BMCR);
}
}
+/**
+ * setup_queue_timers - Setup interrupt timers
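+ * @vptr: velocity adapter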
+ *
+ * Setup interrupt frequency during suppression (timeout if the frame
+ * count isn't filled).
+ */
+static void setup_queue_timers(struct velocity_info *vptr)
+{
+ /* Only for newer revisions */
+ if (vptr->rev_id >= REV_ID_VT3216_A0) {
+ u8 txqueue_timer = 0;
+ u8 rxqueue_timer = 0;
+
+ if (vptr->mii_status & (VELOCITY_SPEED_1000 |
+ VELOCITY_SPEED_100)) {
+ txqueue_timer = vptr->options.txqueue_timer;
+ rxqueue_timer = vptr->options.rxqueue_timer;
+ }
+
+ writeb(txqueue_timer, &vptr->mac_regs->TQETMR);
+ writeb(rxqueue_timer, &vptr->mac_regs->RQETMR);
+ }
+}
+/**
+ * setup_adaptive_interrupts - Setup interrupt suppression
+ *
+ * @vptr: velocity adapter
+ *
+ * The velocity is able to suppress interrupts during high interrupt load.
+ * This function turns on that feature.
+ */
+static void setup_adaptive_interrupts(struct velocity_info *vptr)
+{
+ struct mac_regs __iomem *regs = vptr->mac_regs;
+ u16 tx_intsup = vptr->options.tx_intsup;
+ u16 rx_intsup = vptr->options.rx_intsup;
+
+ /* Setup default interrupt mask (will be changed below) */
+ vptr->int_mask = INT_MASK_DEF;
+
+ /* Set Tx Interrupt Suppression Threshold */
+ writeb(CAMCR_PS0, &regs->CAMCR);
+ if (tx_intsup != 0) {
+ vptr->int_mask &= ~(ISR_PTXI | ISR_PTX0I | ISR_PTX1I |
+ ISR_PTX2I | ISR_PTX3I);
+ writew(tx_intsup, &regs->ISRCTL);
+ } else
+ writew(ISRCTL_TSUPDIS, &regs->ISRCTL);
+
+ /* Set Rx Interrupt Suppression Threshold */
+ writeb(CAMCR_PS1, &regs->CAMCR);
+ if (rx_intsup != 0) {
+ vptr->int_mask &= ~ISR_PRXI;
+ writew(rx_intsup, &regs->ISRCTL);
+ } else
+ writew(ISRCTL_RSUPDIS, &regs->ISRCTL);
+
+ /* Select page to interrupt hold timer */
+ writeb(0, &regs->CAMCR);
+}
/**
* velocity_init_registers - initialise MAC registers
*/
enable_mii_autopoll(regs);
- vptr->int_mask = INT_MASK_DEF;
+ setup_adaptive_interrupts(vptr);
writel(vptr->rx.pool_dma, &regs->RDBaseLo);
writew(vptr->options.numrx - 1, &regs->RDCSize);
* Do the gymnastics to get the buffer head for data at
* 64byte alignment.
*/
- skb_reserve(rd_info->skb, (unsigned long) rd_info->skb->data & 63);
+ skb_reserve(rd_info->skb,
+ 64 - ((unsigned long) rd_info->skb->data & 63));
rd_info->skb_dma = pci_map_single(vptr->pdev, rd_info->skb->data,
vptr->rx.buf_sz, PCI_DMA_FROMDEVICE);
*/
static int velocity_init_td_ring(struct velocity_info *vptr)
{
- dma_addr_t curr;
int j;
/* Init the TD ring entries */
for (j = 0; j < vptr->tx.numq; j++) {
- curr = vptr->tx.pool_dma[j];
vptr->tx.infos[j] = kcalloc(vptr->options.numtx,
sizeof(struct velocity_td_info),
* Release a transmit buffer. If the buffer was preallocated then
* recycle it, if not then unmap the buffer.
*/
-static void velocity_free_tx_buf(struct velocity_info *vptr, struct velocity_td_info *tdinfo)
+static void velocity_free_tx_buf(struct velocity_info *vptr,
+ struct velocity_td_info *tdinfo, struct tx_desc *td)
{
struct sk_buff *skb = tdinfo->skb;
- int i;
- int pktlen;
/*
* Don't unmap the pre-allocated tx_bufs
*/
if (tdinfo->skb_dma) {
+ int i;
- pktlen = max_t(unsigned int, skb->len, ETH_ZLEN);
for (i = 0; i < tdinfo->nskb_dma; i++) {
- pci_unmap_single(vptr->pdev, tdinfo->skb_dma[i], pktlen, PCI_DMA_TODEVICE);
- tdinfo->skb_dma[i] = 0;
+ size_t pktlen = max_t(size_t, skb->len, ETH_ZLEN);
+
+ /* For scatter-gather */
+ if (skb_shinfo(skb)->nr_frags > 0)
+ pktlen = max_t(size_t, pktlen,
+ le16_to_cpu(td->td_buf[i].size) & ~TD_QUEUE);
+
+ pci_unmap_single(vptr->pdev, tdinfo->skb_dma[i],
+ pktlen, PCI_DMA_TODEVICE);
}
}
dev_kfree_skb_irq(skb);
BYTE_REG_BITS_OFF(TESTCFG_HBDIS, &regs->TESTCFG);
else
BYTE_REG_BITS_ON(TESTCFG_HBDIS, &regs->TESTCFG);
+
+ setup_queue_timers(vptr);
}
/*
* Get link status from PHYSR0
/**
* velocity_tx_srv - transmit interrupt service
* @vptr: velocity adapter
- * @status:
*
* Scan the queues looking for transmitted packets that
* we can complete and clean up. Update any statistics as
* necessary.
*/
-static int velocity_tx_srv(struct velocity_info *vptr, u32 status)
+static int velocity_tx_srv(struct velocity_info *vptr)
{
struct tx_desc *td;
int qnum;
stats->tx_packets++;
stats->tx_bytes += tdinfo->skb->len;
}
- velocity_free_tx_buf(vptr, tdinfo);
+ velocity_free_tx_buf(vptr, tdinfo, td);
vptr->tx.used[qnum]--;
}
vptr->tx.tail[qnum] = idx;
* Look to see if we should kick the transmit network
* layer for more work.
*/
- if (netif_queue_stopped(vptr->dev) && (full == 0)
- && (!(vptr->mii_status & VELOCITY_LINK_FAIL))) {
+ if (netif_queue_stopped(vptr->dev) && (full == 0) &&
+ (!(vptr->mii_status & VELOCITY_LINK_FAIL))) {
netif_wake_queue(vptr->dev);
}
return works;
/**
* velocity_rx_srv - service RX interrupt
* @vptr: velocity
- * @status: adapter status (unused)
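+ * @budget_left: remaining NAPI budget for receive processing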
*
* Walk the receive ring of the velocity adapter and remove
* any received packets from the receive queue. Hand the ring
* slots back to the adapter for reuse.
*/
-static int velocity_rx_srv(struct velocity_info *vptr, int status)
+static int velocity_rx_srv(struct velocity_info *vptr, int budget_left)
{
struct net_device_stats *stats = &vptr->dev->stats;
int rd_curr = vptr->rx.curr;
int works = 0;
- do {
+ while (works < budget_left) {
struct rx_desc *rd = vptr->rx.ring + rd_curr;
if (!vptr->rx.info[rd_curr].skb)
rd_curr++;
if (rd_curr >= vptr->options.numrx)
rd_curr = 0;
- } while (++works <= 15);
+ works++;
+ }
vptr->rx.curr = rd_curr;
return works;
}
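+/**
+ * velocity_poll - NAPI poll callback
+ * @napi: napi structure of the adapter
+ * @budget: maximum number of packets to service in this call
+ *
+ * Service the receive and transmit rings within the NAPI budget and
+ * re-enable the device interrupt once the budget is no longer exhausted.
+ */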
+static int velocity_poll(struct napi_struct *napi, int budget)
+{
+ struct velocity_info *vptr = container_of(napi,
+ struct velocity_info, napi);
+ unsigned int rx_done;
+ unsigned long flags;
+
+ spin_lock_irqsave(&vptr->lock, flags);
+ /*
+ * Do rx and tx twice for performance (taken from the VIA
+ * out-of-tree driver).
+ */
+ rx_done = velocity_rx_srv(vptr, budget / 2);
+ velocity_tx_srv(vptr);
+ rx_done += velocity_rx_srv(vptr, budget - rx_done);
+ velocity_tx_srv(vptr);
+
+ /* If budget not fully consumed, exit the polling mode */
+ if (rx_done < budget) {
+ napi_complete(napi);
+ mac_enable_int(vptr->mac_regs);
+ }
+ spin_unlock_irqrestore(&vptr->lock, flags);
+
+ return rx_done;
+}
/**
* velocity_intr - interrupt callback
struct net_device *dev = dev_instance;
struct velocity_info *vptr = netdev_priv(dev);
u32 isr_status;
- int max_count = 0;
-
spin_lock(&vptr->lock);
isr_status = mac_read_isr(vptr->mac_regs);
return IRQ_NONE;
}
- mac_disable_int(vptr->mac_regs);
-
- /*
- * Keep processing the ISR until we have completed
- * processing and the isr_status becomes zero
- */
+ /* Ack the interrupt */
+ mac_write_isr(vptr->mac_regs, isr_status);
- while (isr_status != 0) {
- mac_write_isr(vptr->mac_regs, isr_status);
- if (isr_status & (~(ISR_PRXI | ISR_PPRXI | ISR_PTXI | ISR_PPTXI)))
- velocity_error(vptr, isr_status);
- if (isr_status & (ISR_PRXI | ISR_PPRXI))
- max_count += velocity_rx_srv(vptr, isr_status);
- if (isr_status & (ISR_PTXI | ISR_PPTXI))
- max_count += velocity_tx_srv(vptr, isr_status);
- isr_status = mac_read_isr(vptr->mac_regs);
- if (max_count > vptr->options.int_works) {
- printk(KERN_WARNING "%s: excessive work at interrupt.\n",
- dev->name);
- max_count = 0;
- }
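+ /* Hand the bulk of the work to the NAPI poll handler; the device
+ * interrupt stays masked until velocity_poll() re-enables it. */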
+ if (likely(napi_schedule_prep(&vptr->napi))) {
+ mac_disable_int(vptr->mac_regs);
+ __napi_schedule(&vptr->napi);
}
+
+ if (isr_status & (~(ISR_PRXI | ISR_PPRXI | ISR_PTXI | ISR_PPTXI)))
+ velocity_error(vptr, isr_status);
+
spin_unlock(&vptr->lock);
- mac_enable_int(vptr->mac_regs);
- return IRQ_HANDLED;
+ return IRQ_HANDLED;
}
/**
/* Ensure chip is running */
pci_set_power_state(vptr->pdev, PCI_D0);
- velocity_give_many_rx_descs(vptr);
-
velocity_init_registers(vptr, VELOCITY_INIT_COLD);
- ret = request_irq(vptr->pdev->irq, &velocity_intr, IRQF_SHARED,
+ ret = request_irq(vptr->pdev->irq, velocity_intr, IRQF_SHARED,
dev->name, dev);
if (ret < 0) {
/* Power down the chip */
goto out;
}
+ velocity_give_many_rx_descs(vptr);
+
mac_enable_int(vptr->mac_regs);
netif_start_queue(dev);
+ napi_enable(&vptr->napi);
vptr->flags |= VELOCITY_FLAGS_OPENED;
out:
return ret;
dev->mtu = new_mtu;
- velocity_give_many_rx_descs(vptr);
-
velocity_init_registers(vptr, VELOCITY_INIT_COLD);
+ velocity_give_many_rx_descs(vptr);
+
mac_enable_int(vptr->mac_regs);
netif_start_queue(dev);
{
struct velocity_info *vptr = netdev_priv(dev);
+ napi_disable(&vptr->napi);
netif_stop_queue(dev);
velocity_shutdown(vptr);
struct velocity_td_info *tdinfo;
unsigned long flags;
int pktlen;
- __le16 len;
- int index;
+ int index, prev;
+ int i = 0;
if (skb_padto(skb, ETH_ZLEN))
goto out;
- pktlen = max_t(unsigned int, skb->len, ETH_ZLEN);
- len = cpu_to_le16(pktlen);
+ /* The hardware can handle at most 7 memory segments, so merge
+ * the skb if there are more */
+ if (skb_shinfo(skb)->nr_frags > 6 && __skb_linearize(skb)) {
+ kfree_skb(skb);
+ return NETDEV_TX_OK;
+ }
+
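+ /* For a fragmented skb only the linear header goes into the first
+ * descriptor buffer; the fragments are mapped separately below. */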
+ pktlen = skb_shinfo(skb)->nr_frags == 0 ?
+ max_t(unsigned int, skb->len, ETH_ZLEN) :
+ skb_headlen(skb);
spin_lock_irqsave(&vptr->lock, flags);
*/
tdinfo->skb = skb;
tdinfo->skb_dma[0] = pci_map_single(vptr->pdev, skb->data, pktlen, PCI_DMA_TODEVICE);
- td_ptr->tdesc0.len = len;
+ td_ptr->tdesc0.len = cpu_to_le16(pktlen);
td_ptr->td_buf[0].pa_low = cpu_to_le32(tdinfo->skb_dma[0]);
td_ptr->td_buf[0].pa_high = 0;
- td_ptr->td_buf[0].size = len;
- tdinfo->nskb_dma = 1;
+ td_ptr->td_buf[0].size = cpu_to_le16(pktlen);
+
+ /* Handle fragments */
+ for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+ skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+
+ tdinfo->skb_dma[i + 1] = pci_map_page(vptr->pdev, frag->page,
+ frag->page_offset, frag->size,
+ PCI_DMA_TODEVICE);
+
+ td_ptr->td_buf[i + 1].pa_low = cpu_to_le32(tdinfo->skb_dma[i + 1]);
+ td_ptr->td_buf[i + 1].pa_high = 0;
+ td_ptr->td_buf[i + 1].size = cpu_to_le16(frag->size);
+ }
+ tdinfo->nskb_dma = i + 1;
td_ptr->tdesc1.cmd = TCPLS_NORMAL + (tdinfo->nskb_dma + 1) * 16;
/*
* Handle hardware checksum
*/
- if ((vptr->flags & VELOCITY_FLAGS_TX_CSUM)
- && (skb->ip_summed == CHECKSUM_PARTIAL)) {
+ if ((dev->features & NETIF_F_IP_CSUM) &&
+ (skb->ip_summed == CHECKSUM_PARTIAL)) {
const struct iphdr *ip = ip_hdr(skb);
if (ip->protocol == IPPROTO_TCP)
td_ptr->tdesc1.TCR |= TCR0_TCPCK;
td_ptr->tdesc1.TCR |= (TCR0_UDPCK);
td_ptr->tdesc1.TCR |= TCR0_IPCK;
}
- {
- int prev = index - 1;
+ prev = index - 1;
+ if (prev < 0)
+ prev = vptr->options.numtx - 1;
+ td_ptr->tdesc0.len |= OWNED_BY_NIC;
+ vptr->tx.used[qnum]++;
+ vptr->tx.curr[qnum] = (index + 1) % vptr->options.numtx;
- if (prev < 0)
- prev = vptr->options.numtx - 1;
- td_ptr->tdesc0.len |= OWNED_BY_NIC;
- vptr->tx.used[qnum]++;
- vptr->tx.curr[qnum] = (index + 1) % vptr->options.numtx;
+ if (AVAIL_TD(vptr, qnum) < 1)
+ netif_stop_queue(dev);
- if (AVAIL_TD(vptr, qnum) < 1)
- netif_stop_queue(dev);
+ td_ptr = &(vptr->tx.rings[qnum][prev]);
+ td_ptr->td_buf[0].size |= TD_QUEUE;
+ mac_tx_queue_wake(vptr->mac_regs, qnum);
- td_ptr = &(vptr->tx.rings[qnum][prev]);
- td_ptr->td_buf[0].size |= TD_QUEUE;
- mac_tx_queue_wake(vptr->mac_regs, qnum);
- }
- dev->trans_start = jiffies;
spin_unlock_irqrestore(&vptr->lock, flags);
out:
return NETDEV_TX_OK;
struct net_device *dev = vptr->dev;
printk(KERN_INFO "%s: %s\n", dev->name, get_chip_name(vptr->chip_id));
- printk(KERN_INFO "%s: Ethernet Address: %2.2X:%2.2X:%2.2X:%2.2X:%2.2X:%2.2X\n",
- dev->name,
- dev->dev_addr[0], dev->dev_addr[1], dev->dev_addr[2],
- dev->dev_addr[3], dev->dev_addr[4], dev->dev_addr[5]);
+ printk(KERN_INFO "%s: Ethernet Address: %pM\n",
+ dev->name, dev->dev_addr);
}
static u32 velocity_get_link(struct net_device *dev)
dev->irq = pdev->irq;
dev->netdev_ops = &velocity_netdev_ops;
dev->ethtool_ops = &velocity_ethtool_ops;
+ netif_napi_add(dev, &vptr->napi, velocity_poll, VELOCITY_NAPI_WEIGHT);
dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_FILTER |
- NETIF_F_HW_VLAN_RX;
-
- if (vptr->flags & VELOCITY_FLAGS_TX_CSUM)
- dev->features |= NETIF_F_IP_CSUM;
+ NETIF_F_HW_VLAN_RX | NETIF_F_IP_CSUM | NETIF_F_SG;
ret = register_netdev(dev);
if (ret < 0)
if (vptr->mii_status & VELOCITY_AUTONEG_ENABLE) {
if (PHYID_GET_PHY_ID(vptr->phy_id) == PHYID_CICADA_CS8201)
- MII_REG_BITS_ON(AUXCR_MDPPS, MII_REG_AUXCR, vptr->mac_regs);
+ MII_REG_BITS_ON(AUXCR_MDPPS, MII_NCONFIG, vptr->mac_regs);
- MII_REG_BITS_OFF(G1000CR_1000FD | G1000CR_1000, MII_REG_G1000CR, vptr->mac_regs);
+ MII_REG_BITS_OFF(ADVERTISE_1000FULL | ADVERTISE_1000HALF, MII_CTRL1000, vptr->mac_regs);
}
if (vptr->mii_status & VELOCITY_SPEED_1000)
- MII_REG_BITS_ON(BMCR_REAUTO, MII_REG_BMCR, vptr->mac_regs);
+ MII_REG_BITS_ON(BMCR_ANRESTART, MII_BMCR, vptr->mac_regs);
BYTE_REG_BITS_ON(CHIPGCR_FCMODE, &regs->CHIPGCR);
velocity_init_registers(vptr, VELOCITY_INIT_WOL);
mac_disable_int(vptr->mac_regs);
- velocity_tx_srv(vptr, 0);
+ velocity_tx_srv(vptr);
for (i = 0; i < vptr->tx.numq; i++) {
if (vptr->tx.used[i])
msglevel = value;
}
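+/*
+ * The queue timer registers hold a 6-bit count with a 2-bit prescaler
+ * in bits 7:6 (x1, x4, x16 or x64). The helpers below convert between
+ * that encoding and the microsecond values used by ethtool coalescing.
+ */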
+static int get_pending_timer_val(int val)
+{
+ int mult_bits = val >> 6;
+ int mult = 1;
+
+ switch (mult_bits)
+ {
+ case 1:
+ mult = 4; break;
+ case 2:
+ mult = 16; break;
+ case 3:
+ mult = 64; break;
+ case 0:
+ default:
+ break;
+ }
+
+ return (val & 0x3f) * mult;
+}
+
+static void set_pending_timer_val(int *val, u32 us)
+{
+ u8 mult = 0;
+ u8 shift = 0;
+
+ if (us >= 0x3f) {
+ mult = 1; /* mult with 4 */
+ shift = 2;
+ }
+ if (us >= 0x3f * 4) {
+ mult = 2; /* mult with 16 */
+ shift = 4;
+ }
+ if (us >= 0x3f * 16) {
+ mult = 3; /* mult with 64 */
+ shift = 6;
+ }
+
+ *val = (mult << 6) | ((us >> shift) & 0x3f);
+}
+
+
+static int velocity_get_coalesce(struct net_device *dev,
+ struct ethtool_coalesce *ecmd)
+{
+ struct velocity_info *vptr = netdev_priv(dev);
+
+ ecmd->tx_max_coalesced_frames = vptr->options.tx_intsup;
+ ecmd->rx_max_coalesced_frames = vptr->options.rx_intsup;
+
+ ecmd->rx_coalesce_usecs = get_pending_timer_val(vptr->options.rxqueue_timer);
+ ecmd->tx_coalesce_usecs = get_pending_timer_val(vptr->options.txqueue_timer);
+
+ return 0;
+}
+
+static int velocity_set_coalesce(struct net_device *dev,
+ struct ethtool_coalesce *ecmd)
+{
+ struct velocity_info *vptr = netdev_priv(dev);
+ int max_us = 0x3f * 64;
+ unsigned long flags;
+
+ /* 6 bits of timer value plus a 2-bit prescaler: at most 0x3f * 64 us */
+ if (ecmd->tx_coalesce_usecs > max_us)
+ return -EINVAL;
+ if (ecmd->rx_coalesce_usecs > max_us)
+ return -EINVAL;
+
+ if (ecmd->tx_max_coalesced_frames > 0xff)
+ return -EINVAL;
+ if (ecmd->rx_max_coalesced_frames > 0xff)
+ return -EINVAL;
+
+ vptr->options.rx_intsup = ecmd->rx_max_coalesced_frames;
+ vptr->options.tx_intsup = ecmd->tx_max_coalesced_frames;
+
+ set_pending_timer_val(&vptr->options.rxqueue_timer,
+ ecmd->rx_coalesce_usecs);
+ set_pending_timer_val(&vptr->options.txqueue_timer,
+ ecmd->tx_coalesce_usecs);
+
+ /* Setup the interrupt suppression and queue timers */
+ spin_lock_irqsave(&vptr->lock, flags);
+ mac_disable_int(vptr->mac_regs);
+ setup_adaptive_interrupts(vptr);
+ setup_queue_timers(vptr);
+
+ mac_write_int_mask(vptr->int_mask, vptr->mac_regs);
+ mac_clear_isr(vptr->mac_regs);
+ mac_enable_int(vptr->mac_regs);
+ spin_unlock_irqrestore(&vptr->lock, flags);
+
+ return 0;
+}
+
static const struct ethtool_ops velocity_ethtool_ops = {
.get_settings = velocity_get_settings,
.set_settings = velocity_set_settings,
.get_drvinfo = velocity_get_drvinfo,
+ .set_tx_csum = ethtool_op_set_tx_csum,
+ .get_tx_csum = ethtool_op_get_tx_csum,
.get_wol = velocity_ethtool_get_wol,
.set_wol = velocity_ethtool_set_wol,
.get_msglevel = velocity_get_msglevel,
.set_msglevel = velocity_set_msglevel,
+ .set_sg = ethtool_op_set_sg,
.get_link = velocity_get_link,
+ .get_coalesce = velocity_get_coalesce,
+ .set_coalesce = velocity_set_coalesce,
.begin = velocity_ethtool_up,
.complete = velocity_ethtool_down
};