* -- on page reclamation, the driver swaps the page with a spare page.
* if that page is still in use, it frees its reference to that page,
* and allocates a new page for use. otherwise, it just recycles the
- * the page.
+ * page.
*
* NOTE: cassini can parse the header. however, it's not worth it
* as long as the network stack requires a header copy.
* interrupts, but the INT# assignment needs to be set up properly by
* the BIOS and conveyed to the driver. PCI BIOSes don't know how to do
* that. also, the two descriptor rings are designed to distinguish between
- * encrypted and non-encrypted packets, but we use them for buffering
+ * encrypted and non-encrypted packets, but we use them for buffering
* instead.
*
- * by default, the selective clear mask is set up to process rx packets.
+ * by default, the selective clear mask is set up to process rx packets.
*/
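/* [editorial sketch] The reclamation scheme described above boils down
 * to one refcount test per descriptor slot. A minimal user-space
 * illustration, assuming hypothetical helpers: page_refs()/refs stands
 * in for the kernel's page_count() and spare_pop() for the driver's
 * rx_spare_list. This is a sketch of the policy, not driver code. */
struct rx_slot { void *buf; int refs; };

static struct rx_slot *reclaim_slot(struct rx_slot *slot,
				    struct rx_slot *(*spare_pop)(void))
{
	if (slot->refs == 1)		/* only our reference: recycle */
		return slot;
	slot->refs--;			/* still in use: drop our ref... */
	return spare_pop();		/* ...and swap in a spare page */
}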
-#include <linux/config.h>
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/init.h>
+#include <linux/vmalloc.h>
#include <linux/ioport.h>
#include <linux/pci.h>
#include <linux/mm.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/mutex.h>
+#include <linux/firmware.h>
#include <net/checksum.h>
#define cas_page_unmap(x) kunmap_atomic((x), KM_SKB_DATA_SOFTIRQ)
#define CAS_NCPUS num_online_cpus()
-#if defined(CONFIG_CASSINI_NAPI) && defined(HAVE_NETDEV_POLL)
+#ifdef CONFIG_CASSINI_NAPI
#define USE_NAPI
#define cas_skb_release(x) netif_receive_skb(x)
#else
#endif
/* select which firmware to use */
-#define USE_HP_WORKAROUND
+#define USE_HP_WORKAROUND
#define HP_WORKAROUND_DEFAULT /* select which firmware to use as default */
#define CAS_HP_ALT_FIRMWARE cas_prog_null /* alternate firmware */
#undef RX_COUNT_BUFFERS /* define to calculate RX buffer stats */
#define DRV_MODULE_NAME "cassini"
-#define PFX DRV_MODULE_NAME ": "
-#define DRV_MODULE_VERSION "1.4"
-#define DRV_MODULE_RELDATE "1 July 2004"
+#define DRV_MODULE_VERSION "1.6"
+#define DRV_MODULE_RELDATE "21 May 2008"
#define CAS_DEF_MSG_ENABLE \
(NETIF_MSG_DRV | \
#define STOP_TRIES_PHY 1000
#define STOP_TRIES 5000
-/* specify a minimum frame size to deal with some fifo issues
+/* specify a minimum frame size to deal with some fifo issues
* max mtu == 2 * page size - ethernet header - 64 - swivel =
* 2 * page_size - 0x50
*/
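/* [editorial note] Checking the arithmetic above, assuming the usual
 * 14-byte Ethernet header and the driver's 2-byte swivel
 * (RX_SWIVEL_OFF_VAL): 14 + 64 + 2 = 80 = 0x50, which matches the
 * "2 * page_size - 0x50" figure. */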
MODULE_AUTHOR("Adrian Sun (asun@darksunrising.com)");
MODULE_DESCRIPTION("Sun Cassini(+) ethernet driver");
MODULE_LICENSE("GPL");
+MODULE_FIRMWARE("sun/cassini.bin");
module_param(cassini_debug, int, 0);
MODULE_PARM_DESC(cassini_debug, "Cassini bitmapped debugging message enable value");
module_param(link_mode, int, 0);
* being confused and never showing a link status of "up."
*/
#define DEFAULT_LINKDOWN_TIMEOUT 5
-/*
+/*
* Value in seconds, for user input.
*/
static int linkdown_timeout = DEFAULT_LINKDOWN_TIMEOUT;
CAS_BMCR_SPEED1000|BMCR_FULLDPLX /* 5 : 1000bt full duplex */
};
-static struct pci_device_id cas_pci_tbl[] __devinitdata = {
+static DEFINE_PCI_DEVICE_TABLE(cas_pci_tbl) = {
{ PCI_VENDOR_ID_SUN, PCI_DEVICE_ID_SUN_CASSINI,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
{ PCI_VENDOR_ID_NS, PCI_DEVICE_ID_NS_SATURN,
{
int i;
- for (i = 0; i < N_TX_RINGS; i++)
+ for (i = 0; i < N_TX_RINGS; i++)
spin_lock(&cp->tx_lock[i]);
}
{
int i;
- for (i = N_TX_RINGS; i > 0; i--)
- spin_unlock(&cp->tx_lock[i - 1]);
+ for (i = N_TX_RINGS; i > 0; i--)
+ spin_unlock(&cp->tx_lock[i - 1]);
}
static inline void cas_unlock_all(struct cas *cp)
#ifdef USE_PCI_INTD
case 3:
#endif
- writel(INTRN_MASK_CLEAR_ALL | INTRN_MASK_RX_EN,
+ writel(INTRN_MASK_CLEAR_ALL | INTRN_MASK_RX_EN,
cp->regs + REG_PLUS_INTRN_MASK(ring));
break;
#endif
cas_disable_irq(cp, i);
}
-static inline void cas_buffer_init(cas_page_t *cp)
-{
- struct page *page = cp->buffer;
- atomic_set((atomic_t *)&page->lru.next, 1);
-}
-
-static inline int cas_buffer_count(cas_page_t *cp)
-{
- struct page *page = cp->buffer;
- return atomic_read((atomic_t *)&page->lru.next);
-}
-
-static inline void cas_buffer_inc(cas_page_t *cp)
-{
- struct page *page = cp->buffer;
- atomic_inc((atomic_t *)&page->lru.next);
-}
-
-static inline void cas_buffer_dec(cas_page_t *cp)
-{
- struct page *page = cp->buffer;
- atomic_dec((atomic_t *)&page->lru.next);
-}
-
static void cas_enable_irq(struct cas *cp, const int ring)
{
if (ring == 0) { /* all but TX_DONE */
if ((cp->cas_flags & CAS_FLAG_ENTROPY_DEV) == 0)
return;
- writel(BIM_LOCAL_DEV_PAD | BIM_LOCAL_DEV_PROM | BIM_LOCAL_DEV_EXT,
+ writel(BIM_LOCAL_DEV_PAD | BIM_LOCAL_DEV_PROM | BIM_LOCAL_DEV_EXT,
cp->regs + REG_BIM_LOCAL_DEV_EN);
writeb(ENTROPY_RESET_STC_MODE, cp->regs + REG_ENTROPY_RESET);
writeb(0x55, cp->regs + REG_ENTROPY_RAND_REG);
#endif
}
-/* access to the phy. the following assumes that we've initialized the MIF to
+/* access to the phy. the following assumes that we've initialized the MIF to
* be in frame rather than bit-bang mode
*/
static u16 cas_phy_read(struct cas *cp, int reg)
cmd |= CAS_BASE(MIF_FRAME_REG_ADDR, reg);
cmd |= MIF_FRAME_TURN_AROUND_MSB;
writel(cmd, cp->regs + REG_MIF_FRAME);
-
+
/* poll for completion */
while (limit-- > 0) {
udelay(10);
cmd |= MIF_FRAME_TURN_AROUND_MSB;
cmd |= val & MIF_FRAME_DATA_MASK;
writel(cmd, cp->regs + REG_MIF_FRAME);
-
+
/* poll for completion */
while (limit-- > 0) {
udelay(10);
static void cas_phy_powerup(struct cas *cp)
{
- u16 ctl = cas_phy_read(cp, MII_BMCR);
+ u16 ctl = cas_phy_read(cp, MII_BMCR);
if ((ctl & BMCR_PDOWN) == 0)
return;
static void cas_phy_powerdown(struct cas *cp)
{
- u16 ctl = cas_phy_read(cp, MII_BMCR);
+ u16 ctl = cas_phy_read(cp, MII_BMCR);
if (ctl & BMCR_PDOWN)
return;
/* cp->lock held. note: the last put_page will free the buffer */
static int cas_page_free(struct cas *cp, cas_page_t *page)
{
- pci_unmap_page(cp->pdev, page->dma_addr, cp->page_size,
+ pci_unmap_page(cp->pdev, page->dma_addr, cp->page_size,
PCI_DMA_FROMDEVICE);
- cas_buffer_dec(page);
__free_pages(page->buffer, cp->page_order);
kfree(page);
return 0;
#define RX_USED_ADD(x, y) ((x)->used += (y))
#define RX_USED_SET(x, y) ((x)->used = (y))
#else
-#define RX_USED_ADD(x, y)
+#define RX_USED_ADD(x, y)
#define RX_USED_SET(x, y)
#endif
page->buffer = alloc_pages(flags, cp->page_order);
if (!page->buffer)
goto page_err;
- cas_buffer_init(page);
page->dma_addr = pci_map_page(cp->pdev, page->buffer, 0,
cp->page_size, PCI_DMA_FROMDEVICE);
return page;
/* free spare buffers */
INIT_LIST_HEAD(&list);
spin_lock(&cp->rx_spare_lock);
- list_splice(&cp->rx_spare_list, &list);
- INIT_LIST_HEAD(&cp->rx_spare_list);
+ list_splice_init(&cp->rx_spare_list, &list);
spin_unlock(&cp->rx_spare_lock);
list_for_each_safe(elem, tmp, &list) {
cas_page_free(cp, list_entry(elem, cas_page_t, list));
* lock than used everywhere else to manipulate this list.
*/
spin_lock(&cp->rx_inuse_lock);
- list_splice(&cp->rx_inuse_list, &list);
- INIT_LIST_HEAD(&cp->rx_inuse_list);
+ list_splice_init(&cp->rx_inuse_list, &list);
spin_unlock(&cp->rx_inuse_lock);
#else
spin_lock(&cp->rx_spare_lock);
- list_splice(&cp->rx_inuse_list, &list);
- INIT_LIST_HEAD(&cp->rx_inuse_list);
+ list_splice_init(&cp->rx_inuse_list, &list);
spin_unlock(&cp->rx_spare_lock);
#endif
list_for_each_safe(elem, tmp, &list) {
/* make a local copy of the list */
INIT_LIST_HEAD(&list);
spin_lock(&cp->rx_inuse_lock);
- list_splice(&cp->rx_inuse_list, &list);
- INIT_LIST_HEAD(&cp->rx_inuse_list);
+ list_splice_init(&cp->rx_inuse_list, &list);
spin_unlock(&cp->rx_inuse_lock);
-
+
list_for_each_safe(elem, tmp, &list) {
cas_page_t *page = list_entry(elem, cas_page_t, list);
- if (cas_buffer_count(page) > 1)
+ /*
+ * With the lockless pagecache, the cassini buffering scheme gets
+ * slightly less accurate: we might find that a page has an
+ * elevated reference count here, due to a speculative ref,
+ * and skip it as in-use. Ideally we would be able to reclaim
+ * it. However this would be such a rare case, it doesn't
+ * matter too much as we should pick it up the next time round.
+ *
+ * Importantly, if we find that the page has a refcount of 1
+ * here (our refcount), then we know it is definitely not in use
+ * so we can reuse it.
+ */
+ if (page_count(page->buffer) > 1)
continue;
list_del(elem);
list_splice(&list, &cp->rx_inuse_list);
spin_unlock(&cp->rx_inuse_lock);
}
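/* [editorial sketch] The rule the new comment encodes, stated as a
 * predicate: a count of exactly 1 (our own reference) is the only
 * conclusive "free" signal, since a speculative reference can
 * transiently inflate the count and produce a false "in use". */
#include <stdbool.h>

static bool page_is_exclusively_ours(int refcount)
{
	return refcount == 1;	/* > 1 may be a speculative ref; skip */
}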
-
+
spin_lock(&cp->rx_spare_lock);
needed = cp->rx_spares_needed;
spin_unlock(&cp->rx_spare_lock);
i = 0;
while (i < needed) {
cas_page_t *spare = cas_page_alloc(cp, flags);
- if (!spare)
+ if (!spare)
break;
list_add(&spare->list, &list);
i++;
cas_spare_recover(cp, GFP_ATOMIC);
spin_lock(&cp->rx_spare_lock);
if (list_empty(&cp->rx_spare_list)) {
- if (netif_msg_rx_err(cp))
- printk(KERN_ERR "%s: no spare buffers "
- "available.\n", cp->dev->name);
+ netif_err(cp, rx_err, cp->dev,
+ "no spare buffers available\n");
spin_unlock(&cp->rx_spare_lock);
return NULL;
}
static void cas_mif_poll(struct cas *cp, const int enable)
{
u32 cfg;
-
- cfg = readl(cp->regs + REG_MIF_CFG);
+
+ cfg = readl(cp->regs + REG_MIF_CFG);
cfg &= (MIF_CFG_MDIO_0 | MIF_CFG_MDIO_1);
if (cp->phy_type & CAS_PHY_MII_MDIO1)
- cfg |= MIF_CFG_PHY_SELECT;
+ cfg |= MIF_CFG_PHY_SELECT;
/* poll and interrupt on link status change. */
if (enable) {
cfg |= CAS_BASE(MIF_CFG_POLL_REG, MII_BMSR);
cfg |= CAS_BASE(MIF_CFG_POLL_PHY, cp->phy_addr);
}
- writel((enable) ? ~(BMSR_LSTATUS | BMSR_ANEGCOMPLETE) : 0xFFFF,
- cp->regs + REG_MIF_MASK);
+ writel((enable) ? ~(BMSR_LSTATUS | BMSR_ANEGCOMPLETE) : 0xFFFF,
+ cp->regs + REG_MIF_MASK);
writel(cfg, cp->regs + REG_MIF_CFG);
}
#endif
start_aneg:
if (cp->lstate == link_up) {
- printk(KERN_INFO "%s: PCS link down.\n",
- cp->dev->name);
+ netdev_info(cp->dev, "PCS link down\n");
} else {
if (changed) {
- printk(KERN_INFO "%s: link configuration changed\n",
- cp->dev->name);
+ netdev_info(cp->dev, "link configuration changed\n");
}
}
cp->lstate = link_down;
/*
* WTZ: If the old state was link_up, we turn off the carrier
* to replicate everything we do elsewhere on a link-down
- * event when we were already in a link-up state..
+ * event when we were already in a link-up state.
*/
if (oldstate == link_up)
netif_carrier_off(cp->dev);
/*
* WTZ: This branch will simply schedule a full reset after
* we explicitly changed link modes in an ioctl. See if this
- * fixes the link-problems we were having for forced mode.
+ * fixes the link-problems we were having for forced mode.
*/
atomic_inc(&cp->reset_task_pending);
atomic_inc(&cp->reset_task_pending_all);
} else {
cas_mif_poll(cp, 0);
ctl = cas_phy_read(cp, MII_BMCR);
- ctl &= ~(BMCR_FULLDPLX | BMCR_SPEED100 |
+ ctl &= ~(BMCR_FULLDPLX | BMCR_SPEED100 |
CAS_BMCR_SPEED1000 | BMCR_ANENABLE);
ctl |= cp->link_cntl;
if (ctl & BMCR_ANENABLE) {
{
int limit = STOP_TRIES_PHY;
u16 val;
-
+
cas_phy_write(cp, MII_BMCR, BMCR_RESET);
udelay(100);
- while (limit--) {
+ while (--limit) {
val = cas_phy_read(cp, MII_BMCR);
if ((val & BMCR_RESET) == 0)
break;
return (limit <= 0);
}
+static int cas_saturn_firmware_init(struct cas *cp)
+{
+ const struct firmware *fw;
+ const char fw_name[] = "sun/cassini.bin";
+ int err;
+
+ if (PHY_NS_DP83065 != cp->phy_id)
+ return 0;
+
+ err = request_firmware(&fw, fw_name, &cp->pdev->dev);
+ if (err) {
+ pr_err("Failed to load firmware \"%s\"\n",
+ fw_name);
+ return err;
+ }
+ if (fw->size < 2) {
+ pr_err("bogus length %zu in \"%s\"\n",
+ fw->size, fw_name);
+ err = -EINVAL;
+ goto out;
+ }
+ cp->fw_load_addr = fw->data[1] << 8 | fw->data[0];
+ cp->fw_size = fw->size - 2;
+ cp->fw_data = vmalloc(cp->fw_size);
+ if (!cp->fw_data) {
+ err = -ENOMEM;
+ pr_err("\"%s\" Failed %d\n", fw_name, err);
+ goto out;
+ }
+ memcpy(cp->fw_data, &fw->data[2], cp->fw_size);
+out:
+ release_firmware(fw);
+ return err;
+}
+
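/* [editorial sketch] cas_saturn_firmware_init() above implies a tiny
 * container format for sun/cassini.bin: bytes 0-1 are the PHY load
 * address, little-endian, and everything after is the image. A hedged
 * user-space parser of that layout (parse_cas_fw() and struct cas_fw
 * are illustrative names, not driver API): */
#include <stddef.h>
#include <stdint.h>

struct cas_fw {
	uint16_t load_addr;
	const uint8_t *data;
	size_t size;
};

static int parse_cas_fw(const uint8_t *blob, size_t len, struct cas_fw *fw)
{
	if (len < 2)
		return -1;	/* mirrors the "bogus length" check */
	fw->load_addr = (uint16_t)(blob[1] << 8 | blob[0]);
	fw->data = blob + 2;
	fw->size = len - 2;
	return 0;
}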
static void cas_saturn_firmware_load(struct cas *cp)
{
- cas_saturn_patch_t *patch = cas_saturn_patch;
+ int i;
cas_phy_powerdown(cp);
/* download new firmware */
cas_phy_write(cp, DP83065_MII_MEM, 0x1);
- cas_phy_write(cp, DP83065_MII_REGE, patch->addr);
- while (patch->addr) {
- cas_phy_write(cp, DP83065_MII_REGD, patch->val);
- patch++;
- }
+ cas_phy_write(cp, DP83065_MII_REGE, cp->fw_load_addr);
+ for (i = 0; i < cp->fw_size; i++)
+ cas_phy_write(cp, DP83065_MII_REGD, cp->fw_data[i]);
/* enable firmware */
cas_phy_write(cp, DP83065_MII_REGE, 0x8ff8);
val = cas_phy_read(cp, BROADCOM_MII_REG4);
if (val & 0x0080) {
/* link workaround */
- cas_phy_write(cp, BROADCOM_MII_REG4,
+ cas_phy_write(cp, BROADCOM_MII_REG4,
val & ~0x0080);
}
-
+
} else if (cp->cas_flags & CAS_FLAG_SATURN) {
- writel((cp->phy_type & CAS_PHY_MII_MDIO0) ?
- SATURN_PCFG_FSI : 0x0,
+ writel((cp->phy_type & CAS_PHY_MII_MDIO0) ?
+ SATURN_PCFG_FSI : 0x0,
cp->regs + REG_SATURN_PCFG);
/* load firmware to address 10Mbps auto-negotiation
- * issue. NOTE: this will need to be changed if the
+ * issue. NOTE: this will need to be changed if the
* default firmware gets fixed.
*/
if (PHY_NS_DP83065 == cp->phy_id) {
cas_phy_read(cp, MII_ADVERTISE) |
(ADVERTISE_10HALF | ADVERTISE_10FULL |
ADVERTISE_100HALF | ADVERTISE_100FULL |
- CAS_ADVERTISE_PAUSE |
+ CAS_ADVERTISE_PAUSE |
CAS_ADVERTISE_ASYM_PAUSE));
-
+
if (cp->cas_flags & CAS_FLAG_1000MB_CAP) {
/* make sure that we don't advertise half
* duplex to avoid a chip issue
writel(val, cp->regs + REG_PCS_MII_CTRL);
limit = STOP_TRIES;
- while (limit-- > 0) {
+ while (--limit > 0) {
udelay(10);
- if ((readl(cp->regs + REG_PCS_MII_CTRL) &
+ if ((readl(cp->regs + REG_PCS_MII_CTRL) &
PCS_MII_RESET) == 0)
break;
}
if (limit <= 0)
- printk(KERN_WARNING "%s: PCS reset bit would not "
- "clear [%08x].\n", cp->dev->name,
- readl(cp->regs + REG_PCS_STATE_MACHINE));
+ netdev_warn(cp->dev, "PCS reset bit would not clear [%08x]\n",
+ readl(cp->regs + REG_PCS_STATE_MACHINE));
/* Make sure PCS is disabled while changing advertisement
* configuration.
/* Advertise all capabilities except half-duplex. */
val = readl(cp->regs + REG_PCS_MII_ADVERT);
val &= ~PCS_MII_ADVERT_HD;
- val |= (PCS_MII_ADVERT_FD | PCS_MII_ADVERT_SYM_PAUSE |
+ val |= (PCS_MII_ADVERT_FD | PCS_MII_ADVERT_SYM_PAUSE |
PCS_MII_ADVERT_ASYM_PAUSE);
writel(val, cp->regs + REG_PCS_MII_ADVERT);
*/
if ((stat & (PCS_MII_STATUS_AUTONEG_COMP |
PCS_MII_STATUS_REMOTE_FAULT)) ==
- (PCS_MII_STATUS_AUTONEG_COMP | PCS_MII_STATUS_REMOTE_FAULT)) {
- if (netif_msg_link(cp))
- printk(KERN_INFO "%s: PCS RemoteFault\n",
- cp->dev->name);
- }
+ (PCS_MII_STATUS_AUTONEG_COMP | PCS_MII_STATUS_REMOTE_FAULT))
+ netif_info(cp, link, cp->dev, "PCS RemoteFault\n");
/* work around link detection issue by querying the PCS state
* machine directly.
if (cp->opened) {
cp->lstate = link_up;
cp->link_transition = LINK_TRANSITION_LINK_UP;
-
+
cas_set_link_modes(cp);
netif_carrier_on(cp->dev);
}
cp->link_transition != LINK_TRANSITION_REQUESTED_RESET &&
!cp->link_transition_jiffies_valid) {
/*
- * force a reset, as a workaround for the
- * link-failure problem. May want to move this to a
+ * force a reset, as a workaround for the
+ * link-failure problem. May want to move this to a
* point a bit earlier in the sequence. If we had
* generated a reset a short time ago, we'll wait for
* the link timer to check the status until a
cp->link_transition = LINK_TRANSITION_ON_FAILURE;
}
netif_carrier_off(cp->dev);
- if (cp->opened && netif_msg_link(cp)) {
- printk(KERN_INFO "%s: PCS link down.\n",
- cp->dev->name);
- }
+ if (cp->opened)
+ netif_info(cp, link, cp->dev, "PCS link down\n");
/* Cassini only: if you force a mode, there can be
* sync problems on link down. to fix that, the following
return retval;
}
-static int cas_pcs_interrupt(struct net_device *dev,
+static int cas_pcs_interrupt(struct net_device *dev,
struct cas *cp, u32 status)
{
u32 stat = readl(cp->regs + REG_PCS_INTR_STATUS);
- if ((stat & PCS_INTR_STATUS_LINK_CHANGE) == 0)
+ if ((stat & PCS_INTR_STATUS_LINK_CHANGE) == 0)
return 0;
return cas_pcs_link_check(cp);
}
-static int cas_txmac_interrupt(struct net_device *dev,
+static int cas_txmac_interrupt(struct net_device *dev,
struct cas *cp, u32 status)
{
u32 txmac_stat = readl(cp->regs + REG_MAC_TX_STATUS);
if (!txmac_stat)
return 0;
- if (netif_msg_intr(cp))
- printk(KERN_DEBUG "%s: txmac interrupt, txmac_stat: 0x%x\n",
- cp->dev->name, txmac_stat);
+ netif_printk(cp, intr, KERN_DEBUG, cp->dev,
+ "txmac interrupt, txmac_stat: 0x%x\n", txmac_stat);
/* Defer timer expiration is quite normal,
* don't even log the event.
spin_lock(&cp->stat_lock[0]);
if (txmac_stat & MAC_TX_UNDERRUN) {
- printk(KERN_ERR "%s: TX MAC xmit underrun.\n",
- dev->name);
+ netdev_err(dev, "TX MAC xmit underrun\n");
cp->net_stats[0].tx_fifo_errors++;
}
if (txmac_stat & MAC_TX_MAX_PACKET_ERR) {
- printk(KERN_ERR "%s: TX MAC max packet size error.\n",
- dev->name);
+ netdev_err(dev, "TX MAC max packet size error\n");
cp->net_stats[0].tx_errors++;
}
return 0;
}
-static void cas_load_firmware(struct cas *cp, cas_hp_inst_t *firmware)
+static void cas_load_firmware(struct cas *cp, cas_hp_inst_t *firmware)
{
cas_hp_inst_t *inst;
u32 val;
static void cas_init_rx_dma(struct cas *cp)
{
- u64 desc_dma = cp->block_dvma;
+ u64 desc_dma = cp->block_dvma;
u32 val;
int i, size;
/* rx free descriptors */
- val = CAS_BASE(RX_CFG_SWIVEL, RX_SWIVEL_OFF_VAL);
+ val = CAS_BASE(RX_CFG_SWIVEL, RX_SWIVEL_OFF_VAL);
val |= CAS_BASE(RX_CFG_DESC_RING, RX_DESC_RINGN_INDEX(0));
val |= CAS_BASE(RX_CFG_COMP_RING, RX_COMP_RINGN_INDEX(0));
if ((N_RX_DESC_RINGS > 1) &&
val |= CAS_BASE(RX_CFG_DESC_RING1, RX_DESC_RINGN_INDEX(1));
writel(val, cp->regs + REG_RX_CFG);
- val = (unsigned long) cp->init_rxds[0] -
+ val = (unsigned long) cp->init_rxds[0] -
(unsigned long) cp->init_block;
writel((desc_dma + val) >> 32, cp->regs + REG_RX_DB_HI);
writel((desc_dma + val) & 0xffffffff, cp->regs + REG_RX_DB_LOW);
writel(RX_DESC_RINGN_SIZE(0) - 4, cp->regs + REG_RX_KICK);
if (cp->cas_flags & CAS_FLAG_REG_PLUS) {
- /* rx desc 2 is for IPSEC packets. however,
+ /* rx desc 2 is for IPSEC packets. however,
* we don't use it for that purpose.
*/
- val = (unsigned long) cp->init_rxds[1] -
+ val = (unsigned long) cp->init_rxds[1] -
(unsigned long) cp->init_block;
writel((desc_dma + val) >> 32, cp->regs + REG_PLUS_RX_DB1_HI);
- writel((desc_dma + val) & 0xffffffff, cp->regs +
+ writel((desc_dma + val) & 0xffffffff, cp->regs +
REG_PLUS_RX_DB1_LOW);
- writel(RX_DESC_RINGN_SIZE(1) - 4, cp->regs +
+ writel(RX_DESC_RINGN_SIZE(1) - 4, cp->regs +
REG_PLUS_RX_KICK1);
}
-
+
/* rx completion registers */
- val = (unsigned long) cp->init_rxcs[0] -
+ val = (unsigned long) cp->init_rxcs[0] -
(unsigned long) cp->init_block;
writel((desc_dma + val) >> 32, cp->regs + REG_RX_CB_HI);
writel((desc_dma + val) & 0xffffffff, cp->regs + REG_RX_CB_LOW);
if (cp->cas_flags & CAS_FLAG_REG_PLUS) {
/* rx comp 2-4 */
for (i = 1; i < MAX_RX_COMP_RINGS; i++) {
- val = (unsigned long) cp->init_rxcs[i] -
+ val = (unsigned long) cp->init_rxcs[i] -
(unsigned long) cp->init_block;
- writel((desc_dma + val) >> 32, cp->regs +
+ writel((desc_dma + val) >> 32, cp->regs +
REG_PLUS_RX_CBN_HI(i));
- writel((desc_dma + val) & 0xffffffff, cp->regs +
+ writel((desc_dma + val) & 0xffffffff, cp->regs +
REG_PLUS_RX_CBN_LOW(i));
}
}
/* 2 is different from 3 and 4 */
if (N_RX_COMP_RINGS > 1)
- writel(INTR_RX_DONE_ALT | INTR_RX_BUF_UNAVAIL_1,
+ writel(INTR_RX_DONE_ALT | INTR_RX_BUF_UNAVAIL_1,
cp->regs + REG_PLUS_ALIASN_CLEAR(1));
- for (i = 2; i < N_RX_COMP_RINGS; i++)
- writel(INTR_RX_DONE_ALT,
+ for (i = 2; i < N_RX_COMP_RINGS; i++)
+ writel(INTR_RX_DONE_ALT,
cp->regs + REG_PLUS_ALIASN_CLEAR(i));
}
/* set up pause thresholds */
val = CAS_BASE(RX_PAUSE_THRESH_OFF,
cp->rx_pause_off / RX_PAUSE_THRESH_QUANTUM);
- val |= CAS_BASE(RX_PAUSE_THRESH_ON,
+ val |= CAS_BASE(RX_PAUSE_THRESH_ON,
cp->rx_pause_on / RX_PAUSE_THRESH_QUANTUM);
writel(val, cp->regs + REG_RX_PAUSE_THRESH);
-
+
/* zero out dma reassembly buffers */
for (i = 0; i < 64; i++) {
writel(i, cp->regs + REG_RX_TABLE_ADDR);
* this should be tunable.
*/
writel(0x0, cp->regs + REG_RX_RED);
-
+
/* receive page sizes. default == 2K (0x800) */
val = 0;
if (cp->page_size == 0x1000)
val = 0x2;
else if (cp->page_size == 0x4000)
val = 0x3;
-
+
/* round mtu + offset. constrain to page size. */
size = cp->dev->mtu + 64;
if (size > cp->page_size)
cp->mtu_stride = 1 << (i + 10);
val = CAS_BASE(RX_PAGE_SIZE, val);
- val |= CAS_BASE(RX_PAGE_SIZE_MTU_STRIDE, i);
+ val |= CAS_BASE(RX_PAGE_SIZE_MTU_STRIDE, i);
val |= CAS_BASE(RX_PAGE_SIZE_MTU_COUNT, cp->page_size >> (i + 10));
val |= CAS_BASE(RX_PAGE_SIZE_MTU_OFF, 0x1);
writel(val, cp->regs + REG_RX_PAGE_SIZE);
-
+
/* enable the header parser if desired */
if (CAS_HP_FIRMWARE == cas_prog_null)
return;
static inline void cas_rxc_init(struct cas_rx_comp *rxc)
{
memset(rxc, 0, sizeof(*rxc));
- rxc->word4 = cpu_to_le64(RX_COMP4_ZERO);
+ rxc->word4 = cpu_to_le64(RX_COMP4_ZERO);
}
/* NOTE: we use the ENC RX DESC ring for spares. the rx_page[0,1]
cas_page_t *page = cp->rx_pages[1][index];
cas_page_t *new;
- if (cas_buffer_count(page) == 1)
+ if (page_count(page->buffer) == 1)
return page;
new = cas_page_dequeue(cp);
}
return new;
}
-
+
/* this needs to be changed if we actually use the ENC RX DESC ring */
-static cas_page_t *cas_page_swap(struct cas *cp, const int ring,
+static cas_page_t *cas_page_swap(struct cas *cp, const int ring,
const int index)
{
cas_page_t **page0 = cp->rx_pages[0];
cas_page_t **page1 = cp->rx_pages[1];
/* swap if buffer is in use */
- if (cas_buffer_count(page0[index]) > 1) {
+ if (page_count(page0[index]->buffer) > 1) {
cas_page_t *new = cas_page_spare(cp, index);
if (new) {
page1[index] = page0[index];
page0[index] = new;
}
- }
+ }
RX_USED_SET(page0[index], 0);
return page0[index];
}
for (i = 0; i < size; i++) {
cas_page_t *page = cas_page_swap(cp, 0, i);
rxd[i].buffer = cpu_to_le64(page->dma_addr);
- rxd[i].index = cpu_to_le64(CAS_BASE(RX_INDEX_NUM, i) |
+ rxd[i].index = cpu_to_le64(CAS_BASE(RX_INDEX_NUM, i) |
CAS_BASE(RX_INDEX_RING, 0));
}
- cp->rx_old[0] = RX_DESC_RINGN_SIZE(0) - 4;
+ cp->rx_old[0] = RX_DESC_RINGN_SIZE(0) - 4;
cp->rx_last[0] = 0;
cp->cas_flags &= ~CAS_FLAG_RXD_POST(0);
}
udelay(10);
}
if (limit == STOP_TRIES) {
- printk(KERN_ERR "%s: RX MAC will not disable, resetting whole "
- "chip.\n", dev->name);
+ netdev_err(dev, "RX MAC will not disable, resetting whole chip\n");
return 1;
}
udelay(10);
}
if (limit == STOP_TRIES) {
- printk(KERN_ERR "%s: RX DMA will not disable, resetting whole "
- "chip.\n", dev->name);
+ netdev_err(dev, "RX DMA will not disable, resetting whole chip\n");
return 1;
}
udelay(10);
}
if (limit == STOP_TRIES) {
- printk(KERN_ERR "%s: RX reset command will not execute, "
- "resetting whole chip.\n", dev->name);
+ netdev_err(dev, "RX reset command will not execute, resetting whole chip\n");
return 1;
}
if (!stat)
return 0;
- if (netif_msg_intr(cp))
- printk(KERN_DEBUG "%s: rxmac interrupt, stat: 0x%x\n",
- cp->dev->name, stat);
+ netif_dbg(cp, intr, cp->dev, "rxmac interrupt, stat: 0x%x\n", stat);
/* these are all rollovers */
spin_lock(&cp->stat_lock[0]);
- if (stat & MAC_RX_ALIGN_ERR)
+ if (stat & MAC_RX_ALIGN_ERR)
cp->net_stats[0].rx_frame_errors += 0x10000;
if (stat & MAC_RX_CRC_ERR)
if (!stat)
return 0;
- if (netif_msg_intr(cp))
- printk(KERN_DEBUG "%s: mac interrupt, stat: 0x%x\n",
- cp->dev->name, stat);
+ netif_printk(cp, intr, KERN_DEBUG, cp->dev,
+ "mac interrupt, stat: 0x%x\n", stat);
/* This interrupt is just for pause frame and pause
* tracking. It is useful for diagnostics and debug
return 0;
}
-
+
/* Must be invoked under cp->lock. */
static inline int cas_mdio_link_not_up(struct cas *cp)
{
u16 val;
-
+
switch (cp->lstate) {
case link_force_ret:
- if (netif_msg_link(cp))
- printk(KERN_INFO "%s: Autoneg failed again, keeping"
- " forced mode\n", cp->dev->name);
+ netif_info(cp, link, cp->dev, "Autoneg failed again, keeping forced mode\n");
cas_phy_write(cp, MII_BMCR, cp->link_fcntl);
cp->timer_ticks = 5;
cp->lstate = link_force_ok;
cp->link_transition = LINK_TRANSITION_LINK_CONFIG;
break;
-
+
case link_aneg:
val = cas_phy_read(cp, MII_BMCR);
*/
val &= ~(BMCR_ANRESTART | BMCR_ANENABLE);
val |= BMCR_FULLDPLX;
- val |= (cp->cas_flags & CAS_FLAG_1000MB_CAP) ?
+ val |= (cp->cas_flags & CAS_FLAG_1000MB_CAP) ?
CAS_BMCR_SPEED1000 : BMCR_SPEED100;
cas_phy_write(cp, MII_BMCR, val);
cp->timer_ticks = 5;
if (bmsr & BMSR_LSTATUS) {
/* Ok, here we got a link. If we had it due to a forced
- * fallback, and we were configured for autoneg, we
+ * fallback, and we were configured for autoneg, we
* retry a short autoneg pass. If you know your hub is
* broken, use ethtool ;)
*/
- if ((cp->lstate == link_force_try) &&
+ if ((cp->lstate == link_force_try) &&
(cp->link_cntl & BMCR_ANENABLE)) {
cp->lstate = link_force_ret;
cp->link_transition = LINK_TRANSITION_LINK_CONFIG;
cas_mif_poll(cp, 0);
cp->link_fcntl = cas_phy_read(cp, MII_BMCR);
cp->timer_ticks = 5;
- if (cp->opened && netif_msg_link(cp))
- printk(KERN_INFO "%s: Got link after fallback, retrying"
- " autoneg once...\n", cp->dev->name);
+ if (cp->opened)
+ netif_info(cp, link, cp->dev,
+ "Got link after fallback, retrying autoneg once...\n");
cas_phy_write(cp, MII_BMCR,
cp->link_fcntl | BMCR_ANENABLE |
BMCR_ANRESTART);
cp->link_transition = LINK_TRANSITION_LINK_DOWN;
netif_carrier_off(cp->dev);
- if (cp->opened && netif_msg_link(cp))
- printk(KERN_INFO "%s: Link down\n",
- cp->dev->name);
+ if (cp->opened)
+ netif_info(cp, link, cp->dev, "Link down\n");
restart = 1;
-
+
} else if (++cp->timer_ticks > 10)
cas_mdio_link_not_up(cp);
-
+
return restart;
}
if (!stat)
return 0;
- printk(KERN_ERR "%s: PCI error [%04x:%04x] ", dev->name, stat,
- readl(cp->regs + REG_BIM_DIAG));
+ netdev_err(dev, "PCI error [%04x:%04x]",
+ stat, readl(cp->regs + REG_BIM_DIAG));
/* cassini+ has this reserved */
if ((stat & PCI_ERR_BADACK) &&
((cp->cas_flags & CAS_FLAG_REG_PLUS) == 0))
- printk("<No ACK64# during ABS64 cycle> ");
+ pr_cont(" <No ACK64# during ABS64 cycle>");
if (stat & PCI_ERR_DTRTO)
- printk("<Delayed transaction timeout> ");
+ pr_cont(" <Delayed transaction timeout>");
if (stat & PCI_ERR_OTHER)
- printk("<other> ");
+ pr_cont(" <other>");
if (stat & PCI_ERR_BIM_DMA_WRITE)
- printk("<BIM DMA 0 write req> ");
+ pr_cont(" <BIM DMA 0 write req>");
if (stat & PCI_ERR_BIM_DMA_READ)
- printk("<BIM DMA 0 read req> ");
- printk("\n");
+ pr_cont(" <BIM DMA 0 read req>");
+ pr_cont("\n");
if (stat & PCI_ERR_OTHER) {
u16 cfg;
* true cause.
*/
pci_read_config_word(cp->pdev, PCI_STATUS, &cfg);
- printk(KERN_ERR "%s: Read PCI cfg space status [%04x]\n",
- dev->name, cfg);
+ netdev_err(dev, "Read PCI cfg space status [%04x]\n", cfg);
if (cfg & PCI_STATUS_PARITY)
- printk(KERN_ERR "%s: PCI parity error detected.\n",
- dev->name);
+ netdev_err(dev, "PCI parity error detected\n");
if (cfg & PCI_STATUS_SIG_TARGET_ABORT)
- printk(KERN_ERR "%s: PCI target abort.\n",
- dev->name);
+ netdev_err(dev, "PCI target abort\n");
if (cfg & PCI_STATUS_REC_TARGET_ABORT)
- printk(KERN_ERR "%s: PCI master acks target abort.\n",
- dev->name);
+ netdev_err(dev, "PCI master acks target abort\n");
if (cfg & PCI_STATUS_REC_MASTER_ABORT)
- printk(KERN_ERR "%s: PCI master abort.\n", dev->name);
+ netdev_err(dev, "PCI master abort\n");
if (cfg & PCI_STATUS_SIG_SYSTEM_ERROR)
- printk(KERN_ERR "%s: PCI system error SERR#.\n",
- dev->name);
+ netdev_err(dev, "PCI system error SERR#\n");
if (cfg & PCI_STATUS_DETECTED_PARITY)
- printk(KERN_ERR "%s: PCI parity error.\n",
- dev->name);
+ netdev_err(dev, "PCI parity error\n");
/* Write the error bits back to clear them. */
cfg &= (PCI_STATUS_PARITY |
{
if (status & INTR_RX_TAG_ERROR) {
/* corrupt RX tag framing */
- if (netif_msg_rx_err(cp))
- printk(KERN_DEBUG "%s: corrupt rx tag framing\n",
- cp->dev->name);
+ netif_printk(cp, rx_err, KERN_DEBUG, cp->dev,
+ "corrupt rx tag framing\n");
spin_lock(&cp->stat_lock[0]);
cp->net_stats[0].rx_errors++;
spin_unlock(&cp->stat_lock[0]);
if (status & INTR_RX_LEN_MISMATCH) {
/* length mismatch. */
- if (netif_msg_rx_err(cp))
- printk(KERN_DEBUG "%s: length mismatch for rx frame\n",
- cp->dev->name);
+ netif_printk(cp, rx_err, KERN_DEBUG, cp->dev,
+ "length mismatch for rx frame\n");
spin_lock(&cp->stat_lock[0]);
cp->net_stats[0].rx_errors++;
spin_unlock(&cp->stat_lock[0]);
#if 1
atomic_inc(&cp->reset_task_pending);
atomic_inc(&cp->reset_task_pending_all);
- printk(KERN_ERR "%s:reset called in cas_abnormal_irq [0x%x]\n",
- dev->name, status);
+ netdev_err(dev, "reset called in cas_abnormal_irq [0x%x]\n", status);
schedule_work(&cp->reset_task);
#else
atomic_set(&cp->reset_task_pending, CAS_RESET_ALL);
- printk(KERN_ERR "reset called in cas_abnormal_irq\n");
+ netdev_err(dev, "reset called in cas_abnormal_irq\n");
schedule_work(&cp->reset_task);
#endif
return 1;
if (count < 0)
break;
- if (netif_msg_tx_done(cp))
- printk(KERN_DEBUG "%s: tx[%d] done, slot %d\n",
- cp->dev->name, ring, entry);
+ netif_printk(cp, tx_done, KERN_DEBUG, cp->dev,
+ "tx[%d] done, slot %d\n", ring, entry);
skbs[entry] = NULL;
cp->tx_tiny_use[ring][entry].nbufs = 0;
-
+
for (frag = 0; frag <= skb_shinfo(skb)->nr_frags; frag++) {
struct cas_tx_desc *txd = txds + entry;
if (cp->tx_tiny_use[ring][entry].used) {
cp->tx_tiny_use[ring][entry].used = 0;
entry = TX_DESC_NEXT(ring, entry);
- }
+ }
}
spin_lock(&cp->stat_lock[ring]);
#ifdef USE_TX_COMPWB
u64 compwb = le64_to_cpu(cp->init_block->tx_compwb);
#endif
- if (netif_msg_intr(cp))
- printk(KERN_DEBUG "%s: tx interrupt, status: 0x%x, %llx\n",
- cp->dev->name, status, (unsigned long long)compwb);
+ netif_printk(cp, intr, KERN_DEBUG, cp->dev,
+ "tx interrupt, status: 0x%x, %llx\n",
+ status, (unsigned long long)compwb);
/* process all the rings */
for (ring = 0; ring < N_TX_RINGS; ring++) {
#ifdef USE_TX_COMPWB
#else
limit = readl(cp->regs + REG_TX_COMPN(ring));
#endif
- if (cp->tx_old[ring] != limit)
+ if (cp->tx_old[ring] != limit)
cas_tx_ringN(cp, ring, limit);
}
}
-static int cas_rx_process_pkt(struct cas *cp, struct cas_rx_comp *rxc,
- int entry, const u64 *words,
+static int cas_rx_process_pkt(struct cas *cp, struct cas_rx_comp *rxc,
+ int entry, const u64 *words,
struct sk_buff **skbref)
{
int dlen, hlen, len, i, alloclen;
struct cas_page *page;
struct sk_buff *skb;
void *addr, *crcaddr;
- char *p;
+ __sum16 csum;
+ char *p;
hlen = CAS_VAL(RX_COMP2_HDR_SIZE, words[1]);
dlen = CAS_VAL(RX_COMP1_DATA_SIZE, words[0]);
len = hlen + dlen;
- if (RX_COPY_ALWAYS || (words[2] & RX_COMP3_SMALL_PKT))
+ if (RX_COPY_ALWAYS || (words[2] & RX_COMP3_SMALL_PKT))
alloclen = len;
- else
+ else
alloclen = max(hlen, RX_COPY_MIN);
skb = dev_alloc_skb(alloclen + swivel + cp->crc_size);
- if (skb == NULL)
+ if (skb == NULL)
return -1;
*skbref = skb;
- skb->dev = cp->dev;
skb_reserve(skb, swivel);
p = skb->data;
if (hlen) { /* always copy header pages */
i = CAS_VAL(RX_COMP2_HDR_INDEX, words[1]);
page = cp->rx_pages[CAS_VAL(RX_INDEX_RING, i)][CAS_VAL(RX_INDEX_NUM, i)];
- off = CAS_VAL(RX_COMP2_HDR_OFF, words[1]) * 0x100 +
+ off = CAS_VAL(RX_COMP2_HDR_OFF, words[1]) * 0x100 +
swivel;
i = hlen;
RX_USED_ADD(page, 0x100);
p += hlen;
swivel = 0;
- }
+ }
if (alloclen < (hlen + dlen)) {
hlen = min(cp->page_size - off, dlen);
if (hlen < 0) {
- if (netif_msg_rx_err(cp)) {
- printk(KERN_DEBUG "%s: rx page overflow: "
- "%d\n", cp->dev->name, hlen);
- }
+ netif_printk(cp, rx_err, KERN_DEBUG, cp->dev,
+ "rx page overflow: %d\n", hlen);
dev_kfree_skb_irq(skb);
return -1;
}
skb_shinfo(skb)->nr_frags++;
skb->data_len += hlen - swivel;
+ skb->truesize += hlen - swivel;
skb->len += hlen - swivel;
get_page(page->buffer);
- cas_buffer_inc(page);
frag->page = page->buffer;
frag->page_offset = off;
frag->size = hlen - swivel;
-
+
/* any more data? */
if ((words[0] & RX_COMP1_SPLIT_PKT) && ((dlen -= hlen) > 0)) {
hlen = dlen;
i = CAS_VAL(RX_COMP2_NEXT_INDEX, words[1]);
page = cp->rx_pages[CAS_VAL(RX_INDEX_RING, i)][CAS_VAL(RX_INDEX_NUM, i)];
- pci_dma_sync_single_for_cpu(cp->pdev, page->dma_addr,
- hlen + cp->crc_size,
+ pci_dma_sync_single_for_cpu(cp->pdev, page->dma_addr,
+ hlen + cp->crc_size,
PCI_DMA_FROMDEVICE);
pci_dma_sync_single_for_device(cp->pdev, page->dma_addr,
hlen + cp->crc_size,
skb_shinfo(skb)->nr_frags++;
skb->data_len += hlen;
- skb->len += hlen;
+ skb->len += hlen;
frag++;
get_page(page->buffer);
- cas_buffer_inc(page);
frag->page = page->buffer;
frag->page_offset = 0;
frag->size = hlen;
off = CAS_VAL(RX_COMP1_DATA_OFF, words[0]) + swivel;
hlen = min(cp->page_size - off, dlen);
if (hlen < 0) {
- if (netif_msg_rx_err(cp)) {
- printk(KERN_DEBUG "%s: rx page overflow: "
- "%d\n", cp->dev->name, hlen);
- }
+ netif_printk(cp, rx_err, KERN_DEBUG, cp->dev,
+ "rx page overflow: %d\n", hlen);
dev_kfree_skb_irq(skb);
return -1;
}
RX_USED_ADD(page, cp->mtu_stride);
else
RX_USED_ADD(page, i);
-
+
/* any more data? */
if ((words[0] & RX_COMP1_SPLIT_PKT) && ((dlen -= hlen) > 0)) {
p += hlen;
i = CAS_VAL(RX_COMP2_NEXT_INDEX, words[1]);
page = cp->rx_pages[CAS_VAL(RX_INDEX_RING, i)][CAS_VAL(RX_INDEX_NUM, i)];
- pci_dma_sync_single_for_cpu(cp->pdev, page->dma_addr,
- dlen + cp->crc_size,
+ pci_dma_sync_single_for_cpu(cp->pdev, page->dma_addr,
+ dlen + cp->crc_size,
PCI_DMA_FROMDEVICE);
addr = cas_page_map(page->buffer);
memcpy(p, addr, dlen + cp->crc_size);
dlen + cp->crc_size,
PCI_DMA_FROMDEVICE);
cas_page_unmap(addr);
- RX_USED_ADD(page, dlen + cp->crc_size);
+ RX_USED_ADD(page, dlen + cp->crc_size);
}
end_copy_pkt:
if (cp->crc_size) {
skb_put(skb, alloclen);
}
- i = CAS_VAL(RX_COMP4_TCP_CSUM, words[3]);
+ csum = (__force __sum16)htons(CAS_VAL(RX_COMP4_TCP_CSUM, words[3]));
if (cp->crc_size) {
/* checksum includes FCS. strip it out. */
- i = csum_fold(csum_partial(crcaddr, cp->crc_size, i));
+ csum = csum_fold(csum_partial(crcaddr, cp->crc_size,
+ csum_unfold(csum)));
if (addr)
cas_page_unmap(addr);
}
- skb->csum = ntohs(i ^ 0xffff);
- skb->ip_summed = CHECKSUM_HW;
skb->protocol = eth_type_trans(skb, cp->dev);
+ if (skb->protocol == htons(ETH_P_IP)) {
+ skb->csum = csum_unfold(~csum);
+ skb->ip_summed = CHECKSUM_COMPLETE;
+ } else
+ skb->ip_summed = CHECKSUM_NONE;
return len;
}
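/* [editorial sketch] The FCS-stripping step above leans on a 16-bit
 * one's-complement identity: adding the sum of the trailing FCS bytes
 * to the complemented checksum of (payload + FCS) yields the
 * complemented checksum of the payload alone. A self-checking
 * user-space demonstration; add1c() is a local helper, not a kernel
 * API, and the constants are arbitrary: */
#include <assert.h>
#include <stdint.h>

static uint16_t add1c(uint16_t a, uint16_t b)	/* one's-complement add */
{
	uint32_t s = (uint32_t)a + b;
	return (uint16_t)((s & 0xffff) + (s >> 16));
}

int main(void)
{
	uint16_t payload_sum = 0x1234, fcs_sum = 0xabcd;
	uint16_t csum_with_fcs = (uint16_t)~add1c(payload_sum, fcs_sum);

	/* "strip" the FCS by summing it back in, as the driver does */
	assert((uint16_t)~add1c(csum_with_fcs, fcs_sum) == payload_sum);
	return 0;
}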
/* we can handle up to 64 rx flows at a time. we do the same thing
- * as nonreassm except that we batch up the buffers.
+ * as nonreassm except that we batch up the buffers.
* NOTE: we currently just treat each flow as a bunch of packets that
* we pass up. a better way would be to coalesce the packets
* into a jumbo packet. to do that, we need to do the following:
* data length and merge the checksums.
* 3) on flow release, fix up the header.
* 4) make sure the higher layer doesn't care.
- * because packets get coalesced, we shouldn't run into fragment count
+ * because packets get coalesced, we shouldn't run into fragment count
* issues.
*/
static inline void cas_rx_flow_pkt(struct cas *cp, const u64 *words,
{
int flowid = CAS_VAL(RX_COMP3_FLOWID, words[2]) & (N_RX_FLOWS - 1);
struct sk_buff_head *flow = &cp->rx_flows[flowid];
-
- /* this is protected at a higher layer, so no need to
+
+ /* this is protected at a higher layer, so no need to
* do any additional locking here. stick the buffer
* at the end.
*/
- __skb_insert(skb, flow->prev, (struct sk_buff *) flow, flow);
+ __skb_queue_tail(flow, skb);
if (words[0] & RX_COMP1_RELEASE_FLOW) {
while ((skb = __skb_dequeue(flow))) {
cas_skb_release(skb);
new = cas_page_swap(cp, ring, index);
cp->init_rxds[ring][entry].buffer = cpu_to_le64(new->dma_addr);
cp->init_rxds[ring][entry].index =
- cpu_to_le64(CAS_BASE(RX_INDEX_NUM, index) |
+ cpu_to_le64(CAS_BASE(RX_INDEX_NUM, index) |
CAS_BASE(RX_INDEX_RING, ring));
entry = RX_DESC_ENTRY(ring, entry + 1);
cp->rx_old[ring] = entry;
-
+
if (entry % 4)
return;
if (ring == 0)
writel(entry, cp->regs + REG_RX_KICK);
else if ((N_RX_DESC_RINGS > 1) &&
- (cp->cas_flags & CAS_FLAG_REG_PLUS))
+ (cp->cas_flags & CAS_FLAG_REG_PLUS))
writel(entry, cp->regs + REG_PLUS_RX_KICK1);
}
entry = cp->rx_old[ring];
- if (netif_msg_intr(cp))
- printk(KERN_DEBUG "%s: rxd[%d] interrupt, done: %d\n",
- cp->dev->name, ring, entry);
+ netif_printk(cp, intr, KERN_DEBUG, cp->dev,
+ "rxd[%d] interrupt, done: %d\n", ring, entry);
cluster = -1;
- count = entry & 0x3;
+ count = entry & 0x3;
last = RX_DESC_ENTRY(ring, num ? entry + num - 4: entry - 4);
released = 0;
while (entry != last) {
/* make a new buffer if it's still in use */
- if (cas_buffer_count(page[entry]) > 1) {
+ if (page_count(page[entry]->buffer) > 1) {
cas_page_t *new = cas_page_dequeue(cp);
if (!new) {
- /* let the timer know that we need to
+ /* let the timer know that we need to
* do this again
*/
cp->cas_flags |= CAS_FLAG_RXD_POST(ring);
if (!timer_pending(&cp->link_timer))
- mod_timer(&cp->link_timer, jiffies +
+ mod_timer(&cp->link_timer, jiffies +
CAS_LINK_FAST_TIMEOUT);
cp->rx_old[ring] = entry;
cp->rx_last[ring] = num ? num - released : 0;
spin_lock(&cp->rx_inuse_lock);
list_add(&page[entry]->list, &cp->rx_inuse_list);
spin_unlock(&cp->rx_inuse_lock);
- cp->init_rxds[ring][entry].buffer =
+ cp->init_rxds[ring][entry].buffer =
cpu_to_le64(new->dma_addr);
page[entry] = new;
-
+
}
if (++count == 4) {
}
cp->rx_old[ring] = entry;
- if (cluster < 0)
+ if (cluster < 0)
return 0;
if (ring == 0)
writel(cluster, cp->regs + REG_RX_KICK);
else if ((N_RX_DESC_RINGS > 1) &&
- (cp->cas_flags & CAS_FLAG_REG_PLUS))
+ (cp->cas_flags & CAS_FLAG_REG_PLUS))
writel(cluster, cp->regs + REG_PLUS_RX_KICK1);
return 0;
}
/* process a completion ring. packets are set up in three basic ways:
* small packets: should be copied header + data in single buffer.
* large packets: header and data in a single buffer.
- * split packets: header in a separate buffer from data.
+ * split packets: header in a separate buffer from data.
* data may be in multiple pages. data may be > 256
- * bytes but in a single page.
+ * bytes but in a single page.
*
* NOTE: RX page posting is done in this routine as well. while there's
* the capability of using multiple RX completion rings, it isn't
* really worthwhile due to the fact that the page posting will
- * force serialization on the single descriptor ring.
+ * force serialization on the single descriptor ring.
*/
static int cas_rx_ringN(struct cas *cp, int ring, int budget)
{
int entry, drops;
int npackets = 0;
- if (netif_msg_intr(cp))
- printk(KERN_DEBUG "%s: rx[%d] interrupt, done: %d/%d\n",
- cp->dev->name, ring,
- readl(cp->regs + REG_RX_COMP_HEAD),
- cp->rx_new[ring]);
+ netif_printk(cp, intr, KERN_DEBUG, cp->dev,
+ "rx[%d] interrupt, done: %d/%d\n",
+ ring,
+ readl(cp->regs + REG_RX_COMP_HEAD), cp->rx_new[ring]);
entry = cp->rx_new[ring];
drops = 0;
while (1) {
struct cas_rx_comp *rxc = rxcs + entry;
- struct sk_buff *skb;
+ struct sk_buff *uninitialized_var(skb);
int type, len;
u64 words[4];
int i, dring;
*/
if (RX_DONT_BATCH || (type == 0x2)) {
/* non-reassm: these always get released */
- cas_skb_release(skb);
+ cas_skb_release(skb);
} else {
cas_rx_flow_pkt(cp, words, skb);
}
cp->net_stats[ring].rx_packets++;
cp->net_stats[ring].rx_bytes += len;
spin_unlock(&cp->stat_lock[ring]);
- cp->dev->last_rx = jiffies;
next:
npackets++;
i = CAS_VAL(RX_INDEX_NUM, i);
cas_post_page(cp, dring, i);
}
-
+
if (words[0] & RX_COMP1_RELEASE_DATA) {
i = CAS_VAL(RX_COMP1_DATA_INDEX, words[0]);
dring = CAS_VAL(RX_INDEX_RING, i);
}
/* skip to the next entry */
- entry = RX_COMP_ENTRY(ring, entry + 1 +
+ entry = RX_COMP_ENTRY(ring, entry + 1 +
CAS_VAL(RX_COMP1_SKIP, words[0]));
#ifdef USE_NAPI
if (budget && (npackets >= budget))
cp->rx_new[ring] = entry;
if (drops)
- printk(KERN_INFO "%s: Memory squeeze, deferring packet.\n",
- cp->dev->name);
+ netdev_info(cp->dev, "Memory squeeze, deferring packet\n");
return npackets;
}
int last, entry;
last = cp->rx_cur[ring];
- entry = cp->rx_new[ring];
- if (netif_msg_intr(cp))
- printk(KERN_DEBUG "%s: rxc[%d] interrupt, done: %d/%d\n",
- dev->name, ring, readl(cp->regs + REG_RX_COMP_HEAD),
- entry);
-
+ entry = cp->rx_new[ring];
+ netif_printk(cp, intr, KERN_DEBUG, dev,
+ "rxc[%d] interrupt, done: %d/%d\n",
+ ring, readl(cp->regs + REG_RX_COMP_HEAD), entry);
+
/* zero and re-mark descriptors */
while (last != entry) {
cas_rxc_init(rxc + last);
if (ring == 0)
writel(last, cp->regs + REG_RX_COMP_TAIL);
- else if (cp->cas_flags & CAS_FLAG_REG_PLUS)
+ else if (cp->cas_flags & CAS_FLAG_REG_PLUS)
writel(last, cp->regs + REG_PLUS_RX_COMPN_TAIL(ring));
}
-/* cassini can use all four PCI interrupts for the completion ring.
+/* cassini can use all four PCI interrupts for the completion ring.
* rings 3 and 4 are identical
*/
#if defined(USE_PCI_INTC) || defined(USE_PCI_INTD)
-static inline void cas_handle_irqN(struct net_device *dev,
+static inline void cas_handle_irqN(struct net_device *dev,
struct cas *cp, const u32 status,
const int ring)
{
- if (status & (INTR_RX_COMP_FULL_ALT | INTR_RX_COMP_AF_ALT))
+ if (status & (INTR_RX_COMP_FULL_ALT | INTR_RX_COMP_AF_ALT))
cas_post_rxcs_ringN(dev, cp, ring);
}
-static irqreturn_t cas_interruptN(int irq, void *dev_id, struct pt_regs *regs)
+static irqreturn_t cas_interruptN(int irq, void *dev_id)
{
struct net_device *dev = dev_id;
struct cas *cp = netdev_priv(dev);
if (status & INTR_RX_DONE_ALT) { /* handle rx separately */
#ifdef USE_NAPI
cas_mask_intr(cp);
- netif_rx_schedule(dev);
+ napi_schedule(&cp->napi);
#else
cas_rx_ringN(cp, ring, 0);
#endif
static inline void cas_handle_irq1(struct cas *cp, const u32 status)
{
if (status & INTR_RX_BUF_UNAVAIL_1) {
- /* Frame arrived, no free RX buffers available.
+ /* Frame arrived, no free RX buffers available.
* NOTE: we can get this on a link transition. */
cas_post_rxds_ringN(cp, 1, 0);
spin_lock(&cp->stat_lock[1]);
spin_unlock(&cp->stat_lock[1]);
}
- if (status & INTR_RX_BUF_AE_1)
- cas_post_rxds_ringN(cp, 1, RX_DESC_RINGN_SIZE(1) -
+ if (status & INTR_RX_BUF_AE_1)
+ cas_post_rxds_ringN(cp, 1, RX_DESC_RINGN_SIZE(1) -
RX_AE_FREEN_VAL(1));
if (status & (INTR_RX_COMP_AF | INTR_RX_COMP_FULL))
}
/* ring 2 handles a few more events than 3 and 4 */
-static irqreturn_t cas_interrupt1(int irq, void *dev_id, struct pt_regs *regs)
+static irqreturn_t cas_interrupt1(int irq, void *dev_id)
{
struct net_device *dev = dev_id;
struct cas *cp = netdev_priv(dev);
if (status & INTR_RX_DONE_ALT) { /* handle rx separately */
#ifdef USE_NAPI
cas_mask_intr(cp);
- netif_rx_schedule(dev);
+ napi_schedule(&cp->napi);
#else
cas_rx_ringN(cp, 1, 0);
#endif
cas_abnormal_irq(dev, cp, status);
if (status & INTR_RX_BUF_UNAVAIL) {
- /* Frame arrived, no free RX buffers available.
+ /* Frame arrived, no free RX buffers available.
* NOTE: we can get this on a link transition.
*/
cas_post_rxds_ringN(cp, 0, 0);
cas_post_rxcs_ringN(dev, cp, 0);
}
-static irqreturn_t cas_interrupt(int irq, void *dev_id, struct pt_regs *regs)
+static irqreturn_t cas_interrupt(int irq, void *dev_id)
{
struct net_device *dev = dev_id;
struct cas *cp = netdev_priv(dev);
if (status & INTR_RX_DONE) {
#ifdef USE_NAPI
cas_mask_intr(cp);
- netif_rx_schedule(dev);
+ napi_schedule(&cp->napi);
#else
cas_rx_ringN(cp, 0, 0);
#endif
#ifdef USE_NAPI
-static int cas_poll(struct net_device *dev, int *budget)
+static int cas_poll(struct napi_struct *napi, int budget)
{
- struct cas *cp = netdev_priv(dev);
- int i, enable_intr, todo, credits;
+ struct cas *cp = container_of(napi, struct cas, napi);
+ struct net_device *dev = cp->dev;
+ int i, enable_intr, credits;
u32 status = readl(cp->regs + REG_INTR_STATUS);
unsigned long flags;
/* NAPI rx packets. we spread the credits across all of the
* rxc rings
- */
- todo = min(*budget, dev->quota);
-
- /* to make sure we're fair with the work we loop through each
- * ring N_RX_COMP_RING times with a request of
- * todo / N_RX_COMP_RINGS
+ *
+ * to make sure we're fair with the work we loop through each
+ * ring N_RX_COMP_RING times with a request of
+ * budget / N_RX_COMP_RINGS
*/
enable_intr = 1;
credits = 0;
for (i = 0; i < N_RX_COMP_RINGS; i++) {
int j;
for (j = 0; j < N_RX_COMP_RINGS; j++) {
- credits += cas_rx_ringN(cp, j, todo / N_RX_COMP_RINGS);
- if (credits >= todo) {
+ credits += cas_rx_ringN(cp, j, budget / N_RX_COMP_RINGS);
+ if (credits >= budget) {
enable_intr = 0;
goto rx_comp;
}
}
rx_comp:
- *budget -= credits;
- dev->quota -= credits;
-
/* final rx completion */
spin_lock_irqsave(&cp->lock, flags);
if (status)
#endif
spin_unlock_irqrestore(&cp->lock, flags);
if (enable_intr) {
- netif_rx_complete(dev);
+ napi_complete(napi);
cas_unmask_intr(cp);
- return 0;
}
- return 1;
+ return credits;
}
#endif
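/* [editorial sketch] The cas_poll() conversion above follows the
 * standard NAPI contract: consume up to `budget` packets, spread here
 * across completion rings, and only re-enable interrupts when the work
 * ran out early. A distilled sketch of that accounting; ring_poll() is
 * a hypothetical per-ring worker, not a kernel API: */
static int poll_sketch(int budget, int nrings,
		       int (*ring_poll)(int ring, int quota))
{
	int credits = 0, ring;

	for (ring = 0; ring < nrings; ring++) {
		credits += ring_poll(ring, budget / nrings);
		if (credits >= budget)	/* budget spent: stay masked, */
			return credits;	/* the core will poll again */
	}
	/* under budget: this is where napi_complete() + unmask belong */
	return credits;
}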
struct cas *cp = netdev_priv(dev);
cas_disable_irq(cp, 0);
- cas_interrupt(cp->pdev->irq, dev, NULL);
+ cas_interrupt(cp->pdev->irq, dev);
cas_enable_irq(cp, 0);
#ifdef USE_PCI_INTB
{
struct cas *cp = netdev_priv(dev);
- printk(KERN_ERR "%s: transmit timed out, resetting\n", dev->name);
+ netdev_err(dev, "transmit timed out, resetting\n");
if (!cp->hw_running) {
- printk("%s: hrm.. hw not running!\n", dev->name);
+ netdev_err(dev, "hrm.. hw not running!\n");
return;
}
- printk(KERN_ERR "%s: MIF_STATE[%08x]\n",
- dev->name, readl(cp->regs + REG_MIF_STATE_MACHINE));
-
- printk(KERN_ERR "%s: MAC_STATE[%08x]\n",
- dev->name, readl(cp->regs + REG_MAC_STATE_MACHINE));
-
- printk(KERN_ERR "%s: TX_STATE[%08x:%08x:%08x] "
- "FIFO[%08x:%08x:%08x] SM1[%08x] SM2[%08x]\n",
- dev->name,
- readl(cp->regs + REG_TX_CFG),
- readl(cp->regs + REG_MAC_TX_STATUS),
- readl(cp->regs + REG_MAC_TX_CFG),
- readl(cp->regs + REG_TX_FIFO_PKT_CNT),
- readl(cp->regs + REG_TX_FIFO_WRITE_PTR),
- readl(cp->regs + REG_TX_FIFO_READ_PTR),
- readl(cp->regs + REG_TX_SM_1),
- readl(cp->regs + REG_TX_SM_2));
-
- printk(KERN_ERR "%s: RX_STATE[%08x:%08x:%08x]\n",
- dev->name,
- readl(cp->regs + REG_RX_CFG),
- readl(cp->regs + REG_MAC_RX_STATUS),
- readl(cp->regs + REG_MAC_RX_CFG));
-
- printk(KERN_ERR "%s: HP_STATE[%08x:%08x:%08x:%08x]\n",
- dev->name,
- readl(cp->regs + REG_HP_STATE_MACHINE),
- readl(cp->regs + REG_HP_STATUS0),
- readl(cp->regs + REG_HP_STATUS1),
- readl(cp->regs + REG_HP_STATUS2));
+ netdev_err(dev, "MIF_STATE[%08x]\n",
+ readl(cp->regs + REG_MIF_STATE_MACHINE));
+
+ netdev_err(dev, "MAC_STATE[%08x]\n",
+ readl(cp->regs + REG_MAC_STATE_MACHINE));
+
+ netdev_err(dev, "TX_STATE[%08x:%08x:%08x] FIFO[%08x:%08x:%08x] SM1[%08x] SM2[%08x]\n",
+ readl(cp->regs + REG_TX_CFG),
+ readl(cp->regs + REG_MAC_TX_STATUS),
+ readl(cp->regs + REG_MAC_TX_CFG),
+ readl(cp->regs + REG_TX_FIFO_PKT_CNT),
+ readl(cp->regs + REG_TX_FIFO_WRITE_PTR),
+ readl(cp->regs + REG_TX_FIFO_READ_PTR),
+ readl(cp->regs + REG_TX_SM_1),
+ readl(cp->regs + REG_TX_SM_2));
+
+ netdev_err(dev, "RX_STATE[%08x:%08x:%08x]\n",
+ readl(cp->regs + REG_RX_CFG),
+ readl(cp->regs + REG_MAC_RX_STATUS),
+ readl(cp->regs + REG_MAC_RX_CFG));
+
+ netdev_err(dev, "HP_STATE[%08x:%08x:%08x:%08x]\n",
+ readl(cp->regs + REG_HP_STATE_MACHINE),
+ readl(cp->regs + REG_HP_STATUS0),
+ readl(cp->regs + REG_HP_STATUS1),
+ readl(cp->regs + REG_HP_STATUS2));
#if 1
atomic_inc(&cp->reset_task_pending);
txd->buffer = cpu_to_le64(mapping);
}
-static inline void *tx_tiny_buf(struct cas *cp, const int ring,
+static inline void *tx_tiny_buf(struct cas *cp, const int ring,
const int entry)
{
return cp->tx_tiny_bufs[ring] + TX_TINY_BUF_LEN*entry;
}
-static inline dma_addr_t tx_tiny_map(struct cas *cp, const int ring,
+static inline dma_addr_t tx_tiny_map(struct cas *cp, const int ring,
const int entry, const int tentry)
{
cp->tx_tiny_use[ring][tentry].nbufs++;
return cp->tx_tiny_dvma[ring] + TX_TINY_BUF_LEN*entry;
}
-static inline int cas_xmit_tx_ringN(struct cas *cp, int ring,
+static inline int cas_xmit_tx_ringN(struct cas *cp, int ring,
struct sk_buff *skb)
{
struct net_device *dev = cp->dev;
spin_lock_irqsave(&cp->tx_lock[ring], flags);
/* This is a hard error, log it. */
- if (TX_BUFFS_AVAIL(cp, ring) <=
+ if (TX_BUFFS_AVAIL(cp, ring) <=
CAS_TABORT(cp)*(skb_shinfo(skb)->nr_frags + 1)) {
netif_stop_queue(dev);
spin_unlock_irqrestore(&cp->tx_lock[ring], flags);
- printk(KERN_ERR PFX "%s: BUG! Tx Ring full when "
- "queue awake!\n", dev->name);
+ netdev_err(dev, "BUG! Tx Ring full when queue awake!\n");
return 1;
}
ctrl = 0;
- if (skb->ip_summed == CHECKSUM_HW) {
- u64 csum_start_off, csum_stuff_off;
+ if (skb->ip_summed == CHECKSUM_PARTIAL) {
+ const u64 csum_start_off = skb_transport_offset(skb);
+ const u64 csum_stuff_off = csum_start_off + skb->csum_offset;
- csum_start_off = (u64) (skb->h.raw - skb->data);
- csum_stuff_off = (u64) ((skb->h.raw + skb->csum) - skb->data);
-
- ctrl = TX_DESC_CSUM_EN |
+ ctrl = TX_DESC_CSUM_EN |
CAS_BASE(TX_DESC_CSUM_START, csum_start_off) |
CAS_BASE(TX_DESC_CSUM_STUFF, csum_stuff_off);
}
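/* [editorial note] Worked example of the two offsets above for a
 * hypothetical TCP-over-IPv4 frame with no options:
 *   csum_start_off = skb_transport_offset = 14 (Ethernet) + 20 (IPv4) = 34
 *   csum_stuff_off = 34 + 16 (checksum field offset within TCP)       = 50
 * so the NIC sums from byte 34 onward and writes the folded result
 * back at byte 50. */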
tabort = cas_calc_tabort(cp, (unsigned long) skb->data, len);
if (unlikely(tabort)) {
/* NOTE: len is always > tabort */
- cas_write_txd(cp, ring, entry, mapping, len - tabort,
+ cas_write_txd(cp, ring, entry, mapping, len - tabort,
ctrl | TX_DESC_SOF, 0);
entry = TX_DESC_NEXT(ring, entry);
- memcpy(tx_tiny_buf(cp, ring, entry), skb->data +
- len - tabort, tabort);
+ skb_copy_from_linear_data_offset(skb, len - tabort,
+ tx_tiny_buf(cp, ring, entry), tabort);
mapping = tx_tiny_map(cp, ring, entry, tentry);
cas_write_txd(cp, ring, entry, mapping, tabort, ctrl,
(nr_frags == 0));
} else {
- cas_write_txd(cp, ring, entry, mapping, len, ctrl |
+ cas_write_txd(cp, ring, entry, mapping, len, ctrl |
TX_DESC_SOF, (nr_frags == 0));
}
entry = TX_DESC_NEXT(ring, entry);
cas_write_txd(cp, ring, entry, mapping, len - tabort,
ctrl, 0);
entry = TX_DESC_NEXT(ring, entry);
-
+
addr = cas_page_map(fragp->page);
memcpy(tx_tiny_buf(cp, ring, entry),
- addr + fragp->page_offset + len - tabort,
+ addr + fragp->page_offset + len - tabort,
tabort);
cas_page_unmap(addr);
mapping = tx_tiny_map(cp, ring, entry, tentry);
if (TX_BUFFS_AVAIL(cp, ring) <= CAS_TABORT(cp)*(MAX_SKB_FRAGS + 1))
netif_stop_queue(dev);
- if (netif_msg_tx_queued(cp))
- printk(KERN_DEBUG "%s: tx[%d] queued, slot %d, skblen %d, "
- "avail %d\n",
- dev->name, ring, entry, skb->len,
- TX_BUFFS_AVAIL(cp, ring));
+ netif_printk(cp, tx_queued, KERN_DEBUG, dev,
+ "tx[%d] queued, slot %d, skblen %d, avail %d\n",
+ ring, entry, skb->len, TX_BUFFS_AVAIL(cp, ring));
writel(entry, cp->regs + REG_TX_KICKN(ring));
spin_unlock_irqrestore(&cp->tx_lock[ring], flags);
return 0;
-}
+}
-static int cas_start_xmit(struct sk_buff *skb, struct net_device *dev)
+static netdev_tx_t cas_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct cas *cp = netdev_priv(dev);
/* this is only used as a load-balancing hint, so it doesn't
* need to be SMP safe
*/
- static int ring;
+ static int ring;
if (skb_padto(skb, cp->min_frame_size))
- return 0;
+ return NETDEV_TX_OK;
/* XXX: we need some higher-level QoS hooks to steer packets to
* individual queues.
*/
if (cas_xmit_tx_ringN(cp, ring++ & N_TX_RINGS_MASK, skb))
- return 1;
- dev->trans_start = jiffies;
- return 0;
+ return NETDEV_TX_BUSY;
+ return NETDEV_TX_OK;
}
static void cas_init_tx_dma(struct cas *cp)
/* enable completion writebacks, enable paced mode,
* disable read pipe, and disable pre-interrupt compwbs
*/
- val = TX_CFG_COMPWB_Q1 | TX_CFG_COMPWB_Q2 |
+ val = TX_CFG_COMPWB_Q1 | TX_CFG_COMPWB_Q2 |
TX_CFG_COMPWB_Q3 | TX_CFG_COMPWB_Q4 |
- TX_CFG_DMA_RDPIPE_DIS | TX_CFG_PACED_MODE |
+ TX_CFG_DMA_RDPIPE_DIS | TX_CFG_PACED_MODE |
TX_CFG_INTR_COMPWB_DIS;
/* write out tx ring info and tx desc bases */
for (i = 0; i < MAX_TX_RINGS; i++) {
- off = (unsigned long) cp->init_txds[i] -
+ off = (unsigned long) cp->init_txds[i] -
(unsigned long) cp->init_block;
val |= CAS_TX_RINGN_BASE(i);
cas_init_rx_dma(cp);
}
+static void cas_process_mc_list(struct cas *cp)
+{
+ u16 hash_table[16];
+ u32 crc;
+ struct netdev_hw_addr *ha;
+ int i = 1;
+
+ memset(hash_table, 0, sizeof(hash_table));
+ netdev_for_each_mc_addr(ha, cp->dev) {
+ if (i <= CAS_MC_EXACT_MATCH_SIZE) {
+ /* use the alternate mac address registers for the
+ * first 15 multicast addresses
+ */
+ writel((ha->addr[4] << 8) | ha->addr[5],
+ cp->regs + REG_MAC_ADDRN(i*3 + 0));
+ writel((ha->addr[2] << 8) | ha->addr[3],
+ cp->regs + REG_MAC_ADDRN(i*3 + 1));
+ writel((ha->addr[0] << 8) | ha->addr[1],
+ cp->regs + REG_MAC_ADDRN(i*3 + 2));
+ i++;
+ } else {
+ /* use hw hash table for the next series of
+ * multicast addresses
+ */
+ crc = ether_crc_le(ETH_ALEN, ha->addr);
+ crc >>= 24;
+ hash_table[crc >> 4] |= 1 << (15 - (crc & 0xf));
+ }
+ }
+ for (i = 0; i < 16; i++)
+ writel(hash_table[i], cp->regs + REG_MAC_HASH_TABLEN(i));
+}
+
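/* [editorial sketch] The hash path in cas_process_mc_list() above
 * takes the top 8 bits of a little-endian CRC-32 over the MAC address
 * to pick 1 of 256 filter bits, stored MSB-first across sixteen
 * 16-bit registers. A user-space rendering of the same placement;
 * crc32_le_sketch() is a local stand-in for the kernel's
 * ether_crc_le(): */
#include <stdint.h>

static uint32_t crc32_le_sketch(int len, const uint8_t *buf)
{
	uint32_t crc = 0xffffffff;
	int i, j;

	for (i = 0; i < len; i++) {
		crc ^= buf[i];
		for (j = 0; j < 8; j++)
			crc = (crc >> 1) ^ ((crc & 1) ? 0xedb88320 : 0);
	}
	return crc;
}

static void hash_set(uint16_t table[16], const uint8_t mac[6])
{
	uint32_t crc = crc32_le_sketch(6, mac) >> 24;	/* top 8 bits */

	table[crc >> 4] |= 1 << (15 - (crc & 0xf));	/* MSB-first bit */
}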
/* Must be invoked under cp->lock. */
static u32 cas_setup_multicast(struct cas *cp)
{
u32 rxcfg = 0;
int i;
-
+
if (cp->dev->flags & IFF_PROMISC) {
rxcfg |= MAC_RX_CFG_PROMISC_EN;
rxcfg |= MAC_RX_CFG_HASH_FILTER_EN;
} else {
- u16 hash_table[16];
- u32 crc;
- struct dev_mc_list *dmi = cp->dev->mc_list;
- int i;
-
- /* use the alternate mac address registers for the
- * first 15 multicast addresses
- */
- for (i = 1; i <= CAS_MC_EXACT_MATCH_SIZE; i++) {
- if (!dmi) {
- writel(0x0, cp->regs + REG_MAC_ADDRN(i*3 + 0));
- writel(0x0, cp->regs + REG_MAC_ADDRN(i*3 + 1));
- writel(0x0, cp->regs + REG_MAC_ADDRN(i*3 + 2));
- continue;
- }
- writel((dmi->dmi_addr[4] << 8) | dmi->dmi_addr[5],
- cp->regs + REG_MAC_ADDRN(i*3 + 0));
- writel((dmi->dmi_addr[2] << 8) | dmi->dmi_addr[3],
- cp->regs + REG_MAC_ADDRN(i*3 + 1));
- writel((dmi->dmi_addr[0] << 8) | dmi->dmi_addr[1],
- cp->regs + REG_MAC_ADDRN(i*3 + 2));
- dmi = dmi->next;
- }
-
- /* use hw hash table for the next series of
- * multicast addresses
- */
- memset(hash_table, 0, sizeof(hash_table));
- while (dmi) {
- crc = ether_crc_le(ETH_ALEN, dmi->dmi_addr);
- crc >>= 24;
- hash_table[crc >> 4] |= 1 << (15 - (crc & 0xf));
- dmi = dmi->next;
- }
- for (i=0; i < 16; i++)
- writel(hash_table[i], cp->regs +
- REG_MAC_HASH_TABLEN(i));
+ cas_process_mc_list(cp);
rxcfg |= MAC_RX_CFG_HASH_FILTER_EN;
}
if (readl(cp->regs + REG_MAC_TX_RESET) |
readl(cp->regs + REG_MAC_RX_RESET))
- printk(KERN_ERR "%s: mac tx[%d]/rx[%d] reset failed [%08x]\n",
- cp->dev->name, readl(cp->regs + REG_MAC_TX_RESET),
- readl(cp->regs + REG_MAC_RX_RESET),
- readl(cp->regs + REG_MAC_STATE_MACHINE));
+ netdev_err(cp->dev, "mac tx[%d]/rx[%d] reset failed [%08x]\n",
+ readl(cp->regs + REG_MAC_TX_RESET),
+ readl(cp->regs + REG_MAC_RX_RESET),
+ readl(cp->regs + REG_MAC_STATE_MACHINE));
}
writel(0x00, cp->regs + REG_MAC_IPG0);
writel(0x08, cp->regs + REG_MAC_IPG1);
writel(0x04, cp->regs + REG_MAC_IPG2);
-
+
/* change later for 802.3z */
- writel(0x40, cp->regs + REG_MAC_SLOT_TIME);
+ writel(0x40, cp->regs + REG_MAC_SLOT_TIME);
/* min frame + FCS */
writel(ETH_ZLEN + 4, cp->regs + REG_MAC_FRAMESIZE_MIN);
/* Ethernet payload + header + FCS + optional VLAN tag. NOTE: we
- * specify the maximum frame size to prevent RX tag errors on
+ * specify the maximum frame size to prevent RX tag errors on
* oversized frames.
*/
writel(CAS_BASE(MAC_FRAMESIZE_MAX_BURST, 0x2000) |
- CAS_BASE(MAC_FRAMESIZE_MAX_FRAME,
- (CAS_MAX_MTU + ETH_HLEN + 4 + 4)),
+ CAS_BASE(MAC_FRAMESIZE_MAX_FRAME,
+ (CAS_MAX_MTU + ETH_HLEN + 4 + 4)),
cp->regs + REG_MAC_FRAMESIZE_MAX);
- /* NOTE: crc_size is used as a surrogate for half-duplex.
+ /* NOTE: crc_size is used as a surrogate for half-duplex.
* workaround saturn half-duplex issue by increasing preamble
* size to 65 bytes.
*/
* spin_lock_irqsave, but we are called only in cas_init_hw and
* cas_init_hw is protected by cas_lock_all, which calls
* spin_lock_irq (so it doesn't need to save the flags, and
- * we should be OK for the writel, as that is the only
+ * we should be OK for the writel, as that is the only
* difference).
*/
cp->mac_rx_cfg = rxcfg = cas_setup_multicast(cp);
{
int len = strlen(str) + 1;
int i;
-
+
for (i = 0; i < len; i++) {
if (readb(p + i) != str[i])
return 0;
* number.
* 3) fiber cards don't have bridges, so their slot numbers don't
* mean anything.
- * 4) we don't actually know we have a fiber card until after
+ * 4) we don't actually know we have a fiber card until after
* the mac addresses are parsed.
*/
static int cas_get_vpd_info(struct cas *cp, unsigned char *dev_addr,
(readb(p + i + 1) == 0x43) &&
(readb(p + i + 2) == 0x49) &&
(readb(p + i + 3) == 0x52)) {
- base = p + (readb(p + i + 8) |
+ base = p + (readb(p + i + 8) |
(readb(p + i + 9) << 8));
break;
- }
+ }
}
if (!base || (readb(base) != 0x82))
goto use_random_mac_addr;
-
+
i = (readb(base + 1) | (readb(base + 2) << 8)) + 3;
while (i < EXPANSION_ROM_SIZE) {
if (readb(base + i) != 0x90) /* no vpd found */
char type;
p += 3;
-
+
/* look for the following things:
* -- correct length == 29
- * 3 (type) + 2 (size) +
- * 18 (strlen("local-mac-address") + 1) +
- * 6 (mac addr)
+ * 3 (type) + 2 (size) +
+ * 18 (strlen("local-mac-address") + 1) +
+ * 6 (mac addr)
* -- VPD Instance 'I'
* -- VPD Type Bytes 'B'
* -- VPD data length == 6
* -- property string == local-mac-address
- *
+ *
* -- correct length == 24
- * 3 (type) + 2 (size) +
- * 12 (strlen("entropy-dev") + 1) +
+ * 3 (type) + 2 (size) +
+ * 12 (strlen("entropy-dev") + 1) +
* 7 (strlen("vms110") + 1)
* -- VPD Instance 'I'
* -- VPD Type String 'B'
* -- property string == entropy-dev
*
* -- correct length == 18
- * 3 (type) + 2 (size) +
- * 9 (strlen("phy-type") + 1) +
+ * 3 (type) + 2 (size) +
+ * 9 (strlen("phy-type") + 1) +
* 4 (strlen("pcs") + 1)
* -- VPD Instance 'I'
* -- VPD Type String 'S'
* -- VPD data length == 4
* -- property string == phy-type
- *
+ *
* -- correct length == 23
- * 3 (type) + 2 (size) +
- * 14 (strlen("phy-interface") + 1) +
+ * 3 (type) + 2 (size) +
+ * 14 (strlen("phy-interface") + 1) +
* 4 (strlen("pcs") + 1)
* -- VPD Instance 'I'
* -- VPD Type String 'S'
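	/* Offsets within a matching 29-byte local-mac-address record,
	 * as consumed by the reads below (derived from the length
	 * budget above):
	 *
	 *   p + 0      'I' (VPD instance tag)
	 *   p + 1..2   16-bit record length (klen == 29)
	 *   p + 3      data type ('B' == bytes)
	 *   p + 4      data length (6)
	 *   p + 5      "local-mac-address" string plus NUL (18 bytes)
	 *   p + 23     the six MAC address octets
	 */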
type = readb(p + 3);
if (type == 'B') {
if ((klen == 29) && readb(p + 4) == 6 &&
- cas_vpd_match(p + 5,
+ cas_vpd_match(p + 5,
"local-mac-address")) {
- if (mac_off++ > offset)
+ if (mac_off++ > offset)
goto next;
/* set mac address */
- for (j = 0; j < 6; j++)
- dev_addr[j] =
+ for (j = 0; j < 6; j++)
+ dev_addr[j] =
readb(p + 23 + j);
goto found_mac;
}
goto next;
#ifdef USE_ENTROPY_DEV
- if ((klen == 24) &&
+ if ((klen == 24) &&
cas_vpd_match(p + 5, "entropy-dev") &&
cas_vpd_match(p + 17, "vms110")) {
cp->cas_flags |= CAS_FLAG_ENTROPY_DEV;
goto found_phy;
}
}
-
+
if ((klen == 23) && readb(p + 4) == 4 &&
cas_vpd_match(p + 5, "phy-interface")) {
if (cas_vpd_match(p + 19, "pcs")) {
goto done;
/* Sun MAC prefix then 3 random bytes. */
- printk(PFX "MAC address not found in ROM VPD\n");
+ pr_info("MAC address not found in ROM VPD\n");
dev_addr[0] = 0x08;
dev_addr[1] = 0x00;
dev_addr[2] = 0x20;
static void cas_check_pci_invariants(struct cas *cp)
{
struct pci_dev *pdev = cp->pdev;
- u8 rev;
cp->cas_flags = 0;
- pci_read_config_byte(pdev, PCI_REVISION_ID, &rev);
if ((pdev->vendor == PCI_VENDOR_ID_SUN) &&
(pdev->device == PCI_DEVICE_ID_SUN_CASSINI)) {
- if (rev >= CAS_ID_REVPLUS)
+ if (pdev->revision >= CAS_ID_REVPLUS)
cp->cas_flags |= CAS_FLAG_REG_PLUS;
- if (rev < CAS_ID_REVPLUS02u)
+ if (pdev->revision < CAS_ID_REVPLUS02u)
cp->cas_flags |= CAS_FLAG_TARGET_ABORT;
/* Original Cassini supports HW CSUM, but it's not
* enabled by default as it can trigger TX hangs.
*/
- if (rev < CAS_ID_REV2)
+ if (pdev->revision < CAS_ID_REV2)
cp->cas_flags |= CAS_FLAG_NO_HW_CSUM;
} else {
/* Only sun has original cassini chips. */
int i;
/* get page size for rx buffers. */
- cp->page_order = 0;
+ cp->page_order = 0;
#ifdef USE_PAGE_ORDER
if (PAGE_SHIFT < CAS_JUMBO_PAGE_SHIFT) {
/* see if we can allocate larger pages */
- struct page *page = alloc_pages(GFP_ATOMIC,
- CAS_JUMBO_PAGE_SHIFT -
+ struct page *page = alloc_pages(GFP_ATOMIC,
+ CAS_JUMBO_PAGE_SHIFT -
PAGE_SHIFT);
if (page) {
__free_pages(page, CAS_JUMBO_PAGE_SHIFT - PAGE_SHIFT);
cp->page_order = CAS_JUMBO_PAGE_SHIFT - PAGE_SHIFT;
} else {
- printk(PFX "MTU limited to %d bytes\n", CAS_MAX_MTU);
+ printk("MTU limited to %d bytes\n", CAS_MAX_MTU);
}
}
#endif
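	/* An rx buffer is therefore (PAGE_SIZE << cp->page_order)
	 * bytes: one compound page of order
	 * CAS_JUMBO_PAGE_SHIFT - PAGE_SHIFT when the probe above
	 * succeeds, a single page otherwise.
	 */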
cp->tx_fifo_size = readl(cp->regs + REG_TX_FIFO_SIZE) * 64;
cp->rx_fifo_size = RX_FIFO_SIZE;
- /* finish phy determination. MDIO1 takes precedence over MDIO0 if
+ /* finish phy determination. MDIO1 takes precedence over MDIO0 if
* they're both connected.
*/
- cp->phy_type = cas_get_vpd_info(cp, cp->dev->dev_addr,
+ cp->phy_type = cas_get_vpd_info(cp, cp->dev->dev_addr,
PCI_SLOT(pdev->devfn));
if (cp->phy_type & CAS_PHY_SERDES) {
cp->cas_flags |= CAS_FLAG_1000MB_CAP;
return 0; /* no more checking needed */
- }
+ }
/* MII */
cfg = readl(cp->regs + REG_MIF_CFG);
}
}
}
- printk(KERN_ERR PFX "MII phy did not respond [%08x]\n",
+ pr_err("MII phy did not respond [%08x]\n",
readl(cp->regs + REG_MIF_STATE_MACHINE));
return -1;
done:
/* see if we can do gigabit */
cfg = cas_phy_read(cp, MII_BMSR);
- if ((cfg & CAS_BMSR_1000_EXTEND) &&
+ if ((cfg & CAS_BMSR_1000_EXTEND) &&
cas_phy_read(cp, CAS_MII_1000_EXTEND))
cp->cas_flags |= CAS_FLAG_1000MB_CAP;
return 0;
int i;
u32 val;
int txfailed = 0;
-
+
/* enable dma */
val = readl(cp->regs + REG_TX_CFG) | TX_CFG_DMA_EN;
writel(val, cp->regs + REG_TX_CFG);
val = readl(cp->regs + REG_MAC_RX_CFG);
if ((val & MAC_RX_CFG_EN)) {
if (txfailed) {
- printk(KERN_ERR
- "%s: enabling mac failed [tx:%08x:%08x].\n",
- cp->dev->name,
- readl(cp->regs + REG_MIF_STATE_MACHINE),
- readl(cp->regs + REG_MAC_STATE_MACHINE));
+ netdev_err(cp->dev,
+ "enabling mac failed [tx:%08x:%08x]\n",
+ readl(cp->regs + REG_MIF_STATE_MACHINE),
+ readl(cp->regs + REG_MAC_STATE_MACHINE));
}
goto enable_rx_done;
}
udelay(10);
}
- printk(KERN_ERR "%s: enabling mac failed [%s:%08x:%08x].\n",
- cp->dev->name,
- (txfailed? "tx,rx":"rx"),
- readl(cp->regs + REG_MIF_STATE_MACHINE),
- readl(cp->regs + REG_MAC_STATE_MACHINE));
+ netdev_err(cp->dev, "enabling mac failed [%s:%08x:%08x]\n",
+ (txfailed ? "tx,rx" : "rx"),
+ readl(cp->regs + REG_MIF_STATE_MACHINE),
+ readl(cp->regs + REG_MAC_STATE_MACHINE));
enable_rx_done:
cas_unmask_intr(cp); /* enable interrupts */
writel(0, cp->regs + REG_RX_COMP_TAIL);
if (cp->cas_flags & CAS_FLAG_REG_PLUS) {
- if (N_RX_DESC_RINGS > 1)
- writel(RX_DESC_RINGN_SIZE(1) - 4,
+ if (N_RX_DESC_RINGS > 1)
+ writel(RX_DESC_RINGN_SIZE(1) - 4,
cp->regs + REG_PLUS_RX_KICK1);
- for (i = 1; i < N_RX_COMP_RINGS; i++)
+ for (i = 1; i < N_RX_COMP_RINGS; i++)
writel(0, cp->regs + REG_PLUS_RX_COMPN_TAIL(i));
}
}
*fd = 0;
*spd = 10;
*pause = 0;
-
+
/* use GMII registers */
val = cas_phy_read(cp, MII_LPA);
if (val & CAS_LPA_PAUSE)
cas_mif_poll(cp, 0);
val = cas_phy_read(cp, MII_BMCR);
if (val & BMCR_ANENABLE) {
- cas_read_mii_link_mode(cp, &full_duplex, &speed,
+ cas_read_mii_link_mode(cp, &full_duplex, &speed,
&pause);
} else {
if (val & BMCR_FULLDPLX)
}
}
- if (netif_msg_link(cp))
- printk(KERN_INFO "%s: Link up at %d Mbps, %s-duplex.\n",
- cp->dev->name, speed, (full_duplex ? "full" : "half"));
+ netif_info(cp, link, cp->dev, "Link up at %d Mbps, %s-duplex\n",
+ speed, full_duplex ? "full" : "half");
val = MAC_XIF_TX_MII_OUTPUT_EN | MAC_XIF_LINK_LED;
if (CAS_PHY_MII(cp->phy_type)) {
if (!full_duplex)
val |= MAC_XIF_DISABLE_ECHO;
}
- if (full_duplex)
+ if (full_duplex)
val |= MAC_XIF_FDPLX_LED;
if (speed == 1000)
val |= MAC_XIF_GMII_MODE;
/* val now set up for REG_MAC_TX_CFG */
/* If gigabit and half-duplex, enable carrier extension
- * mode. increase slot time to 512 bytes as well.
+ * mode. increase slot time to 512 bytes as well.
* else, disable it and make sure slot time is 64 bytes.
* also activate checksum bug workaround
*/
if ((speed == 1000) && !full_duplex) {
- writel(val | MAC_TX_CFG_CARRIER_EXTEND,
+ writel(val | MAC_TX_CFG_CARRIER_EXTEND,
cp->regs + REG_MAC_TX_CFG);
val = readl(cp->regs + REG_MAC_RX_CFG);
val &= ~MAC_RX_CFG_STRIP_FCS; /* checksum workaround */
- writel(val | MAC_RX_CFG_CARRIER_EXTEND,
+ writel(val | MAC_RX_CFG_CARRIER_EXTEND,
cp->regs + REG_MAC_RX_CFG);
writel(0x200, cp->regs + REG_MAC_SLOT_TIME);
} else {
writel(val, cp->regs + REG_MAC_TX_CFG);
- /* checksum bug workaround. don't strip FCS when in
+ /* checksum bug workaround. don't strip FCS when in
* half-duplex mode
*/
val = readl(cp->regs + REG_MAC_RX_CFG);
cp->crc_size = 4;
cp->min_frame_size = CAS_MIN_FRAME;
}
- writel(val & ~MAC_RX_CFG_CARRIER_EXTEND,
+ writel(val & ~MAC_RX_CFG_CARRIER_EXTEND,
cp->regs + REG_MAC_RX_CFG);
writel(0x40, cp->regs + REG_MAC_SLOT_TIME);
}
if (netif_msg_link(cp)) {
if (pause & 0x01) {
- printk(KERN_INFO "%s: Pause is enabled "
- "(rxfifo: %d off: %d on: %d)\n",
- cp->dev->name,
- cp->rx_fifo_size,
- cp->rx_pause_off,
- cp->rx_pause_on);
+ netdev_info(cp->dev, "Pause is enabled (rxfifo: %d off: %d on: %d)\n",
+ cp->rx_fifo_size,
+ cp->rx_pause_off,
+ cp->rx_pause_on);
} else if (pause & 0x10) {
- printk(KERN_INFO "%s: TX pause enabled\n",
- cp->dev->name);
+ netdev_info(cp->dev, "TX pause enabled\n");
} else {
- printk(KERN_INFO "%s: Pause is disabled\n",
- cp->dev->name);
+ netdev_info(cp->dev, "Pause is disabled\n");
}
}
val |= MAC_CTRL_CFG_SEND_PAUSE_EN;
if (pause & 0x01) { /* symmetric pause */
val |= MAC_CTRL_CFG_RECV_PAUSE_EN;
- }
+ }
}
writel(val, cp->regs + REG_MAC_CTRL_CFG);
cas_start_dma(cp);
*/
static void cas_hard_reset(struct cas *cp)
{
- writel(BIM_LOCAL_DEV_SOFT_0, cp->regs + REG_BIM_LOCAL_DEV_EN);
+ writel(BIM_LOCAL_DEV_SOFT_0, cp->regs + REG_BIM_LOCAL_DEV_EN);
udelay(20);
pci_restore_state(cp->pdev);
}
* need some special handling if the chip is set into a
* loopback mode.
*/
- writel((SW_RESET_TX | SW_RESET_RX | SW_RESET_BLOCK_PCS_SLINK),
+ writel((SW_RESET_TX | SW_RESET_RX | SW_RESET_BLOCK_PCS_SLINK),
cp->regs + REG_SW_RESET);
} else {
writel(SW_RESET_TX | SW_RESET_RX, cp->regs + REG_SW_RESET);
goto done;
udelay(10);
}
- printk(KERN_ERR "%s: sw reset failed.\n", cp->dev->name);
+ netdev_err(cp->dev, "sw reset failed\n");
done:
/* enable various BIM interrupts */
- writel(BIM_CFG_DPAR_INTR_ENABLE | BIM_CFG_RMA_INTR_ENABLE |
+ writel(BIM_CFG_DPAR_INTR_ENABLE | BIM_CFG_RMA_INTR_ENABLE |
BIM_CFG_RTA_INTR_ENABLE, cp->regs + REG_BIM_CFG);
/* clear out pci error status mask for handled errors.
* we don't deal with DMA counter overflows as they happen
* all the time.
*/
- writel(0xFFFFFFFFU & ~(PCI_ERR_BADACK | PCI_ERR_DTRTO |
- PCI_ERR_OTHER | PCI_ERR_BIM_DMA_WRITE |
- PCI_ERR_BIM_DMA_READ), cp->regs +
+ writel(0xFFFFFFFFU & ~(PCI_ERR_BADACK | PCI_ERR_DTRTO |
+ PCI_ERR_OTHER | PCI_ERR_BIM_DMA_WRITE |
+ PCI_ERR_BIM_DMA_READ), cp->regs +
REG_PCI_ERR_STATUS_MASK);
/* set up for MII by default to address mac rx reset timeout
#else
while (atomic_read(&cp->reset_task_pending))
schedule();
-#endif
+#endif
/* Actually stop the chip */
cas_lock_all_save(cp, flags);
cas_reset(cp, 0);
}
schedule_work(&cp->reset_task);
#else
- atomic_set(&cp->reset_task_pending, (cp->phy_type & CAS_PHY_SERDES) ?
+ atomic_set(&cp->reset_task_pending, (cp->phy_type & CAS_PHY_SERDES) ?
CAS_RESET_ALL : CAS_RESET_MTU);
- printk(KERN_ERR "reset called in cas_change_mtu\n");
+ pr_err("reset called in cas_change_mtu\n");
schedule_work(&cp->reset_task);
#endif
* needs to be unmapped.
*/
daddr = le64_to_cpu(txd[ent].buffer);
- dlen = CAS_VAL(TX_DESC_BUFLEN,
+ dlen = CAS_VAL(TX_DESC_BUFLEN,
le64_to_cpu(txd[ent].control));
pci_unmap_page(cp->pdev, daddr, dlen,
PCI_DMA_TODEVICE);
size = RX_DESC_RINGN_SIZE(ring);
for (i = 0; i < size; i++) {
- if ((page[i] = cas_page_alloc(cp, GFP_KERNEL)) == NULL)
+ if ((page[i] = cas_page_alloc(cp, GFP_KERNEL)) == NULL)
return -1;
}
return 0;
return 0;
}
-static void cas_reset_task(void *data)
+static void cas_reset_task(struct work_struct *work)
{
- struct cas *cp = (struct cas *) data;
+ struct cas *cp = container_of(work, struct cas, reset_task);
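+	/* container_of() recovers the struct cas that embeds this
+	 * work_struct by subtracting offsetof(struct cas, reset_task);
+	 * it pairs with the INIT_WORK(&cp->reset_task, cas_reset_task)
+	 * call in cas_init_one() below.
+	 */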
#if 0
int pending = atomic_read(&cp->reset_task_pending);
#else
* call to cas_init_hw will restart auto negotiation.
* Setting the second argument of cas_reset to
* !(pending == CAS_RESET_ALL) will set this argument
- * to 1 (avoiding reinitializing the PHY for the normal
+ * to 1 (avoiding reinitializing the PHY for the normal
* PCS case) when auto negotiation is not restarted.
*/
#if 1
if (link_transition_timeout != 0 &&
cp->link_transition_jiffies_valid &&
- ((jiffies - cp->link_transition_jiffies) >
+ ((jiffies - cp->link_transition_jiffies) >
(link_transition_timeout))) {
- /* One-second counter so link-down workaround doesn't
+ /* One-second counter so link-down workaround doesn't
* cause resets to occur so fast as to fool the switch
* into thinking the link is down.
*/
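		/* The unsigned subtraction above is wraparound-safe;
		 * the jiffies helpers would express the same test as:
		 *
		 *	time_after(jiffies, cp->link_transition_jiffies +
		 *			    link_transition_timeout)
		 */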
#if 1
if (atomic_read(&cp->reset_task_pending_all) ||
atomic_read(&cp->reset_task_pending_spare) ||
- atomic_read(&cp->reset_task_pending_mtu))
+ atomic_read(&cp->reset_task_pending_mtu))
goto done;
#else
- if (atomic_read(&cp->reset_task_pending))
+ if (atomic_read(&cp->reset_task_pending))
goto done;
#endif
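	/* The split pending counters (all/spare/mtu) let callers
	 * record why a reset is wanted without holding a lock; any
	 * nonzero counter means a reset is already queued, so the
	 * link timer backs off instead of piling on another one.
	 */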
if (((tlm == 0x5) || (tlm == 0x3)) &&
(CAS_VAL(MAC_SM_ENCAP_SM, val) == 0)) {
- if (netif_msg_tx_err(cp))
- printk(KERN_DEBUG "%s: tx err: "
- "MAC_STATE[%08x]\n",
- cp->dev->name, val);
+ netif_printk(cp, tx_err, KERN_DEBUG, cp->dev,
+ "tx err: MAC_STATE[%08x]\n", val);
reset = 1;
goto done;
}
wptr = readl(cp->regs + REG_TX_FIFO_WRITE_PTR);
rptr = readl(cp->regs + REG_TX_FIFO_READ_PTR);
if ((val == 0) && (wptr != rptr)) {
- if (netif_msg_tx_err(cp))
- printk(KERN_DEBUG "%s: tx err: "
- "TX_FIFO[%08x:%08x:%08x]\n",
- cp->dev->name, val, wptr, rptr);
+ netif_printk(cp, tx_err, KERN_DEBUG, cp->dev,
+ "tx err: TX_FIFO[%08x:%08x:%08x]\n",
+ val, wptr, rptr);
reset = 1;
}
schedule_work(&cp->reset_task);
#else
atomic_set(&cp->reset_task_pending, CAS_RESET_ALL);
- printk(KERN_ERR "reset called in cas_link_timer\n");
+ pr_err("reset called in cas_link_timer\n");
schedule_work(&cp->reset_task);
#endif
}
spin_unlock_irqrestore(&cp->lock, flags);
}
-/* tiny buffers are used to avoid target abort issues with
+/* tiny buffers are used to avoid target abort issues with
* older cassini's
*/
static void cas_tx_tiny_free(struct cas *cp)
if (!cp->tx_tiny_bufs[i])
continue;
- pci_free_consistent(pdev, TX_TINY_BUF_BLOCK,
+ pci_free_consistent(pdev, TX_TINY_BUF_BLOCK,
cp->tx_tiny_bufs[i],
cp->tx_tiny_dvma[i]);
cp->tx_tiny_bufs[i] = NULL;
int i;
for (i = 0; i < N_TX_RINGS; i++) {
- cp->tx_tiny_bufs[i] =
+ cp->tx_tiny_bufs[i] =
pci_alloc_consistent(pdev, TX_TINY_BUF_BLOCK,
&cp->tx_tiny_dvma[i]);
if (!cp->tx_tiny_bufs[i]) {
/* Reset the chip */
cas_lock_all_save(cp, flags);
/* We set the second arg to cas_reset to zero
- * because cas_init_hw below will have its second
+ * because cas_init_hw below will have its second
* argument set to non-zero, which will force
* autonegotiation to start.
*/
cas_unlock_all_restore(cp, flags);
}
+ err = -ENOMEM;
if (cas_tx_tiny_alloc(cp) < 0)
- return -ENOMEM;
+ goto err_unlock;
/* alloc rx descriptors */
- err = -ENOMEM;
if (cas_alloc_rxds(cp) < 0)
goto err_tx_tiny;
-
+
/* allocate spares */
cas_spare_init(cp);
cas_spare_recover(cp, GFP_KERNEL);
/* We can now request the interrupt as we know it's masked
* on the controller. cassini+ has up to 4 interrupts
- * that can be used, but you need to do explicit pci interrupt
+ * that can be used, but you need to do explicit pci interrupt
* mapping to expose them
*/
if (request_irq(cp->pdev->irq, cas_interrupt,
- SA_SHIRQ, dev->name, (void *) dev)) {
- printk(KERN_ERR "%s: failed to request irq !\n",
- cp->dev->name);
+ IRQF_SHARED, dev->name, (void *) dev)) {
+ netdev_err(cp->dev, "failed to request irq !\n");
err = -EAGAIN;
goto err_spare;
}
+#ifdef USE_NAPI
+ napi_enable(&cp->napi);
+#endif
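+	/* NAPI is enabled before cas_init_hw() unmasks interrupts:
+	 * napi_schedule() from the interrupt handler is a no-op while
+	 * the context is still disabled, which would stall rx.
+	 */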
/* init hw */
cas_lock_all_save(cp, flags);
cas_clean_rings(cp);
cas_free_rxds(cp);
err_tx_tiny:
cas_tx_tiny_free(cp);
+err_unlock:
mutex_unlock(&cp->pm_mutex);
return err;
}
unsigned long flags;
struct cas *cp = netdev_priv(dev);
+#ifdef USE_NAPI
+ napi_disable(&cp->napi);
+#endif
/* Make sure we don't get distracted by suspend/resume */
mutex_lock(&cp->pm_mutex);
/* Stop traffic, mark us closed */
cas_lock_all_save(cp, flags);
- cp->opened = 0;
+ cp->opened = 0;
cas_reset(cp, 0);
- cas_phy_init(cp);
+ cas_phy_init(cp);
cas_begin_auto_negotiation(cp, NULL);
cas_clean_rings(cp);
cas_unlock_all_restore(cp, flags);
{"tx_fifo_errors"},
{"tx_packets"}
};
-#define CAS_NUM_STAT_KEYS (sizeof(ethtool_cassini_statnames)/ETH_GSTRING_LEN)
+#define CAS_NUM_STAT_KEYS ARRAY_SIZE(ethtool_cassini_statnames)
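+/* ARRAY_SIZE() is sizeof(arr)/sizeof((arr)[0]) plus a compile-time
+ * check that its argument really is an array, so these key counts
+ * keep tracking their tables if entries are added or the element
+ * type changes.
+ */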
static struct {
const int offsets; /* neg. values for 2nd arg to cas_read_phy */
{REG_MAC_COLL_EXCESS},
{REG_MAC_COLL_LATE}
};
-#define CAS_REG_LEN (sizeof(ethtool_register_table)/sizeof(int))
+#define CAS_REG_LEN ARRAY_SIZE(ethtool_register_table)
#define CAS_MAX_REGS (sizeof (u32)*CAS_REG_LEN)
static void cas_read_regs(struct cas *cp, u8 *ptr, int len)
/* we collate all of the stats into net_stats[N_TX_RING] */
if (!cp->hw_running)
return stats + N_TX_RINGS;
-
+
/* collect outstanding stats */
/* WTZ: the Cassini spec gives these as 16 bit counters but
* stored in 32-bit words. Added a mask of 0xffff to be safe,
* that consistent.
*/
spin_lock_irqsave(&cp->stat_lock[N_TX_RINGS], flags);
- stats[N_TX_RINGS].rx_crc_errors +=
+ stats[N_TX_RINGS].rx_crc_errors +=
readl(cp->regs + REG_MAC_FCS_ERR) & 0xffff;
- stats[N_TX_RINGS].rx_frame_errors +=
+ stats[N_TX_RINGS].rx_frame_errors +=
		readl(cp->regs + REG_MAC_ALIGN_ERR) & 0xffff;
- stats[N_TX_RINGS].rx_length_errors +=
+ stats[N_TX_RINGS].rx_length_errors +=
readl(cp->regs + REG_MAC_LEN_ERR) & 0xffff;
#if 1
tmp = (readl(cp->regs + REG_MAC_COLL_EXCESS) & 0xffff) +
stats[N_TX_RINGS].collisions +=
tmp + (readl(cp->regs + REG_MAC_COLL_NORMAL) & 0xffff);
#else
- stats[N_TX_RINGS].tx_aborted_errors +=
+ stats[N_TX_RINGS].tx_aborted_errors +=
readl(cp->regs + REG_MAC_COLL_EXCESS);
stats[N_TX_RINGS].collisions += readl(cp->regs + REG_MAC_COLL_EXCESS) +
readl(cp->regs + REG_MAC_COLL_LATE);
for (i = 0; i < N_TX_RINGS; i++) {
spin_lock(&cp->stat_lock[i]);
- stats[N_TX_RINGS].rx_length_errors +=
+ stats[N_TX_RINGS].rx_length_errors +=
stats[i].rx_length_errors;
stats[N_TX_RINGS].rx_crc_errors += stats[i].rx_crc_errors;
stats[N_TX_RINGS].rx_packets += stats[i].rx_packets;
u32 rxcfg, rxcfg_new;
unsigned long flags;
int limit = STOP_TRIES;
-
+
if (!cp->hw_running)
return;
-
+
spin_lock_irqsave(&cp->lock, flags);
rxcfg = readl(cp->regs + REG_MAC_RX_CFG);
XCVR_INTERNAL : XCVR_EXTERNAL;
cmd->phy_address = cp->phy_addr;
cmd->advertising |= ADVERTISED_TP | ADVERTISED_MII |
- ADVERTISED_10baseT_Half |
- ADVERTISED_10baseT_Full |
- ADVERTISED_100baseT_Half |
+ ADVERTISED_10baseT_Half |
+ ADVERTISED_10baseT_Full |
+ ADVERTISED_100baseT_Half |
ADVERTISED_100baseT_Full;
cmd->supported |=
- (SUPPORTED_10baseT_Half |
+ (SUPPORTED_10baseT_Half |
SUPPORTED_10baseT_Full |
- SUPPORTED_100baseT_Half |
+ SUPPORTED_100baseT_Half |
SUPPORTED_100baseT_Full |
SUPPORTED_TP | SUPPORTED_MII);
if (cp->hw_running) {
cas_mif_poll(cp, 0);
bmcr = cas_phy_read(cp, MII_BMCR);
- cas_read_mii_link_mode(cp, &full_duplex,
+ cas_read_mii_link_mode(cp, &full_duplex,
&speed, &pause);
cas_mif_poll(cp, 1);
}
cmd->advertising |= ADVERTISED_FIBRE;
if (cp->hw_running) {
- /* pcs uses the same bits as mii */
+ /* pcs uses the same bits as mii */
bmcr = readl(cp->regs + REG_PCS_MII_CTRL);
- cas_read_pcs_link_mode(cp, &full_duplex,
+ cas_read_pcs_link_mode(cp, &full_duplex,
&speed, &pause);
}
}
cmd->autoneg = AUTONEG_DISABLE;
cmd->speed =
(bmcr & CAS_BMCR_SPEED1000) ?
- SPEED_1000 :
- ((bmcr & BMCR_SPEED100) ? SPEED_100:
+ SPEED_1000 :
+			((bmcr & BMCR_SPEED100) ? SPEED_100 :
SPEED_10);
cmd->duplex =
(bmcr & BMCR_FULLDPLX) ?
}
if (linkstate != link_up) {
/* Force these to "unknown" if the link is not up and
- * autonogotiation in enabled. We can set the link
+		 * autonegotiation is enabled. We can set the link
* speed to 0, but not cmd->duplex,
* because its legal values are 0 and 1. Ethtool will
* print the value reported in parentheses after the
cas_read_regs(cp, p, regs->len / sizeof(u32));
}
-static int cas_get_stats_count(struct net_device *dev)
+static int cas_get_sset_count(struct net_device *dev, int sset)
{
- return CAS_NUM_STAT_KEYS;
+ switch (sset) {
+ case ETH_SS_STATS:
+ return CAS_NUM_STAT_KEYS;
+ default:
+ return -EOPNOTSUPP;
+ }
}
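/* Sketch of the caller side in the ethtool core (simplified): the
 * count returned here sizes the buffers later handed to
 * get_strings() and get_ethtool_stats().
 *
 *	n = ops->get_sset_count(dev, ETH_SS_STATS);
 *	data = kcalloc(n, ETH_GSTRING_LEN, GFP_KERNEL);
 *	ops->get_strings(dev, ETH_SS_STATS, data);
 */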
static void cas_get_strings(struct net_device *dev, u32 stringset, u8 *data)
{
-	memcpy(data, &ethtool_cassini_statnames,
+	memcpy(data, &ethtool_cassini_statnames,
CAS_NUM_STAT_KEYS * ETH_GSTRING_LEN);
}
BUG_ON(i != CAS_NUM_STAT_KEYS);
}
-static struct ethtool_ops cas_ethtool_ops = {
+static const struct ethtool_ops cas_ethtool_ops = {
.get_drvinfo = cas_get_drvinfo,
.get_settings = cas_get_settings,
.set_settings = cas_set_settings,
.set_msglevel = cas_set_msglevel,
.get_regs_len = cas_get_regs_len,
.get_regs = cas_get_regs,
- .get_stats_count = cas_get_stats_count,
+ .get_sset_count = cas_get_sset_count,
.get_strings = cas_get_strings,
.get_ethtool_stats = cas_get_ethtool_stats,
};
struct mii_ioctl_data *data = if_mii(ifr);
unsigned long flags;
int rc = -EOPNOTSUPP;
-
+
/* Hold the PM mutex while doing ioctl's or we may collide
* with open/close and power management and oops.
*/
break;
case SIOCSMIIREG: /* Write MII PHY register. */
- if (!capable(CAP_NET_ADMIN)) {
- rc = -EPERM;
- break;
- }
spin_lock_irqsave(&cp->lock, flags);
cas_mif_poll(cp, 0);
rc = cas_phy_write(cp, data->reg_num & 0x1f, data->val_in);
break;
default:
break;
- };
+ }
mutex_unlock(&cp->pm_mutex);
return rc;
}
+/* When this chip sits underneath an Intel 31154 bridge, it is the
+ * only subordinate device and we can tweak the bridge settings to
+ * reflect that fact.
+ */
+static void __devinit cas_program_bridge(struct pci_dev *cas_pdev)
+{
+ struct pci_dev *pdev = cas_pdev->bus->self;
+ u32 val;
+
+ if (!pdev)
+ return;
+
+ if (pdev->vendor != 0x8086 || pdev->device != 0x537c)
+ return;
+
+ /* Clear bit 10 (Bus Parking Control) in the Secondary
+ * Arbiter Control/Status Register which lives at offset
+ * 0x41. Using a 32-bit word read/modify/write at 0x40
+ * is much simpler so that's how we do this.
+ */
+ pci_read_config_dword(pdev, 0x40, &val);
+ val &= ~0x00040000;
+ pci_write_config_dword(pdev, 0x40, val);
+
+ /* Max out the Multi-Transaction Timer settings since
+ * Cassini is the only device present.
+ *
+ * The register is 16-bit and lives at 0x50. When the
+ * settings are enabled, it extends the GRANT# signal
+ * for a requestor after a transaction is complete. This
+ * allows the next request to run without first needing
+ * to negotiate the GRANT# signal back.
+ *
+ * Bits 12:10 define the grant duration:
+ *
+ * 1 -- 16 clocks
+ * 2 -- 32 clocks
+ * 3 -- 64 clocks
+ * 4 -- 128 clocks
+ * 5 -- 256 clocks
+ *
+ * All other values are illegal.
+ *
+ * Bits 09:00 define which REQ/GNT signal pairs get the
+ * GRANT# signal treatment. We set them all.
+ */
+ pci_write_config_word(pdev, 0x50, (5 << 10) | 0x3ff);
+
+ * The Read Prefetch Policy register is 16-bit and sits at
+ * offset 0x52. It enables a "smart" pre-fetch policy. We
+ * enable it and max out all of the settings since only one
+ * device is sitting underneath and thus bandwidth sharing is
+ * not an issue.
+ *
+ * The register has several 3-bit fields, each of which indicates
+ * a multiplier applied to the base amount of prefetching the
+ * chip would do. These fields are at:
+ *
+ * 15:13 --- ReRead Primary Bus
+ * 12:10 --- FirstRead Primary Bus
+ * 09:07 --- ReRead Secondary Bus
+ * 06:04 --- FirstRead Secondary Bus
+ *
+ * Bits 03:00 control which REQ/GNT pairs the prefetch settings
+ * get enabled on. Bit 3 is a grouped enabler which controls
+ * all of the REQ/GNT pairs from [8:3]. Bits 2 to 0 control
+ * the individual REQ/GNT pairs [2:0].
+ */
+ pci_write_config_word(pdev, 0x52,
+ (0x7 << 13) |
+ (0x7 << 10) |
+ (0x7 << 7) |
+ (0x7 << 4) |
+ (0xf << 0));
+
+ /* Force cacheline size to 0x8 */
+ pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE, 0x08);
+
+ /* Force latency timer to maximum setting so Cassini can
+ * sit on the bus as long as it likes.
+ */
+ pci_write_config_byte(pdev, PCI_LATENCY_TIMER, 0xff);
+}
+
+static const struct net_device_ops cas_netdev_ops = {
+ .ndo_open = cas_open,
+ .ndo_stop = cas_close,
+ .ndo_start_xmit = cas_start_xmit,
+ .ndo_get_stats = cas_get_stats,
+ .ndo_set_multicast_list = cas_set_multicast,
+ .ndo_do_ioctl = cas_ioctl,
+ .ndo_tx_timeout = cas_tx_timeout,
+ .ndo_change_mtu = cas_change_mtu,
+ .ndo_set_mac_address = eth_mac_addr,
+ .ndo_validate_addr = eth_validate_addr,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ .ndo_poll_controller = cas_netpoll,
+#endif
+};
+
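+/* net_device_ops gathers what used to be individual function
+ * pointers on struct net_device (dev->open, dev->hard_start_xmit,
+ * ...) into one shared const table; the probe path below now only
+ * sets dev->netdev_ops = &cas_netdev_ops.
+ */
+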
static int __devinit cas_init_one(struct pci_dev *pdev,
const struct pci_device_id *ent)
{
u8 orig_cacheline_size = 0, cas_cacheline_size = 0;
if (cas_version_printed++ == 0)
- printk(KERN_INFO "%s", version);
+ pr_info("%s", version);
err = pci_enable_device(pdev);
if (err) {
- printk(KERN_ERR PFX "Cannot enable PCI device, "
- "aborting.\n");
+ dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
return err;
}
if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
- printk(KERN_ERR PFX "Cannot find proper PCI device "
- "base address, aborting.\n");
+ dev_err(&pdev->dev, "Cannot find proper PCI device "
+ "base address, aborting\n");
err = -ENODEV;
goto err_out_disable_pdev;
}
dev = alloc_etherdev(sizeof(*cp));
if (!dev) {
- printk(KERN_ERR PFX "Etherdev alloc failed, aborting.\n");
+ dev_err(&pdev->dev, "Etherdev alloc failed, aborting\n");
err = -ENOMEM;
goto err_out_disable_pdev;
}
- SET_MODULE_OWNER(dev);
SET_NETDEV_DEV(dev, &pdev->dev);
err = pci_request_regions(pdev, dev->name);
if (err) {
- printk(KERN_ERR PFX "Cannot obtain PCI resources, "
- "aborting.\n");
+ dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
goto err_out_free_netdev;
}
pci_set_master(pdev);
pci_cmd &= ~PCI_COMMAND_SERR;
pci_cmd |= PCI_COMMAND_PARITY;
pci_write_config_word(pdev, PCI_COMMAND, pci_cmd);
- pci_set_mwi(pdev);
+ if (pci_try_set_mwi(pdev))
+ pr_warning("Could not enable MWI for %s\n", pci_name(pdev));
+
+ cas_program_bridge(pdev);
+
/*
* On some architectures, the default cache line size set
- * by pci_set_mwi reduces perforamnce. We have to increase
+	 * by pci_try_set_mwi reduces performance. We have to increase
* it for this case. To start, we'll print some configuration
* data.
*/
pci_read_config_byte(pdev, PCI_CACHE_LINE_SIZE,
&orig_cacheline_size);
if (orig_cacheline_size < CAS_PREF_CACHELINE_SIZE) {
- cas_cacheline_size =
- (CAS_PREF_CACHELINE_SIZE < SMP_CACHE_BYTES) ?
+ cas_cacheline_size =
+ (CAS_PREF_CACHELINE_SIZE < SMP_CACHE_BYTES) ?
CAS_PREF_CACHELINE_SIZE : SMP_CACHE_BYTES;
- if (pci_write_config_byte(pdev,
- PCI_CACHE_LINE_SIZE,
+ if (pci_write_config_byte(pdev,
+ PCI_CACHE_LINE_SIZE,
cas_cacheline_size)) {
- printk(KERN_ERR PFX "Could not set PCI cache "
+ dev_err(&pdev->dev, "Could not set PCI cache "
"line size\n");
goto err_write_cacheline;
}
/* Configure DMA attributes. */
- if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
+ if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
pci_using_dac = 1;
err = pci_set_consistent_dma_mask(pdev,
- DMA_64BIT_MASK);
+ DMA_BIT_MASK(64));
if (err < 0) {
- printk(KERN_ERR PFX "Unable to obtain 64-bit DMA "
+ dev_err(&pdev->dev, "Unable to obtain 64-bit DMA "
"for consistent allocations\n");
goto err_out_free_res;
}
} else {
- err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
+ err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
if (err) {
- printk(KERN_ERR PFX "No usable DMA configuration, "
- "aborting.\n");
+ dev_err(&pdev->dev, "No usable DMA configuration, "
+ "aborting\n");
goto err_out_free_res;
}
pci_using_dac = 0;
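		/* Classic wide-then-narrow DMA setup: try a 64-bit
		 * mask first and fall back to 32-bit; pci_using_dac
		 * records the outcome so NETIF_F_HIGHDMA is only
		 * advertised when 64-bit addressing really works.
		 */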
cp->orig_cacheline_size = cas_cacheline_size ? orig_cacheline_size: 0;
#endif
cp->dev = dev;
- cp->msg_enable = (cassini_debug < 0) ? CAS_DEF_MSG_ENABLE :
+ cp->msg_enable = (cassini_debug < 0) ? CAS_DEF_MSG_ENABLE :
cassini_debug;
cp->link_transition = LINK_TRANSITION_UNKNOWN;
atomic_set(&cp->reset_task_pending_spare, 0);
atomic_set(&cp->reset_task_pending_mtu, 0);
#endif
- INIT_WORK(&cp->reset_task, cas_reset_task, cp);
+ INIT_WORK(&cp->reset_task, cas_reset_task);
/* Default link parameters */
- if (link_mode >= 0 && link_mode <= 6)
+ if (link_mode >= 0 && link_mode < 6)
cp->link_cntl = link_modes[link_mode];
else
cp->link_cntl = BMCR_ANENABLE;
/* give us access to cassini registers */
cp->regs = pci_iomap(pdev, 0, casreg_len);
- if (cp->regs == 0UL) {
- printk(KERN_ERR PFX "Cannot map device registers, "
- "aborting.\n");
+ if (!cp->regs) {
+ dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
goto err_out_free_res;
}
cp->casreg_len = casreg_len;
cas_reset(cp, 0);
if (cas_check_invariants(cp))
goto err_out_iounmap;
+	if ((cp->cas_flags & CAS_FLAG_SATURN) &&
+	    cas_saturn_firmware_init(cp))
+		goto err_out_iounmap;
cp->init_block = (struct cas_init_block *)
pci_alloc_consistent(pdev, sizeof(struct cas_init_block),
&cp->block_dvma);
if (!cp->init_block) {
- printk(KERN_ERR PFX "Cannot allocate init block, "
- "aborting.\n");
+ dev_err(&pdev->dev, "Cannot allocate init block, aborting\n");
goto err_out_iounmap;
}
- for (i = 0; i < N_TX_RINGS; i++)
+ for (i = 0; i < N_TX_RINGS; i++)
cp->init_txds[i] = cp->init_block->txds[i];
- for (i = 0; i < N_RX_DESC_RINGS; i++)
+ for (i = 0; i < N_RX_DESC_RINGS; i++)
cp->init_rxds[i] = cp->init_block->rxds[i];
- for (i = 0; i < N_RX_COMP_RINGS; i++)
+ for (i = 0; i < N_RX_COMP_RINGS; i++)
cp->init_rxcs[i] = cp->init_block->rxcs[i];
for (i = 0; i < N_RX_FLOWS; i++)
skb_queue_head_init(&cp->rx_flows[i]);
- dev->open = cas_open;
- dev->stop = cas_close;
- dev->hard_start_xmit = cas_start_xmit;
- dev->get_stats = cas_get_stats;
- dev->set_multicast_list = cas_set_multicast;
- dev->do_ioctl = cas_ioctl;
+ dev->netdev_ops = &cas_netdev_ops;
dev->ethtool_ops = &cas_ethtool_ops;
- dev->tx_timeout = cas_tx_timeout;
dev->watchdog_timeo = CAS_TX_TIMEOUT;
- dev->change_mtu = cas_change_mtu;
+
#ifdef USE_NAPI
- dev->poll = cas_poll;
- dev->weight = 64;
-#endif
-#ifdef CONFIG_NET_POLL_CONTROLLER
- dev->poll_controller = cas_netpoll;
+ netif_napi_add(dev, &cp->napi, cas_poll, 64);
#endif
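	/* netif_napi_add() replaces the old dev->poll/dev->weight
	 * pair removed above; the context lives in cp->napi and is
	 * toggled by napi_enable()/napi_disable() in open/close.
	 */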
dev->irq = pdev->irq;
dev->dma = 0;
dev->features |= NETIF_F_HIGHDMA;
if (register_netdev(dev)) {
- printk(KERN_ERR PFX "Cannot register net device, "
- "aborting.\n");
+ dev_err(&pdev->dev, "Cannot register net device, aborting\n");
goto err_out_free_consistent;
}
i = readl(cp->regs + REG_BIM_CFG);
- printk(KERN_INFO "%s: Sun Cassini%s (%sbit/%sMHz PCI/%s) "
- "Ethernet[%d] ", dev->name,
- (cp->cas_flags & CAS_FLAG_REG_PLUS) ? "+" : "",
- (i & BIM_CFG_32BIT) ? "32" : "64",
- (i & BIM_CFG_66MHZ) ? "66" : "33",
- (cp->phy_type == CAS_PHY_SERDES) ? "Fi" : "Cu", pdev->irq);
-
- for (i = 0; i < 6; i++)
- printk("%2.2x%c", dev->dev_addr[i],
- i == 5 ? ' ' : ':');
- printk("\n");
+ netdev_info(dev, "Sun Cassini%s (%sbit/%sMHz PCI/%s) Ethernet[%d] %pM\n",
+ (cp->cas_flags & CAS_FLAG_REG_PLUS) ? "+" : "",
+ (i & BIM_CFG_32BIT) ? "32" : "64",
+ (i & BIM_CFG_66MHZ) ? "66" : "33",
+ (cp->phy_type == CAS_PHY_SERDES) ? "Fi" : "Cu", pdev->irq,
+ dev->dev_addr);
pci_set_drvdata(pdev, dev);
cp->hw_running = 1;
err_write_cacheline:
	/* Try to restore it in case the error occurred after we
- * set it.
+ * set it.
*/
pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE, orig_cacheline_size);
cp = netdev_priv(dev);
unregister_netdev(dev);
+	vfree(cp->fw_data);
+
mutex_lock(&cp->pm_mutex);
flush_scheduled_work();
if (cp->hw_running)
/* Restore the cache line size if we had modified
* it.
*/
- pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE,
+ pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE,
cp->orig_cacheline_size);
}
#endif
unsigned long flags;
mutex_lock(&cp->pm_mutex);
-
+
/* If the driver is opened, we stop the DMA */
if (cp->opened) {
netif_device_detach(dev);
struct net_device *dev = pci_get_drvdata(pdev);
struct cas *cp = netdev_priv(dev);
- printk(KERN_INFO "%s: resuming\n", dev->name);
+ netdev_info(dev, "resuming\n");
mutex_lock(&cp->pm_mutex);
cas_hard_reset(cp);
else
link_transition_timeout = 0;
- return pci_module_init(&cas_driver);
+ return pci_register_driver(&cas_driver);
}
static void __exit cas_cleanup(void)