* by default, the selective clear mask is set up to process rx packets.
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/init.h>
+#include <linux/vmalloc.h>
#include <linux/ioport.h>
#include <linux/pci.h>
#include <linux/mm.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/mutex.h>
+#include <linux/firmware.h>
#include <net/checksum.h>
#define cas_page_unmap(x) kunmap_atomic((x), KM_SKB_DATA_SOFTIRQ)
#define CAS_NCPUS num_online_cpus()
-#if defined(CONFIG_CASSINI_NAPI) && defined(HAVE_NETDEV_POLL)
+#ifdef CONFIG_CASSINI_NAPI
#define USE_NAPI
#define cas_skb_release(x) netif_receive_skb(x)
#else
#undef RX_COUNT_BUFFERS /* define to calculate RX buffer stats */
#define DRV_MODULE_NAME "cassini"
-#define PFX DRV_MODULE_NAME ": "
-#define DRV_MODULE_VERSION "1.4"
-#define DRV_MODULE_RELDATE "1 July 2004"
+#define DRV_MODULE_VERSION "1.6"
+#define DRV_MODULE_RELDATE "21 May 2008"
#define CAS_DEF_MSG_ENABLE \
(NETIF_MSG_DRV | \
MODULE_AUTHOR("Adrian Sun (asun@darksunrising.com)");
MODULE_DESCRIPTION("Sun Cassini(+) ethernet driver");
MODULE_LICENSE("GPL");
+MODULE_FIRMWARE("sun/cassini.bin");
module_param(cassini_debug, int, 0);
MODULE_PARM_DESC(cassini_debug, "Cassini bitmapped debugging message enable value");
module_param(link_mode, int, 0);
CAS_BMCR_SPEED1000|BMCR_FULLDPLX /* 5 : 1000bt full duplex */
};
-static struct pci_device_id cas_pci_tbl[] __devinitdata = {
+static DEFINE_PCI_DEVICE_TABLE(cas_pci_tbl) = {
{ PCI_VENDOR_ID_SUN, PCI_DEVICE_ID_SUN_CASSINI,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
{ PCI_VENDOR_ID_NS, PCI_DEVICE_ID_NS_SATURN,
cas_disable_irq(cp, i);
}
-static inline void cas_buffer_init(cas_page_t *cp)
-{
- struct page *page = cp->buffer;
- atomic_set((atomic_t *)&page->lru.next, 1);
-}
-
-static inline int cas_buffer_count(cas_page_t *cp)
-{
- struct page *page = cp->buffer;
- return atomic_read((atomic_t *)&page->lru.next);
-}
-
-static inline void cas_buffer_inc(cas_page_t *cp)
-{
- struct page *page = cp->buffer;
- atomic_inc((atomic_t *)&page->lru.next);
-}
-
-static inline void cas_buffer_dec(cas_page_t *cp)
-{
- struct page *page = cp->buffer;
- atomic_dec((atomic_t *)&page->lru.next);
-}
-
static void cas_enable_irq(struct cas *cp, const int ring)
{
if (ring == 0) { /* all but TX_DONE */
{
pci_unmap_page(cp->pdev, page->dma_addr, cp->page_size,
PCI_DMA_FROMDEVICE);
- cas_buffer_dec(page);
__free_pages(page->buffer, cp->page_order);
kfree(page);
return 0;
page->buffer = alloc_pages(flags, cp->page_order);
if (!page->buffer)
goto page_err;
- cas_buffer_init(page);
page->dma_addr = pci_map_page(cp->pdev, page->buffer, 0,
cp->page_size, PCI_DMA_FROMDEVICE);
return page;
/* free spare buffers */
INIT_LIST_HEAD(&list);
spin_lock(&cp->rx_spare_lock);
- list_splice(&cp->rx_spare_list, &list);
- INIT_LIST_HEAD(&cp->rx_spare_list);
+ list_splice_init(&cp->rx_spare_list, &list);
spin_unlock(&cp->rx_spare_lock);
list_for_each_safe(elem, tmp, &list) {
cas_page_free(cp, list_entry(elem, cas_page_t, list));
* lock than used everywhere else to manipulate this list.
*/
spin_lock(&cp->rx_inuse_lock);
- list_splice(&cp->rx_inuse_list, &list);
- INIT_LIST_HEAD(&cp->rx_inuse_list);
+ list_splice_init(&cp->rx_inuse_list, &list);
spin_unlock(&cp->rx_inuse_lock);
#else
spin_lock(&cp->rx_spare_lock);
- list_splice(&cp->rx_inuse_list, &list);
- INIT_LIST_HEAD(&cp->rx_inuse_list);
+ list_splice_init(&cp->rx_inuse_list, &list);
spin_unlock(&cp->rx_spare_lock);
#endif
list_for_each_safe(elem, tmp, &list) {
/* make a local copy of the list */
INIT_LIST_HEAD(&list);
spin_lock(&cp->rx_inuse_lock);
- list_splice(&cp->rx_inuse_list, &list);
- INIT_LIST_HEAD(&cp->rx_inuse_list);
+ list_splice_init(&cp->rx_inuse_list, &list);
spin_unlock(&cp->rx_inuse_lock);
list_for_each_safe(elem, tmp, &list) {
cas_page_t *page = list_entry(elem, cas_page_t, list);
- if (cas_buffer_count(page) > 1)
+ /*
+ * With the lockless pagecache, the cassini buffering scheme becomes
+ * slightly less accurate: we might find that a page has an
+ * elevated reference count here, due to a speculative ref,
+ * and skip it as in-use. Ideally we would be able to reclaim
+ * it. However this would be such a rare case, it doesn't
+ * matter too much as we should pick it up the next time round.
+ *
+ * Importantly, if we find that the page has a refcount of 1
+ * here (our refcount), then we know it is definitely not in use
+ * so we can reuse it.
+ */
+ if (page_count(page->buffer) > 1)
continue;
list_del(elem);
cas_spare_recover(cp, GFP_ATOMIC);
spin_lock(&cp->rx_spare_lock);
if (list_empty(&cp->rx_spare_list)) {
- if (netif_msg_rx_err(cp))
- printk(KERN_ERR "%s: no spare buffers "
- "available.\n", cp->dev->name);
+ netif_err(cp, rx_err, cp->dev,
+ "no spare buffers available\n");
spin_unlock(&cp->rx_spare_lock);
return NULL;
}
#endif
start_aneg:
if (cp->lstate == link_up) {
- printk(KERN_INFO "%s: PCS link down.\n",
- cp->dev->name);
+ netdev_info(cp->dev, "PCS link down\n");
} else {
if (changed) {
- printk(KERN_INFO "%s: link configuration changed\n",
- cp->dev->name);
+ netdev_info(cp->dev, "link configuration changed\n");
}
}
cp->lstate = link_down;
cas_phy_write(cp, MII_BMCR, BMCR_RESET);
udelay(100);
- while (limit--) {
+ while (--limit) {
val = cas_phy_read(cp, MII_BMCR);
if ((val & BMCR_RESET) == 0)
break;
return (limit <= 0);
}
+static int cas_saturn_firmware_init(struct cas *cp)
+{
+ const struct firmware *fw;
+ const char fw_name[] = "sun/cassini.bin";
+ int err;
+
+ if (PHY_NS_DP83065 != cp->phy_id)
+ return 0;
+
+ err = request_firmware(&fw, fw_name, &cp->pdev->dev);
+ if (err) {
+ pr_err("Failed to load firmware \"%s\"\n",
+ fw_name);
+ return err;
+ }
+ if (fw->size < 2) {
+ pr_err("bogus length %zu in \"%s\"\n",
+ fw->size, fw_name);
+ err = -EINVAL;
+ goto out;
+ }
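+ /* the first two bytes of the image are the little-endian load
+ * address; the rest is the firmware payload itself
+ */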
+ cp->fw_load_addr = fw->data[1] << 8 | fw->data[0];
+ cp->fw_size = fw->size - 2;
+ cp->fw_data = vmalloc(cp->fw_size);
+ if (!cp->fw_data) {
+ err = -ENOMEM;
+ pr_err("Failed to allocate firmware buffer for \"%s\": %d\n", fw_name, err);
+ goto out;
+ }
+ memcpy(cp->fw_data, &fw->data[2], cp->fw_size);
+out:
+ release_firmware(fw);
+ return err;
+}
+
static void cas_saturn_firmware_load(struct cas *cp)
{
- cas_saturn_patch_t *patch = cas_saturn_patch;
+ int i;
cas_phy_powerdown(cp);
/* download new firmware */
cas_phy_write(cp, DP83065_MII_MEM, 0x1);
- cas_phy_write(cp, DP83065_MII_REGE, patch->addr);
- while (patch->addr) {
- cas_phy_write(cp, DP83065_MII_REGD, patch->val);
- patch++;
- }
+ cas_phy_write(cp, DP83065_MII_REGE, cp->fw_load_addr);
+ for (i = 0; i < cp->fw_size; i++)
+ cas_phy_write(cp, DP83065_MII_REGD, cp->fw_data[i]);
/* enable firmware */
cas_phy_write(cp, DP83065_MII_REGE, 0x8ff8);
writel(val, cp->regs + REG_PCS_MII_CTRL);
limit = STOP_TRIES;
- while (limit-- > 0) {
+ while (--limit > 0) {
udelay(10);
if ((readl(cp->regs + REG_PCS_MII_CTRL) &
PCS_MII_RESET) == 0)
break;
}
if (limit <= 0)
- printk(KERN_WARNING "%s: PCS reset bit would not "
- "clear [%08x].\n", cp->dev->name,
- readl(cp->regs + REG_PCS_STATE_MACHINE));
+ netdev_warn(cp->dev, "PCS reset bit would not clear [%08x]\n",
+ readl(cp->regs + REG_PCS_STATE_MACHINE));
/* Make sure PCS is disabled while changing advertisement
* configuration.
*/
if ((stat & (PCS_MII_STATUS_AUTONEG_COMP |
PCS_MII_STATUS_REMOTE_FAULT)) ==
- (PCS_MII_STATUS_AUTONEG_COMP | PCS_MII_STATUS_REMOTE_FAULT)) {
- if (netif_msg_link(cp))
- printk(KERN_INFO "%s: PCS RemoteFault\n",
- cp->dev->name);
- }
+ (PCS_MII_STATUS_AUTONEG_COMP | PCS_MII_STATUS_REMOTE_FAULT))
+ netif_info(cp, link, cp->dev, "PCS RemoteFault\n");
/* work around link detection issue by querying the PCS state
* machine directly.
cp->link_transition = LINK_TRANSITION_ON_FAILURE;
}
netif_carrier_off(cp->dev);
- if (cp->opened && netif_msg_link(cp)) {
- printk(KERN_INFO "%s: PCS link down.\n",
- cp->dev->name);
- }
+ if (cp->opened)
+ netif_info(cp, link, cp->dev, "PCS link down\n");
/* Cassini only: if you force a mode, there can be
* sync problems on link down. to fix that, the following
if (!txmac_stat)
return 0;
- if (netif_msg_intr(cp))
- printk(KERN_DEBUG "%s: txmac interrupt, txmac_stat: 0x%x\n",
- cp->dev->name, txmac_stat);
+ netif_printk(cp, intr, KERN_DEBUG, cp->dev,
+ "txmac interrupt, txmac_stat: 0x%x\n", txmac_stat);
/* Defer timer expiration is quite normal,
* don't even log the event.
spin_lock(&cp->stat_lock[0]);
if (txmac_stat & MAC_TX_UNDERRUN) {
- printk(KERN_ERR "%s: TX MAC xmit underrun.\n",
- dev->name);
+ netdev_err(dev, "TX MAC xmit underrun\n");
cp->net_stats[0].tx_fifo_errors++;
}
if (txmac_stat & MAC_TX_MAX_PACKET_ERR) {
- printk(KERN_ERR "%s: TX MAC max packet size error.\n",
- dev->name);
+ netdev_err(dev, "TX MAC max packet size error\n");
cp->net_stats[0].tx_errors++;
}
cas_page_t *page = cp->rx_pages[1][index];
cas_page_t *new;
- if (cas_buffer_count(page) == 1)
+ if (page_count(page->buffer) == 1)
return page;
new = cas_page_dequeue(cp);
cas_page_t **page1 = cp->rx_pages[1];
/* swap if buffer is in use */
- if (cas_buffer_count(page0[index]) > 1) {
+ if (page_count(page0[index]->buffer) > 1) {
cas_page_t *new = cas_page_spare(cp, index);
if (new) {
page1[index] = page0[index];
udelay(10);
}
if (limit == STOP_TRIES) {
- printk(KERN_ERR "%s: RX MAC will not disable, resetting whole "
- "chip.\n", dev->name);
+ netdev_err(dev, "RX MAC will not disable, resetting whole chip\n");
return 1;
}
udelay(10);
}
if (limit == STOP_TRIES) {
- printk(KERN_ERR "%s: RX DMA will not disable, resetting whole "
- "chip.\n", dev->name);
+ netdev_err(dev, "RX DMA will not disable, resetting whole chip\n");
return 1;
}
udelay(10);
}
if (limit == STOP_TRIES) {
- printk(KERN_ERR "%s: RX reset command will not execute, "
- "resetting whole chip.\n", dev->name);
+ netdev_err(dev, "RX reset command will not execute, resetting whole chip\n");
return 1;
}
if (!stat)
return 0;
- if (netif_msg_intr(cp))
- printk(KERN_DEBUG "%s: rxmac interrupt, stat: 0x%x\n",
- cp->dev->name, stat);
+ netif_dbg(cp, intr, cp->dev, "rxmac interrupt, stat: 0x%x\n", stat);
/* these are all rollovers */
spin_lock(&cp->stat_lock[0]);
if (!stat)
return 0;
- if (netif_msg_intr(cp))
- printk(KERN_DEBUG "%s: mac interrupt, stat: 0x%x\n",
- cp->dev->name, stat);
+ netif_printk(cp, intr, KERN_DEBUG, cp->dev,
+ "mac interrupt, stat: 0x%x\n", stat);
/* This interrupt is just for pause frame and pause
* tracking. It is useful for diagnostics and debug
switch (cp->lstate) {
case link_force_ret:
- if (netif_msg_link(cp))
- printk(KERN_INFO "%s: Autoneg failed again, keeping"
- " forced mode\n", cp->dev->name);
+ netif_info(cp, link, cp->dev, "Autoneg failed again, keeping forced mode\n");
cas_phy_write(cp, MII_BMCR, cp->link_fcntl);
cp->timer_ticks = 5;
cp->lstate = link_force_ok;
cas_mif_poll(cp, 0);
cp->link_fcntl = cas_phy_read(cp, MII_BMCR);
cp->timer_ticks = 5;
- if (cp->opened && netif_msg_link(cp))
- printk(KERN_INFO "%s: Got link after fallback, retrying"
- " autoneg once...\n", cp->dev->name);
+ if (cp->opened)
+ netif_info(cp, link, cp->dev,
+ "Got link after fallback, retrying autoneg once...\n");
cas_phy_write(cp, MII_BMCR,
cp->link_fcntl | BMCR_ANENABLE |
BMCR_ANRESTART);
cp->link_transition = LINK_TRANSITION_LINK_DOWN;
netif_carrier_off(cp->dev);
- if (cp->opened && netif_msg_link(cp))
- printk(KERN_INFO "%s: Link down\n",
- cp->dev->name);
+ if (cp->opened)
+ netif_info(cp, link, cp->dev, "Link down\n");
restart = 1;
} else if (++cp->timer_ticks > 10)
if (!stat)
return 0;
- printk(KERN_ERR "%s: PCI error [%04x:%04x] ", dev->name, stat,
- readl(cp->regs + REG_BIM_DIAG));
+ netdev_err(dev, "PCI error [%04x:%04x]",
+ stat, readl(cp->regs + REG_BIM_DIAG));
/* cassini+ has this reserved */
if ((stat & PCI_ERR_BADACK) &&
((cp->cas_flags & CAS_FLAG_REG_PLUS) == 0))
- printk("<No ACK64# during ABS64 cycle> ");
+ pr_cont(" <No ACK64# during ABS64 cycle>");
if (stat & PCI_ERR_DTRTO)
- printk("<Delayed transaction timeout> ");
+ pr_cont(" <Delayed transaction timeout>");
if (stat & PCI_ERR_OTHER)
- printk("<other> ");
+ pr_cont(" <other>");
if (stat & PCI_ERR_BIM_DMA_WRITE)
- printk("<BIM DMA 0 write req> ");
+ pr_cont(" <BIM DMA 0 write req>");
if (stat & PCI_ERR_BIM_DMA_READ)
- printk("<BIM DMA 0 read req> ");
- printk("\n");
+ pr_cont(" <BIM DMA 0 read req>");
+ pr_cont("\n");
if (stat & PCI_ERR_OTHER) {
u16 cfg;
* true cause.
*/
pci_read_config_word(cp->pdev, PCI_STATUS, &cfg);
- printk(KERN_ERR "%s: Read PCI cfg space status [%04x]\n",
- dev->name, cfg);
+ netdev_err(dev, "Read PCI cfg space status [%04x]\n", cfg);
if (cfg & PCI_STATUS_PARITY)
- printk(KERN_ERR "%s: PCI parity error detected.\n",
- dev->name);
+ netdev_err(dev, "PCI parity error detected\n");
if (cfg & PCI_STATUS_SIG_TARGET_ABORT)
- printk(KERN_ERR "%s: PCI target abort.\n",
- dev->name);
+ netdev_err(dev, "PCI target abort\n");
if (cfg & PCI_STATUS_REC_TARGET_ABORT)
- printk(KERN_ERR "%s: PCI master acks target abort.\n",
- dev->name);
+ netdev_err(dev, "PCI master acks target abort\n");
if (cfg & PCI_STATUS_REC_MASTER_ABORT)
- printk(KERN_ERR "%s: PCI master abort.\n", dev->name);
+ netdev_err(dev, "PCI master abort\n");
if (cfg & PCI_STATUS_SIG_SYSTEM_ERROR)
- printk(KERN_ERR "%s: PCI system error SERR#.\n",
- dev->name);
+ netdev_err(dev, "PCI system error SERR#\n");
if (cfg & PCI_STATUS_DETECTED_PARITY)
- printk(KERN_ERR "%s: PCI parity error.\n",
- dev->name);
+ netdev_err(dev, "PCI parity error\n");
/* Write the error bits back to clear them. */
cfg &= (PCI_STATUS_PARITY |
{
if (status & INTR_RX_TAG_ERROR) {
/* corrupt RX tag framing */
- if (netif_msg_rx_err(cp))
- printk(KERN_DEBUG "%s: corrupt rx tag framing\n",
- cp->dev->name);
+ netif_printk(cp, rx_err, KERN_DEBUG, cp->dev,
+ "corrupt rx tag framing\n");
spin_lock(&cp->stat_lock[0]);
cp->net_stats[0].rx_errors++;
spin_unlock(&cp->stat_lock[0]);
if (status & INTR_RX_LEN_MISMATCH) {
/* length mismatch. */
- if (netif_msg_rx_err(cp))
- printk(KERN_DEBUG "%s: length mismatch for rx frame\n",
- cp->dev->name);
+ netif_printk(cp, rx_err, KERN_DEBUG, cp->dev,
+ "length mismatch for rx frame\n");
spin_lock(&cp->stat_lock[0]);
cp->net_stats[0].rx_errors++;
spin_unlock(&cp->stat_lock[0]);
#if 1
atomic_inc(&cp->reset_task_pending);
atomic_inc(&cp->reset_task_pending_all);
- printk(KERN_ERR "%s:reset called in cas_abnormal_irq [0x%x]\n",
- dev->name, status);
+ netdev_err(dev, "reset called in cas_abnormal_irq [0x%x]\n", status);
schedule_work(&cp->reset_task);
#else
atomic_set(&cp->reset_task_pending, CAS_RESET_ALL);
- printk(KERN_ERR "reset called in cas_abnormal_irq\n");
+ netdev_err(dev, "reset called in cas_abnormal_irq\n");
schedule_work(&cp->reset_task);
#endif
return 1;
if (count < 0)
break;
- if (netif_msg_tx_done(cp))
- printk(KERN_DEBUG "%s: tx[%d] done, slot %d\n",
- cp->dev->name, ring, entry);
+ netif_printk(cp, tx_done, KERN_DEBUG, cp->dev,
+ "tx[%d] done, slot %d\n", ring, entry);
skbs[entry] = NULL;
cp->tx_tiny_use[ring][entry].nbufs = 0;
#ifdef USE_TX_COMPWB
u64 compwb = le64_to_cpu(cp->init_block->tx_compwb);
#endif
- if (netif_msg_intr(cp))
- printk(KERN_DEBUG "%s: tx interrupt, status: 0x%x, %llx\n",
- cp->dev->name, status, (unsigned long long)compwb);
+ netif_printk(cp, intr, KERN_DEBUG, cp->dev,
+ "tx interrupt, status: 0x%x, %llx\n",
+ status, (unsigned long long)compwb);
/* process all the rings */
for (ring = 0; ring < N_TX_RINGS; ring++) {
#ifdef USE_TX_COMPWB
struct cas_page *page;
struct sk_buff *skb;
void *addr, *crcaddr;
+ __sum16 csum;
char *p;
hlen = CAS_VAL(RX_COMP2_HDR_SIZE, words[1]);
return -1;
*skbref = skb;
- skb->dev = cp->dev;
skb_reserve(skb, swivel);
p = skb->data;
hlen = min(cp->page_size - off, dlen);
if (hlen < 0) {
- if (netif_msg_rx_err(cp)) {
- printk(KERN_DEBUG "%s: rx page overflow: "
- "%d\n", cp->dev->name, hlen);
- }
+ netif_printk(cp, rx_err, KERN_DEBUG, cp->dev,
+ "rx page overflow: %d\n", hlen);
dev_kfree_skb_irq(skb);
return -1;
}
skb_shinfo(skb)->nr_frags++;
skb->data_len += hlen - swivel;
+ skb->truesize += hlen - swivel;
skb->len += hlen - swivel;
get_page(page->buffer);
- cas_buffer_inc(page);
frag->page = page->buffer;
frag->page_offset = off;
frag->size = hlen - swivel;
frag++;
get_page(page->buffer);
- cas_buffer_inc(page);
frag->page = page->buffer;
frag->page_offset = 0;
frag->size = hlen;
off = CAS_VAL(RX_COMP1_DATA_OFF, words[0]) + swivel;
hlen = min(cp->page_size - off, dlen);
if (hlen < 0) {
- if (netif_msg_rx_err(cp)) {
- printk(KERN_DEBUG "%s: rx page overflow: "
- "%d\n", cp->dev->name, hlen);
- }
+ netif_printk(cp, rx_err, KERN_DEBUG, cp->dev,
+ "rx page overflow: %d\n", hlen);
dev_kfree_skb_irq(skb);
return -1;
}
skb_put(skb, alloclen);
}
- i = CAS_VAL(RX_COMP4_TCP_CSUM, words[3]);
+ csum = (__force __sum16)htons(CAS_VAL(RX_COMP4_TCP_CSUM, words[3]));
if (cp->crc_size) {
/* checksum includes FCS. strip it out. */
- i = csum_fold(csum_partial(crcaddr, cp->crc_size, i));
+ csum = csum_fold(csum_partial(crcaddr, cp->crc_size,
+ csum_unfold(csum)));
if (addr)
cas_page_unmap(addr);
}
- skb->csum = ntohs(i ^ 0xffff);
- skb->ip_summed = CHECKSUM_COMPLETE;
skb->protocol = eth_type_trans(skb, cp->dev);
+ if (skb->protocol == htons(ETH_P_IP)) {
+ skb->csum = csum_unfold(~csum);
+ skb->ip_summed = CHECKSUM_COMPLETE;
+ } else
+ skb->ip_summed = CHECKSUM_NONE;
return len;
}
* do any additional locking here. stick the buffer
* at the end.
*/
- __skb_insert(skb, flow->prev, (struct sk_buff *) flow, flow);
+ __skb_queue_tail(flow, skb);
if (words[0] & RX_COMP1_RELEASE_FLOW) {
while ((skb = __skb_dequeue(flow))) {
cas_skb_release(skb);
entry = cp->rx_old[ring];
- if (netif_msg_intr(cp))
- printk(KERN_DEBUG "%s: rxd[%d] interrupt, done: %d\n",
- cp->dev->name, ring, entry);
+ netif_printk(cp, intr, KERN_DEBUG, cp->dev,
+ "rxd[%d] interrupt, done: %d\n", ring, entry);
cluster = -1;
count = entry & 0x3;
released = 0;
while (entry != last) {
/* make a new buffer if it's still in use */
- if (cas_buffer_count(page[entry]) > 1) {
+ if (page_count(page[entry]->buffer) > 1) {
cas_page_t *new = cas_page_dequeue(cp);
if (!new) {
/* let the timer know that we need to
int entry, drops;
int npackets = 0;
- if (netif_msg_intr(cp))
- printk(KERN_DEBUG "%s: rx[%d] interrupt, done: %d/%d\n",
- cp->dev->name, ring,
- readl(cp->regs + REG_RX_COMP_HEAD),
- cp->rx_new[ring]);
+ netif_printk(cp, intr, KERN_DEBUG, cp->dev,
+ "rx[%d] interrupt, done: %d/%d\n",
+ ring,
+ readl(cp->regs + REG_RX_COMP_HEAD), cp->rx_new[ring]);
entry = cp->rx_new[ring];
drops = 0;
while (1) {
struct cas_rx_comp *rxc = rxcs + entry;
- struct sk_buff *skb;
+ struct sk_buff *uninitialized_var(skb);
int type, len;
u64 words[4];
int i, dring;
cp->net_stats[ring].rx_packets++;
cp->net_stats[ring].rx_bytes += len;
spin_unlock(&cp->stat_lock[ring]);
- cp->dev->last_rx = jiffies;
next:
npackets++;
cp->rx_new[ring] = entry;
if (drops)
- printk(KERN_INFO "%s: Memory squeeze, deferring packet.\n",
- cp->dev->name);
+ netdev_info(cp->dev, "Memory squeeze, deferring packet\n");
return npackets;
}
last = cp->rx_cur[ring];
entry = cp->rx_new[ring];
- if (netif_msg_intr(cp))
- printk(KERN_DEBUG "%s: rxc[%d] interrupt, done: %d/%d\n",
- dev->name, ring, readl(cp->regs + REG_RX_COMP_HEAD),
- entry);
+ netif_printk(cp, intr, KERN_DEBUG, dev,
+ "rxc[%d] interrupt, done: %d/%d\n",
+ ring, readl(cp->regs + REG_RX_COMP_HEAD), entry);
/* zero and re-mark descriptors */
while (last != entry) {
if (status & INTR_RX_DONE_ALT) { /* handle rx separately */
#ifdef USE_NAPI
cas_mask_intr(cp);
- netif_rx_schedule(dev);
+ napi_schedule(&cp->napi);
#else
cas_rx_ringN(cp, ring, 0);
#endif
if (status & INTR_RX_DONE_ALT) { /* handle rx separately */
#ifdef USE_NAPI
cas_mask_intr(cp);
- netif_rx_schedule(dev);
+ napi_schedule(&cp->napi);
#else
cas_rx_ringN(cp, 1, 0);
#endif
if (status & INTR_RX_DONE) {
#ifdef USE_NAPI
cas_mask_intr(cp);
- netif_rx_schedule(dev);
+ napi_schedule(&cp->napi);
#else
cas_rx_ringN(cp, 0, 0);
#endif
#ifdef USE_NAPI
-static int cas_poll(struct net_device *dev, int *budget)
+static int cas_poll(struct napi_struct *napi, int budget)
{
- struct cas *cp = netdev_priv(dev);
- int i, enable_intr, todo, credits;
+ struct cas *cp = container_of(napi, struct cas, napi);
+ struct net_device *dev = cp->dev;
+ int i, enable_intr, credits;
u32 status = readl(cp->regs + REG_INTR_STATUS);
unsigned long flags;
/* NAPI rx packets. we spread the credits across all of the
* rxc rings
- */
- todo = min(*budget, dev->quota);
-
- /* to make sure we're fair with the work we loop through each
+ *
+ * to make sure we're fair with the work we loop through each
* ring N_RX_COMP_RING times with a request of
- * todo / N_RX_COMP_RINGS
+ * budget / N_RX_COMP_RINGS
*/
enable_intr = 1;
credits = 0;
for (i = 0; i < N_RX_COMP_RINGS; i++) {
int j;
for (j = 0; j < N_RX_COMP_RINGS; j++) {
- credits += cas_rx_ringN(cp, j, todo / N_RX_COMP_RINGS);
- if (credits >= todo) {
+ credits += cas_rx_ringN(cp, j, budget / N_RX_COMP_RINGS);
+ if (credits >= budget) {
enable_intr = 0;
goto rx_comp;
}
}
rx_comp:
- *budget -= credits;
- dev->quota -= credits;
-
/* final rx completion */
spin_lock_irqsave(&cp->lock, flags);
if (status)
#endif
spin_unlock_irqrestore(&cp->lock, flags);
if (enable_intr) {
- netif_rx_complete(dev);
+ napi_complete(napi);
cas_unmask_intr(cp);
- return 0;
}
- return 1;
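+ /* report how much of the budget was consumed; if it was not
+ * exhausted, polling is complete and interrupts were unmasked above
+ */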
+ return credits;
}
#endif
{
struct cas *cp = netdev_priv(dev);
- printk(KERN_ERR "%s: transmit timed out, resetting\n", dev->name);
+ netdev_err(dev, "transmit timed out, resetting\n");
if (!cp->hw_running) {
- printk("%s: hrm.. hw not running!\n", dev->name);
+ netdev_err(dev, "hrm.. hw not running!\n");
return;
}
- printk(KERN_ERR "%s: MIF_STATE[%08x]\n",
- dev->name, readl(cp->regs + REG_MIF_STATE_MACHINE));
-
- printk(KERN_ERR "%s: MAC_STATE[%08x]\n",
- dev->name, readl(cp->regs + REG_MAC_STATE_MACHINE));
-
- printk(KERN_ERR "%s: TX_STATE[%08x:%08x:%08x] "
- "FIFO[%08x:%08x:%08x] SM1[%08x] SM2[%08x]\n",
- dev->name,
- readl(cp->regs + REG_TX_CFG),
- readl(cp->regs + REG_MAC_TX_STATUS),
- readl(cp->regs + REG_MAC_TX_CFG),
- readl(cp->regs + REG_TX_FIFO_PKT_CNT),
- readl(cp->regs + REG_TX_FIFO_WRITE_PTR),
- readl(cp->regs + REG_TX_FIFO_READ_PTR),
- readl(cp->regs + REG_TX_SM_1),
- readl(cp->regs + REG_TX_SM_2));
-
- printk(KERN_ERR "%s: RX_STATE[%08x:%08x:%08x]\n",
- dev->name,
- readl(cp->regs + REG_RX_CFG),
- readl(cp->regs + REG_MAC_RX_STATUS),
- readl(cp->regs + REG_MAC_RX_CFG));
-
- printk(KERN_ERR "%s: HP_STATE[%08x:%08x:%08x:%08x]\n",
- dev->name,
- readl(cp->regs + REG_HP_STATE_MACHINE),
- readl(cp->regs + REG_HP_STATUS0),
- readl(cp->regs + REG_HP_STATUS1),
- readl(cp->regs + REG_HP_STATUS2));
+ netdev_err(dev, "MIF_STATE[%08x]\n",
+ readl(cp->regs + REG_MIF_STATE_MACHINE));
+
+ netdev_err(dev, "MAC_STATE[%08x]\n",
+ readl(cp->regs + REG_MAC_STATE_MACHINE));
+
+ netdev_err(dev, "TX_STATE[%08x:%08x:%08x] FIFO[%08x:%08x:%08x] SM1[%08x] SM2[%08x]\n",
+ readl(cp->regs + REG_TX_CFG),
+ readl(cp->regs + REG_MAC_TX_STATUS),
+ readl(cp->regs + REG_MAC_TX_CFG),
+ readl(cp->regs + REG_TX_FIFO_PKT_CNT),
+ readl(cp->regs + REG_TX_FIFO_WRITE_PTR),
+ readl(cp->regs + REG_TX_FIFO_READ_PTR),
+ readl(cp->regs + REG_TX_SM_1),
+ readl(cp->regs + REG_TX_SM_2));
+
+ netdev_err(dev, "RX_STATE[%08x:%08x:%08x]\n",
+ readl(cp->regs + REG_RX_CFG),
+ readl(cp->regs + REG_MAC_RX_STATUS),
+ readl(cp->regs + REG_MAC_RX_CFG));
+
+ netdev_err(dev, "HP_STATE[%08x:%08x:%08x:%08x]\n",
+ readl(cp->regs + REG_HP_STATE_MACHINE),
+ readl(cp->regs + REG_HP_STATUS0),
+ readl(cp->regs + REG_HP_STATUS1),
+ readl(cp->regs + REG_HP_STATUS2));
#if 1
atomic_inc(&cp->reset_task_pending);
CAS_TABORT(cp)*(skb_shinfo(skb)->nr_frags + 1)) {
netif_stop_queue(dev);
spin_unlock_irqrestore(&cp->tx_lock[ring], flags);
- printk(KERN_ERR PFX "%s: BUG! Tx Ring full when "
- "queue awake!\n", dev->name);
+ netdev_err(dev, "BUG! Tx Ring full when queue awake!\n");
return 1;
}
ctrl = 0;
if (skb->ip_summed == CHECKSUM_PARTIAL) {
- u64 csum_start_off, csum_stuff_off;
-
- csum_start_off = (u64) (skb->h.raw - skb->data);
- csum_stuff_off = csum_start_off + skb->csum_offset;
+ const u64 csum_start_off = skb_transport_offset(skb);
+ const u64 csum_stuff_off = csum_start_off + skb->csum_offset;
ctrl = TX_DESC_CSUM_EN |
CAS_BASE(TX_DESC_CSUM_START, csum_start_off) |
ctrl | TX_DESC_SOF, 0);
entry = TX_DESC_NEXT(ring, entry);
- memcpy(tx_tiny_buf(cp, ring, entry), skb->data +
- len - tabort, tabort);
+ skb_copy_from_linear_data_offset(skb, len - tabort,
+ tx_tiny_buf(cp, ring, entry), tabort);
mapping = tx_tiny_map(cp, ring, entry, tentry);
cas_write_txd(cp, ring, entry, mapping, tabort, ctrl,
(nr_frags == 0));
if (TX_BUFFS_AVAIL(cp, ring) <= CAS_TABORT(cp)*(MAX_SKB_FRAGS + 1))
netif_stop_queue(dev);
- if (netif_msg_tx_queued(cp))
- printk(KERN_DEBUG "%s: tx[%d] queued, slot %d, skblen %d, "
- "avail %d\n",
- dev->name, ring, entry, skb->len,
- TX_BUFFS_AVAIL(cp, ring));
+ netif_printk(cp, tx_queued, KERN_DEBUG, dev,
+ "tx[%d] queued, slot %d, skblen %d, avail %d\n",
+ ring, entry, skb->len, TX_BUFFS_AVAIL(cp, ring));
writel(entry, cp->regs + REG_TX_KICKN(ring));
spin_unlock_irqrestore(&cp->tx_lock[ring], flags);
return 0;
}
-static int cas_start_xmit(struct sk_buff *skb, struct net_device *dev)
+static netdev_tx_t cas_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct cas *cp = netdev_priv(dev);
static int ring;
if (skb_padto(skb, cp->min_frame_size))
- return 0;
+ return NETDEV_TX_OK;
/* XXX: we need some higher-level QoS hooks to steer packets to
* individual queues.
*/
if (cas_xmit_tx_ringN(cp, ring++ & N_TX_RINGS_MASK, skb))
- return 1;
- dev->trans_start = jiffies;
- return 0;
+ return NETDEV_TX_BUSY;
+ return NETDEV_TX_OK;
}
static void cas_init_tx_dma(struct cas *cp)
cas_init_rx_dma(cp);
}
+static void cas_process_mc_list(struct cas *cp)
+{
+ u16 hash_table[16];
+ u32 crc;
+ struct netdev_hw_addr *ha;
+ int i = 1;
+
+ memset(hash_table, 0, sizeof(hash_table));
+ netdev_for_each_mc_addr(ha, cp->dev) {
+ if (i <= CAS_MC_EXACT_MATCH_SIZE) {
+ /* use the alternate mac address registers for the
+ * first 15 multicast addresses
+ */
+ writel((ha->addr[4] << 8) | ha->addr[5],
+ cp->regs + REG_MAC_ADDRN(i*3 + 0));
+ writel((ha->addr[2] << 8) | ha->addr[3],
+ cp->regs + REG_MAC_ADDRN(i*3 + 1));
+ writel((ha->addr[0] << 8) | ha->addr[1],
+ cp->regs + REG_MAC_ADDRN(i*3 + 2));
+ i++;
+ } else {
+ /* use hw hash table for the next series of
+ * multicast addresses
+ */
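+ /* keep the top byte of the CRC: its upper nibble selects one of
+ * the 16 hash registers, the lower nibble the bit within it
+ */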
+ crc = ether_crc_le(ETH_ALEN, ha->addr);
+ crc >>= 24;
+ hash_table[crc >> 4] |= 1 << (15 - (crc & 0xf));
+ }
+ }
+ for (i = 0; i < 16; i++)
+ writel(hash_table[i], cp->regs + REG_MAC_HASH_TABLEN(i));
+}
+
/* Must be invoked under cp->lock. */
static u32 cas_setup_multicast(struct cas *cp)
{
rxcfg |= MAC_RX_CFG_HASH_FILTER_EN;
} else {
- u16 hash_table[16];
- u32 crc;
- struct dev_mc_list *dmi = cp->dev->mc_list;
- int i;
-
- /* use the alternate mac address registers for the
- * first 15 multicast addresses
- */
- for (i = 1; i <= CAS_MC_EXACT_MATCH_SIZE; i++) {
- if (!dmi) {
- writel(0x0, cp->regs + REG_MAC_ADDRN(i*3 + 0));
- writel(0x0, cp->regs + REG_MAC_ADDRN(i*3 + 1));
- writel(0x0, cp->regs + REG_MAC_ADDRN(i*3 + 2));
- continue;
- }
- writel((dmi->dmi_addr[4] << 8) | dmi->dmi_addr[5],
- cp->regs + REG_MAC_ADDRN(i*3 + 0));
- writel((dmi->dmi_addr[2] << 8) | dmi->dmi_addr[3],
- cp->regs + REG_MAC_ADDRN(i*3 + 1));
- writel((dmi->dmi_addr[0] << 8) | dmi->dmi_addr[1],
- cp->regs + REG_MAC_ADDRN(i*3 + 2));
- dmi = dmi->next;
- }
-
- /* use hw hash table for the next series of
- * multicast addresses
- */
- memset(hash_table, 0, sizeof(hash_table));
- while (dmi) {
- crc = ether_crc_le(ETH_ALEN, dmi->dmi_addr);
- crc >>= 24;
- hash_table[crc >> 4] |= 1 << (15 - (crc & 0xf));
- dmi = dmi->next;
- }
- for (i=0; i < 16; i++)
- writel(hash_table[i], cp->regs +
- REG_MAC_HASH_TABLEN(i));
+ cas_process_mc_list(cp);
rxcfg |= MAC_RX_CFG_HASH_FILTER_EN;
}
if (readl(cp->regs + REG_MAC_TX_RESET) |
readl(cp->regs + REG_MAC_RX_RESET))
- printk(KERN_ERR "%s: mac tx[%d]/rx[%d] reset failed [%08x]\n",
- cp->dev->name, readl(cp->regs + REG_MAC_TX_RESET),
- readl(cp->regs + REG_MAC_RX_RESET),
- readl(cp->regs + REG_MAC_STATE_MACHINE));
+ netdev_err(cp->dev, "mac tx[%d]/rx[%d] reset failed [%08x]\n",
+ readl(cp->regs + REG_MAC_TX_RESET),
+ readl(cp->regs + REG_MAC_RX_RESET),
+ readl(cp->regs + REG_MAC_STATE_MACHINE));
}
goto done;
/* Sun MAC prefix then 3 random bytes. */
- printk(PFX "MAC address not found in ROM VPD\n");
+ pr_info("MAC address not found in ROM VPD\n");
dev_addr[0] = 0x08;
dev_addr[1] = 0x00;
dev_addr[2] = 0x20;
static void cas_check_pci_invariants(struct cas *cp)
{
struct pci_dev *pdev = cp->pdev;
- u8 rev;
cp->cas_flags = 0;
- pci_read_config_byte(pdev, PCI_REVISION_ID, &rev);
if ((pdev->vendor == PCI_VENDOR_ID_SUN) &&
(pdev->device == PCI_DEVICE_ID_SUN_CASSINI)) {
- if (rev >= CAS_ID_REVPLUS)
+ if (pdev->revision >= CAS_ID_REVPLUS)
cp->cas_flags |= CAS_FLAG_REG_PLUS;
- if (rev < CAS_ID_REVPLUS02u)
+ if (pdev->revision < CAS_ID_REVPLUS02u)
cp->cas_flags |= CAS_FLAG_TARGET_ABORT;
/* Original Cassini supports HW CSUM, but it's not
* enabled by default as it can trigger TX hangs.
*/
- if (rev < CAS_ID_REV2)
+ if (pdev->revision < CAS_ID_REV2)
cp->cas_flags |= CAS_FLAG_NO_HW_CSUM;
} else {
/* Only sun has original cassini chips. */
__free_pages(page, CAS_JUMBO_PAGE_SHIFT - PAGE_SHIFT);
cp->page_order = CAS_JUMBO_PAGE_SHIFT - PAGE_SHIFT;
} else {
- printk(PFX "MTU limited to %d bytes\n", CAS_MAX_MTU);
+ pr_info("MTU limited to %d bytes\n", CAS_MAX_MTU);
}
}
#endif
}
}
}
- printk(KERN_ERR PFX "MII phy did not respond [%08x]\n",
+ pr_err("MII phy did not respond [%08x]\n",
readl(cp->regs + REG_MIF_STATE_MACHINE));
return -1;
val = readl(cp->regs + REG_MAC_RX_CFG);
if ((val & MAC_RX_CFG_EN)) {
if (txfailed) {
- printk(KERN_ERR
- "%s: enabling mac failed [tx:%08x:%08x].\n",
- cp->dev->name,
- readl(cp->regs + REG_MIF_STATE_MACHINE),
- readl(cp->regs + REG_MAC_STATE_MACHINE));
+ netdev_err(cp->dev,
+ "enabling mac failed [tx:%08x:%08x]\n",
+ readl(cp->regs + REG_MIF_STATE_MACHINE),
+ readl(cp->regs + REG_MAC_STATE_MACHINE));
}
goto enable_rx_done;
}
udelay(10);
}
- printk(KERN_ERR "%s: enabling mac failed [%s:%08x:%08x].\n",
- cp->dev->name,
- (txfailed? "tx,rx":"rx"),
- readl(cp->regs + REG_MIF_STATE_MACHINE),
- readl(cp->regs + REG_MAC_STATE_MACHINE));
+ netdev_err(cp->dev, "enabling mac failed [%s:%08x:%08x]\n",
+ (txfailed ? "tx,rx" : "rx"),
+ readl(cp->regs + REG_MIF_STATE_MACHINE),
+ readl(cp->regs + REG_MAC_STATE_MACHINE));
enable_rx_done:
cas_unmask_intr(cp); /* enable interrupts */
}
}
- if (netif_msg_link(cp))
- printk(KERN_INFO "%s: Link up at %d Mbps, %s-duplex.\n",
- cp->dev->name, speed, (full_duplex ? "full" : "half"));
+ netif_info(cp, link, cp->dev, "Link up at %d Mbps, %s-duplex\n",
+ speed, full_duplex ? "full" : "half");
val = MAC_XIF_TX_MII_OUTPUT_EN | MAC_XIF_LINK_LED;
if (CAS_PHY_MII(cp->phy_type)) {
if (netif_msg_link(cp)) {
if (pause & 0x01) {
- printk(KERN_INFO "%s: Pause is enabled "
- "(rxfifo: %d off: %d on: %d)\n",
- cp->dev->name,
- cp->rx_fifo_size,
- cp->rx_pause_off,
- cp->rx_pause_on);
+ netdev_info(cp->dev, "Pause is enabled (rxfifo: %d off: %d on: %d)\n",
+ cp->rx_fifo_size,
+ cp->rx_pause_off,
+ cp->rx_pause_on);
} else if (pause & 0x10) {
- printk(KERN_INFO "%s: TX pause enabled\n",
- cp->dev->name);
+ netdev_info(cp->dev, "TX pause enabled\n");
} else {
- printk(KERN_INFO "%s: Pause is disabled\n",
- cp->dev->name);
+ netdev_info(cp->dev, "Pause is disabled\n");
}
}
goto done;
udelay(10);
}
- printk(KERN_ERR "%s: sw reset failed.\n", cp->dev->name);
+ netdev_err(cp->dev, "sw reset failed\n");
done:
/* enable various BIM interrupts */
#else
atomic_set(&cp->reset_task_pending, (cp->phy_type & CAS_PHY_SERDES) ?
CAS_RESET_ALL : CAS_RESET_MTU);
- printk(KERN_ERR "reset called in cas_change_mtu\n");
+ pr_err("reset called in cas_change_mtu\n");
schedule_work(&cp->reset_task);
#endif
if (((tlm == 0x5) || (tlm == 0x3)) &&
(CAS_VAL(MAC_SM_ENCAP_SM, val) == 0)) {
- if (netif_msg_tx_err(cp))
- printk(KERN_DEBUG "%s: tx err: "
- "MAC_STATE[%08x]\n",
- cp->dev->name, val);
+ netif_printk(cp, tx_err, KERN_DEBUG, cp->dev,
+ "tx err: MAC_STATE[%08x]\n", val);
reset = 1;
goto done;
}
wptr = readl(cp->regs + REG_TX_FIFO_WRITE_PTR);
rptr = readl(cp->regs + REG_TX_FIFO_READ_PTR);
if ((val == 0) && (wptr != rptr)) {
- if (netif_msg_tx_err(cp))
- printk(KERN_DEBUG "%s: tx err: "
- "TX_FIFO[%08x:%08x:%08x]\n",
- cp->dev->name, val, wptr, rptr);
+ netif_printk(cp, tx_err, KERN_DEBUG, cp->dev,
+ "tx err: TX_FIFO[%08x:%08x:%08x]\n",
+ val, wptr, rptr);
reset = 1;
}
schedule_work(&cp->reset_task);
#else
atomic_set(&cp->reset_task_pending, CAS_RESET_ALL);
- printk(KERN_ERR "reset called in cas_link_timer\n");
+ pr_err("reset called in cas_link_timer\n");
schedule_work(&cp->reset_task);
#endif
}
cas_unlock_all_restore(cp, flags);
}
+ err = -ENOMEM;
if (cas_tx_tiny_alloc(cp) < 0)
- return -ENOMEM;
+ goto err_unlock;
/* alloc rx descriptors */
- err = -ENOMEM;
if (cas_alloc_rxds(cp) < 0)
goto err_tx_tiny;
*/
if (request_irq(cp->pdev->irq, cas_interrupt,
IRQF_SHARED, dev->name, (void *) dev)) {
- printk(KERN_ERR "%s: failed to request irq !\n",
- cp->dev->name);
+ netdev_err(cp->dev, "failed to request irq!\n");
err = -EAGAIN;
goto err_spare;
}
+#ifdef USE_NAPI
+ napi_enable(&cp->napi);
+#endif
/* init hw */
cas_lock_all_save(cp, flags);
cas_clean_rings(cp);
cas_free_rxds(cp);
err_tx_tiny:
cas_tx_tiny_free(cp);
+err_unlock:
mutex_unlock(&cp->pm_mutex);
return err;
}
unsigned long flags;
struct cas *cp = netdev_priv(dev);
+#ifdef USE_NAPI
+ napi_disable(&cp->napi);
+#endif
/* Make sure we don't get distracted by suspend/resume */
mutex_lock(&cp->pm_mutex);
{"tx_fifo_errors"},
{"tx_packets"}
};
-#define CAS_NUM_STAT_KEYS (sizeof(ethtool_cassini_statnames)/ETH_GSTRING_LEN)
+#define CAS_NUM_STAT_KEYS ARRAY_SIZE(ethtool_cassini_statnames)
static struct {
const int offsets; /* neg. values for 2nd arg to cas_read_phy */
{REG_MAC_COLL_EXCESS},
{REG_MAC_COLL_LATE}
};
-#define CAS_REG_LEN (sizeof(ethtool_register_table)/sizeof(int))
+#define CAS_REG_LEN ARRAY_SIZE(ethtool_register_table)
#define CAS_MAX_REGS (sizeof (u32)*CAS_REG_LEN)
static void cas_read_regs(struct cas *cp, u8 *ptr, int len)
cas_read_regs(cp, p, regs->len / sizeof(u32));
}
-static int cas_get_stats_count(struct net_device *dev)
+static int cas_get_sset_count(struct net_device *dev, int sset)
{
- return CAS_NUM_STAT_KEYS;
+ switch (sset) {
+ case ETH_SS_STATS:
+ return CAS_NUM_STAT_KEYS;
+ default:
+ return -EOPNOTSUPP;
+ }
}
static void cas_get_strings(struct net_device *dev, u32 stringset, u8 *data)
.set_msglevel = cas_set_msglevel,
.get_regs_len = cas_get_regs_len,
.get_regs = cas_get_regs,
- .get_stats_count = cas_get_stats_count,
+ .get_sset_count = cas_get_sset_count,
.get_strings = cas_get_strings,
.get_ethtool_stats = cas_get_ethtool_stats,
};
break;
case SIOCSMIIREG: /* Write MII PHY register. */
- if (!capable(CAP_NET_ADMIN)) {
- rc = -EPERM;
- break;
- }
spin_lock_irqsave(&cp->lock, flags);
cas_mif_poll(cp, 0);
rc = cas_phy_write(cp, data->reg_num & 0x1f, data->val_in);
break;
default:
break;
- };
+ }
mutex_unlock(&cp->pm_mutex);
return rc;
}
+/* When this chip sits underneath an Intel 31154 bridge, it is the
+ * only subordinate device and we can tweak the bridge settings to
+ * reflect that fact.
+ */
+static void __devinit cas_program_bridge(struct pci_dev *cas_pdev)
+{
+ struct pci_dev *pdev = cas_pdev->bus->self;
+ u32 val;
+
+ if (!pdev)
+ return;
+
+ if (pdev->vendor != 0x8086 || pdev->device != 0x537c)
+ return;
+
+ /* Clear bit 10 (Bus Parking Control) in the Secondary
+ * Arbiter Control/Status Register which lives at offset
+ * 0x41. Using a 32-bit word read/modify/write at 0x40
+ * is much simpler so that's how we do this.
+ */
+ pci_read_config_dword(pdev, 0x40, &val);
+ val &= ~0x00040000;
+ pci_write_config_dword(pdev, 0x40, val);
+
+ /* Max out the Multi-Transaction Timer settings since
+ * Cassini is the only device present.
+ *
+ * The register is 16-bit and lives at 0x50. When the
+ * settings are enabled, it extends the GRANT# signal
+ * for a requestor after a transaction is complete. This
+ * allows the next request to run without first needing
+ * to negotiate the GRANT# signal back.
+ *
+ * Bits 12:10 define the grant duration:
+ *
+ * 1 -- 16 clocks
+ * 2 -- 32 clocks
+ * 3 -- 64 clocks
+ * 4 -- 128 clocks
+ * 5 -- 256 clocks
+ *
+ * All other values are illegal.
+ *
+ * Bits 09:00 define which REQ/GNT signal pairs get the
+ * GRANT# signal treatment. We set them all.
+ */
+ pci_write_config_word(pdev, 0x50, (5 << 10) | 0x3ff);
+
+ /* The Read Prefetch Policy register is 16-bit and sits at
+ * offset 0x52. It enables a "smart" pre-fetch policy. We
+ * enable it and max out all of the settings since only one
+ * device is sitting underneath and thus bandwidth sharing is
+ * not an issue.
+ *
+ * The register has several 3-bit fields, each of which indicates a
+ * multiplier applied to the base amount of prefetching the
+ * chip would do. These fields are at:
+ *
+ * 15:13 --- ReRead Primary Bus
+ * 12:10 --- FirstRead Primary Bus
+ * 09:07 --- ReRead Secondary Bus
+ * 06:04 --- FirstRead Secondary Bus
+ *
+ * Bits 03:00 control which REQ/GNT pairs the prefetch settings
+ * get enabled on. Bit 3 is a grouped enabler which controls
+ * all of the REQ/GNT pairs from [8:3]. Bits 2 to 0 control
+ * the individual REQ/GNT pairs [2:0].
+ */
+ pci_write_config_word(pdev, 0x52,
+ (0x7 << 13) |
+ (0x7 << 10) |
+ (0x7 << 7) |
+ (0x7 << 4) |
+ (0xf << 0));
+
+ /* Force cacheline size to 0x8 */
+ pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE, 0x08);
+
+ /* Force latency timer to maximum setting so Cassini can
+ * sit on the bus as long as it likes.
+ */
+ pci_write_config_byte(pdev, PCI_LATENCY_TIMER, 0xff);
+}
+
+static const struct net_device_ops cas_netdev_ops = {
+ .ndo_open = cas_open,
+ .ndo_stop = cas_close,
+ .ndo_start_xmit = cas_start_xmit,
+ .ndo_get_stats = cas_get_stats,
+ .ndo_set_multicast_list = cas_set_multicast,
+ .ndo_do_ioctl = cas_ioctl,
+ .ndo_tx_timeout = cas_tx_timeout,
+ .ndo_change_mtu = cas_change_mtu,
+ .ndo_set_mac_address = eth_mac_addr,
+ .ndo_validate_addr = eth_validate_addr,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ .ndo_poll_controller = cas_netpoll,
+#endif
+};
+
static int __devinit cas_init_one(struct pci_dev *pdev,
const struct pci_device_id *ent)
{
u8 orig_cacheline_size = 0, cas_cacheline_size = 0;
if (cas_version_printed++ == 0)
- printk(KERN_INFO "%s", version);
+ pr_info("%s", version);
err = pci_enable_device(pdev);
if (err) {
- dev_err(&pdev->dev, "Cannot enable PCI device, aborting.\n");
+ dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
return err;
}
if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
dev_err(&pdev->dev, "Cannot find proper PCI device "
- "base address, aborting.\n");
+ "base address, aborting\n");
err = -ENODEV;
goto err_out_disable_pdev;
}
dev = alloc_etherdev(sizeof(*cp));
if (!dev) {
- dev_err(&pdev->dev, "Etherdev alloc failed, aborting.\n");
+ dev_err(&pdev->dev, "Etherdev alloc failed, aborting\n");
err = -ENOMEM;
goto err_out_disable_pdev;
}
- SET_MODULE_OWNER(dev);
SET_NETDEV_DEV(dev, &pdev->dev);
err = pci_request_regions(pdev, dev->name);
if (err) {
- dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting.\n");
+ dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
goto err_out_free_netdev;
}
pci_set_master(pdev);
pci_cmd &= ~PCI_COMMAND_SERR;
pci_cmd |= PCI_COMMAND_PARITY;
pci_write_config_word(pdev, PCI_COMMAND, pci_cmd);
- pci_set_mwi(pdev);
+ if (pci_try_set_mwi(pdev))
+ pr_warning("Could not enable MWI for %s\n", pci_name(pdev));
+
+ cas_program_bridge(pdev);
+
/*
* On some architectures, the default cache line size set
- * by pci_set_mwi reduces perforamnce. We have to increase
+ * by pci_try_set_mwi reduces performance. We have to increase
* it for this case. To start, we'll print some configuration
* data.
*/
/* Configure DMA attributes. */
- if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
+ if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
pci_using_dac = 1;
err = pci_set_consistent_dma_mask(pdev,
- DMA_64BIT_MASK);
+ DMA_BIT_MASK(64));
if (err < 0) {
dev_err(&pdev->dev, "Unable to obtain 64-bit DMA "
"for consistent allocations\n");
}
} else {
- err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
+ err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
if (err) {
dev_err(&pdev->dev, "No usable DMA configuration, "
- "aborting.\n");
+ "aborting\n");
goto err_out_free_res;
}
pci_using_dac = 0;
INIT_WORK(&cp->reset_task, cas_reset_task);
/* Default link parameters */
- if (link_mode >= 0 && link_mode <= 6)
+ if (link_mode >= 0 && link_mode < 6)
cp->link_cntl = link_modes[link_mode];
else
cp->link_cntl = BMCR_ANENABLE;
/* give us access to cassini registers */
cp->regs = pci_iomap(pdev, 0, casreg_len);
- if (cp->regs == 0UL) {
- dev_err(&pdev->dev, "Cannot map device registers, aborting.\n");
+ if (!cp->regs) {
+ dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
goto err_out_free_res;
}
cp->casreg_len = casreg_len;
cas_reset(cp, 0);
if (cas_check_invariants(cp))
goto err_out_iounmap;
+ if (cp->cas_flags & CAS_FLAG_SATURN)
+ if (cas_saturn_firmware_init(cp))
+ goto err_out_iounmap;
cp->init_block = (struct cas_init_block *)
pci_alloc_consistent(pdev, sizeof(struct cas_init_block),
&cp->block_dvma);
if (!cp->init_block) {
- dev_err(&pdev->dev, "Cannot allocate init block, aborting.\n");
+ dev_err(&pdev->dev, "Cannot allocate init block, aborting\n");
goto err_out_iounmap;
}
for (i = 0; i < N_RX_FLOWS; i++)
skb_queue_head_init(&cp->rx_flows[i]);
- dev->open = cas_open;
- dev->stop = cas_close;
- dev->hard_start_xmit = cas_start_xmit;
- dev->get_stats = cas_get_stats;
- dev->set_multicast_list = cas_set_multicast;
- dev->do_ioctl = cas_ioctl;
+ dev->netdev_ops = &cas_netdev_ops;
dev->ethtool_ops = &cas_ethtool_ops;
- dev->tx_timeout = cas_tx_timeout;
dev->watchdog_timeo = CAS_TX_TIMEOUT;
- dev->change_mtu = cas_change_mtu;
+
#ifdef USE_NAPI
- dev->poll = cas_poll;
- dev->weight = 64;
-#endif
-#ifdef CONFIG_NET_POLL_CONTROLLER
- dev->poll_controller = cas_netpoll;
+ netif_napi_add(dev, &cp->napi, cas_poll, 64);
#endif
dev->irq = pdev->irq;
dev->dma = 0;
dev->features |= NETIF_F_HIGHDMA;
if (register_netdev(dev)) {
- dev_err(&pdev->dev, "Cannot register net device, aborting.\n");
+ dev_err(&pdev->dev, "Cannot register net device, aborting\n");
goto err_out_free_consistent;
}
i = readl(cp->regs + REG_BIM_CFG);
- printk(KERN_INFO "%s: Sun Cassini%s (%sbit/%sMHz PCI/%s) "
- "Ethernet[%d] ", dev->name,
- (cp->cas_flags & CAS_FLAG_REG_PLUS) ? "+" : "",
- (i & BIM_CFG_32BIT) ? "32" : "64",
- (i & BIM_CFG_66MHZ) ? "66" : "33",
- (cp->phy_type == CAS_PHY_SERDES) ? "Fi" : "Cu", pdev->irq);
-
- for (i = 0; i < 6; i++)
- printk("%2.2x%c", dev->dev_addr[i],
- i == 5 ? ' ' : ':');
- printk("\n");
+ netdev_info(dev, "Sun Cassini%s (%sbit/%sMHz PCI/%s) Ethernet[%d] %pM\n",
+ (cp->cas_flags & CAS_FLAG_REG_PLUS) ? "+" : "",
+ (i & BIM_CFG_32BIT) ? "32" : "64",
+ (i & BIM_CFG_66MHZ) ? "66" : "33",
+ (cp->phy_type == CAS_PHY_SERDES) ? "Fi" : "Cu", pdev->irq,
+ dev->dev_addr);
pci_set_drvdata(pdev, dev);
cp->hw_running = 1;
cp = netdev_priv(dev);
unregister_netdev(dev);
+ if (cp->fw_data)
+ vfree(cp->fw_data);
+
mutex_lock(&cp->pm_mutex);
flush_scheduled_work();
if (cp->hw_running)
struct net_device *dev = pci_get_drvdata(pdev);
struct cas *cp = netdev_priv(dev);
- printk(KERN_INFO "%s: resuming\n", dev->name);
+ netdev_info(dev, "resuming\n");
mutex_lock(&cp->pm_mutex);
cas_hard_reset(cp);