* by default, the selective clear mask is set up to process rx packets.
*/
-#include <linux/config.h>
-#include <linux/version.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/mii.h>
#include <linux/ip.h>
#include <linux/tcp.h>
+#include <linux/mutex.h>
#include <net/checksum.h>
static char version[] __devinitdata =
DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
+static int cassini_debug = -1; /* -1 == use CAS_DEF_MSG_ENABLE as value */
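+/* 0 (the default) keeps autoneg; other values index the link_modes[] table */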
+static int link_mode;
+
MODULE_AUTHOR("Adrian Sun (asun@darksunrising.com)");
MODULE_DESCRIPTION("Sun Cassini(+) ethernet driver");
MODULE_LICENSE("GPL");
-MODULE_PARM(cassini_debug, "i");
+module_param(cassini_debug, int, 0);
MODULE_PARM_DESC(cassini_debug, "Cassini bitmapped debugging message enable value");
-MODULE_PARM(link_mode, "i");
+module_param(link_mode, int, 0);
MODULE_PARM_DESC(link_mode, "default link mode");
/*
* Value in seconds, for user input.
*/
static int linkdown_timeout = DEFAULT_LINKDOWN_TIMEOUT;
-MODULE_PARM(linkdown_timeout, "i");
+module_param(linkdown_timeout, int, 0);
MODULE_PARM_DESC(linkdown_timeout,
"min reset interval in sec. for PCS linkdown issue; disabled if not positive");
static int link_transition_timeout;
-static int cassini_debug = -1; /* -1 == use CAS_DEF_MSG_ENABLE as value */
-static int link_mode;
static u16 link_modes[] __devinitdata = {
BMCR_ANENABLE, /* 0 : autoneg */
cas_disable_irq(cp, i);
}
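+/* RX pages handed to the stack as skb frags carry their own
+ * get_page() references, so the driver keeps a private use count to
+ * decide when a page may be recycled.  While the driver owns a page
+ * it is chained through cas_page_t and page->lru is unused, so the
+ * lru.next word can double as an atomic_t counter: cas_buffer_inc()
+ * mirrors each get_page(), and the recycling paths test
+ * cas_buffer_count() instead of page_count().
+ */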
+static inline void cas_buffer_init(cas_page_t *cp)
+{
+ struct page *page = cp->buffer;
+ atomic_set((atomic_t *)&page->lru.next, 1);
+}
+
+static inline int cas_buffer_count(cas_page_t *cp)
+{
+ struct page *page = cp->buffer;
+ return atomic_read((atomic_t *)&page->lru.next);
+}
+
+static inline void cas_buffer_inc(cas_page_t *cp)
+{
+ struct page *page = cp->buffer;
+ atomic_inc((atomic_t *)&page->lru.next);
+}
+
+static inline void cas_buffer_dec(cas_page_t *cp)
+{
+ struct page *page = cp->buffer;
+ atomic_dec((atomic_t *)&page->lru.next);
+}
+
static void cas_enable_irq(struct cas *cp, const int ring)
{
if (ring == 0) { /* all but TX_DONE */
{
pci_unmap_page(cp->pdev, page->dma_addr, cp->page_size,
PCI_DMA_FROMDEVICE);
+ cas_buffer_dec(page);
__free_pages(page->buffer, cp->page_order);
kfree(page);
return 0;
page->buffer = alloc_pages(flags, cp->page_order);
if (!page->buffer)
goto page_err;
+ cas_buffer_init(page);
page->dma_addr = pci_map_page(cp->pdev, page->buffer, 0,
cp->page_size, PCI_DMA_FROMDEVICE);
return page;
list_for_each_safe(elem, tmp, &list) {
cas_page_t *page = list_entry(elem, cas_page_t, list);
- if (page_count(page->buffer) > 1)
+ if (cas_buffer_count(page) > 1)
continue;
list_del(elem);
cas_page_t *page = cp->rx_pages[1][index];
cas_page_t *new;
- if (page_count(page->buffer) == 1)
+ if (cas_buffer_count(page) == 1)
return page;
new = cas_page_dequeue(cp);
cas_page_t **page1 = cp->rx_pages[1];
/* swap if buffer is in use */
- if (page_count(page0[index]->buffer) > 1) {
+ if (cas_buffer_count(page0[index]) > 1) {
cas_page_t *new = cas_page_spare(cp, index);
if (new) {
page1[index] = page0[index];
u64 compwb = le64_to_cpu(cp->init_block->tx_compwb);
#endif
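+ /* compwb is a u64: cast and print with %llx so the completion
+  * write-back value is not truncated on 32-bit builds */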
if (netif_msg_intr(cp))
- printk(KERN_DEBUG "%s: tx interrupt, status: 0x%x, %lx\n",
- cp->dev->name, status, compwb);
+ printk(KERN_DEBUG "%s: tx interrupt, status: 0x%x, %llx\n",
+ cp->dev->name, status, (unsigned long long)compwb);
/* process all the rings */
for (ring = 0; ring < N_TX_RINGS; ring++) {
#ifdef USE_TX_COMPWB
skb->len += hlen - swivel;
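+ /* the frag holds a page reference via get_page(); bump the
+  * driver's private use count too so RX recycling sees the page
+  * as still in flight */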
get_page(page->buffer);
+ cas_buffer_inc(page);
frag->page = page->buffer;
frag->page_offset = off;
frag->size = hlen - swivel;
frag++;
get_page(page->buffer);
+ cas_buffer_inc(page);
frag->page = page->buffer;
frag->page_offset = 0;
frag->size = hlen;
released = 0;
while (entry != last) {
/* make a new buffer if it's still in use */
- if (page_count(page[entry]->buffer) > 1) {
+ if (cas_buffer_count(page[entry]) > 1) {
cas_page_t *new = cas_page_dequeue(cp);
if (!new) {
/* let the timer know that we need to
*/
static int ring;
- skb = skb_padto(skb, cp->min_frame_size);
- if (!skb)
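+ /* skb_padto() now returns an error and frees the skb itself on
+  * failure, so there is no padded skb pointer to reassign */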
+ if (skb_padto(skb, cp->min_frame_size))
return 0;
/* XXX: we need some higher-level QoS hooks to steer packets to
spin_unlock(&cp->stat_lock[N_TX_RINGS]);
}
-/* Shut down the chip, must be called with pm_sem held. */
+/* Shut down the chip; must be called with pm_mutex held. */
static void cas_shutdown(struct cas *cp)
{
unsigned long flags;
int hw_was_up, err;
unsigned long flags;
- down(&cp->pm_sem);
+ mutex_lock(&cp->pm_mutex);
hw_was_up = cp->hw_running;
- /* The power-management semaphore protects the hw_running
+ /* The power-management mutex protects the hw_running
* etc. state so it is safe to do this bit without cp->lock
*/
if (!cp->hw_running) {
* mapping to expose them
*/
if (request_irq(cp->pdev->irq, cas_interrupt,
- SA_SHIRQ, dev->name, (void *) dev)) {
+ IRQF_SHARED, dev->name, (void *) dev)) {
printk(KERN_ERR "%s: failed to request irq !\n",
cp->dev->name);
err = -EAGAIN;
cas_unlock_all_restore(cp, flags);
netif_start_queue(dev);
- up(&cp->pm_sem);
+ mutex_unlock(&cp->pm_mutex);
return 0;
err_spare:
cas_free_rxds(cp);
err_tx_tiny:
cas_tx_tiny_free(cp);
- up(&cp->pm_sem);
+ mutex_unlock(&cp->pm_mutex);
return err;
}
struct cas *cp = netdev_priv(dev);
/* Make sure we don't get distracted by suspend/resume */
- down(&cp->pm_sem);
+ mutex_lock(&cp->pm_mutex);
netif_stop_queue(dev);
cas_spare_free(cp);
cas_free_rxds(cp);
cas_tx_tiny_free(cp);
- up(&cp->pm_sem);
+ mutex_unlock(&cp->pm_mutex);
return 0;
}
unsigned long flags;
int rc = -EOPNOTSUPP;
- /* Hold the PM semaphore while doing ioctl's or we may collide
+ /* Hold the PM mutex while doing ioctl's or we may collide
* with open/close and power management and oops.
*/
- down(&cp->pm_sem);
+ mutex_lock(&cp->pm_mutex);
switch (cmd) {
case SIOCGMIIPHY: /* Get address of MII PHY in use. */
data->phy_id = cp->phy_addr;
break;
- };
+ }
- up(&cp->pm_sem);
+ mutex_unlock(&cp->pm_mutex);
return rc;
}
const struct pci_device_id *ent)
{
static int cas_version_printed = 0;
- unsigned long casreg_base, casreg_len;
+ unsigned long casreg_len;
struct net_device *dev;
struct cas *cp;
int i, err, pci_using_dac;
err = pci_enable_device(pdev);
if (err) {
- printk(KERN_ERR PFX "Cannot enable PCI device, "
- "aborting.\n");
+ dev_err(&pdev->dev, "Cannot enable PCI device, aborting.\n");
return err;
}
if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
- printk(KERN_ERR PFX "Cannot find proper PCI device "
+ dev_err(&pdev->dev, "Cannot find proper PCI device "
"base address, aborting.\n");
err = -ENODEV;
goto err_out_disable_pdev;
dev = alloc_etherdev(sizeof(*cp));
if (!dev) {
- printk(KERN_ERR PFX "Etherdev alloc failed, aborting.\n");
+ dev_err(&pdev->dev, "Etherdev alloc failed, aborting.\n");
err = -ENOMEM;
goto err_out_disable_pdev;
}
err = pci_request_regions(pdev, dev->name);
if (err) {
- printk(KERN_ERR PFX "Cannot obtain PCI resources, "
- "aborting.\n");
+ dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting.\n");
goto err_out_free_netdev;
}
pci_set_master(pdev);
if (pci_write_config_byte(pdev,
PCI_CACHE_LINE_SIZE,
cas_cacheline_size)) {
- printk(KERN_ERR PFX "Could not set PCI cache "
+ dev_err(&pdev->dev, "Could not set PCI cache "
"line size\n");
goto err_write_cacheline;
}
err = pci_set_consistent_dma_mask(pdev,
DMA_64BIT_MASK);
if (err < 0) {
- printk(KERN_ERR PFX "Unable to obtain 64-bit DMA "
+ dev_err(&pdev->dev, "Unable to obtain 64-bit DMA "
"for consistent allocations\n");
goto err_out_free_res;
}
} else {
err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
if (err) {
- printk(KERN_ERR PFX "No usable DMA configuration, "
+ dev_err(&pdev->dev, "No usable DMA configuration, "
"aborting.\n");
goto err_out_free_res;
}
pci_using_dac = 0;
}
- casreg_base = pci_resource_start(pdev, 0);
casreg_len = pci_resource_len(pdev, 0);
cp = netdev_priv(dev);
spin_lock_init(&cp->tx_lock[i]);
}
spin_lock_init(&cp->stat_lock[N_TX_RINGS]);
- init_MUTEX(&cp->pm_sem);
+ mutex_init(&cp->pm_mutex);
init_timer(&cp->link_timer);
cp->link_timer.function = cas_link_timer;
cp->timer_ticks = 0;
/* give us access to cassini registers */
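+ /* map BAR 0 through pci_iomap(), which pairs with the
+  * pci_iounmap() calls in the error and removal paths */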
- cp->regs = ioremap(casreg_base, casreg_len);
+ cp->regs = pci_iomap(pdev, 0, casreg_len);
- if (cp->regs == 0UL) {
+ if (!cp->regs) {
- printk(KERN_ERR PFX "Cannot map device registers, "
- "aborting.\n");
+ dev_err(&pdev->dev, "Cannot map device registers, aborting.\n");
goto err_out_free_res;
}
cp->casreg_len = casreg_len;
pci_alloc_consistent(pdev, sizeof(struct cas_init_block),
&cp->block_dvma);
if (!cp->init_block) {
- printk(KERN_ERR PFX "Cannot allocate init block, "
- "aborting.\n");
+ dev_err(&pdev->dev, "Cannot allocate init block, aborting.\n");
goto err_out_iounmap;
}
dev->features |= NETIF_F_HIGHDMA;
if (register_netdev(dev)) {
- printk(KERN_ERR PFX "Cannot register net device, "
- "aborting.\n");
+ dev_err(&pdev->dev, "Cannot register net device, aborting.\n");
goto err_out_free_consistent;
}
cp->init_block, cp->block_dvma);
err_out_iounmap:
- down(&cp->pm_sem);
+ mutex_lock(&cp->pm_mutex);
if (cp->hw_running)
cas_shutdown(cp);
- up(&cp->pm_sem);
+ mutex_unlock(&cp->pm_mutex);
- iounmap(cp->regs);
+ pci_iounmap(pdev, cp->regs);
err_out_free_res:
cp = netdev_priv(dev);
unregister_netdev(dev);
- down(&cp->pm_sem);
+ mutex_lock(&cp->pm_mutex);
flush_scheduled_work();
if (cp->hw_running)
cas_shutdown(cp);
- up(&cp->pm_sem);
+ mutex_unlock(&cp->pm_mutex);
#if 1
if (cp->orig_cacheline_size) {
#endif
pci_free_consistent(pdev, sizeof(struct cas_init_block),
cp->init_block, cp->block_dvma);
- iounmap(cp->regs);
+ pci_iounmap(pdev, cp->regs);
free_netdev(dev);
pci_release_regions(pdev);
pci_disable_device(pdev);
struct cas *cp = netdev_priv(dev);
unsigned long flags;
- /* We hold the PM semaphore during entire driver
- * sleep time
- */
- down(&cp->pm_sem);
+ mutex_lock(&cp->pm_mutex);
/* If the driver is opened, we stop the DMA */
if (cp->opened) {
if (cp->hw_running)
cas_shutdown(cp);
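+ /* unlike the old pm_sem, the mutex is not held while the device
+  * is suspended; cas_resume() acquires it again */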
+ mutex_unlock(&cp->pm_mutex);
return 0;
}
printk(KERN_INFO "%s: resuming\n", dev->name);
+ mutex_lock(&cp->pm_mutex);
cas_hard_reset(cp);
if (cp->opened) {
unsigned long flags;
netif_device_attach(dev);
}
- up(&cp->pm_sem);
+ mutex_unlock(&cp->pm_mutex);
return 0;
}
#endif /* CONFIG_PM */