{
struct net_device *dev = (struct net_device *)data;
struct netfront_info *np = netdev_priv(dev);
- netif_rx_schedule(dev, &np->napi);
+ netif_rx_schedule(&np->napi);
}
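Note: with the single-argument NAPI API, struct napi_struct carries a back-pointer to its net_device, so the scheduling and completion helpers no longer take dev. A minimal sketch of an interrupt handler under the new signature (the handler name and dev_id wiring are illustrative, not from this patch):

static irqreturn_t example_rx_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct netfront_info *np = netdev_priv(dev);

	/* Defer the actual receive work to the NAPI poll routine. */
	if (RING_HAS_UNCONSUMED_RESPONSES(&np->rx))
		netif_rx_schedule(&np->napi);

	return IRQ_HANDLED;
}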
static int netfront_tx_slot_available(struct netfront_info *np)
*/
batch_target = np->rx_target - (req_prod - np->rx.rsp_cons);
for (i = skb_queue_len(&np->rx_batch); i < batch_target; i++) {
- skb = __netdev_alloc_skb(dev, RX_COPY_THRESHOLD,
+ skb = __netdev_alloc_skb(dev, RX_COPY_THRESHOLD + NET_IP_ALIGN,
GFP_ATOMIC | __GFP_NOWARN);
if (unlikely(!skb))
goto no_skb;
+ /* Align the IP header to a 16-byte boundary */
+ skb_reserve(skb, NET_IP_ALIGN);
+
page = alloc_page(GFP_ATOMIC | __GFP_NOWARN);
if (!page) {
kfree_skb(skb);
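The alignment change follows the standard receive-buffer idiom: NET_IP_ALIGN (normally 2) is added to the allocation so that, after skb_reserve(), the 14-byte Ethernet header ends at offset 16 and the IP header starts aligned, while the full RX_COPY_THRESHOLD of room is preserved. A sketch of the idiom in isolation:

	skb = __netdev_alloc_skb(dev, RX_COPY_THRESHOLD + NET_IP_ALIGN,
				 GFP_ATOMIC | __GFP_NOWARN);
	if (skb)
		/* Consume the extra headroom so the packet data
		 * begins at the aligned offset. */
		skb_reserve(skb, NET_IP_ALIGN);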
xennet_alloc_rx_buffers(dev);
np->rx.sring->rsp_event = np->rx.rsp_cons + 1;
if (RING_HAS_UNCONSUMED_RESPONSES(&np->rx))
- netif_rx_schedule(dev, &np->napi);
+ netif_rx_schedule(&np->napi);
}
spin_unlock_bh(&np->rx_lock);
/* Pass it up. */
netif_receive_skb(skb);
- dev->last_rx = jiffies;
}
return packets_dropped;
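The dropped dev->last_rx update reflects the contemporaneous guidance that drivers should stop maintaining this field themselves (bonding, its only consumer, updates it where needed). The delivery tail then reduces to something like the following sketch; the queue argument is assumed, and the error paths that actually count drops are elided here:

static int example_deliver(struct sk_buff_head *rxq)
{
	struct sk_buff *skb;
	int packets_dropped = 0;

	while ((skb = __skb_dequeue(rxq)) != NULL) {
		/* Hand the packet to the stack; the driver no longer
		 * touches dev->last_rx on this path. */
		netif_receive_skb(skb);
	}

	return packets_dropped;
}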
RING_FINAL_CHECK_FOR_RESPONSES(&np->rx, more_to_do);
if (!more_to_do)
- __netif_rx_complete(dev, napi);
+ __netif_rx_complete(napi);
local_irq_restore(flags);
}
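For context, this completion call sits at the tail of the NAPI poll routine and pairs with the scheduling above. The overall shape, in a sketch assuming a xennet_poll-style function (locals and work accounting are illustrative):

static int example_poll(struct napi_struct *napi, int budget)
{
	struct netfront_info *np = container_of(napi, struct netfront_info, napi);
	int work_done = 0;
	unsigned long flags;
	int more_to_do;

	/* ... consume up to budget responses from np->rx here ... */

	if (work_done < budget) {
		local_irq_save(flags);
		RING_FINAL_CHECK_FOR_RESPONSES(&np->rx, more_to_do);
		if (!more_to_do)
			/* Leave polling mode; a later interrupt will
			 * reschedule us via netif_rx_schedule(). */
			__netif_rx_complete(napi);
		local_irq_restore(flags);
	}

	return work_done;
}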
xennet_tx_buf_gc(dev);
/* Under tx_lock: protects access to rx shared-ring indexes. */
if (RING_HAS_UNCONSUMED_RESPONSES(&np->rx))
- netif_rx_schedule(dev, &np->napi);
+ netif_rx_schedule(&np->napi);
}
spin_unlock_irqrestore(&np->tx_lock, flags);
return 0;
}
-static struct xenbus_driver netfront = {
+static struct xenbus_driver netfront_driver = {
.name = "vif",
.owner = THIS_MODULE,
.ids = netfront_ids,
printk(KERN_INFO "Initialising Xen virtual ethernet driver.\n");
- return xenbus_register_frontend(&netfront);
+ return xenbus_register_frontend(&netfront_driver);
}
module_init(netif_init);
if (xen_initial_domain())
return;
- xenbus_unregister_driver(&netfront);
+ xenbus_unregister_driver(&netfront_driver);
}
module_exit(netif_exit);
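The rename itself is mechanical, but every reference must move in lockstep. A condensed skeleton of the registration/unregistration pairing after the change (the probe/remove callbacks are elided; the xen_initial_domain() guard is kept from the excerpt):

static struct xenbus_driver netfront_driver = {
	.name = "vif",
	.owner = THIS_MODULE,
	.ids = netfront_ids,
	/* .probe / .remove / .resume callbacks elided */
};

static int __init netif_init(void)
{
	printk(KERN_INFO "Initialising Xen virtual ethernet driver.\n");
	return xenbus_register_frontend(&netfront_driver);
}
module_init(netif_init);

static void __exit netif_exit(void)
{
	if (xen_initial_domain())
		return;
	xenbus_unregister_driver(&netfront_driver);
}
module_exit(netif_exit);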