diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
index 4445810..d504e2b 100644
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@ -40,8 +40,10 @@
 #include 
 #include 
 #include 
+#include 
 #include 
+#include 
 #include 
 #include 
 #include 
@@ -51,7 +53,7 @@
 #include 
 #include 
 
-static struct ethtool_ops xennet_ethtool_ops;
+static const struct ethtool_ops xennet_ethtool_ops;
 
 struct netfront_cb {
 	struct page *page;
@@ -72,24 +74,14 @@ struct netfront_info {
 	struct list_head list;
 	struct net_device *netdev;
 
-	struct net_device_stats stats;
-
-	struct xen_netif_tx_front_ring tx;
-	struct xen_netif_rx_front_ring rx;
-
-	spinlock_t tx_lock;
-	spinlock_t rx_lock;
+	struct napi_struct napi;
 
 	unsigned int evtchn;
+	struct xenbus_device *xbdev;
 
-	/* Receive-ring batched refills. */
-#define RX_MIN_TARGET 8
-#define RX_DFL_MIN_TARGET 64
-#define RX_MAX_TARGET min_t(int, NET_RX_RING_SIZE, 256)
-	unsigned rx_min_target, rx_max_target, rx_target;
-	struct sk_buff_head rx_batch;
-
-	struct timer_list rx_refill_timer;
+	spinlock_t tx_lock;
+	struct xen_netif_tx_front_ring tx;
+	int tx_ring_ref;
 
 	/*
 	 * {tx,rx}_skbs store outstanding skbuffs. Free tx_skb entries
@@ -102,20 +94,29 @@ struct netfront_info {
 	 */
 	union skb_entry {
 		struct sk_buff *skb;
-		unsigned link;
+		unsigned long link;
 	} tx_skbs[NET_TX_RING_SIZE];
 	grant_ref_t gref_tx_head;
 	grant_ref_t grant_tx_ref[NET_TX_RING_SIZE];
 	unsigned tx_skb_freelist;
 
+	spinlock_t   rx_lock ____cacheline_aligned_in_smp;
+	struct xen_netif_rx_front_ring rx;
+	int rx_ring_ref;
+
+	/* Receive-ring batched refills. */
+#define RX_MIN_TARGET 8
+#define RX_DFL_MIN_TARGET 64
+#define RX_MAX_TARGET min_t(int, NET_RX_RING_SIZE, 256)
+	unsigned rx_min_target, rx_max_target, rx_target;
+	struct sk_buff_head rx_batch;
+
+	struct timer_list rx_refill_timer;
+
 	struct sk_buff *rx_skbs[NET_RX_RING_SIZE];
 	grant_ref_t gref_rx_head;
 	grant_ref_t grant_rx_ref[NET_RX_RING_SIZE];
 
-	struct xenbus_device *xbdev;
-	int tx_ring_ref;
-	int rx_ring_ref;
-
 	unsigned long rx_pfn_array[NET_RX_RING_SIZE];
 	struct multicall_entry rx_mcl[NET_RX_RING_SIZE+1];
 	struct mmu_update rx_mmu[NET_RX_RING_SIZE];
@@ -126,6 +127,17 @@ struct netfront_rx_info {
 	struct xen_netif_extra_info extras[XEN_NETIF_EXTRA_TYPE_MAX - 1];
 };
 
+static void skb_entry_set_link(union skb_entry *list, unsigned short id)
+{
+	list->link = id;
+}
+
+static int skb_entry_is_link(const union skb_entry *list)
+{
+	BUILD_BUG_ON(sizeof(list->skb) != sizeof(list->link));
+	return ((unsigned long)list->skb < PAGE_OFFSET);
+}
+
 /*
  * Access macros for acquiring/freeing slots in tx_skbs[].
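  *
  * A slot in tx_skbs[] holds either a live skb pointer or a small
  * integer that chains free slots together.  Kernel pointers always sit
  * at or above PAGE_OFFSET, so any value below PAGE_OFFSET can safely
  * be re-read as a freelist index; skb_entry_is_link() above relies on
  * that, and widening 'link' to unsigned long keeps both views of the
  * union the same size (enforced by its BUILD_BUG_ON).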
  */
@@ -133,7 +145,7 @@ struct netfront_rx_info {
 
 static void add_id_to_freelist(unsigned *head, union skb_entry *list,
 			       unsigned short id)
 {
-	list[id].link = *head;
+	skb_entry_set_link(&list[id], *head);
 	*head = id;
 }
@@ -185,7 +197,8 @@ static int xennet_can_sg(struct net_device *dev)
 static void rx_refill_timeout(unsigned long data)
 {
 	struct net_device *dev = (struct net_device *)data;
-	netif_rx_schedule(dev);
+	struct netfront_info *np = netdev_priv(dev);
+	napi_schedule(&np->napi);
 }
 
 static int netfront_tx_slot_available(struct netfront_info *np)
@@ -212,11 +225,9 @@ static void xennet_alloc_rx_buffers(struct net_device *dev)
 	struct page *page;
 	int i, batch_target, notify;
 	RING_IDX req_prod = np->rx.req_prod_pvt;
-	struct xen_memory_reservation reservation;
 	grant_ref_t ref;
 	unsigned long pfn;
 	void *vaddr;
-	int nr_flips;
 	struct xen_netif_rx_request *req;
 
 	if (unlikely(!netif_carrier_ok(dev)))
@@ -230,11 +241,14 @@ static void xennet_alloc_rx_buffers(struct net_device *dev)
 	 */
 	batch_target = np->rx_target - (req_prod - np->rx.rsp_cons);
 	for (i = skb_queue_len(&np->rx_batch); i < batch_target; i++) {
-		skb = __netdev_alloc_skb(dev, RX_COPY_THRESHOLD,
+		skb = __netdev_alloc_skb(dev, RX_COPY_THRESHOLD + NET_IP_ALIGN,
 					 GFP_ATOMIC | __GFP_NOWARN);
 		if (unlikely(!skb))
 			goto no_skb;
 
+		/* Align ip header to a 16-byte boundary */
+		skb_reserve(skb, NET_IP_ALIGN);
+
 		page = alloc_page(GFP_ATOMIC | __GFP_NOWARN);
 		if (!page) {
 			kfree_skb(skb);
@@ -266,7 +280,7 @@ no_skb:
 		np->rx_target = np->rx_max_target;
 
  refill:
-	for (nr_flips = i = 0; ; i++) {
+	for (i = 0; ; i++) {
 		skb = __skb_dequeue(&np->rx_batch);
 		if (skb == NULL)
 			break;
@@ -295,38 +309,7 @@ no_skb:
 		req->gref = ref;
 	}
 
-	if (nr_flips != 0) {
-		reservation.extent_start = np->rx_pfn_array;
-		reservation.nr_extents = nr_flips;
-		reservation.extent_order = 0;
-		reservation.address_bits = 0;
-		reservation.domid = DOMID_SELF;
-
-		if (!xen_feature(XENFEAT_auto_translated_physmap)) {
-			/* After all PTEs have been zapped, flush the TLB. */
-			np->rx_mcl[i-1].args[MULTI_UVMFLAGS_INDEX] =
-				UVMF_TLB_FLUSH|UVMF_ALL;
-
-			/* Give away a batch of pages. */
-			np->rx_mcl[i].op = __HYPERVISOR_memory_op;
-			np->rx_mcl[i].args[0] = XENMEM_decrease_reservation;
-			np->rx_mcl[i].args[1] = (unsigned long)&reservation;
-
-			/* Zap PTEs and give away pages in one big
-			 * multicall. */
-			(void)HYPERVISOR_multicall(np->rx_mcl, i+1);
-
-			/* Check return status of HYPERVISOR_memory_op(). */
-			if (unlikely(np->rx_mcl[i].result != i))
-				panic("Unable to reduce memory reservation\n");
-		} else {
-			if (HYPERVISOR_memory_op(XENMEM_decrease_reservation,
-						 &reservation) != i)
-				panic("Unable to reduce memory reservation\n");
-		}
-	} else {
-		wmb();		/* barrier so backend sees requests */
-	}
+	wmb();		/* barrier so backend sees requests */
 
 	/* Above is a suitable barrier to ensure backend will see requests.
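 	 * The request slots are fully written first, and the wmb() orders
 	 * those stores ahead of the store that publishes the new producer
 	 * index just below, so the backend can never observe an index that
 	 * points at a half-initialised request.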
 	 */
 	np->rx.req_prod_pvt = req_prod + i;
@@ -340,18 +323,18 @@
 static int xennet_open(struct net_device *dev)
 {
 	struct netfront_info *np = netdev_priv(dev);
 
-	memset(&np->stats, 0, sizeof(np->stats));
+	napi_enable(&np->napi);
 
 	spin_lock_bh(&np->rx_lock);
 	if (netif_carrier_ok(dev)) {
 		xennet_alloc_rx_buffers(dev);
 		np->rx.sring->rsp_event = np->rx.rsp_cons + 1;
 		if (RING_HAS_UNCONSUMED_RESPONSES(&np->rx))
-			netif_rx_schedule(dev);
+			napi_schedule(&np->napi);
 	}
 	spin_unlock_bh(&np->rx_lock);
 
-	xennet_maybe_wake_tx(dev);
+	netif_start_queue(dev);
 
 	return 0;
 }
@@ -493,7 +476,7 @@ static int xennet_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	unsigned int offset = offset_in_page(data);
 	unsigned int len = skb_headlen(skb);
 
-	frags += (offset + len + PAGE_SIZE - 1) / PAGE_SIZE;
+	frags += DIV_ROUND_UP(offset + len, PAGE_SIZE);
 	if (unlikely(frags > MAX_SKB_FRAGS + 1)) {
 		printk(KERN_ALERT "xennet: skb rides the rocket: %d frags\n",
 		       frags);
@@ -566,8 +549,8 @@ static int xennet_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	if (notify)
 		notify_remote_via_irq(np->netdev->irq);
 
-	np->stats.tx_bytes += skb->len;
-	np->stats.tx_packets++;
+	dev->stats.tx_bytes += skb->len;
+	dev->stats.tx_packets++;
 
 	/* Note: It is not safe to access skb after xennet_tx_buf_gc()! */
 	xennet_tx_buf_gc(dev);
@@ -577,27 +560,22 @@ static int xennet_start_xmit(struct sk_buff *skb, struct net_device *dev)
 
 	spin_unlock_irq(&np->tx_lock);
 
-	return 0;
+	return NETDEV_TX_OK;
 
  drop:
-	np->stats.tx_dropped++;
+	dev->stats.tx_dropped++;
 	dev_kfree_skb(skb);
-	return 0;
+	return NETDEV_TX_OK;
 }
 
 static int xennet_close(struct net_device *dev)
 {
 	struct netfront_info *np = netdev_priv(dev);
 	netif_stop_queue(np->netdev);
+	napi_disable(&np->napi);
 	return 0;
 }
 
-static struct net_device_stats *xennet_get_stats(struct net_device *dev)
-{
-	struct netfront_info *np = netdev_priv(dev);
-	return &np->stats;
-}
-
 static void xennet_move_rx_slot(struct netfront_info *np, struct sk_buff *skb,
 				grant_ref_t ref)
 {
@@ -832,9 +810,8 @@ out:
 }
 
 static int handle_incoming_queue(struct net_device *dev,
-				struct sk_buff_head *rxq)
+				 struct sk_buff_head *rxq)
 {
-	struct netfront_info *np = netdev_priv(dev);
 	int packets_dropped = 0;
 	struct sk_buff *skb;
@@ -856,31 +833,31 @@ static int handle_incoming_queue(struct net_device *dev,
 			if (skb_checksum_setup(skb)) {
 				kfree_skb(skb);
 				packets_dropped++;
-				np->stats.rx_errors++;
+				dev->stats.rx_errors++;
 				continue;
 			}
 		}
 
-		np->stats.rx_packets++;
-		np->stats.rx_bytes += skb->len;
+		dev->stats.rx_packets++;
+		dev->stats.rx_bytes += skb->len;
 
 		/* Pass it up.
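 		 * (this runs in softirq context from xennet_poll(), after
 		 * the checksum fixup above, so netif_receive_skb() is the
 		 * right entry point for handing the skb to the stack)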
 		 */
 		netif_receive_skb(skb);
-		dev->last_rx = jiffies;
 	}
 
 	return packets_dropped;
 }
 
-static int xennet_poll(struct net_device *dev, int *pbudget)
+static int xennet_poll(struct napi_struct *napi, int budget)
 {
-	struct netfront_info *np = netdev_priv(dev);
+	struct netfront_info *np = container_of(napi, struct netfront_info, napi);
+	struct net_device *dev = np->netdev;
 	struct sk_buff *skb;
 	struct netfront_rx_info rinfo;
 	struct xen_netif_rx_response *rx = &rinfo.rx;
 	struct xen_netif_extra_info *extras = rinfo.extras;
 	RING_IDX i, rp;
-	int work_done, budget, more_to_do = 1;
+	int work_done;
 	struct sk_buff_head rxq;
 	struct sk_buff_head errq;
 	struct sk_buff_head tmpq;
@@ -890,18 +867,10 @@ static int xennet_poll(struct net_device *dev, int *pbudget)
 
 	spin_lock(&np->rx_lock);
 
-	if (unlikely(!netif_carrier_ok(dev))) {
-		spin_unlock(&np->rx_lock);
-		return 0;
-	}
-
 	skb_queue_head_init(&rxq);
 	skb_queue_head_init(&errq);
 	skb_queue_head_init(&tmpq);
 
-	budget = *pbudget;
-	if (budget > dev->quota)
-		budget = dev->quota;
-
 	rp = np->rx.sring->rsp_prod;
 	rmb(); /* Ensure we see queued responses up to 'rp'. */
@@ -917,7 +886,7 @@
 err:
 			while ((skb = __skb_dequeue(&tmpq)))
 				__skb_queue_tail(&errq, skb);
-			np->stats.rx_errors++;
+			dev->stats.rx_errors++;
 			i = np->rx.rsp_cons;
 			continue;
 		}
@@ -992,8 +961,7 @@ err:
 		work_done++;
 	}
 
-	while ((skb = __skb_dequeue(&errq)))
-		kfree_skb(skb);
+	__skb_queue_purge(&errq);
 
 	work_done -= handle_incoming_queue(dev, &rxq);
@@ -1006,22 +974,21 @@ err:
 
 	xennet_alloc_rx_buffers(dev);
 
-	*pbudget   -= work_done;
-	dev->quota -= work_done;
-
 	if (work_done < budget) {
+		int more_to_do = 0;
+
 		local_irq_save(flags);
 
 		RING_FINAL_CHECK_FOR_RESPONSES(&np->rx, more_to_do);
 		if (!more_to_do)
-			__netif_rx_complete(dev);
+			__napi_complete(napi);
 
 		local_irq_restore(flags);
 	}
 
 	spin_unlock(&np->rx_lock);
 
-	return more_to_do;
+	return work_done;
 }
 
 static int xennet_change_mtu(struct net_device *dev, int mtu)
@@ -1041,7 +1008,7 @@ static void xennet_release_tx_bufs(struct netfront_info *np)
 
 	for (i = 0; i < NET_TX_RING_SIZE; i++) {
 		/* Skip over entries which are actually freelist references */
-		if ((unsigned long)np->tx_skbs[i].skb < PAGE_OFFSET)
+		if (skb_entry_is_link(&np->tx_skbs[i]))
 			continue;
 
 		skb = np->tx_skbs[i].skb;
@@ -1120,14 +1087,13 @@ static void xennet_release_rx_bufs(struct netfront_info *np)
 		if (!xen_feature(XENFEAT_auto_translated_physmap)) {
 			/* Do all the remapping work and M2P updates. */
 			MULTI_mmu_update(mcl, np->rx_mmu, mmu - np->rx_mmu,
-					 0, DOMID_SELF);
+					 NULL, DOMID_SELF);
 			mcl++;
 			HYPERVISOR_multicall(np->rx_mcl, mcl - np->rx_mcl);
 		}
 	}
 
-	while ((skb = __skb_dequeue(&free_list)) != NULL)
-		dev_kfree_skb(skb);
+	__skb_queue_purge(&free_list);
 
 	spin_unlock_bh(&np->rx_lock);
 }
@@ -1141,6 +1107,16 @@ static void xennet_uninit(struct net_device *dev)
 	gnttab_free_grant_references(np->gref_rx_head);
 }
 
+static const struct net_device_ops xennet_netdev_ops = {
+	.ndo_open            = xennet_open,
+	.ndo_uninit          = xennet_uninit,
+	.ndo_stop            = xennet_close,
+	.ndo_start_xmit      = xennet_start_xmit,
+	.ndo_change_mtu      = xennet_change_mtu,
+	.ndo_set_mac_address = eth_mac_addr,
+	.ndo_validate_addr   = eth_validate_addr,
+};
+
 static struct net_device * __devinit xennet_create_dev(struct xenbus_device *dev)
 {
 	int i, err;
@@ -1172,7 +1148,7 @@ static struct net_device * __devinit xennet_create_dev(struct xenbus_device *dev
 
 	/* Initialise tx_skbs as a free chain containing every entry.
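 	 * After the loop below, the chain reads 0 -> 1 -> ... ->
 	 * NET_TX_RING_SIZE, with tx_skb_freelist as its head and the
 	 * out-of-range index NET_TX_RING_SIZE acting as the terminator.
 	 * Every link value stays below PAGE_OFFSET, which is what lets
 	 * skb_entry_is_link() tell a free slot from a live skb pointer.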
 	 */
 	np->tx_skb_freelist = 0;
 	for (i = 0; i < NET_TX_RING_SIZE; i++) {
-		np->tx_skbs[i].link = i+1;
+		skb_entry_set_link(&np->tx_skbs[i], i+1);
 		np->grant_tx_ref[i] = GRANT_INVALID_REF;
 	}
@@ -1197,18 +1173,12 @@ static struct net_device * __devinit xennet_create_dev(struct xenbus_device *dev
 		goto exit_free_tx;
 	}
 
-	netdev->open            = xennet_open;
-	netdev->hard_start_xmit = xennet_start_xmit;
-	netdev->stop            = xennet_close;
-	netdev->get_stats       = xennet_get_stats;
-	netdev->poll            = xennet_poll;
-	netdev->uninit          = xennet_uninit;
-	netdev->change_mtu      = xennet_change_mtu;
-	netdev->weight          = 64;
+	netdev->netdev_ops      = &xennet_netdev_ops;
+
+	netif_napi_add(netdev, &np->napi, xennet_poll, 64);
 	netdev->features        = NETIF_F_IP_CSUM;
 
 	SET_ETHTOOL_OPS(netdev, &xennet_ethtool_ops);
-	SET_MODULE_OWNER(netdev);
 	SET_NETDEV_DEV(netdev, &dev->dev);
 
 	np->netdev = netdev;
@@ -1244,7 +1214,7 @@ static int __devinit netfront_probe(struct xenbus_device *dev,
 	}
 
 	info = netdev_priv(netdev);
-	dev->dev.driver_data = info;
+	dev_set_drvdata(&dev->dev, info);
 
 	err = register_netdev(info->netdev);
 	if (err) {
@@ -1265,7 +1235,7 @@ static int __devinit netfront_probe(struct xenbus_device *dev,
 
  fail:
 	free_netdev(netdev);
-	dev->dev.driver_data = NULL;
+	dev_set_drvdata(&dev->dev, NULL);
 	return err;
 }
@@ -1307,7 +1277,7 @@ static void xennet_disconnect_backend(struct netfront_info *info)
  */
 static int netfront_resume(struct xenbus_device *dev)
 {
-	struct netfront_info *info = dev->dev.driver_data;
+	struct netfront_info *info = dev_get_drvdata(&dev->dev);
 
 	dev_dbg(&dev->dev, "%s\n", dev->nodename);
@@ -1349,7 +1319,7 @@ static irqreturn_t xennet_interrupt(int irq, void *dev_id)
 		xennet_tx_buf_gc(dev);
 		/* Under tx_lock: protects access to rx shared-ring indexes. */
 		if (RING_HAS_UNCONSUMED_RESPONSES(&np->rx))
-			netif_rx_schedule(dev);
+			napi_schedule(&np->napi);
 	}
 
 	spin_unlock_irqrestore(&np->tx_lock, flags);
@@ -1376,7 +1346,7 @@ static int setup_netfront(struct xenbus_device *dev, struct netfront_info *info)
 		goto fail;
 	}
 
-	txs = (struct xen_netif_tx_sring *)get_zeroed_page(GFP_KERNEL);
+	txs = (struct xen_netif_tx_sring *)get_zeroed_page(GFP_NOIO | __GFP_HIGH);
 	if (!txs) {
 		err = -ENOMEM;
 		xenbus_dev_fatal(dev, err, "allocating tx ring page");
@@ -1392,7 +1362,7 @@ static int setup_netfront(struct xenbus_device *dev, struct netfront_info *info)
 	}
 	info->tx_ring_ref = err;
 
-	rxs = (struct xen_netif_rx_sring *)get_zeroed_page(GFP_KERNEL);
+	rxs = (struct xen_netif_rx_sring *)get_zeroed_page(GFP_NOIO | __GFP_HIGH);
 	if (!rxs) {
 		err = -ENOMEM;
 		xenbus_dev_fatal(dev, err, "allocating rx ring page");
 	}
@@ -1543,7 +1513,7 @@ static int xennet_set_tso(struct net_device *dev, u32 data)
 static void xennet_set_features(struct net_device *dev)
 {
 	/* Turn off all GSO bits except ROBUST.
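 	 * NETIF_F_GSO_MASK covers only the GSO-type feature bits, so the
 	 * replacement expression below clears exactly those.  The old
 	 * mask, (1 << NETIF_F_GSO_SHIFT) - 1, kept just bits 0..15 and
 	 * therefore also wiped every non-GSO feature flag above bit 15.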
 	 */
-	dev->features &= (1 << NETIF_F_GSO_SHIFT) - 1;
+	dev->features &= ~NETIF_F_GSO_MASK;
 	dev->features |= NETIF_F_GSO_ROBUST;
 
 	xennet_set_sg(dev, 0);
@@ -1571,7 +1541,7 @@ static int xennet_connect(struct net_device *dev)
 
 	if (!feature_rx_copy) {
 		dev_info(&dev->dev,
-			 "backend does not support copying recieve path");
+			 "backend does not support copying receive path\n");
 		return -ENODEV;
 	}
@@ -1632,7 +1602,7 @@ static int xennet_connect(struct net_device *dev)
 static void backend_changed(struct xenbus_device *dev,
 			    enum xenbus_state backend_state)
 {
-	struct netfront_info *np = dev->dev.driver_data;
+	struct netfront_info *np = dev_get_drvdata(&dev->dev);
 	struct net_device *netdev = np->netdev;
 
 	dev_dbg(&dev->dev, "%s\n", xenbus_strstate(backend_state));
@@ -1659,13 +1629,10 @@ static void backend_changed(struct xenbus_device *dev,
 	}
 }
 
-static struct ethtool_ops xennet_ethtool_ops =
+static const struct ethtool_ops xennet_ethtool_ops =
 {
-	.get_tx_csum = ethtool_op_get_tx_csum,
 	.set_tx_csum = ethtool_op_set_tx_csum,
-	.get_sg = ethtool_op_get_sg,
 	.set_sg = xennet_set_sg,
-	.get_tso = ethtool_op_get_tso,
 	.set_tso = xennet_set_tso,
 	.get_link = ethtool_op_get_link,
 };
@@ -1809,7 +1776,7 @@ static struct xenbus_device_id netfront_ids[] = {
 
 static int __devexit xennet_remove(struct xenbus_device *dev)
 {
-	struct netfront_info *info = dev->dev.driver_data;
+	struct netfront_info *info = dev_get_drvdata(&dev->dev);
 
 	dev_dbg(&dev->dev, "%s\n", dev->nodename);
@@ -1826,7 +1793,7 @@ static int __devexit xennet_remove(struct xenbus_device *dev)
 	return 0;
 }
 
-static struct xenbus_driver netfront = {
+static struct xenbus_driver netfront_driver = {
 	.name = "vif",
 	.owner = THIS_MODULE,
 	.ids = netfront_ids,
@@ -1838,27 +1805,29 @@
 static int __init netif_init(void)
 {
-	if (!is_running_on_xen())
+	if (!xen_domain())
 		return -ENODEV;
 
-	if (is_initial_xendomain())
+	if (xen_initial_domain())
 		return 0;
 
 	printk(KERN_INFO "Initialising Xen virtual ethernet driver.\n");
 
-	return xenbus_register_frontend(&netfront);
+	return xenbus_register_frontend(&netfront_driver);
 }
 module_init(netif_init);
 
 static void __exit netif_exit(void)
 {
-	if (is_initial_xendomain())
+	if (xen_initial_domain())
 		return;
 
-	return xenbus_unregister_driver(&netfront);
+	xenbus_unregister_driver(&netfront_driver);
 }
 module_exit(netif_exit);
 
 MODULE_DESCRIPTION("Xen virtual network device frontend");
 MODULE_LICENSE("GPL");
+MODULE_ALIAS("xen:vif");
+MODULE_ALIAS("xennet");
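
The core of this diff is the conversion from the old net_device-embedded polling hooks (netdev->poll, dev->quota, netif_rx_schedule()) to the napi_struct API. A minimal sketch of that pattern follows, using the same kernel calls the diff adopts; the mydrv_* names and the stubbed-out receive loop are illustrative stand-ins, not part of this driver:

#include <linux/interrupt.h>
#include <linux/netdevice.h>

struct mydrv_priv {
	struct napi_struct napi;
	struct net_device *netdev;
};

/* Interrupt handler: defer all packet processing to softirq context,
 * just as the xennet_interrupt() change above does. */
static irqreturn_t mydrv_interrupt(int irq, void *dev_id)
{
	struct mydrv_priv *priv = dev_id;

	napi_schedule(&priv->napi);
	return IRQ_HANDLED;
}

/* Poll callback: the budget arrives as an argument instead of being
 * read from dev->quota, and the return value is the work done. */
static int mydrv_poll(struct napi_struct *napi, int budget)
{
	struct mydrv_priv *priv = container_of(napi, struct mydrv_priv, napi);
	int work_done = 0;

	/* ... dequeue up to 'budget' packets from priv->netdev's rings
	 * here, handing each one to the stack with netif_receive_skb() ... */

	if (work_done < budget)
		napi_complete(napi);	/* ring drained: stop being polled */

	return work_done;
}

/* open()/stop() bracket the NAPI instance exactly as xennet_open()
 * and xennet_close() do above. */
static int mydrv_open(struct net_device *dev)
{
	struct mydrv_priv *priv = netdev_priv(dev);

	napi_enable(&priv->napi);
	netif_start_queue(dev);
	return 0;
}

Registration happens once at device-creation time, netif_napi_add(netdev, &priv->napi, mydrv_poll, 64), where the weight of 64 carries over the old netdev->weight value. Note that xennet_poll() itself uses the stricter __napi_complete() under local_irq_save(), because a shared-ring response can race in between RING_FINAL_CHECK_FOR_RESPONSES() and completion; the plain napi_complete() shown here suffices for simpler devices.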