diff --git a/net/core/netpoll.c b/net/core/netpoll.c
index b04d643..9675f31 100644
--- a/net/core/netpoll.c
+++ b/net/core/netpoll.c
@@ -24,6 +24,7 @@
 #include <net/tcp.h>
 #include <net/udp.h>
 #include <asm/unaligned.h>
+#include <trace/events/napi.h>
 
 /*
  * We maintain a small pool of fully-sized skbs, to make sure the
@@ -58,25 +59,29 @@ static void queue_process(struct work_struct *work)
 
 	while ((skb = skb_dequeue(&npinfo->txq))) {
 		struct net_device *dev = skb->dev;
+		const struct net_device_ops *ops = dev->netdev_ops;
+		struct netdev_queue *txq;
 
 		if (!netif_device_present(dev) || !netif_running(dev)) {
 			__kfree_skb(skb);
 			continue;
 		}
 
+		txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
+
 		local_irq_save(flags);
-		netif_tx_lock(dev);
-		if ((netif_queue_stopped(dev) ||
-		     netif_subqueue_stopped(dev, skb)) ||
-		     dev->hard_start_xmit(skb, dev) != NETDEV_TX_OK) {
+		__netif_tx_lock(txq, smp_processor_id());
+		if (netif_tx_queue_stopped(txq) ||
+		    netif_tx_queue_frozen(txq) ||
+		    ops->ndo_start_xmit(skb, dev) != NETDEV_TX_OK) {
 			skb_queue_head(&npinfo->txq, skb);
-			netif_tx_unlock(dev);
+			__netif_tx_unlock(txq);
 			local_irq_restore(flags);
 
 			schedule_delayed_work(&npinfo->tx_work, HZ/10);
 			return;
 		}
-		netif_tx_unlock(dev);
+		__netif_tx_unlock(txq);
 		local_irq_restore(flags);
 	}
 }
@@ -130,9 +135,12 @@ static int poll_one_napi(struct netpoll_info *npinfo,
 
 	npinfo->rx_flags |= NETPOLL_RX_DROP;
 	atomic_inc(&trapped);
+	set_bit(NAPI_STATE_NPSVC, &napi->state);
 
 	work = napi->poll(napi, budget);
+	trace_napi_poll(napi);
 
+	clear_bit(NAPI_STATE_NPSVC, &napi->state);
 	atomic_dec(&trapped);
 	npinfo->rx_flags &= ~NETPOLL_RX_DROP;
 
@@ -169,12 +177,17 @@ static void service_arp_queue(struct netpoll_info *npi)
 void netpoll_poll(struct netpoll *np)
 {
 	struct net_device *dev = np->dev;
+	const struct net_device_ops *ops;
+
+	if (!dev || !netif_running(dev))
+		return;
 
-	if (!dev || !netif_running(dev) || !dev->poll_controller)
+	ops = dev->netdev_ops;
+	if (!ops->ndo_poll_controller)
 		return;
 
 	/* Process pending work on NIC */
-	dev->poll_controller(dev);
+	ops->ndo_poll_controller(dev);
 
 	poll_napi(dev);
 
@@ -269,6 +282,7 @@ static void netpoll_send_skb(struct netpoll *np, struct sk_buff *skb)
 	int status = NETDEV_TX_BUSY;
 	unsigned long tries;
 	struct net_device *dev = np->dev;
+	const struct net_device_ops *ops = dev->netdev_ops;
 	struct netpoll_info *npinfo = np->dev->npinfo;
 
 	if (!npinfo || !netif_running(dev) || !netif_device_present(dev)) {
@@ -278,17 +292,22 @@ static void netpoll_send_skb(struct netpoll *np, struct sk_buff *skb)
 
 	/* don't get messages out of order, and no recursion */
 	if (skb_queue_len(&npinfo->txq) == 0 && !netpoll_owner_active(dev)) {
+		struct netdev_queue *txq;
 		unsigned long flags;
 
+		txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
+
 		local_irq_save(flags);
 		/* try until next clock tick */
 		for (tries = jiffies_to_usecs(1)/USEC_PER_POLL;
 		     tries > 0; --tries) {
-			if (netif_tx_trylock(dev)) {
-				if (!netif_queue_stopped(dev) &&
-				    !netif_subqueue_stopped(dev, skb))
-					status = dev->hard_start_xmit(skb, dev);
-				netif_tx_unlock(dev);
+			if (__netif_tx_trylock(txq)) {
+				if (!netif_tx_queue_stopped(txq)) {
+					status = ops->ndo_start_xmit(skb, dev);
+					if (status == NETDEV_TX_OK)
+						txq_trans_update(txq);
+				}
+				__netif_tx_unlock(txq);
 
 				if (status == NETDEV_TX_OK)
 					break;
@@ -335,10 +354,10 @@ void netpoll_send_udp(struct netpoll *np, const char *msg, int len)
 	udph->dest = htons(np->remote_port);
 	udph->len = htons(udp_len);
 	udph->check = 0;
-	udph->check = csum_tcpudp_magic(htonl(np->local_ip),
-					htonl(np->remote_ip),
+	udph->check = csum_tcpudp_magic(np->local_ip,
+					np->remote_ip,
 					udp_len, IPPROTO_UDP,
-					csum_partial((unsigned char *)udph, udp_len, 0));
+					csum_partial(udph, udp_len, 0));
 	if (udph->check == 0)
 		udph->check = CSUM_MANGLED_0;
 
@@ -355,8 +374,8 @@ void netpoll_send_udp(struct netpoll *np, const char *msg, int len)
 	iph->ttl = 64;
 	iph->protocol = IPPROTO_UDP;
 	iph->check = 0;
-	put_unaligned(htonl(np->local_ip), &(iph->saddr));
-	put_unaligned(htonl(np->remote_ip), &(iph->daddr));
+	put_unaligned(np->local_ip, &(iph->saddr));
+	put_unaligned(np->remote_ip, &(iph->daddr));
 	iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl);
 
 	eth = (struct ethhdr *) skb_push(skb, ETH_HLEN);
@@ -414,12 +433,12 @@ static void arp_reply(struct sk_buff *skb)
 	memcpy(&tip, arp_ptr, 4);
 
 	/* Should we ignore arp? */
-	if (tip != htonl(np->local_ip) ||
+	if (tip != np->local_ip ||
 	    ipv4_is_loopback(tip) || ipv4_is_multicast(tip))
 		return;
 
 	size = arp_hdr_len(skb->dev);
-	send_skb = find_skb(np, size + LL_RESERVED_SPACE(np->dev),
+	send_skb = find_skb(np, size + LL_ALLOCATED_SPACE(np->dev),
 			    LL_RESERVED_SPACE(np->dev));
 
 	if (!send_skb)
@@ -523,9 +542,9 @@ int __netpoll_rx(struct sk_buff *skb)
 		goto out;
 	if (checksum_udp(skb, uh, ulen, iph->saddr, iph->daddr))
 		goto out;
-	if (np->local_ip && np->local_ip != ntohl(iph->daddr))
+	if (np->local_ip && np->local_ip != iph->daddr)
 		goto out;
-	if (np->remote_ip && np->remote_ip != ntohl(iph->saddr))
+	if (np->remote_ip && np->remote_ip != iph->saddr)
 		goto out;
 	if (np->local_port && np->local_port != ntohs(uh->dest))
 		goto out;
@@ -548,19 +567,18 @@ out:
 
 void netpoll_print_options(struct netpoll *np)
 {
-	DECLARE_MAC_BUF(mac);
 	printk(KERN_INFO "%s: local port %d\n",
 	       np->name, np->local_port);
-	printk(KERN_INFO "%s: local IP %d.%d.%d.%d\n",
-	       np->name, HIPQUAD(np->local_ip));
+	printk(KERN_INFO "%s: local IP %pI4\n",
+	       np->name, &np->local_ip);
 	printk(KERN_INFO "%s: interface %s\n",
 	       np->name, np->dev_name);
 	printk(KERN_INFO "%s: remote port %d\n",
 	       np->name, np->remote_port);
-	printk(KERN_INFO "%s: remote IP %d.%d.%d.%d\n",
-	       np->name, HIPQUAD(np->remote_ip));
-	printk(KERN_INFO "%s: remote ethernet address %s\n",
-	       np->name, print_mac(mac, np->remote_mac));
+	printk(KERN_INFO "%s: remote IP %pI4\n",
+	       np->name, &np->remote_ip);
+	printk(KERN_INFO "%s: remote ethernet address %pM\n",
+	       np->name, np->remote_mac);
 }
 
 int netpoll_parse_options(struct netpoll *np, char *opt)
@@ -580,7 +598,7 @@ int netpoll_parse_options(struct netpoll *np, char *opt)
 		if ((delim = strchr(cur, '/')) == NULL)
 			goto parse_failed;
 		*delim = 0;
-		np->local_ip = ntohl(in_aton(cur));
+		np->local_ip = in_aton(cur);
 		cur = delim;
 	}
 	cur++;
@@ -609,7 +627,7 @@ int netpoll_parse_options(struct netpoll *np, char *opt)
 	if ((delim = strchr(cur, '/')) == NULL)
 		goto parse_failed;
 	*delim = 0;
-	np->remote_ip = ntohl(in_aton(cur));
+	np->remote_ip = in_aton(cur);
 	cur = delim + 1;
 
 	if (*cur != 0) {
@@ -690,7 +708,7 @@ int netpoll_setup(struct netpoll *np)
 		atomic_inc(&npinfo->refcnt);
 	}
 
-	if (!ndev->poll_controller) {
+	if (!ndev->netdev_ops->ndo_poll_controller) {
 		printk(KERN_ERR "%s: %s doesn't support polling, aborting.\n",
 		       np->name, np->dev_name);
 		err = -ENOTSUPP;
@@ -750,10 +768,9 @@ int netpoll_setup(struct netpoll *np)
 			goto release;
 		}
 
-		np->local_ip = ntohl(in_dev->ifa_list->ifa_local);
+		np->local_ip = in_dev->ifa_list->ifa_local;
 		rcu_read_unlock();
-		printk(KERN_INFO "%s: local IP %d.%d.%d.%d\n",
-		       np->name, HIPQUAD(np->local_ip));
+		printk(KERN_INFO "%s: local IP %pI4\n", np->name, &np->local_ip);
 	}
 
 	if (np->rx_hook) {
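
For reference, the transmit-path hunks above move netpoll from the device-wide netif_tx_lock()/dev->hard_start_xmit() pattern to per-queue locking via netdev_get_tx_queue() and the net_device_ops table. Below is a minimal sketch of that pattern outside the diff, using only the kernel APIs the patch itself calls; the helper name try_xmit_one() is made up for illustration and the retry/requeue handling of the real code is omitted.

	/*
	 * Sketch of the per-queue transmit pattern adopted by queue_process()
	 * and netpoll_send_skb() in the diff above (illustrative only).
	 */
	#include <linux/irqflags.h>
	#include <linux/netdevice.h>
	#include <linux/skbuff.h>
	#include <linux/smp.h>

	static int try_xmit_one(struct sk_buff *skb)
	{
		struct net_device *dev = skb->dev;
		const struct net_device_ops *ops = dev->netdev_ops;
		struct netdev_queue *txq;
		unsigned long flags;
		int status = NETDEV_TX_BUSY;

		/* Pick the one TX queue this skb was mapped to... */
		txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));

		/* ...and lock just that queue, not the whole device. */
		local_irq_save(flags);
		__netif_tx_lock(txq, smp_processor_id());
		if (!netif_tx_queue_stopped(txq))
			status = ops->ndo_start_xmit(skb, dev);
		__netif_tx_unlock(txq);
		local_irq_restore(flags);

		return status;
	}

The other recurring change in the diff is that np->local_ip and np->remote_ip are now kept in network byte order, which is why the htonl()/ntohl() conversions drop out and addresses are printed with the %pI4 printk format.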