WorkStruct: make allyesconfig
diff --git a/net/core/netpoll.c b/net/core/netpoll.c
index e8e05ce..63f24c9 100644
--- a/net/core/netpoll.c
+++ b/net/core/netpoll.c
@@ -54,8 +54,9 @@ static atomic_t trapped;
                                sizeof(struct iphdr) + sizeof(struct ethhdr))
 
 static void zap_completion_queue(void);
+static void arp_reply(struct sk_buff *skb);
 
-static void queue_process(void *p)
+static void queue_process(struct work_struct *work)
 {
        unsigned long flags;
        struct sk_buff *skb;
@@ -76,7 +77,7 @@ static void queue_process(void *p)
        }
 }
 
-static DECLARE_WORK(send_queue, queue_process, NULL);
+static DECLARE_WORK(send_queue, queue_process);
 
 void netpoll_queue(struct sk_buff *skb)
 {
@@ -109,7 +110,7 @@ static int checksum_udp(struct sk_buff *skb, struct udphdr *uh,
 
        psum = csum_tcpudp_nofold(saddr, daddr, ulen, IPPROTO_UDP, 0);
 
-       if (skb->ip_summed == CHECKSUM_HW &&
+       if (skb->ip_summed == CHECKSUM_COMPLETE &&
            !(u16)csum_fold(csum_add(psum, skb->csum)))
                return 0;
 
@@ -153,6 +154,22 @@ static void poll_napi(struct netpoll *np)
        }
 }
 
+static void service_arp_queue(struct netpoll_info *npi)
+{
+       struct sk_buff *skb;
+
+       if (unlikely(!npi))
+               return;
+
+       skb = skb_dequeue(&npi->arp_tx);
+
+       while (skb != NULL) {
+               arp_reply(skb);
+               skb = skb_dequeue(&npi->arp_tx);
+       }
+       return;
+}
+
 void netpoll_poll(struct netpoll *np)
 {
        if(!np->dev || !netif_running(np->dev) || !np->dev->poll_controller)
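
service_arp_queue() drains ARP requests that the receive path now parks on npi->arp_tx (see the __netpoll_rx hunk below), so that arp_reply(), which transmits a frame, is no longer called from inside the receive path. The queue itself is a new member of struct netpoll_info; that header change is outside this file's diff, but it presumably amounts to something like the following (member placement illustrative):

        struct netpoll_info {
                /* ... existing members: poll_owner, tries, rx_lock, rx_np, ... */
                struct sk_buff_head arp_tx;     /* ARP requests awaiting arp_reply() */
        };

The queue is initialised in the netpoll_setup() hunk at the end of this diff. Since skb_queue_tail() and skb_dequeue() take the queue's internal lock with interrupts disabled, the hand-off between the receive path and the poll path needs no additional locking.
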
@@ -163,6 +180,8 @@ void netpoll_poll(struct netpoll *np)
        if (np->dev->poll)
                poll_napi(np);
 
+       service_arp_queue(np->dev->npinfo);
+
        zap_completion_queue();
 }
 
@@ -273,24 +292,17 @@ static void netpoll_send_skb(struct netpoll *np, struct sk_buff *skb)
 
        do {
                npinfo->tries--;
-               spin_lock(&np->dev->xmit_lock);
-               np->dev->xmit_lock_owner = smp_processor_id();
+               netif_tx_lock(np->dev);
 
                /*
                 * network drivers do not expect to be called if the queue is
                 * stopped.
                 */
-               if (netif_queue_stopped(np->dev)) {
-                       np->dev->xmit_lock_owner = -1;
-                       spin_unlock(&np->dev->xmit_lock);
-                       netpoll_poll(np);
-                       udelay(50);
-                       continue;
-               }
+               status = NETDEV_TX_BUSY;
+               if (!netif_queue_stopped(np->dev))
+                       status = np->dev->hard_start_xmit(skb, np->dev);
 
-               status = np->dev->hard_start_xmit(skb, np->dev);
-               np->dev->xmit_lock_owner = -1;
-               spin_unlock(&np->dev->xmit_lock);
+               netif_tx_unlock(np->dev);
 
                /* success */
                if(!status) {
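
Here the open-coded xmit_lock/xmit_lock_owner juggling is replaced by the netif_tx_lock()/netif_tx_unlock() helpers introduced in 2.6.18, and the queue-stopped case now just leaves status at NETDEV_TX_BUSY and falls through to the shared unlock instead of duplicating it. The helpers encapsulate roughly the sequence the removed lines spelled out; a sketch of their shape (field names may differ slightly between kernel versions):

        static inline void netif_tx_lock(struct net_device *dev)
        {
                spin_lock(&dev->_xmit_lock);
                dev->xmit_lock_owner = smp_processor_id();
        }

        static inline void netif_tx_unlock(struct net_device *dev)
        {
                dev->xmit_lock_owner = -1;
                spin_unlock(&dev->_xmit_lock);
        }
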
@@ -323,13 +335,19 @@ void netpoll_send_udp(struct netpoll *np, const char *msg, int len)
        memcpy(skb->data, msg, len);
        skb->len += len;
 
-       udph = (struct udphdr *) skb_push(skb, sizeof(*udph));
+       skb->h.uh = udph = (struct udphdr *) skb_push(skb, sizeof(*udph));
        udph->source = htons(np->local_port);
        udph->dest = htons(np->remote_port);
        udph->len = htons(udp_len);
        udph->check = 0;
+       udph->check = csum_tcpudp_magic(htonl(np->local_ip),
+                                       htonl(np->remote_ip),
+                                       udp_len, IPPROTO_UDP,
+                                       csum_partial((unsigned char *)udph, udp_len, 0));
+       if (udph->check == 0)
+               udph->check = -1;
 
-       iph = (struct iphdr *)skb_push(skb, sizeof(*iph));
+       skb->nh.iph = iph = (struct iphdr *)skb_push(skb, sizeof(*iph));
 
        /* iph->version = 4; iph->ihl = 5; */
        put_unaligned(0x45, (unsigned char *)iph);
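
Previously udph->check was left at zero, which on the wire means "checksum not computed"; the added lines fill in a real checksum over the pseudo-header plus UDP header and payload, applying the RFC 768 rule that a computed value of zero must be sent as all-ones (that is the udph->check = -1 fixup: -1 in the 16-bit field is 0xffff). The hunks also record the header positions in skb->h.uh and skb->nh.iph, this era's field names for what later became the skb_transport_header()/skb_network_header() accessors. A compact restatement of the checksum rule as a hypothetical helper (not part of the patch):

        #include <linux/types.h>
        #include <linux/in.h>
        #include <linux/udp.h>
        #include <net/checksum.h>

        /* saddr/daddr in network byte order, len = UDP header + payload. */
        static void udp_fill_check(struct udphdr *uh, u32 saddr, u32 daddr, int len)
        {
                uh->check = 0;
                uh->check = csum_tcpudp_magic(saddr, daddr, len, IPPROTO_UDP,
                                              csum_partial((unsigned char *)uh, len, 0));
                if (uh->check == 0)     /* RFC 768: zero means "no checksum"... */
                        uh->check = -1; /* ...so transmit all-ones instead */
        }
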
@@ -345,8 +363,8 @@ void netpoll_send_udp(struct netpoll *np, const char *msg, int len)
        iph->check    = ip_fast_csum((unsigned char *)iph, iph->ihl);
 
        eth = (struct ethhdr *) skb_push(skb, ETH_HLEN);
-
-       eth->h_proto = htons(ETH_P_IP);
+       skb->mac.raw = skb->data;
+       skb->protocol = eth->h_proto = htons(ETH_P_IP);
        memcpy(eth->h_source, np->local_mac, 6);
        memcpy(eth->h_dest, np->remote_mac, 6);
 
@@ -449,7 +467,9 @@ int __netpoll_rx(struct sk_buff *skb)
        int proto, len, ulen;
        struct iphdr *iph;
        struct udphdr *uh;
-       struct netpoll *np = skb->dev->npinfo->rx_np;
+       struct netpoll_info *npi = skb->dev->npinfo;
+       struct netpoll *np = npi->rx_np;
+
 
        if (!np)
                goto out;
@@ -459,7 +479,7 @@ int __netpoll_rx(struct sk_buff *skb)
        /* check if netpoll clients need ARP */
        if (skb->protocol == __constant_htons(ETH_P_ARP) &&
            atomic_read(&trapped)) {
-               arp_reply(skb);
+               skb_queue_tail(&npi->arp_tx, skb);
                return 1;
        }
 
@@ -654,6 +674,7 @@ int netpoll_setup(struct netpoll *np)
                npinfo->poll_owner = -1;
                npinfo->tries = MAX_RETRIES;
                spin_lock_init(&npinfo->rx_lock);
+               skb_queue_head_init(&npinfo->arp_tx);
        } else
                npinfo = ndev->npinfo;