[NETPOLL]: netpoll_poll() cleanup
net/core/netpoll.c
/*
 * Common framework for low-level network console, dump, and debugger code
 *
 * Sep 8 2003  Matt Mackall <mpm@selenic.com>
 *
 * based on the netconsole code from:
 *
 * Copyright (C) 2001  Ingo Molnar <mingo@redhat.com>
 * Copyright (C) 2002  Red Hat, Inc.
 */

#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/string.h>
#include <linux/if_arp.h>
#include <linux/inetdevice.h>
#include <linux/inet.h>
#include <linux/interrupt.h>
#include <linux/netpoll.h>
#include <linux/sched.h>
#include <linux/delay.h>
#include <linux/rcupdate.h>
#include <linux/workqueue.h>
#include <net/tcp.h>
#include <net/udp.h>
#include <asm/unaligned.h>

/*
 * We maintain a small pool of fully-sized skbs, to make sure the
 * message gets out even in extreme OOM situations.
 */

#define MAX_UDP_CHUNK 1460
#define MAX_SKBS 32
#define MAX_QUEUE_DEPTH (MAX_SKBS / 2)

static struct sk_buff_head skb_pool;

static atomic_t trapped;

#define USEC_PER_POLL   50
#define NETPOLL_RX_ENABLED  1
#define NETPOLL_RX_DROP     2

#define MAX_SKB_SIZE \
                (MAX_UDP_CHUNK + sizeof(struct udphdr) + \
                                sizeof(struct iphdr) + sizeof(struct ethhdr))
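/* Editorial note: with the values above, MAX_SKB_SIZE works out to
 * 1460 + 8 (struct udphdr) + 20 (struct iphdr) + 14 (struct ethhdr)
 * = 1502 bytes -- one full UDP-in-Ethernet frame per pooled skb.
 */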

static void zap_completion_queue(void);
static void arp_reply(struct sk_buff *skb);

/*
 * Deferred transmit path: packets that could not be sent directly from
 * netpoll_send_skb() are queued on npinfo->txq and retried from here.
 */
static void queue_process(struct work_struct *work)
{
        struct netpoll_info *npinfo =
                container_of(work, struct netpoll_info, tx_work.work);
        struct sk_buff *skb;
        unsigned long flags;

        while ((skb = skb_dequeue(&npinfo->txq))) {
                struct net_device *dev = skb->dev;

                if (!netif_device_present(dev) || !netif_running(dev)) {
                        __kfree_skb(skb);
                        continue;
                }

                local_irq_save(flags);
                netif_tx_lock(dev);
                if ((netif_queue_stopped(dev) ||
                     netif_subqueue_stopped(dev, skb)) ||
                     dev->hard_start_xmit(skb, dev) != NETDEV_TX_OK) {
                        /* requeue at the head and retry in a tenth of a second */
                        skb_queue_head(&npinfo->txq, skb);
                        netif_tx_unlock(dev);
                        local_irq_restore(flags);

                        schedule_delayed_work(&npinfo->tx_work, HZ/10);
                        return;
                }
                netif_tx_unlock(dev);
                local_irq_restore(flags);
        }
}

static __sum16 checksum_udp(struct sk_buff *skb, struct udphdr *uh,
                            unsigned short ulen, __be32 saddr, __be32 daddr)
{
        __wsum psum;

        if (uh->check == 0 || skb_csum_unnecessary(skb))
                return 0;

        psum = csum_tcpudp_nofold(saddr, daddr, ulen, IPPROTO_UDP, 0);

        if (skb->ip_summed == CHECKSUM_COMPLETE &&
            !csum_fold(csum_add(psum, skb->csum)))
                return 0;

        skb->csum = psum;

        return __skb_checksum_complete(skb);
}

/*
 * Check whether delayed processing was scheduled for our NIC. If so,
 * we attempt to grab the poll lock and use ->poll() to pump the card.
 * If this fails, either we've recursed in ->poll() or it's already
 * running on another CPU.
 *
 * Note: we don't mask interrupts with this lock because we're using
 * trylock here and interrupts are already disabled in the softirq
 * case. Further, we test the poll_owner to avoid recursion on UP
 * systems where the lock doesn't exist.
 *
 * In cases where there is bi-directional communication, reading only
 * one message at a time can lead to packets being dropped by the
 * network adapter, forcing superfluous retries and possibly timeouts.
 * Thus, we set our budget to greater than 1.
 */
static int poll_one_napi(struct netpoll_info *npinfo,
                         struct napi_struct *napi, int budget)
{
        int work;

        /* net_rx_action's ->poll() invocations and ours are
         * synchronized by this test which is only made while
         * holding the napi->poll_lock.
         */
        if (!test_bit(NAPI_STATE_SCHED, &napi->state))
                return budget;

        npinfo->rx_flags |= NETPOLL_RX_DROP;
        atomic_inc(&trapped);

        work = napi->poll(napi, budget);

        atomic_dec(&trapped);
        npinfo->rx_flags &= ~NETPOLL_RX_DROP;

        return budget - work;
}

static void poll_napi(struct net_device *dev)
{
        struct napi_struct *napi;
        int budget = 16;

        list_for_each_entry(napi, &dev->napi_list, dev_list) {
                if (napi->poll_owner != smp_processor_id() &&
                    spin_trylock(&napi->poll_lock)) {
                        budget = poll_one_napi(dev->npinfo, napi, budget);
                        spin_unlock(&napi->poll_lock);

                        if (!budget)
                                break;
                }
        }
}

static void service_arp_queue(struct netpoll_info *npi)
{
        if (npi) {
                struct sk_buff *skb;

                while ((skb = skb_dequeue(&npi->arp_tx)))
                        arp_reply(skb);
        }
}

void netpoll_poll(struct netpoll *np)
{
        struct net_device *dev = np->dev;

        if (!dev || !netif_running(dev) || !dev->poll_controller)
                return;

        /* Process pending work on NIC */
        dev->poll_controller(dev);

        poll_napi(dev);

        service_arp_queue(dev->npinfo);

        zap_completion_queue();
}
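
/*
 * For illustration only (not part of this file): a driver's
 * ->poll_controller typically just runs its interrupt handler with the
 * IRQ masked, so netpoll_poll() can pump the NIC with interrupts off.
 * A minimal sketch -- the "mydrv" names are hypothetical:
 *
 *	static void mydrv_netpoll(struct net_device *dev)
 *	{
 *		disable_irq(dev->irq);
 *		mydrv_interrupt(dev->irq, dev);
 *		enable_irq(dev->irq);
 *	}
 */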

static void refill_skbs(void)
{
        struct sk_buff *skb;
        unsigned long flags;

        spin_lock_irqsave(&skb_pool.lock, flags);
        while (skb_pool.qlen < MAX_SKBS) {
                skb = alloc_skb(MAX_SKB_SIZE, GFP_ATOMIC);
                if (!skb)
                        break;

                __skb_queue_tail(&skb_pool, skb);
        }
        spin_unlock_irqrestore(&skb_pool.lock, flags);
}

static void zap_completion_queue(void)
{
        unsigned long flags;
        struct softnet_data *sd = &get_cpu_var(softnet_data);

        if (sd->completion_queue) {
                struct sk_buff *clist;

                local_irq_save(flags);
                clist = sd->completion_queue;
                sd->completion_queue = NULL;
                local_irq_restore(flags);

                while (clist != NULL) {
                        struct sk_buff *skb = clist;
                        clist = clist->next;
                        if (skb->destructor)
                                dev_kfree_skb_any(skb); /* put this one back */
                        else
                                __kfree_skb(skb);
                }
        }

        put_cpu_var(softnet_data);
}

/*
 * Get an skb for transmit: try a fresh allocation first, fall back to
 * the emergency pool, and if both fail pump the device a few times in
 * the hope that it completes (and thereby frees) some queued skbs.
 */
static struct sk_buff *find_skb(struct netpoll *np, int len, int reserve)
{
        int count = 0;
        struct sk_buff *skb;

        zap_completion_queue();
        refill_skbs();
repeat:

        skb = alloc_skb(len, GFP_ATOMIC);
        if (!skb)
                skb = skb_dequeue(&skb_pool);

        if (!skb) {
                if (++count < 10) {
                        netpoll_poll(np);
                        goto repeat;
                }
                return NULL;
        }

        atomic_set(&skb->users, 1);
        skb_reserve(skb, reserve);
        return skb;
}

static int netpoll_owner_active(struct net_device *dev)
{
        struct napi_struct *napi;

        list_for_each_entry(napi, &dev->napi_list, dev_list) {
                if (napi->poll_owner == smp_processor_id())
                        return 1;
        }
        return 0;
}

static void netpoll_send_skb(struct netpoll *np, struct sk_buff *skb)
{
        int status = NETDEV_TX_BUSY;
        unsigned long tries;
        struct net_device *dev = np->dev;
        struct netpoll_info *npinfo = np->dev->npinfo;

        if (!npinfo || !netif_running(dev) || !netif_device_present(dev)) {
                __kfree_skb(skb);
                return;
        }

        /* don't get messages out of order, and no recursion */
        if (skb_queue_len(&npinfo->txq) == 0 && !netpoll_owner_active(dev)) {
                unsigned long flags;

                local_irq_save(flags);
                /* try until next clock tick, polling in USEC_PER_POLL steps */
                for (tries = jiffies_to_usecs(1)/USEC_PER_POLL;
                     tries > 0; --tries) {
                        if (netif_tx_trylock(dev)) {
                                if (!netif_queue_stopped(dev) &&
                                    !netif_subqueue_stopped(dev, skb))
                                        status = dev->hard_start_xmit(skb, dev);
                                netif_tx_unlock(dev);

                                if (status == NETDEV_TX_OK)
                                        break;
                        }

                        /* tickle the device; maybe there is some cleanup to reap */
                        netpoll_poll(np);

                        udelay(USEC_PER_POLL);
                }
                local_irq_restore(flags);
        }

        /* give up the busy-wait and hand the skb to the workqueue path */
        if (status != NETDEV_TX_OK) {
                skb_queue_tail(&npinfo->txq, skb);
                schedule_delayed_work(&npinfo->tx_work, 0);
        }
}

void netpoll_send_udp(struct netpoll *np, const char *msg, int len)
{
        int total_len, eth_len, ip_len, udp_len;
        struct sk_buff *skb;
        struct udphdr *udph;
        struct iphdr *iph;
        struct ethhdr *eth;

        udp_len = len + sizeof(*udph);
        ip_len = eth_len = udp_len + sizeof(*iph);
        total_len = eth_len + ETH_HLEN + NET_IP_ALIGN;

        skb = find_skb(np, total_len, total_len - len);
        if (!skb)
                return;

        skb_copy_to_linear_data(skb, msg, len);
        skb->len += len;

        skb_push(skb, sizeof(*udph));
        skb_reset_transport_header(skb);
        udph = udp_hdr(skb);
        udph->source = htons(np->local_port);
        udph->dest = htons(np->remote_port);
        udph->len = htons(udp_len);
        udph->check = 0;
        udph->check = csum_tcpudp_magic(htonl(np->local_ip),
                                        htonl(np->remote_ip),
                                        udp_len, IPPROTO_UDP,
                                        csum_partial((unsigned char *)udph, udp_len, 0));
        if (udph->check == 0)
                udph->check = CSUM_MANGLED_0;

        skb_push(skb, sizeof(*iph));
        skb_reset_network_header(skb);
        iph = ip_hdr(skb);

        /* iph->version = 4; iph->ihl = 5; */
        put_unaligned(0x45, (unsigned char *)iph);
        iph->tos      = 0;
        put_unaligned(htons(ip_len), &(iph->tot_len));
        iph->id       = 0;
        iph->frag_off = 0;
        iph->ttl      = 64;
        iph->protocol = IPPROTO_UDP;
        iph->check    = 0;
        put_unaligned(htonl(np->local_ip), &(iph->saddr));
        put_unaligned(htonl(np->remote_ip), &(iph->daddr));
        iph->check    = ip_fast_csum((unsigned char *)iph, iph->ihl);

        eth = (struct ethhdr *) skb_push(skb, ETH_HLEN);
        skb_reset_mac_header(skb);
        skb->protocol = eth->h_proto = htons(ETH_P_IP);
        memcpy(eth->h_source, np->local_mac, 6);
        memcpy(eth->h_dest, np->remote_mac, 6);

        skb->dev = np->dev;

        netpoll_send_skb(np, skb);
}
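
/*
 * For illustration only (not part of this file): netconsole emits each
 * kernel log fragment over a configured netpoll instance with a single
 * call along these lines:
 *
 *	netpoll_send_udp(&np, msg, strlen(msg));
 *
 * The headers are built by hand above precisely so that this can be
 * done without touching the normal (sleeping, lock-taking) UDP stack.
 */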

static void arp_reply(struct sk_buff *skb)
{
        struct netpoll_info *npinfo = skb->dev->npinfo;
        struct arphdr *arp;
        unsigned char *arp_ptr;
        int size, type = ARPOP_REPLY, ptype = ETH_P_ARP;
        __be32 sip, tip;
        unsigned char *sha;
        struct sk_buff *send_skb;
        struct netpoll *np = NULL;

        if (npinfo->rx_np && npinfo->rx_np->dev == skb->dev)
                np = npinfo->rx_np;
        if (!np)
                return;

        /* No ARP on this interface */
        if (skb->dev->flags & IFF_NOARP)
                return;

        if (!pskb_may_pull(skb, (sizeof(struct arphdr) +
                                 (2 * skb->dev->addr_len) +
                                 (2 * sizeof(u32)))))
                return;

        skb_reset_network_header(skb);
        skb_reset_transport_header(skb);
        arp = arp_hdr(skb);

        if ((arp->ar_hrd != htons(ARPHRD_ETHER) &&
             arp->ar_hrd != htons(ARPHRD_IEEE802)) ||
            arp->ar_pro != htons(ETH_P_IP) ||
            arp->ar_op != htons(ARPOP_REQUEST))
                return;

        arp_ptr = (unsigned char *)(arp + 1);
        /* save the location of the src hw addr */
        sha = arp_ptr;
        arp_ptr += skb->dev->addr_len;
        memcpy(&sip, arp_ptr, 4);
        arp_ptr += 4;
        /* if we actually cared about dst hw addr, it would get copied here */
        arp_ptr += skb->dev->addr_len;
        memcpy(&tip, arp_ptr, 4);

        /* Should we ignore this ARP? */
        if (tip != htonl(np->local_ip) || LOOPBACK(tip) || MULTICAST(tip))
                return;

        /* arphdr plus sender/target hw+IP address pairs:
         * 8 + 2 * (6 + 4) = 28 bytes on Ethernet */
        size = sizeof(struct arphdr) + 2 * (skb->dev->addr_len + 4);
        send_skb = find_skb(np, size + LL_RESERVED_SPACE(np->dev),
                            LL_RESERVED_SPACE(np->dev));

        if (!send_skb)
                return;

        skb_reset_network_header(send_skb);
        arp = (struct arphdr *) skb_put(send_skb, size);
        send_skb->dev = skb->dev;
        send_skb->protocol = htons(ETH_P_ARP);

        /* Fill the device header for the ARP frame */
        if (dev_hard_header(send_skb, skb->dev, ptype,
                            sha, np->local_mac,
                            send_skb->len) < 0) {
                kfree_skb(send_skb);
                return;
        }

        /*
         * Fill out the ARP protocol part.
         *
         * we only support ethernet device type,
         * which (according to RFC 1390) should always equal 1 (Ethernet).
         */

        arp->ar_hrd = htons(np->dev->type);
        arp->ar_pro = htons(ETH_P_IP);
        arp->ar_hln = np->dev->addr_len;
        arp->ar_pln = 4;
        arp->ar_op = htons(type);

        arp_ptr = (unsigned char *)(arp + 1);
        memcpy(arp_ptr, np->dev->dev_addr, np->dev->addr_len);
        arp_ptr += np->dev->addr_len;
        memcpy(arp_ptr, &tip, 4);
        arp_ptr += 4;
        memcpy(arp_ptr, sha, np->dev->addr_len);
        arp_ptr += np->dev->addr_len;
        memcpy(arp_ptr, &sip, 4);

        netpoll_send_skb(np, send_skb);
}

/*
 * Returns 1 if the packet was consumed by netpoll (queued for ARP
 * service or fed to the rx_hook), 0 if it should continue up the
 * normal receive path.
 */
int __netpoll_rx(struct sk_buff *skb)
{
        int proto, len, ulen;
        struct iphdr *iph;
        struct udphdr *uh;
        struct netpoll_info *npi = skb->dev->npinfo;
        struct netpoll *np = npi->rx_np;

        if (!np)
                goto out;
        if (skb->dev->type != ARPHRD_ETHER)
                goto out;

        /* check if netpoll clients need ARP */
        if (skb->protocol == htons(ETH_P_ARP) &&
            atomic_read(&trapped)) {
                skb_queue_tail(&npi->arp_tx, skb);
                return 1;
        }

        proto = ntohs(eth_hdr(skb)->h_proto);
        if (proto != ETH_P_IP)
                goto out;
        if (skb->pkt_type == PACKET_OTHERHOST)
                goto out;
        if (skb_shared(skb))
                goto out;

        iph = (struct iphdr *)skb->data;
        if (!pskb_may_pull(skb, sizeof(struct iphdr)))
                goto out;
        if (iph->ihl < 5 || iph->version != 4)
                goto out;
        if (!pskb_may_pull(skb, iph->ihl*4))
                goto out;
        if (ip_fast_csum((u8 *)iph, iph->ihl) != 0)
                goto out;

        len = ntohs(iph->tot_len);
        if (skb->len < len || len < iph->ihl*4)
                goto out;

        /*
         * Our transport medium may have padded the buffer out.
         * Now we trim to the true length of the frame.
         */
        if (pskb_trim_rcsum(skb, len))
                goto out;

        if (iph->protocol != IPPROTO_UDP)
                goto out;

        len -= iph->ihl*4;
        uh = (struct udphdr *)(((char *)iph) + iph->ihl*4);
        ulen = ntohs(uh->len);

        if (ulen != len)
                goto out;
        if (checksum_udp(skb, uh, ulen, iph->saddr, iph->daddr))
                goto out;
        if (np->local_ip && np->local_ip != ntohl(iph->daddr))
                goto out;
        if (np->remote_ip && np->remote_ip != ntohl(iph->saddr))
                goto out;
        if (np->local_port && np->local_port != ntohs(uh->dest))
                goto out;

        np->rx_hook(np, ntohs(uh->source),
                    (char *)(uh+1),
                    ulen - sizeof(struct udphdr));

        kfree_skb(skb);
        return 1;

out:
        if (atomic_read(&trapped)) {
                kfree_skb(skb);
                return 1;
        }

        return 0;
}

void netpoll_print_options(struct netpoll *np)
{
        DECLARE_MAC_BUF(mac);
        printk(KERN_INFO "%s: local port %d\n",
                         np->name, np->local_port);
        printk(KERN_INFO "%s: local IP %d.%d.%d.%d\n",
                         np->name, HIPQUAD(np->local_ip));
        printk(KERN_INFO "%s: interface %s\n",
                         np->name, np->dev_name);
        printk(KERN_INFO "%s: remote port %d\n",
                         np->name, np->remote_port);
        printk(KERN_INFO "%s: remote IP %d.%d.%d.%d\n",
                         np->name, HIPQUAD(np->remote_ip));
        printk(KERN_INFO "%s: remote ethernet address %s\n",
                         np->name, print_mac(mac, np->remote_mac));
}

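/*
 * Option string format, as documented for netconsole:
 *
 *	[src-port]@[src-ip]/[dev],[tgt-port]@<tgt-ip>/[tgt-macaddr]
 *
 * e.g. "6665@10.0.0.1/eth0,6666@10.0.0.2/00:11:22:33:44:55"
 * (addresses here are illustrative only). Bracketed fields may be
 * left empty, in which case the defaults already in *np are kept.
 */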
int netpoll_parse_options(struct netpoll *np, char *opt)
{
        char *cur = opt, *delim;

        if (*cur != '@') {
                if ((delim = strchr(cur, '@')) == NULL)
                        goto parse_failed;
                *delim = 0;
                np->local_port = simple_strtol(cur, NULL, 10);
                cur = delim;
        }
        cur++;

        if (*cur != '/') {
                if ((delim = strchr(cur, '/')) == NULL)
                        goto parse_failed;
                *delim = 0;
                np->local_ip = ntohl(in_aton(cur));
                cur = delim;
        }
        cur++;

        if (*cur != ',') {
                /* parse out dev name */
                if ((delim = strchr(cur, ',')) == NULL)
                        goto parse_failed;
                *delim = 0;
                strlcpy(np->dev_name, cur, sizeof(np->dev_name));
                cur = delim;
        }
        cur++;

        if (*cur != '@') {
                /* dst port */
                if ((delim = strchr(cur, '@')) == NULL)
                        goto parse_failed;
                *delim = 0;
                np->remote_port = simple_strtol(cur, NULL, 10);
                cur = delim;
        }
        cur++;

        /* dst ip */
        if ((delim = strchr(cur, '/')) == NULL)
                goto parse_failed;
        *delim = 0;
        np->remote_ip = ntohl(in_aton(cur));
        cur = delim + 1;

        if (*cur != 0) {
                /* MAC address */
                if ((delim = strchr(cur, ':')) == NULL)
                        goto parse_failed;
                *delim = 0;
                np->remote_mac[0] = simple_strtol(cur, NULL, 16);
                cur = delim + 1;
                if ((delim = strchr(cur, ':')) == NULL)
                        goto parse_failed;
                *delim = 0;
                np->remote_mac[1] = simple_strtol(cur, NULL, 16);
                cur = delim + 1;
                if ((delim = strchr(cur, ':')) == NULL)
                        goto parse_failed;
                *delim = 0;
                np->remote_mac[2] = simple_strtol(cur, NULL, 16);
                cur = delim + 1;
                if ((delim = strchr(cur, ':')) == NULL)
                        goto parse_failed;
                *delim = 0;
                np->remote_mac[3] = simple_strtol(cur, NULL, 16);
                cur = delim + 1;
                if ((delim = strchr(cur, ':')) == NULL)
                        goto parse_failed;
                *delim = 0;
                np->remote_mac[4] = simple_strtol(cur, NULL, 16);
                cur = delim + 1;
                np->remote_mac[5] = simple_strtol(cur, NULL, 16);
        }

        netpoll_print_options(np);

        return 0;

 parse_failed:
        printk(KERN_INFO "%s: couldn't parse config at %s!\n",
               np->name, cur);
        return -1;
}

int netpoll_setup(struct netpoll *np)
{
        struct net_device *ndev = NULL;
        struct in_device *in_dev;
        struct netpoll_info *npinfo;
        unsigned long flags;
        int err;

        if (np->dev_name)
                ndev = dev_get_by_name(&init_net, np->dev_name);
        if (!ndev) {
                printk(KERN_ERR "%s: %s doesn't exist, aborting.\n",
                       np->name, np->dev_name);
                return -ENODEV;
        }

        np->dev = ndev;
        if (!ndev->npinfo) {
                npinfo = kmalloc(sizeof(*npinfo), GFP_KERNEL);
                if (!npinfo) {
                        err = -ENOMEM;
                        goto release;
                }

                npinfo->rx_flags = 0;
                npinfo->rx_np = NULL;

                spin_lock_init(&npinfo->rx_lock);
                skb_queue_head_init(&npinfo->arp_tx);
                skb_queue_head_init(&npinfo->txq);
                INIT_DELAYED_WORK(&npinfo->tx_work, queue_process);

                atomic_set(&npinfo->refcnt, 1);
        } else {
                npinfo = ndev->npinfo;
                atomic_inc(&npinfo->refcnt);
        }

        if (!ndev->poll_controller) {
                printk(KERN_ERR "%s: %s doesn't support polling, aborting.\n",
                       np->name, np->dev_name);
                err = -ENOTSUPP;
                goto release;
        }

        if (!netif_running(ndev)) {
                unsigned long atmost, atleast;

                printk(KERN_INFO "%s: device %s not up yet, forcing it\n",
                       np->name, np->dev_name);

                rtnl_lock();
                err = dev_open(ndev);
                rtnl_unlock();

                if (err) {
                        printk(KERN_ERR "%s: failed to open %s\n",
                               np->name, ndev->name);
                        goto release;
                }

                atleast = jiffies + HZ/10;
                atmost = jiffies + 4*HZ;
                while (!netif_carrier_ok(ndev)) {
                        if (time_after(jiffies, atmost)) {
                                printk(KERN_NOTICE
                                       "%s: timeout waiting for carrier\n",
                                       np->name);
                                break;
                        }
                        cond_resched();
                }

                /* If carrier appears to come up instantly, we don't
                 * trust it and pause so that we don't pump all our
                 * queued console messages into the bitbucket.
                 */

                if (time_before(jiffies, atleast)) {
                        printk(KERN_NOTICE "%s: carrier detect appears"
                               " untrustworthy, waiting 4 seconds\n",
                               np->name);
                        msleep(4000);
                }
        }

        if (is_zero_ether_addr(np->local_mac) && ndev->dev_addr)
                memcpy(np->local_mac, ndev->dev_addr, 6);

        if (!np->local_ip) {
                rcu_read_lock();
                in_dev = __in_dev_get_rcu(ndev);

                if (!in_dev || !in_dev->ifa_list) {
                        rcu_read_unlock();
                        printk(KERN_ERR "%s: no IP address for %s, aborting\n",
                               np->name, np->dev_name);
                        err = -EDESTADDRREQ;
                        goto release;
                }

                np->local_ip = ntohl(in_dev->ifa_list->ifa_local);
                rcu_read_unlock();
                printk(KERN_INFO "%s: local IP %d.%d.%d.%d\n",
                       np->name, HIPQUAD(np->local_ip));
        }

        if (np->rx_hook) {
                spin_lock_irqsave(&npinfo->rx_lock, flags);
                npinfo->rx_flags |= NETPOLL_RX_ENABLED;
                npinfo->rx_np = np;
                spin_unlock_irqrestore(&npinfo->rx_lock, flags);
        }

        /* fill up the skb queue */
        refill_skbs();

        /* last thing to do is link it to the net device structure */
        ndev->npinfo = npinfo;

        /* avoid racing with NAPI reading npinfo */
        synchronize_rcu();

        return 0;

 release:
        if (!ndev->npinfo)
                kfree(npinfo);
        np->dev = NULL;
        dev_put(ndev);
        return err;
}
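
/*
 * For illustration only (not part of this file): the canonical consumer
 * is netconsole, which sets up a netpoll roughly like this. The values
 * shown are placeholder defaults; "config" is netconsole's module
 * parameter holding the option string described above:
 *
 *	static struct netpoll np = {
 *		.name        = "netconsole",
 *		.dev_name    = "eth0",
 *		.local_port  = 6665,
 *		.remote_port = 6666,
 *		.remote_mac  = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff},
 *	};
 *
 *	static int __init init_netconsole(void)
 *	{
 *		int err;
 *
 *		if (strlen(config)) {
 *			err = netpoll_parse_options(&np, config);
 *			if (err)
 *				return err;
 *		}
 *		return netpoll_setup(&np);
 *	}
 *
 * netpoll_cleanup(&np) undoes the setup on module exit.
 */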

static int __init netpoll_init(void)
{
        skb_queue_head_init(&skb_pool);
        return 0;
}
core_initcall(netpoll_init);

void netpoll_cleanup(struct netpoll *np)
{
        struct netpoll_info *npinfo;
        unsigned long flags;

        if (np->dev) {
                npinfo = np->dev->npinfo;
                if (npinfo) {
                        if (npinfo->rx_np == np) {
                                spin_lock_irqsave(&npinfo->rx_lock, flags);
                                npinfo->rx_np = NULL;
                                npinfo->rx_flags &= ~NETPOLL_RX_ENABLED;
                                spin_unlock_irqrestore(&npinfo->rx_lock, flags);
                        }

                        if (atomic_dec_and_test(&npinfo->refcnt)) {
                                skb_queue_purge(&npinfo->arp_tx);
                                skb_queue_purge(&npinfo->txq);
                                cancel_rearming_delayed_work(&npinfo->tx_work);

                                /* clean after last, unfinished work */
                                __skb_queue_purge(&npinfo->txq);
                                kfree(npinfo);
                                np->dev->npinfo = NULL;
                        }
                }

                dev_put(np->dev);
        }

        np->dev = NULL;
}

int netpoll_trap(void)
{
        return atomic_read(&trapped);
}

void netpoll_set_trap(int trap)
{
        if (trap)
                atomic_inc(&trapped);
        else
                atomic_dec(&trapped);
}

EXPORT_SYMBOL(netpoll_set_trap);
EXPORT_SYMBOL(netpoll_trap);
EXPORT_SYMBOL(netpoll_print_options);
EXPORT_SYMBOL(netpoll_parse_options);
EXPORT_SYMBOL(netpoll_setup);
EXPORT_SYMBOL(netpoll_cleanup);
EXPORT_SYMBOL(netpoll_send_udp);
EXPORT_SYMBOL(netpoll_poll);