/*
 *      IP multicast routing support for mrouted 3.6/3.8
 *
 *              (c) 1995 Alan Cox, <alan@lxorguk.ukuu.org.uk>
 *        Linux Consultancy and Custom Driver Development
 *
 *      This program is free software; you can redistribute it and/or
 *      modify it under the terms of the GNU General Public License
 *      as published by the Free Software Foundation; either version
 *      2 of the License, or (at your option) any later version.
 *
 *      Fixes:
 *      Michael Chastain        :       Incorrect size of copying.
 *      Alan Cox                :       Added the cache manager code
 *      Alan Cox                :       Fixed the clone/copy bug and device race.
 *      Mike McLagan            :       Routing by source
 *      Malcolm Beattie         :       Buffer handling fixes.
 *      Alexey Kuznetsov        :       Double buffer free and other fixes.
 *      SVR Anand               :       Fixed several multicast bugs and problems.
 *      Alexey Kuznetsov        :       Status, optimisations and more.
 *      Brad Parker             :       Better behaviour on mrouted upcall
 *                                      overflow.
 *      Carlos Picoto           :       PIMv1 Support
 *      Pavlin Ivanov Radoslavov:       PIMv2 Registers must checksum only PIM header
 *                                      Relax this requirement to work with older peers.
 *
 */

#include <asm/system.h>
#include <asm/uaccess.h>
#include <linux/types.h>
#include <linux/capability.h>
#include <linux/errno.h>
#include <linux/timer.h>
#include <linux/mm.h>
#include <linux/kernel.h>
#include <linux/fcntl.h>
#include <linux/stat.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/inetdevice.h>
#include <linux/igmp.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/mroute.h>
#include <linux/init.h>
#include <linux/if_ether.h>
#include <linux/slab.h>
#include <net/net_namespace.h>
#include <net/ip.h>
#include <net/protocol.h>
#include <linux/skbuff.h>
#include <net/route.h>
#include <net/sock.h>
#include <net/icmp.h>
#include <net/udp.h>
#include <net/raw.h>
#include <linux/notifier.h>
#include <linux/if_arp.h>
#include <linux/netfilter_ipv4.h>
#include <net/ipip.h>
#include <net/checksum.h>
#include <net/netlink.h>
#include <net/fib_rules.h>

#if defined(CONFIG_IP_PIMSM_V1) || defined(CONFIG_IP_PIMSM_V2)
#define CONFIG_IP_PIMSM 1
#endif

struct mr_table {
        struct list_head        list;
        struct net              *net;   /* owning netns, for netlink replies */
        u32                     id;
        struct sock             *mroute_sk;
        struct timer_list       ipmr_expire_timer;
        struct list_head        mfc_unres_queue;
        struct list_head        mfc_cache_array[MFC_LINES];
        struct vif_device       vif_table[MAXVIFS];
        int                     maxvif;
        atomic_t                cache_resolve_queue_len;
        int                     mroute_do_assert;
        int                     mroute_do_pim;
#if defined(CONFIG_IP_PIMSM_V1) || defined(CONFIG_IP_PIMSM_V2)
        int                     mroute_reg_vif_num;
#endif
};

struct ipmr_rule {
        struct fib_rule         common;
};

struct ipmr_result {
        struct mr_table         *mrt;
};

/* Big lock, protecting vif table, mrt cache and mroute socket state.
   Note that the changes are serialized via rtnl_lock.
 */

static DEFINE_RWLOCK(mrt_lock);

/*
 *      Multicast router control variables
 */

#define VIF_EXISTS(_mrt, _idx) ((_mrt)->vif_table[_idx].dev != NULL)

/* Special spinlock for queue of unresolved entries */
static DEFINE_SPINLOCK(mfc_unres_lock);

/* We revert to Alan's original scheme. The hash table of resolved
   entries is changed only in process context and protected
   with the weak lock mrt_lock. The queue of unresolved entries is
   protected with the strong spinlock mfc_unres_lock.

   This way the data path is entirely free of exclusive locks.
 */
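
/* In practice, as the code below shows: the forwarding path only ever takes
 * read_lock(&mrt_lock); vif and MFC updates take write_lock_bh(&mrt_lock);
 * the unresolved queue is always manipulated under spin_lock_bh(&mfc_unres_lock),
 * and ipmr_expire_process() uses spin_trylock(), re-arming its timer instead
 * of spinning when the lock is contended.
 */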

static struct kmem_cache *mrt_cachep __read_mostly;

static struct mr_table *ipmr_new_table(struct net *net, u32 id);
static int ip_mr_forward(struct net *net, struct mr_table *mrt,
                         struct sk_buff *skb, struct mfc_cache *cache,
                         int local);
static int ipmr_cache_report(struct mr_table *mrt,
                             struct sk_buff *pkt, vifi_t vifi, int assert);
static int ipmr_fill_mroute(struct mr_table *mrt, struct sk_buff *skb,
                            struct mfc_cache *c, struct rtmsg *rtm);
static void ipmr_expire_process(unsigned long arg);

#ifdef CONFIG_IP_MROUTE_MULTIPLE_TABLES
#define ipmr_for_each_table(mrt, net) \
        list_for_each_entry_rcu(mrt, &net->ipv4.mr_tables, list)

static struct mr_table *ipmr_get_table(struct net *net, u32 id)
{
        struct mr_table *mrt;

        ipmr_for_each_table(mrt, net) {
                if (mrt->id == id)
                        return mrt;
        }
        return NULL;
}

static int ipmr_fib_lookup(struct net *net, struct flowi *flp,
                           struct mr_table **mrt)
{
        struct ipmr_result res;
        struct fib_lookup_arg arg = { .result = &res, };
        int err;

        err = fib_rules_lookup(net->ipv4.mr_rules_ops, flp, 0, &arg);
        if (err < 0)
                return err;
        *mrt = res.mrt;
        return 0;
}

static int ipmr_rule_action(struct fib_rule *rule, struct flowi *flp,
                            int flags, struct fib_lookup_arg *arg)
{
        struct ipmr_result *res = arg->result;
        struct mr_table *mrt;

        switch (rule->action) {
        case FR_ACT_TO_TBL:
                break;
        case FR_ACT_UNREACHABLE:
                return -ENETUNREACH;
        case FR_ACT_PROHIBIT:
                return -EACCES;
        case FR_ACT_BLACKHOLE:
        default:
                return -EINVAL;
        }

        mrt = ipmr_get_table(rule->fr_net, rule->table);
        if (mrt == NULL)
                return -EAGAIN;
        res->mrt = mrt;
        return 0;
}

static int ipmr_rule_match(struct fib_rule *rule, struct flowi *fl, int flags)
{
        return 1;
}

static const struct nla_policy ipmr_rule_policy[FRA_MAX + 1] = {
        FRA_GENERIC_POLICY,
};

static int ipmr_rule_configure(struct fib_rule *rule, struct sk_buff *skb,
                               struct fib_rule_hdr *frh, struct nlattr **tb)
{
        return 0;
}

static int ipmr_rule_compare(struct fib_rule *rule, struct fib_rule_hdr *frh,
                             struct nlattr **tb)
{
        return 1;
}

static int ipmr_rule_fill(struct fib_rule *rule, struct sk_buff *skb,
                          struct fib_rule_hdr *frh)
{
        frh->dst_len = 0;
        frh->src_len = 0;
        frh->tos     = 0;
        return 0;
}

static struct fib_rules_ops ipmr_rules_ops_template = {
        .family         = FIB_RULES_IPMR,
        .rule_size      = sizeof(struct ipmr_rule),
        .addr_size      = sizeof(u32),
        .action         = ipmr_rule_action,
        .match          = ipmr_rule_match,
        .configure      = ipmr_rule_configure,
        .compare        = ipmr_rule_compare,
        .default_pref   = fib_default_rule_pref,
        .fill           = ipmr_rule_fill,
        .nlgroup        = RTNLGRP_IPV4_RULE,
        .policy         = ipmr_rule_policy,
        .owner          = THIS_MODULE,
};

static int __net_init ipmr_rules_init(struct net *net)
{
        struct fib_rules_ops *ops;
        struct mr_table *mrt;
        int err;

        ops = fib_rules_register(&ipmr_rules_ops_template, net);
        if (IS_ERR(ops))
                return PTR_ERR(ops);

        INIT_LIST_HEAD(&net->ipv4.mr_tables);

        mrt = ipmr_new_table(net, RT_TABLE_DEFAULT);
        if (mrt == NULL) {
                err = -ENOMEM;
                goto err1;
        }

        err = fib_default_rule_add(ops, 0x7fff, RT_TABLE_DEFAULT, 0);
        if (err < 0)
                goto err2;

        net->ipv4.mr_rules_ops = ops;
        return 0;

err2:
        kfree(mrt);
err1:
        fib_rules_unregister(ops);
        return err;
}

static void __net_exit ipmr_rules_exit(struct net *net)
{
        struct mr_table *mrt, *next;

        list_for_each_entry_safe(mrt, next, &net->ipv4.mr_tables, list)
                kfree(mrt);
        fib_rules_unregister(net->ipv4.mr_rules_ops);
}
#else
#define ipmr_for_each_table(mrt, net) \
        for (mrt = net->ipv4.mrt; mrt; mrt = NULL)

static struct mr_table *ipmr_get_table(struct net *net, u32 id)
{
        return net->ipv4.mrt;
}

static int ipmr_fib_lookup(struct net *net, struct flowi *flp,
                           struct mr_table **mrt)
{
        *mrt = net->ipv4.mrt;
        return 0;
}

static int __net_init ipmr_rules_init(struct net *net)
{
        net->ipv4.mrt = ipmr_new_table(net, RT_TABLE_DEFAULT);
        return net->ipv4.mrt ? 0 : -ENOMEM;
}

static void __net_exit ipmr_rules_exit(struct net *net)
{
        kfree(net->ipv4.mrt);
}
#endif

static struct mr_table *ipmr_new_table(struct net *net, u32 id)
{
        struct mr_table *mrt;
        unsigned int i;

        mrt = ipmr_get_table(net, id);
        if (mrt != NULL)
                return mrt;

        mrt = kzalloc(sizeof(*mrt), GFP_KERNEL);
        if (mrt == NULL)
                return NULL;
        mrt->net = net;
        mrt->id = id;

        /* Forwarding cache */
        for (i = 0; i < MFC_LINES; i++)
                INIT_LIST_HEAD(&mrt->mfc_cache_array[i]);

        INIT_LIST_HEAD(&mrt->mfc_unres_queue);

        setup_timer(&mrt->ipmr_expire_timer, ipmr_expire_process,
                    (unsigned long)mrt);

#ifdef CONFIG_IP_PIMSM
        mrt->mroute_reg_vif_num = -1;
#endif
#ifdef CONFIG_IP_MROUTE_MULTIPLE_TABLES
        list_add_tail_rcu(&mrt->list, &net->ipv4.mr_tables);
#endif
        return mrt;
}

/* Service routines creating virtual interfaces: DVMRP tunnels and PIMREG */

static void ipmr_del_tunnel(struct net_device *dev, struct vifctl *v)
{
        struct net *net = dev_net(dev);

        dev_close(dev);

        dev = __dev_get_by_name(net, "tunl0");
        if (dev) {
                const struct net_device_ops *ops = dev->netdev_ops;
                struct ifreq ifr;
                struct ip_tunnel_parm p;

                memset(&p, 0, sizeof(p));
                p.iph.daddr = v->vifc_rmt_addr.s_addr;
                p.iph.saddr = v->vifc_lcl_addr.s_addr;
                p.iph.version = 4;
                p.iph.ihl = 5;
                p.iph.protocol = IPPROTO_IPIP;
                sprintf(p.name, "dvmrp%d", v->vifc_vifi);
                ifr.ifr_ifru.ifru_data = (__force void __user *)&p;

                if (ops->ndo_do_ioctl) {
                        mm_segment_t oldfs = get_fs();

                        set_fs(KERNEL_DS);
                        ops->ndo_do_ioctl(dev, &ifr, SIOCDELTUNNEL);
                        set_fs(oldfs);
                }
        }
}

static struct net_device *ipmr_new_tunnel(struct net *net, struct vifctl *v)
{
        struct net_device  *dev;

        dev = __dev_get_by_name(net, "tunl0");

        if (dev) {
                const struct net_device_ops *ops = dev->netdev_ops;
                int err;
                struct ifreq ifr;
                struct ip_tunnel_parm p;
                struct in_device  *in_dev;

                memset(&p, 0, sizeof(p));
                p.iph.daddr = v->vifc_rmt_addr.s_addr;
                p.iph.saddr = v->vifc_lcl_addr.s_addr;
                p.iph.version = 4;
                p.iph.ihl = 5;
                p.iph.protocol = IPPROTO_IPIP;
                sprintf(p.name, "dvmrp%d", v->vifc_vifi);
                ifr.ifr_ifru.ifru_data = (__force void __user *)&p;

                if (ops->ndo_do_ioctl) {
                        mm_segment_t oldfs = get_fs();

                        set_fs(KERNEL_DS);
                        err = ops->ndo_do_ioctl(dev, &ifr, SIOCADDTUNNEL);
                        set_fs(oldfs);
                } else
                        err = -EOPNOTSUPP;

                dev = NULL;

                if (err == 0 &&
                    (dev = __dev_get_by_name(net, p.name)) != NULL) {
                        dev->flags |= IFF_MULTICAST;

                        in_dev = __in_dev_get_rtnl(dev);
                        if (in_dev == NULL)
                                goto failure;

                        ipv4_devconf_setall(in_dev);
                        IPV4_DEVCONF(in_dev->cnf, RP_FILTER) = 0;

                        if (dev_open(dev))
                                goto failure;
                        dev_hold(dev);
                }
        }
        return dev;

failure:
        /* allow the register to be completed before unregistering. */
        rtnl_unlock();
        rtnl_lock();

        unregister_netdevice(dev);
        return NULL;
}

#ifdef CONFIG_IP_PIMSM

static netdev_tx_t reg_vif_xmit(struct sk_buff *skb, struct net_device *dev)
{
        struct net *net = dev_net(dev);
        struct mr_table *mrt;
        struct flowi fl = {
                .oif            = dev->ifindex,
                .iif            = skb->skb_iif,
                .mark           = skb->mark,
        };
        int err;

        err = ipmr_fib_lookup(net, &fl, &mrt);
        if (err < 0)
                return err;

        read_lock(&mrt_lock);
        dev->stats.tx_bytes += skb->len;
        dev->stats.tx_packets++;
        ipmr_cache_report(mrt, skb, mrt->mroute_reg_vif_num, IGMPMSG_WHOLEPKT);
        read_unlock(&mrt_lock);
        kfree_skb(skb);
        return NETDEV_TX_OK;
}

static const struct net_device_ops reg_vif_netdev_ops = {
        .ndo_start_xmit = reg_vif_xmit,
};

static void reg_vif_setup(struct net_device *dev)
{
        dev->type               = ARPHRD_PIMREG;
        dev->mtu                = ETH_DATA_LEN - sizeof(struct iphdr) - 8;
        dev->flags              = IFF_NOARP;
        dev->netdev_ops         = &reg_vif_netdev_ops;
        dev->destructor         = free_netdev;
        dev->features           |= NETIF_F_NETNS_LOCAL;
}

static struct net_device *ipmr_reg_vif(struct net *net, struct mr_table *mrt)
{
        struct net_device *dev;
        struct in_device *in_dev;
        char name[IFNAMSIZ];

        if (mrt->id == RT_TABLE_DEFAULT)
                sprintf(name, "pimreg");
        else
                sprintf(name, "pimreg%u", mrt->id);

        dev = alloc_netdev(0, name, reg_vif_setup);

        if (dev == NULL)
                return NULL;

        dev_net_set(dev, net);

        if (register_netdevice(dev)) {
                free_netdev(dev);
                return NULL;
        }
        dev->iflink = 0;

        rcu_read_lock();
        if ((in_dev = __in_dev_get_rcu(dev)) == NULL) {
                rcu_read_unlock();
                goto failure;
        }

        ipv4_devconf_setall(in_dev);
        IPV4_DEVCONF(in_dev->cnf, RP_FILTER) = 0;
        rcu_read_unlock();

        if (dev_open(dev))
                goto failure;

        dev_hold(dev);

        return dev;

failure:
        /* allow the register to be completed before unregistering. */
        rtnl_unlock();
        rtnl_lock();

        unregister_netdevice(dev);
        return NULL;
}
#endif

/*
 *      Delete a VIF entry
 *      @notify: Set to 1 if the caller is a notifier_call
 */

static int vif_delete(struct mr_table *mrt, int vifi, int notify,
                      struct list_head *head)
{
        struct vif_device *v;
        struct net_device *dev;
        struct in_device *in_dev;

        if (vifi < 0 || vifi >= mrt->maxvif)
                return -EADDRNOTAVAIL;

        v = &mrt->vif_table[vifi];

        write_lock_bh(&mrt_lock);
        dev = v->dev;
        v->dev = NULL;

        if (!dev) {
                write_unlock_bh(&mrt_lock);
                return -EADDRNOTAVAIL;
        }

#ifdef CONFIG_IP_PIMSM
        if (vifi == mrt->mroute_reg_vif_num)
                mrt->mroute_reg_vif_num = -1;
#endif

        if (vifi+1 == mrt->maxvif) {
                int tmp;
                for (tmp=vifi-1; tmp>=0; tmp--) {
                        if (VIF_EXISTS(mrt, tmp))
                                break;
                }
                mrt->maxvif = tmp+1;
        }

        write_unlock_bh(&mrt_lock);

        dev_set_allmulti(dev, -1);

        if ((in_dev = __in_dev_get_rtnl(dev)) != NULL) {
                IPV4_DEVCONF(in_dev->cnf, MC_FORWARDING)--;
                ip_rt_multicast_event(in_dev);
        }

        if (v->flags&(VIFF_TUNNEL|VIFF_REGISTER) && !notify)
                unregister_netdevice_queue(dev, head);

        dev_put(dev);
        return 0;
}

static inline void ipmr_cache_free(struct mfc_cache *c)
{
        kmem_cache_free(mrt_cachep, c);
}

/* Destroy an unresolved cache entry, killing queued skbs
   and reporting error to netlink readers.
 */

static void ipmr_destroy_unres(struct mr_table *mrt, struct mfc_cache *c)
{
        struct net *net = mrt->net;
        struct sk_buff *skb;
        struct nlmsgerr *e;

        atomic_dec(&mrt->cache_resolve_queue_len);

        while ((skb = skb_dequeue(&c->mfc_un.unres.unresolved))) {
                if (ip_hdr(skb)->version == 0) {
                        struct nlmsghdr *nlh = (struct nlmsghdr *)skb_pull(skb, sizeof(struct iphdr));
                        nlh->nlmsg_type = NLMSG_ERROR;
                        nlh->nlmsg_len = NLMSG_LENGTH(sizeof(struct nlmsgerr));
                        skb_trim(skb, nlh->nlmsg_len);
                        e = NLMSG_DATA(nlh);
                        e->error = -ETIMEDOUT;
                        memset(&e->msg, 0, sizeof(e->msg));

                        rtnl_unicast(skb, net, NETLINK_CB(skb).pid);
                } else
                        kfree_skb(skb);
        }

        ipmr_cache_free(c);
}


/* Timer process for the unresolved queue. */

static void ipmr_expire_process(unsigned long arg)
{
        struct mr_table *mrt = (struct mr_table *)arg;
        unsigned long now;
        unsigned long expires;
        struct mfc_cache *c, *next;

        if (!spin_trylock(&mfc_unres_lock)) {
                mod_timer(&mrt->ipmr_expire_timer, jiffies+HZ/10);
                return;
        }

        if (list_empty(&mrt->mfc_unres_queue))
                goto out;

        now = jiffies;
        expires = 10*HZ;

        list_for_each_entry_safe(c, next, &mrt->mfc_unres_queue, list) {
                if (time_after(c->mfc_un.unres.expires, now)) {
                        unsigned long interval = c->mfc_un.unres.expires - now;
                        if (interval < expires)
                                expires = interval;
                        continue;
                }

                list_del(&c->list);
                ipmr_destroy_unres(mrt, c);
        }

        if (!list_empty(&mrt->mfc_unres_queue))
                mod_timer(&mrt->ipmr_expire_timer, jiffies + expires);

out:
        spin_unlock(&mfc_unres_lock);
}

/* Fill oifs list. Called with mrt_lock held for writing. */

static void ipmr_update_thresholds(struct mr_table *mrt, struct mfc_cache *cache,
                                   unsigned char *ttls)
{
        int vifi;

        cache->mfc_un.res.minvif = MAXVIFS;
        cache->mfc_un.res.maxvif = 0;
        memset(cache->mfc_un.res.ttls, 255, MAXVIFS);

        for (vifi = 0; vifi < mrt->maxvif; vifi++) {
                if (VIF_EXISTS(mrt, vifi) &&
                    ttls[vifi] && ttls[vifi] < 255) {
                        cache->mfc_un.res.ttls[vifi] = ttls[vifi];
                        if (cache->mfc_un.res.minvif > vifi)
                                cache->mfc_un.res.minvif = vifi;
                        if (cache->mfc_un.res.maxvif <= vifi)
                                cache->mfc_un.res.maxvif = vifi + 1;
                }
        }
}
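
/* Resulting semantics: mfc_un.res.ttls[vifi] is a TTL threshold; 255 means
 * "never forward on this vif". ip_mr_forward() transmits on a vif only when
 * the packet's remaining TTL is strictly greater than the threshold, and
 * [minvif, maxvif) bounds its scan over candidate output vifs.
 */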

static int vif_add(struct net *net, struct mr_table *mrt,
                   struct vifctl *vifc, int mrtsock)
{
        int vifi = vifc->vifc_vifi;
        struct vif_device *v = &mrt->vif_table[vifi];
        struct net_device *dev;
        struct in_device *in_dev;
        int err;

        /* Is vif busy ? */
        if (VIF_EXISTS(mrt, vifi))
                return -EADDRINUSE;

        switch (vifc->vifc_flags) {
#ifdef CONFIG_IP_PIMSM
        case VIFF_REGISTER:
                /*
                 * Special Purpose VIF in PIM
                 * All the packets will be sent to the daemon
                 */
                if (mrt->mroute_reg_vif_num >= 0)
                        return -EADDRINUSE;
                dev = ipmr_reg_vif(net, mrt);
                if (!dev)
                        return -ENOBUFS;
                err = dev_set_allmulti(dev, 1);
                if (err) {
                        unregister_netdevice(dev);
                        dev_put(dev);
                        return err;
                }
                break;
#endif
        case VIFF_TUNNEL:
                dev = ipmr_new_tunnel(net, vifc);
                if (!dev)
                        return -ENOBUFS;
                err = dev_set_allmulti(dev, 1);
                if (err) {
                        ipmr_del_tunnel(dev, vifc);
                        dev_put(dev);
                        return err;
                }
                break;

        case VIFF_USE_IFINDEX:
        case 0:
                if (vifc->vifc_flags == VIFF_USE_IFINDEX) {
                        dev = dev_get_by_index(net, vifc->vifc_lcl_ifindex);
                        if (dev && dev->ip_ptr == NULL) {
                                dev_put(dev);
                                return -EADDRNOTAVAIL;
                        }
                } else
                        dev = ip_dev_find(net, vifc->vifc_lcl_addr.s_addr);

                if (!dev)
                        return -EADDRNOTAVAIL;
                err = dev_set_allmulti(dev, 1);
                if (err) {
                        dev_put(dev);
                        return err;
                }
                break;
        default:
                return -EINVAL;
        }

        if ((in_dev = __in_dev_get_rtnl(dev)) == NULL) {
                dev_put(dev);
                return -EADDRNOTAVAIL;
        }
        IPV4_DEVCONF(in_dev->cnf, MC_FORWARDING)++;
        ip_rt_multicast_event(in_dev);

        /*
         *      Fill in the VIF structures
         */
        v->rate_limit = vifc->vifc_rate_limit;
        v->local = vifc->vifc_lcl_addr.s_addr;
        v->remote = vifc->vifc_rmt_addr.s_addr;
        v->flags = vifc->vifc_flags;
        if (!mrtsock)
                v->flags |= VIFF_STATIC;
        v->threshold = vifc->vifc_threshold;
        v->bytes_in = 0;
        v->bytes_out = 0;
        v->pkt_in = 0;
        v->pkt_out = 0;
        v->link = dev->ifindex;
        if (v->flags&(VIFF_TUNNEL|VIFF_REGISTER))
                v->link = dev->iflink;

        /* And finish update writing critical data */
        write_lock_bh(&mrt_lock);
        v->dev = dev;
#ifdef CONFIG_IP_PIMSM
        if (v->flags&VIFF_REGISTER)
                mrt->mroute_reg_vif_num = vifi;
#endif
        if (vifi+1 > mrt->maxvif)
                mrt->maxvif = vifi+1;
        write_unlock_bh(&mrt_lock);
        return 0;
}

static struct mfc_cache *ipmr_cache_find(struct mr_table *mrt,
                                         __be32 origin,
                                         __be32 mcastgrp)
{
        int line = MFC_HASH(mcastgrp, origin);
        struct mfc_cache *c;

        list_for_each_entry(c, &mrt->mfc_cache_array[line], list) {
                if (c->mfc_origin == origin && c->mfc_mcastgrp == mcastgrp)
                        return c;
        }
        return NULL;
}

/*
 *      Allocate a multicast cache entry
 */
static struct mfc_cache *ipmr_cache_alloc(void)
{
        struct mfc_cache *c = kmem_cache_zalloc(mrt_cachep, GFP_KERNEL);
        if (c == NULL)
                return NULL;
        c->mfc_un.res.minvif = MAXVIFS;
        return c;
}

static struct mfc_cache *ipmr_cache_alloc_unres(void)
{
        struct mfc_cache *c = kmem_cache_zalloc(mrt_cachep, GFP_ATOMIC);
        if (c == NULL)
                return NULL;
        skb_queue_head_init(&c->mfc_un.unres.unresolved);
        c->mfc_un.unres.expires = jiffies + 10*HZ;
        return c;
}

/*
 *      A cache entry has gone from queued (unresolved) to resolved
 */

static void ipmr_cache_resolve(struct net *net, struct mr_table *mrt,
                               struct mfc_cache *uc, struct mfc_cache *c)
{
        struct sk_buff *skb;
        struct nlmsgerr *e;

        /*
         *      Play the pending entries through our router
         */

        while ((skb = __skb_dequeue(&uc->mfc_un.unres.unresolved))) {
                if (ip_hdr(skb)->version == 0) {
                        struct nlmsghdr *nlh = (struct nlmsghdr *)skb_pull(skb, sizeof(struct iphdr));

                        if (ipmr_fill_mroute(mrt, skb, c, NLMSG_DATA(nlh)) > 0) {
                                nlh->nlmsg_len = (skb_tail_pointer(skb) -
                                                  (u8 *)nlh);
                        } else {
                                nlh->nlmsg_type = NLMSG_ERROR;
                                nlh->nlmsg_len = NLMSG_LENGTH(sizeof(struct nlmsgerr));
                                skb_trim(skb, nlh->nlmsg_len);
                                e = NLMSG_DATA(nlh);
                                e->error = -EMSGSIZE;
                                memset(&e->msg, 0, sizeof(e->msg));
                        }

                        rtnl_unicast(skb, net, NETLINK_CB(skb).pid);
                } else
                        ip_mr_forward(net, mrt, skb, c, 0);
        }
}

/*
 *      Bounce a cache query up to mrouted. We could use netlink for this but mrouted
 *      expects the following bizarre scheme.
 *
 *      Called under mrt_lock.
 */
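/*
 *      For reference, a sketch of the upcall layout (assuming the classic
 *      struct igmpmsg from <linux/mroute.h>, which deliberately aliases
 *      struct iphdr):
 *
 *      bytes  0- 7: unused1, unused2  -- first 8 bytes of the IP header
 *      byte      8: im_msgtype        -- overlays iph->ttl
 *      byte      9: im_mbz            -- overlays iph->protocol, must be zero
 *      byte     10: im_vif            -- arrival (or register) vif index
 *      bytes 12-19: im_src, im_dst    -- overlay iph->saddr and iph->daddr
 *
 *      mrouted reads these fields out of what otherwise looks like an IGMP
 *      packet on its raw socket; im_mbz == 0 is how it tells upcalls apart
 *      from real IGMP traffic.
 */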

static int ipmr_cache_report(struct mr_table *mrt,
                             struct sk_buff *pkt, vifi_t vifi, int assert)
{
        struct sk_buff *skb;
        const int ihl = ip_hdrlen(pkt);
        struct igmphdr *igmp;
        struct igmpmsg *msg;
        int ret;

#ifdef CONFIG_IP_PIMSM
        if (assert == IGMPMSG_WHOLEPKT)
                skb = skb_realloc_headroom(pkt, sizeof(struct iphdr));
        else
#endif
                skb = alloc_skb(128, GFP_ATOMIC);

        if (!skb)
                return -ENOBUFS;

#ifdef CONFIG_IP_PIMSM
        if (assert == IGMPMSG_WHOLEPKT) {
                /* Ugly, but we have no choice with this interface.
                   Duplicate old header, fix ihl, length etc.
                   And all this only to mangle msg->im_msgtype and
                   to set msg->im_mbz to "mbz" :-)
                 */
                skb_push(skb, sizeof(struct iphdr));
                skb_reset_network_header(skb);
                skb_reset_transport_header(skb);
                msg = (struct igmpmsg *)skb_network_header(skb);
                memcpy(msg, skb_network_header(pkt), sizeof(struct iphdr));
                msg->im_msgtype = IGMPMSG_WHOLEPKT;
                msg->im_mbz = 0;
                msg->im_vif = mrt->mroute_reg_vif_num;
                ip_hdr(skb)->ihl = sizeof(struct iphdr) >> 2;
                ip_hdr(skb)->tot_len = htons(ntohs(ip_hdr(pkt)->tot_len) +
                                             sizeof(struct iphdr));
        } else
#endif
        {

        /*
         *      Copy the IP header
         */

        skb->network_header = skb->tail;
        skb_put(skb, ihl);
        skb_copy_to_linear_data(skb, pkt->data, ihl);
        ip_hdr(skb)->protocol = 0;                      /* Flag to the kernel this is a route add */
        msg = (struct igmpmsg *)skb_network_header(skb);
        msg->im_vif = vifi;
        skb_dst_set(skb, dst_clone(skb_dst(pkt)));

        /*
         *      Add our header
         */

        igmp=(struct igmphdr *)skb_put(skb, sizeof(struct igmphdr));
        igmp->type      =
        msg->im_msgtype = assert;
        igmp->code      =       0;
        ip_hdr(skb)->tot_len = htons(skb->len);                 /* Fix the length */
        skb->transport_header = skb->network_header;
        }

        if (mrt->mroute_sk == NULL) {
                kfree_skb(skb);
                return -EINVAL;
        }

        /*
         *      Deliver to mrouted
         */
        ret = sock_queue_rcv_skb(mrt->mroute_sk, skb);
        if (ret < 0) {
                if (net_ratelimit())
                        printk(KERN_WARNING "mroute: pending queue full, dropping entries.\n");
                kfree_skb(skb);
        }

        return ret;
}

/*
 *      Queue a packet for resolution, creating a new (locked) unresolved
 *      cache entry if one does not already exist.
 */

static int
ipmr_cache_unresolved(struct mr_table *mrt, vifi_t vifi, struct sk_buff *skb)
{
        bool found = false;
        int err;
        struct mfc_cache *c;
        const struct iphdr *iph = ip_hdr(skb);

        spin_lock_bh(&mfc_unres_lock);
        list_for_each_entry(c, &mrt->mfc_unres_queue, list) {
                if (c->mfc_mcastgrp == iph->daddr &&
                    c->mfc_origin == iph->saddr) {
                        found = true;
                        break;
                }
        }

        if (!found) {
                /*
                 *      Create a new entry if allowable
                 */

                if (atomic_read(&mrt->cache_resolve_queue_len) >= 10 ||
                    (c = ipmr_cache_alloc_unres()) == NULL) {
                        spin_unlock_bh(&mfc_unres_lock);

                        kfree_skb(skb);
                        return -ENOBUFS;
                }

                /*
                 *      Fill in the new cache entry
                 */
                c->mfc_parent   = -1;
                c->mfc_origin   = iph->saddr;
                c->mfc_mcastgrp = iph->daddr;

                /*
                 *      Reflect first query at mrouted.
                 */
                err = ipmr_cache_report(mrt, skb, vifi, IGMPMSG_NOCACHE);
                if (err < 0) {
                        /* If the report failed throw the cache entry
                           out - Brad Parker
                         */
                        spin_unlock_bh(&mfc_unres_lock);

                        ipmr_cache_free(c);
                        kfree_skb(skb);
                        return err;
                }

                atomic_inc(&mrt->cache_resolve_queue_len);
                list_add(&c->list, &mrt->mfc_unres_queue);

                mod_timer(&mrt->ipmr_expire_timer, c->mfc_un.unres.expires);
        }

        /*
         *      See if we can append the packet
         */
        if (c->mfc_un.unres.unresolved.qlen>3) {
                kfree_skb(skb);
                err = -ENOBUFS;
        } else {
                skb_queue_tail(&c->mfc_un.unres.unresolved, skb);
                err = 0;
        }

        spin_unlock_bh(&mfc_unres_lock);
        return err;
}

/*
 *      MFC cache manipulation by user space mroute daemon
 */

static int ipmr_mfc_delete(struct mr_table *mrt, struct mfcctl *mfc)
{
        int line;
        struct mfc_cache *c, *next;

        line = MFC_HASH(mfc->mfcc_mcastgrp.s_addr, mfc->mfcc_origin.s_addr);

        list_for_each_entry_safe(c, next, &mrt->mfc_cache_array[line], list) {
                if (c->mfc_origin == mfc->mfcc_origin.s_addr &&
                    c->mfc_mcastgrp == mfc->mfcc_mcastgrp.s_addr) {
                        write_lock_bh(&mrt_lock);
                        list_del(&c->list);
                        write_unlock_bh(&mrt_lock);

                        ipmr_cache_free(c);
                        return 0;
                }
        }
        return -ENOENT;
}

static int ipmr_mfc_add(struct net *net, struct mr_table *mrt,
                        struct mfcctl *mfc, int mrtsock)
{
        bool found = false;
        int line;
        struct mfc_cache *uc, *c;

        if (mfc->mfcc_parent >= MAXVIFS)
                return -ENFILE;

        line = MFC_HASH(mfc->mfcc_mcastgrp.s_addr, mfc->mfcc_origin.s_addr);

        list_for_each_entry(c, &mrt->mfc_cache_array[line], list) {
                if (c->mfc_origin == mfc->mfcc_origin.s_addr &&
                    c->mfc_mcastgrp == mfc->mfcc_mcastgrp.s_addr) {
                        found = true;
                        break;
                }
        }

        if (found) {
                write_lock_bh(&mrt_lock);
                c->mfc_parent = mfc->mfcc_parent;
                ipmr_update_thresholds(mrt, c, mfc->mfcc_ttls);
                if (!mrtsock)
                        c->mfc_flags |= MFC_STATIC;
                write_unlock_bh(&mrt_lock);
                return 0;
        }

        if (!ipv4_is_multicast(mfc->mfcc_mcastgrp.s_addr))
                return -EINVAL;

        c = ipmr_cache_alloc();
        if (c == NULL)
                return -ENOMEM;

        c->mfc_origin = mfc->mfcc_origin.s_addr;
        c->mfc_mcastgrp = mfc->mfcc_mcastgrp.s_addr;
        c->mfc_parent = mfc->mfcc_parent;
        ipmr_update_thresholds(mrt, c, mfc->mfcc_ttls);
        if (!mrtsock)
                c->mfc_flags |= MFC_STATIC;

        write_lock_bh(&mrt_lock);
        list_add(&c->list, &mrt->mfc_cache_array[line]);
        write_unlock_bh(&mrt_lock);

        /*
         *      Check to see if we resolved a queued entry. If so we
         *      need to send on the frames and tidy up.
         */
        found = false;
        spin_lock_bh(&mfc_unres_lock);
        list_for_each_entry(uc, &mrt->mfc_unres_queue, list) {
                if (uc->mfc_origin == c->mfc_origin &&
                    uc->mfc_mcastgrp == c->mfc_mcastgrp) {
                        list_del(&uc->list);
                        atomic_dec(&mrt->cache_resolve_queue_len);
                        found = true;
                        break;
                }
        }
        if (list_empty(&mrt->mfc_unres_queue))
                del_timer(&mrt->ipmr_expire_timer);
        spin_unlock_bh(&mfc_unres_lock);

        if (found) {
                ipmr_cache_resolve(net, mrt, uc, c);
                ipmr_cache_free(uc);
        }
        return 0;
}

/*
 *      Close the multicast socket, and clear the vif tables etc
 */

static void mroute_clean_tables(struct mr_table *mrt)
{
        int i;
        LIST_HEAD(list);
        struct mfc_cache *c, *next;

        /*
         *      Shut down all active vif entries
         */
        for (i = 0; i < mrt->maxvif; i++) {
                if (!(mrt->vif_table[i].flags&VIFF_STATIC))
                        vif_delete(mrt, i, 0, &list);
        }
        unregister_netdevice_many(&list);

        /*
         *      Wipe the cache
         */
        for (i = 0; i < MFC_LINES; i++) {
                list_for_each_entry_safe(c, next, &mrt->mfc_cache_array[i], list) {
                        if (c->mfc_flags&MFC_STATIC)
                                continue;
                        write_lock_bh(&mrt_lock);
                        list_del(&c->list);
                        write_unlock_bh(&mrt_lock);

                        ipmr_cache_free(c);
                }
        }

        if (atomic_read(&mrt->cache_resolve_queue_len) != 0) {
                spin_lock_bh(&mfc_unres_lock);
                list_for_each_entry_safe(c, next, &mrt->mfc_unres_queue, list) {
                        list_del(&c->list);
                        ipmr_destroy_unres(mrt, c);
                }
                spin_unlock_bh(&mfc_unres_lock);
        }
}

static void mrtsock_destruct(struct sock *sk)
{
        struct net *net = sock_net(sk);
        struct mr_table *mrt;

        rtnl_lock();
        ipmr_for_each_table(mrt, net) {
                if (sk == mrt->mroute_sk) {
                        IPV4_DEVCONF_ALL(net, MC_FORWARDING)--;

                        write_lock_bh(&mrt_lock);
                        mrt->mroute_sk = NULL;
                        write_unlock_bh(&mrt_lock);

                        mroute_clean_tables(mrt);
                }
        }
        rtnl_unlock();
}

/*
 *      Socket options and virtual interface manipulation. The whole
 *      virtual interface system is a complete heap, but unfortunately
 *      that's how BSD mrouted happens to think. Maybe one day with a proper
 *      MOSPF/PIM router set up we can clean this up.
 */

int ip_mroute_setsockopt(struct sock *sk, int optname, char __user *optval, unsigned int optlen)
{
        int ret;
        struct vifctl vif;
        struct mfcctl mfc;
        struct net *net = sock_net(sk);
        struct mr_table *mrt;

        mrt = ipmr_get_table(net, raw_sk(sk)->ipmr_table ? : RT_TABLE_DEFAULT);
        if (mrt == NULL)
                return -ENOENT;

        if (optname != MRT_INIT) {
                if (sk != mrt->mroute_sk && !capable(CAP_NET_ADMIN))
                        return -EACCES;
        }

        switch (optname) {
        case MRT_INIT:
                if (sk->sk_type != SOCK_RAW ||
                    inet_sk(sk)->inet_num != IPPROTO_IGMP)
                        return -EOPNOTSUPP;
                if (optlen != sizeof(int))
                        return -ENOPROTOOPT;

                rtnl_lock();
                if (mrt->mroute_sk) {
                        rtnl_unlock();
                        return -EADDRINUSE;
                }

                ret = ip_ra_control(sk, 1, mrtsock_destruct);
                if (ret == 0) {
                        write_lock_bh(&mrt_lock);
                        mrt->mroute_sk = sk;
                        write_unlock_bh(&mrt_lock);

                        IPV4_DEVCONF_ALL(net, MC_FORWARDING)++;
                }
                rtnl_unlock();
                return ret;
        case MRT_DONE:
                if (sk != mrt->mroute_sk)
                        return -EACCES;
                return ip_ra_control(sk, 0, NULL);
        case MRT_ADD_VIF:
        case MRT_DEL_VIF:
                if (optlen != sizeof(vif))
                        return -EINVAL;
                if (copy_from_user(&vif, optval, sizeof(vif)))
                        return -EFAULT;
                if (vif.vifc_vifi >= MAXVIFS)
                        return -ENFILE;
                rtnl_lock();
                if (optname == MRT_ADD_VIF) {
                        ret = vif_add(net, mrt, &vif, sk == mrt->mroute_sk);
                } else {
                        ret = vif_delete(mrt, vif.vifc_vifi, 0, NULL);
                }
                rtnl_unlock();
                return ret;

                /*
                 *      Manipulate the forwarding caches. These live
                 *      in a sort of kernel/user symbiosis.
                 */
        case MRT_ADD_MFC:
        case MRT_DEL_MFC:
                if (optlen != sizeof(mfc))
                        return -EINVAL;
                if (copy_from_user(&mfc, optval, sizeof(mfc)))
                        return -EFAULT;
                rtnl_lock();
                if (optname == MRT_DEL_MFC)
                        ret = ipmr_mfc_delete(mrt, &mfc);
                else
                        ret = ipmr_mfc_add(net, mrt, &mfc, sk == mrt->mroute_sk);
                rtnl_unlock();
                return ret;
                /*
                 *      Control PIM assert.
                 */
        case MRT_ASSERT:
        {
                int v;
                if (get_user(v,(int __user *)optval))
                        return -EFAULT;
                mrt->mroute_do_assert = (v) ? 1 : 0;
                return 0;
        }
#ifdef CONFIG_IP_PIMSM
        case MRT_PIM:
        {
                int v;

                if (get_user(v,(int __user *)optval))
                        return -EFAULT;
                v = (v) ? 1 : 0;

                rtnl_lock();
                ret = 0;
                if (v != mrt->mroute_do_pim) {
                        mrt->mroute_do_pim = v;
                        mrt->mroute_do_assert = v;
                }
                rtnl_unlock();
                return ret;
        }
#endif
#ifdef CONFIG_IP_MROUTE_MULTIPLE_TABLES
        case MRT_TABLE:
        {
                u32 v;

                if (optlen != sizeof(u32))
                        return -EINVAL;
                if (get_user(v, (u32 __user *)optval))
                        return -EFAULT;
                if (sk == mrt->mroute_sk)
                        return -EBUSY;

                rtnl_lock();
                ret = 0;
                if (!ipmr_new_table(net, v))
                        ret = -ENOMEM;
                raw_sk(sk)->ipmr_table = v;
                rtnl_unlock();
                return ret;
        }
#endif
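        /*
         *      A minimal userspace sketch (hypothetical daemon code, not part
         *      of this file): table selection must happen before MRT_INIT,
         *      since MRT_INIT binds the socket to the table recorded in
         *      raw_sk(sk)->ipmr_table.
         *
         *              int s = socket(AF_INET, SOCK_RAW, IPPROTO_IGMP);
         *              u_int32_t tbl = 42;     // any id; created on demand
         *              setsockopt(s, IPPROTO_IP, MRT_TABLE, &tbl, sizeof(tbl));
         *              int one = 1;
         *              setsockopt(s, IPPROTO_IP, MRT_INIT, &one, sizeof(one));
         */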
        /*
         *      Spurious command, or MRT_VERSION which you cannot
         *      set.
         */
        default:
                return -ENOPROTOOPT;
        }
}

/*
 *      Getsockopt support for the multicast routing system.
 */

int ip_mroute_getsockopt(struct sock *sk, int optname, char __user *optval, int __user *optlen)
{
        int olr;
        int val;
        struct net *net = sock_net(sk);
        struct mr_table *mrt;

        mrt = ipmr_get_table(net, raw_sk(sk)->ipmr_table ? : RT_TABLE_DEFAULT);
        if (mrt == NULL)
                return -ENOENT;

        if (optname != MRT_VERSION &&
#ifdef CONFIG_IP_PIMSM
           optname!=MRT_PIM &&
#endif
           optname!=MRT_ASSERT)
                return -ENOPROTOOPT;

        if (get_user(olr, optlen))
                return -EFAULT;

        if (olr < 0)
                return -EINVAL;
        olr = min_t(unsigned int, olr, sizeof(int));

        if (put_user(olr, optlen))
                return -EFAULT;
        if (optname == MRT_VERSION)
                val = 0x0305;
#ifdef CONFIG_IP_PIMSM
        else if (optname == MRT_PIM)
                val = mrt->mroute_do_pim;
#endif
        else
                val = mrt->mroute_do_assert;
        if (copy_to_user(optval, &val, olr))
                return -EFAULT;
        return 0;
}

/*
 *      The IP multicast ioctl support routines.
 */

int ipmr_ioctl(struct sock *sk, int cmd, void __user *arg)
{
        struct sioc_sg_req sr;
        struct sioc_vif_req vr;
        struct vif_device *vif;
        struct mfc_cache *c;
        struct net *net = sock_net(sk);
        struct mr_table *mrt;

        mrt = ipmr_get_table(net, raw_sk(sk)->ipmr_table ? : RT_TABLE_DEFAULT);
        if (mrt == NULL)
                return -ENOENT;

        switch (cmd) {
        case SIOCGETVIFCNT:
                if (copy_from_user(&vr, arg, sizeof(vr)))
                        return -EFAULT;
                if (vr.vifi >= mrt->maxvif)
                        return -EINVAL;
                read_lock(&mrt_lock);
                vif = &mrt->vif_table[vr.vifi];
                if (VIF_EXISTS(mrt, vr.vifi)) {
                        vr.icount = vif->pkt_in;
                        vr.ocount = vif->pkt_out;
                        vr.ibytes = vif->bytes_in;
                        vr.obytes = vif->bytes_out;
                        read_unlock(&mrt_lock);

                        if (copy_to_user(arg, &vr, sizeof(vr)))
                                return -EFAULT;
                        return 0;
                }
                read_unlock(&mrt_lock);
                return -EADDRNOTAVAIL;
        case SIOCGETSGCNT:
                if (copy_from_user(&sr, arg, sizeof(sr)))
                        return -EFAULT;

                read_lock(&mrt_lock);
                c = ipmr_cache_find(mrt, sr.src.s_addr, sr.grp.s_addr);
                if (c) {
                        sr.pktcnt = c->mfc_un.res.pkt;
                        sr.bytecnt = c->mfc_un.res.bytes;
                        sr.wrong_if = c->mfc_un.res.wrong_if;
                        read_unlock(&mrt_lock);

                        if (copy_to_user(arg, &sr, sizeof(sr)))
                                return -EFAULT;
                        return 0;
                }
                read_unlock(&mrt_lock);
                return -EADDRNOTAVAIL;
        default:
                return -ENOIOCTLCMD;
        }
}


static int ipmr_device_event(struct notifier_block *this, unsigned long event, void *ptr)
{
        struct net_device *dev = ptr;
        struct net *net = dev_net(dev);
        struct mr_table *mrt;
        struct vif_device *v;
        int ct;
        LIST_HEAD(list);

        if (event != NETDEV_UNREGISTER)
                return NOTIFY_DONE;

        ipmr_for_each_table(mrt, net) {
                v = &mrt->vif_table[0];
                for (ct = 0; ct < mrt->maxvif; ct++, v++) {
                        if (v->dev == dev)
                                vif_delete(mrt, ct, 1, &list);
                }
        }
        unregister_netdevice_many(&list);
        return NOTIFY_DONE;
}


static struct notifier_block ip_mr_notifier = {
        .notifier_call = ipmr_device_event,
};

/*
 *      Encapsulate a packet by attaching a valid IPIP header to it.
 *      This avoids tunnel drivers and other mess and gives us the speed so
 *      important for multicast video.
 */

static void ip_encap(struct sk_buff *skb, __be32 saddr, __be32 daddr)
{
        struct iphdr *iph;
        struct iphdr *old_iph = ip_hdr(skb);

        skb_push(skb, sizeof(struct iphdr));
        skb->transport_header = skb->network_header;
        skb_reset_network_header(skb);
        iph = ip_hdr(skb);

        iph->version    =       4;
        iph->tos        =       old_iph->tos;
        iph->ttl        =       old_iph->ttl;
        iph->frag_off   =       0;
        iph->daddr      =       daddr;
        iph->saddr      =       saddr;
        iph->protocol   =       IPPROTO_IPIP;
        iph->ihl        =       5;
        iph->tot_len    =       htons(skb->len);
        ip_select_ident(iph, skb_dst(skb), NULL);
        ip_send_check(iph);

        memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
        nf_reset(skb);
}
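
/* After ip_encap() the frame is a plain IPIP packet:
 *
 *      [ outer iphdr: proto IPPROTO_IPIP, saddr/daddr = tunnel vif endpoints ]
 *      [ original iphdr ][ original payload ]
 *
 * TOS and TTL are copied from the inner header (which ipmr_queue_xmit() has
 * already decremented), so the tunnel hop does not extend multicast scope.
 */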

static inline int ipmr_forward_finish(struct sk_buff *skb)
{
        struct ip_options * opt = &(IPCB(skb)->opt);

        IP_INC_STATS_BH(dev_net(skb_dst(skb)->dev), IPSTATS_MIB_OUTFORWDATAGRAMS);

        if (unlikely(opt->optlen))
                ip_forward_options(skb);

        return dst_output(skb);
}

/*
 *      Processing handlers for ipmr_forward
 */

static void ipmr_queue_xmit(struct net *net, struct mr_table *mrt,
                            struct sk_buff *skb, struct mfc_cache *c, int vifi)
{
        const struct iphdr *iph = ip_hdr(skb);
        struct vif_device *vif = &mrt->vif_table[vifi];
        struct net_device *dev;
        struct rtable *rt;
        int    encap = 0;

        if (vif->dev == NULL)
                goto out_free;

#ifdef CONFIG_IP_PIMSM
        if (vif->flags & VIFF_REGISTER) {
                vif->pkt_out++;
                vif->bytes_out += skb->len;
                vif->dev->stats.tx_bytes += skb->len;
                vif->dev->stats.tx_packets++;
                ipmr_cache_report(mrt, skb, vifi, IGMPMSG_WHOLEPKT);
                goto out_free;
        }
#endif

        if (vif->flags&VIFF_TUNNEL) {
                struct flowi fl = { .oif = vif->link,
                                    .nl_u = { .ip4_u =
                                              { .daddr = vif->remote,
                                                .saddr = vif->local,
                                                .tos = RT_TOS(iph->tos) } },
                                    .proto = IPPROTO_IPIP };
                if (ip_route_output_key(net, &rt, &fl))
                        goto out_free;
                encap = sizeof(struct iphdr);
        } else {
                struct flowi fl = { .oif = vif->link,
                                    .nl_u = { .ip4_u =
                                              { .daddr = iph->daddr,
                                                .tos = RT_TOS(iph->tos) } },
                                    .proto = IPPROTO_IPIP };
                if (ip_route_output_key(net, &rt, &fl))
                        goto out_free;
        }

        dev = rt->u.dst.dev;

        if (skb->len+encap > dst_mtu(&rt->u.dst) && (ntohs(iph->frag_off) & IP_DF)) {
                /* Do not fragment multicasts. Alas, IPv4 does not
                   allow us to send ICMP here, so such packets simply
                   disappear into a black hole.
                 */

                IP_INC_STATS_BH(dev_net(dev), IPSTATS_MIB_FRAGFAILS);
                ip_rt_put(rt);
                goto out_free;
        }

        encap += LL_RESERVED_SPACE(dev) + rt->u.dst.header_len;

        if (skb_cow(skb, encap)) {
                ip_rt_put(rt);
                goto out_free;
        }

        vif->pkt_out++;
        vif->bytes_out += skb->len;

        skb_dst_drop(skb);
        skb_dst_set(skb, &rt->u.dst);
        ip_decrease_ttl(ip_hdr(skb));

        /* FIXME: forward and output firewalls used to be called here.
         * What do we do with netfilter? -- RR */
        if (vif->flags & VIFF_TUNNEL) {
                ip_encap(skb, vif->local, vif->remote);
                /* FIXME: extra output firewall step used to be here. --RR */
                vif->dev->stats.tx_packets++;
                vif->dev->stats.tx_bytes += skb->len;
        }

        IPCB(skb)->flags |= IPSKB_FORWARDED;

        /*
         * RFC 1584 teaches that a DVMRP/PIM router must deliver packets locally
         * not only before forwarding, but also after forwarding on all output
         * interfaces: if the mrouter runs a multicast application, that
         * application should receive packets regardless of which interface
         * it joined on.
         * If we did not do this, the application would have to join on all
         * interfaces. On the other hand, a multihomed host (or a router, but
         * not an mrouter) cannot join on more than one interface - that would
         * result in receiving multiple copies of each packet.
         */
        NF_HOOK(PF_INET, NF_INET_FORWARD, skb, skb->dev, dev,
                ipmr_forward_finish);
        return;

out_free:
        kfree_skb(skb);
        return;
}

static int ipmr_find_vif(struct mr_table *mrt, struct net_device *dev)
{
        int ct;

        for (ct = mrt->maxvif-1; ct >= 0; ct--) {
                if (mrt->vif_table[ct].dev == dev)
                        break;
        }
        return ct;
}

/* "local" means that we should preserve one skb (for local delivery) */

static int ip_mr_forward(struct net *net, struct mr_table *mrt,
                         struct sk_buff *skb, struct mfc_cache *cache,
                         int local)
{
        int psend = -1;
        int vif, ct;

        vif = cache->mfc_parent;
        cache->mfc_un.res.pkt++;
        cache->mfc_un.res.bytes += skb->len;

        /*
         * Wrong interface: drop packet and (maybe) send PIM assert.
         */
        if (mrt->vif_table[vif].dev != skb->dev) {
                int true_vifi;

                if (skb_rtable(skb)->fl.iif == 0) {
1638                         /* It is our own packet, looped back.
1639                            Very complicated situation...
1640
1641                            The best workaround until the routing daemons
1642                            are fixed is not to redistribute a packet sent
1643                            through the wrong interface. This means that
1644                            multicast applications WILL NOT work for (S,G)
1645                            entries whose default multicast route points to
1646                            the wrong oif. In any case, it is not a good
1647                            idea to run multicast applications on a router.
1648                          */
1649                         goto dont_forward;
1650                 }
1651
1652                 cache->mfc_un.res.wrong_if++;
1653                 true_vifi = ipmr_find_vif(mrt, skb->dev);
1654
1655                 if (true_vifi >= 0 && mrt->mroute_do_assert &&
1656                     /* PIM-SM uses asserts when switching from the RPT to
1657                        the SPT, so we cannot insist that the packet arrived
1658                        on an oif. That is bad, but otherwise we would need
1659                        to move a large chunk of pimd into the kernel. --ANK
1660                      */
1661                     (mrt->mroute_do_pim ||
1662                      cache->mfc_un.res.ttls[true_vifi] < 255) &&
1663                     time_after(jiffies,
1664                                cache->mfc_un.res.last_assert + MFC_ASSERT_THRESH)) {
1665                         cache->mfc_un.res.last_assert = jiffies;
1666                         ipmr_cache_report(mrt, skb, true_vifi, IGMPMSG_WRONGVIF);
1667                 }
1668                 goto dont_forward;
1669         }
1670
1671         mrt->vif_table[vif].pkt_in++;
1672         mrt->vif_table[vif].bytes_in += skb->len;
1673
1674         /*
1675          *      Forward the frame
1676          */
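        /*
         * A packet is sent on vif ct only if its TTL is strictly greater
         * than that vif's threshold (ttls[ct]; 255 means "never forward").
         * psend defers the transmit by one iteration: every matching vif
         * but the last receives a clone, so the final one can consume the
         * original skb when no local copy is needed.
         */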
1677         for (ct = cache->mfc_un.res.maxvif-1; ct >= cache->mfc_un.res.minvif; ct--) {
1678                 if (ip_hdr(skb)->ttl > cache->mfc_un.res.ttls[ct]) {
1679                         if (psend != -1) {
1680                                 struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
1681                                 if (skb2)
1682                                         ipmr_queue_xmit(net, mrt, skb2, cache,
1683                                                         psend);
1684                         }
1685                         psend = ct;
1686                 }
1687         }
1688         if (psend != -1) {
1689                 if (local) {
1690                         struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
1691                         if (skb2)
1692                                 ipmr_queue_xmit(net, mrt, skb2, cache, psend);
1693                 } else {
1694                         ipmr_queue_xmit(net, mrt, skb, cache, psend);
1695                         return 0;
1696                 }
1697         }
1698
1699 dont_forward:
1700         if (!local)
1701                 kfree_skb(skb);
1702         return 0;
1703 }
1704
1705
1706 /*
1707  *      Multicast packets for forwarding arrive here
1708  */
1709
1710 int ip_mr_input(struct sk_buff *skb)
1711 {
1712         struct mfc_cache *cache;
1713         struct net *net = dev_net(skb->dev);
1714         int local = skb_rtable(skb)->rt_flags & RTCF_LOCAL;
1715         struct mr_table *mrt;
1716         int err;
1717
1718         /* Packet is looped back after forwarding; it should not be
1719            forwarded a second time, but it can still be delivered locally.
1720          */
1721         if (IPCB(skb)->flags & IPSKB_FORWARDED)
1722                 goto dont_forward;
1723
1724         err = ipmr_fib_lookup(net, &skb_rtable(skb)->fl, &mrt);
1725         if (err < 0)
1726                 return err;
1727
1728         if (!local) {
1729                 if (IPCB(skb)->opt.router_alert) {
1730                         if (ip_call_ra_chain(skb))
1731                                 return 0;
1732                 } else if (ip_hdr(skb)->protocol == IPPROTO_IGMP) {
1733                         /* IGMPv1 (and broken IGMPv2 implementations such
1734                            as Cisco IOS <= 11.2(8)) do not put the router
1735                            alert option into IGMP packets destined to
1736                            routable groups. This is very bad, because it
1737                            means we could forward NO IGMP messages at all.
1738                          */
1739                         read_lock(&mrt_lock);
1740                         if (mrt->mroute_sk) {
1741                                 nf_reset(skb);
1742                                 raw_rcv(mrt->mroute_sk, skb);
1743                                 read_unlock(&mrt_lock);
1744                                 return 0;
1745                         }
1746                         read_unlock(&mrt_lock);
1747                 }
1748         }
1749
1750         read_lock(&mrt_lock);
1751         cache = ipmr_cache_find(mrt, ip_hdr(skb)->saddr, ip_hdr(skb)->daddr);
1752
1753         /*
1754          *      No usable cache entry
1755          */
1756         if (cache == NULL) {
1757                 int vif;
1758
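                /*
                 * ip_local_deliver() consumes its skb, so deliver the
                 * original and keep a clone to queue on the unresolved
                 * list, which triggers an upcall to the routing daemon.
                 */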
1759                 if (local) {
1760                         struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
1761                         ip_local_deliver(skb);
1762                         if (skb2 == NULL) {
1763                                 read_unlock(&mrt_lock);
1764                                 return -ENOBUFS;
1765                         }
1766                         skb = skb2;
1767                 }
1768
1769                 vif = ipmr_find_vif(mrt, skb->dev);
1770                 if (vif >= 0) {
1771                         int err = ipmr_cache_unresolved(mrt, vif, skb);
1772                         read_unlock(&mrt_lock);
1773
1774                         return err;
1775                 }
1776                 read_unlock(&mrt_lock);
1777                 kfree_skb(skb);
1778                 return -ENODEV;
1779         }
1780
1781         ip_mr_forward(net, mrt, skb, cache, local);
1782
1783         read_unlock(&mrt_lock);
1784
1785         if (local)
1786                 return ip_local_deliver(skb);
1787
1788         return 0;
1789
1790 dont_forward:
1791         if (local)
1792                 return ip_local_deliver(skb);
1793         kfree_skb(skb);
1794         return 0;
1795 }
1796
1797 #ifdef CONFIG_IP_PIMSM
1798 static int __pim_rcv(struct mr_table *mrt, struct sk_buff *skb,
1799                      unsigned int pimlen)
1800 {
1801         struct net_device *reg_dev = NULL;
1802         struct iphdr *encap;
1803
1804         encap = (struct iphdr *)(skb_transport_header(skb) + pimlen);
1805         /*
1806            Check that:
1807            a. packet is really destined to a multicast group
1808            b. packet is not a NULL-REGISTER
1809            c. packet is not truncated
1810          */
1811         if (!ipv4_is_multicast(encap->daddr) ||
1812             encap->tot_len == 0 ||
1813             ntohs(encap->tot_len) + pimlen > skb->len)
1814                 return 1;
1815
1816         read_lock(&mrt_lock);
1817         if (mrt->mroute_reg_vif_num >= 0)
1818                 reg_dev = mrt->vif_table[mrt->mroute_reg_vif_num].dev;
1819         if (reg_dev)
1820                 dev_hold(reg_dev);
1821         read_unlock(&mrt_lock);
1822
1823         if (reg_dev == NULL)
1824                 return 1;
1825
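        /*
         * Decapsulate: strip the outer IP and PIM register headers and
         * re-inject the inner multicast packet as if it had been received
         * on the register vif.
         */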
1826         skb->mac_header = skb->network_header;
1827         skb_pull(skb, (u8 *)encap - skb->data);
1828         skb_reset_network_header(skb);
1829         skb->dev = reg_dev;
1830         skb->protocol = htons(ETH_P_IP);
1831         skb->ip_summed = CHECKSUM_NONE;
1832         skb->pkt_type = PACKET_HOST;
1833         skb_dst_drop(skb);
1834         reg_dev->stats.rx_bytes += skb->len;
1835         reg_dev->stats.rx_packets++;
1836         nf_reset(skb);
1837         netif_rx(skb);
1838         dev_put(reg_dev);
1839
1840         return 0;
1841 }
1842 #endif
1843
1844 #ifdef CONFIG_IP_PIMSM_V1
1845 /*
1846  * Handle IGMP messages of PIMv1
1847  */
1848
1849 int pim_rcv_v1(struct sk_buff *skb)
1850 {
1851         struct igmphdr *pim;
1852         struct net *net = dev_net(skb->dev);
1853         struct mr_table *mrt;
1854
1855         if (!pskb_may_pull(skb, sizeof(*pim) + sizeof(struct iphdr)))
1856                 goto drop;
1857
1858         pim = igmp_hdr(skb);
1859
1860         if (ipmr_fib_lookup(net, &skb_rtable(skb)->fl, &mrt) < 0)
1861                 goto drop;
1862
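        /*
         * PIMv1 reuses the IGMP header layout: "group" carries the PIM
         * version constant and "code" the message type, so only REGISTER
         * messages of the right version are handed to the register vif.
         */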
1863         if (!mrt->mroute_do_pim ||
1864             pim->group != PIM_V1_VERSION || pim->code != PIM_V1_REGISTER)
1865                 goto drop;
1866
1867         if (__pim_rcv(mrt, skb, sizeof(*pim))) {
1868 drop:
1869                 kfree_skb(skb);
1870         }
1871         return 0;
1872 }
1873 #endif
1874
1875 #ifdef CONFIG_IP_PIMSM_V2
1876 static int pim_rcv(struct sk_buff *skb)
1877 {
1878         struct pimreghdr *pim;
1879         struct net *net = dev_net(skb->dev);
1880         struct mr_table *mrt;
1881
1882         if (!pskb_may_pull(skb, sizeof(*pim) + sizeof(struct iphdr)))
1883                 goto drop;
1884
1885         pim = (struct pimreghdr *)skb_transport_header(skb);
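        /*
         * Accept a register whose checksum covers only the PIM header (as
         * the PIMv2 spec requires) or, for older implementations, one whose
         * checksum covers the whole packet.
         */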
1886         if (pim->type != ((PIM_VERSION << 4) | PIM_REGISTER) ||
1887             (pim->flags & PIM_NULL_REGISTER) ||
1888             (ip_compute_csum((void *)pim, sizeof(*pim)) != 0 &&
1889              csum_fold(skb_checksum(skb, 0, skb->len, 0))))
1890                 goto drop;
1891
1892         if (ipmr_fib_lookup(net, &skb_rtable(skb)->fl, &mrt) < 0)
1893                 goto drop;
1894
1895         if (__pim_rcv(mrt, skb, sizeof(*pim))) {
1896 drop:
1897                 kfree_skb(skb);
1898         }
1899         return 0;
1900 }
1901 #endif
1902
1903 static int
1904 ipmr_fill_mroute(struct mr_table *mrt, struct sk_buff *skb, struct mfc_cache *c,
1905                  struct rtmsg *rtm)
1906 {
1907         int ct;
1908         struct rtnexthop *nhp;
1909         u8 *b = skb_tail_pointer(skb);
1910         struct rtattr *mp_head;
1911
1912         /* If cache is unresolved, don't try to parse IIF and OIF */
1913         if (c->mfc_parent >= MAXVIFS)
1914                 return -ENOENT;
1915
1916         if (VIF_EXISTS(mrt, c->mfc_parent))
1917                 RTA_PUT(skb, RTA_IIF, 4, &mrt->vif_table[c->mfc_parent].dev->ifindex);
1918
1919         mp_head = (struct rtattr *)skb_put(skb, RTA_LENGTH(0));
1920
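        /*
         * Encode each forwarding vif as an RTA_MULTIPATH nexthop, with the
         * vif's ifindex in rtnh_ifindex and its TTL threshold in rtnh_hops.
         */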
1921         for (ct = c->mfc_un.res.minvif; ct < c->mfc_un.res.maxvif; ct++) {
1922                 if (VIF_EXISTS(mrt, ct) && c->mfc_un.res.ttls[ct] < 255) {
1923                         if (skb_tailroom(skb) < RTA_ALIGN(RTA_ALIGN(sizeof(*nhp)) + 4))
1924                                 goto rtattr_failure;
1925                         nhp = (struct rtnexthop *)skb_put(skb, RTA_ALIGN(sizeof(*nhp)));
1926                         nhp->rtnh_flags = 0;
1927                         nhp->rtnh_hops = c->mfc_un.res.ttls[ct];
1928                         nhp->rtnh_ifindex = mrt->vif_table[ct].dev->ifindex;
1929                         nhp->rtnh_len = sizeof(*nhp);
1930                 }
1931         }
1932         mp_head->rta_type = RTA_MULTIPATH;
1933         mp_head->rta_len = skb_tail_pointer(skb) - (u8 *)mp_head;
1934         rtm->rtm_type = RTN_MULTICAST;
1935         return 1;
1936
1937 rtattr_failure:
1938         nlmsg_trim(skb, b);
1939         return -EMSGSIZE;
1940 }
1941
1942 int ipmr_get_route(struct net *net,
1943                    struct sk_buff *skb, struct rtmsg *rtm, int nowait)
1944 {
1945         int err;
1946         struct mr_table *mrt;
1947         struct mfc_cache *cache;
1948         struct rtable *rt = skb_rtable(skb);
1949
1950         mrt = ipmr_get_table(net, RT_TABLE_DEFAULT);
1951         if (mrt == NULL)
1952                 return -ENOENT;
1953
1954         read_lock(&mrt_lock);
1955         cache = ipmr_cache_find(mrt, rt->rt_src, rt->rt_dst);
1956
1957         if (cache == NULL) {
1958                 struct sk_buff *skb2;
1959                 struct iphdr *iph;
1960                 struct net_device *dev;
1961                 int vif;
1962
1963                 if (nowait) {
1964                         read_unlock(&mrt_lock);
1965                         return -EAGAIN;
1966                 }
1967
1968                 dev = skb->dev;
1969                 if (dev == NULL || (vif = ipmr_find_vif(mrt, dev)) < 0) {
1970                         read_unlock(&mrt_lock);
1971                         return -ENODEV;
1972                 }
1973                 skb2 = skb_clone(skb, GFP_ATOMIC);
1974                 if (!skb2) {
1975                         read_unlock(&mrt_lock);
1976                         return -ENOMEM;
1977                 }
1978
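                /*
                 * Build a skeletal IP header carrying only the addresses and
                 * queue it as an unresolved entry; version 0 marks the skb as
                 * a request generated here, so the resolver answers it via
                 * netlink instead of trying to forward it.
                 */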
1979                 skb_push(skb2, sizeof(struct iphdr));
1980                 skb_reset_network_header(skb2);
1981                 iph = ip_hdr(skb2);
1982                 iph->ihl = sizeof(struct iphdr) >> 2;
1983                 iph->saddr = rt->rt_src;
1984                 iph->daddr = rt->rt_dst;
1985                 iph->version = 0;
1986                 err = ipmr_cache_unresolved(mrt, vif, skb2);
1987                 read_unlock(&mrt_lock);
1988                 return err;
1989         }
1990
1991         if (!nowait && (rtm->rtm_flags & RTM_F_NOTIFY))
1992                 cache->mfc_flags |= MFC_NOTIFY;
1993         err = ipmr_fill_mroute(mrt, skb, cache, rtm);
1994         read_unlock(&mrt_lock);
1995         return err;
1996 }
1997
1998 #ifdef CONFIG_PROC_FS
1999 /*
2000  *      The /proc interfaces to multicast routing: /proc/net/ip_mr_cache and /proc/net/ip_mr_vif
2001  */
2002 struct ipmr_vif_iter {
2003         struct seq_net_private p;
2004         struct mr_table *mrt;
2005         int ct;
2006 };
2007
2008 static struct vif_device *ipmr_vif_seq_idx(struct net *net,
2009                                            struct ipmr_vif_iter *iter,
2010                                            loff_t pos)
2011 {
2012         struct mr_table *mrt = iter->mrt;
2013
2014         for (iter->ct = 0; iter->ct < mrt->maxvif; ++iter->ct) {
2015                 if (!VIF_EXISTS(mrt, iter->ct))
2016                         continue;
2017                 if (pos-- == 0)
2018                         return &mrt->vif_table[iter->ct];
2019         }
2020         return NULL;
2021 }
2022
2023 static void *ipmr_vif_seq_start(struct seq_file *seq, loff_t *pos)
2024         __acquires(mrt_lock)
2025 {
2026         struct ipmr_vif_iter *iter = seq->private;
2027         struct net *net = seq_file_net(seq);
2028         struct mr_table *mrt;
2029
2030         mrt = ipmr_get_table(net, RT_TABLE_DEFAULT);
2031         if (mrt == NULL)
2032                 return ERR_PTR(-ENOENT);
2033
2034         iter->mrt = mrt;
2035
2036         read_lock(&mrt_lock);
2037         return *pos ? ipmr_vif_seq_idx(net, iter, *pos - 1)
2038                 : SEQ_START_TOKEN;
2039 }
2040
2041 static void *ipmr_vif_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2042 {
2043         struct ipmr_vif_iter *iter = seq->private;
2044         struct net *net = seq_file_net(seq);
2045         struct mr_table *mrt = iter->mrt;
2046
2047         ++*pos;
2048         if (v == SEQ_START_TOKEN)
2049                 return ipmr_vif_seq_idx(net, iter, 0);
2050
2051         while (++iter->ct < mrt->maxvif) {
2052                 if (!VIF_EXISTS(mrt, iter->ct))
2053                         continue;
2054                 return &mrt->vif_table[iter->ct];
2055         }
2056         return NULL;
2057 }
2058
2059 static void ipmr_vif_seq_stop(struct seq_file *seq, void *v)
2060         __releases(mrt_lock)
2061 {
2062         read_unlock(&mrt_lock);
2063 }
2064
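/*
 * Each vif is printed as one line; an illustrative (example-only) line:
 *
 *  0 eth0         144907    1041    271898    1010 00000 C0A80101 00000000
 */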
2065 static int ipmr_vif_seq_show(struct seq_file *seq, void *v)
2066 {
2067         struct ipmr_vif_iter *iter = seq->private;
2068         struct mr_table *mrt = iter->mrt;
2069
2070         if (v == SEQ_START_TOKEN) {
2071                 seq_puts(seq,
2072                          "Interface      BytesIn  PktsIn  BytesOut PktsOut Flags Local    Remote\n");
2073         } else {
2074                 const struct vif_device *vif = v;
2075                 const char *name =  vif->dev ? vif->dev->name : "none";
2076
2077                 seq_printf(seq,
2078                            "%2zd %-10s %8ld %7ld  %8ld %7ld %05X %08X %08X\n",
2079                            vif - mrt->vif_table,
2080                            name, vif->bytes_in, vif->pkt_in,
2081                            vif->bytes_out, vif->pkt_out,
2082                            vif->flags, vif->local, vif->remote);
2083         }
2084         return 0;
2085 }
2086
2087 static const struct seq_operations ipmr_vif_seq_ops = {
2088         .start = ipmr_vif_seq_start,
2089         .next  = ipmr_vif_seq_next,
2090         .stop  = ipmr_vif_seq_stop,
2091         .show  = ipmr_vif_seq_show,
2092 };
2093
2094 static int ipmr_vif_open(struct inode *inode, struct file *file)
2095 {
2096         return seq_open_net(inode, file, &ipmr_vif_seq_ops,
2097                             sizeof(struct ipmr_vif_iter));
2098 }
2099
2100 static const struct file_operations ipmr_vif_fops = {
2101         .owner   = THIS_MODULE,
2102         .open    = ipmr_vif_open,
2103         .read    = seq_read,
2104         .llseek  = seq_lseek,
2105         .release = seq_release_net,
2106 };
2107
2108 struct ipmr_mfc_iter {
2109         struct seq_net_private p;
2110         struct mr_table *mrt;
2111         struct list_head *cache;
2112         int ct;
2113 };
2114
2115
2116 static struct mfc_cache *ipmr_mfc_seq_idx(struct net *net,
2117                                           struct ipmr_mfc_iter *it, loff_t pos)
2118 {
2119         struct mr_table *mrt = it->mrt;
2120         struct mfc_cache *mfc;
2121
2122         read_lock(&mrt_lock);
2123         for (it->ct = 0; it->ct < MFC_LINES; it->ct++) {
2124                 it->cache = &mrt->mfc_cache_array[it->ct];
2125                 list_for_each_entry(mfc, it->cache, list)
2126                         if (pos-- == 0)
2127                                 return mfc;
2128         }
2129         read_unlock(&mrt_lock);
2130
2131         spin_lock_bh(&mfc_unres_lock);
2132         it->cache = &mrt->mfc_unres_queue;
2133         list_for_each_entry(mfc, it->cache, list)
2134                 if (pos-- == 0)
2135                         return mfc;
2136         spin_unlock_bh(&mfc_unres_lock);
2137
2138         it->cache = NULL;
2139         return NULL;
2140 }
2141
2142
2143 static void *ipmr_mfc_seq_start(struct seq_file *seq, loff_t *pos)
2144 {
2145         struct ipmr_mfc_iter *it = seq->private;
2146         struct net *net = seq_file_net(seq);
2147         struct mr_table *mrt;
2148
2149         mrt = ipmr_get_table(net, RT_TABLE_DEFAULT);
2150         if (mrt == NULL)
2151                 return ERR_PTR(-ENOENT);
2152
2153         it->mrt = mrt;
2154         it->cache = NULL;
2155         it->ct = 0;
2156         return *pos ? ipmr_mfc_seq_idx(net, it, *pos - 1)
2157                 : SEQ_START_TOKEN;
2158 }
2159
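/*
 * Iteration covers the resolved cache buckets under mrt_lock first, then
 * hands off to the unresolved queue under mfc_unres_lock;
 * ipmr_mfc_seq_stop() drops whichever lock the current phase still holds.
 */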
2160 static void *ipmr_mfc_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2161 {
2162         struct mfc_cache *mfc = v;
2163         struct ipmr_mfc_iter *it = seq->private;
2164         struct net *net = seq_file_net(seq);
2165         struct mr_table *mrt = it->mrt;
2166
2167         ++*pos;
2168
2169         if (v == SEQ_START_TOKEN)
2170                 return ipmr_mfc_seq_idx(net, it, 0);
2171
2172         if (mfc->list.next != it->cache)
2173                 return list_entry(mfc->list.next, struct mfc_cache, list);
2174
2175         if (it->cache == &mrt->mfc_unres_queue)
2176                 goto end_of_list;
2177
2178         BUG_ON(it->cache != &mrt->mfc_cache_array[it->ct]);
2179
2180         while (++it->ct < MFC_LINES) {
2181                 it->cache = &mrt->mfc_cache_array[it->ct];
2182                 if (list_empty(it->cache))
2183                         continue;
2184                 return list_first_entry(it->cache, struct mfc_cache, list);
2185         }
2186
2187         /* exhausted cache_array, show unresolved */
2188         read_unlock(&mrt_lock);
2189         it->cache = &mrt->mfc_unres_queue;
2190         it->ct = 0;
2191
2192         spin_lock_bh(&mfc_unres_lock);
2193         if (!list_empty(it->cache))
2194                 return list_first_entry(it->cache, struct mfc_cache, list);
2195
2196  end_of_list:
2197         spin_unlock_bh(&mfc_unres_lock);
2198         it->cache = NULL;
2199
2200         return NULL;
2201 }
2202
2203 static void ipmr_mfc_seq_stop(struct seq_file *seq, void *v)
2204 {
2205         struct ipmr_mfc_iter *it = seq->private;
2206         struct mr_table *mrt = it->mrt;
2207
2208         if (it->cache == &mrt->mfc_unres_queue)
2209                 spin_unlock_bh(&mfc_unres_lock);
2210         else if (it->cache == &mrt->mfc_cache_array[it->ct])
2211                 read_unlock(&mrt_lock);
2212 }
2213
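/*
 * Each cache entry is printed as one line; an illustrative (example-only)
 * line, with oifs shown as vif:ttl-threshold pairs:
 *
 * 010000E0 0101A8C0 1             521   577132        0  2:1    3:1
 */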
2214 static int ipmr_mfc_seq_show(struct seq_file *seq, void *v)
2215 {
2216         int n;
2217
2218         if (v == SEQ_START_TOKEN) {
2219                 seq_puts(seq,
2220                  "Group    Origin   Iif     Pkts    Bytes    Wrong Oifs\n");
2221         } else {
2222                 const struct mfc_cache *mfc = v;
2223                 const struct ipmr_mfc_iter *it = seq->private;
2224                 const struct mr_table *mrt = it->mrt;
2225
2226                 seq_printf(seq, "%08lX %08lX %-3hd",
2227                            (unsigned long) mfc->mfc_mcastgrp,
2228                            (unsigned long) mfc->mfc_origin,
2229                            mfc->mfc_parent);
2230
2231                 if (it->cache != &mrt->mfc_unres_queue) {
2232                         seq_printf(seq, " %8lu %8lu %8lu",
2233                                    mfc->mfc_un.res.pkt,
2234                                    mfc->mfc_un.res.bytes,
2235                                    mfc->mfc_un.res.wrong_if);
2236                         for (n = mfc->mfc_un.res.minvif;
2237                              n < mfc->mfc_un.res.maxvif; n++) {
2238                                 if (VIF_EXISTS(mrt, n) &&
2239                                     mfc->mfc_un.res.ttls[n] < 255)
2240                                         seq_printf(seq,
2241                                            " %2d:%-3d",
2242                                            n, mfc->mfc_un.res.ttls[n]);
2243                         }
2244                 } else {
2245                         /* unresolved mfc_caches don't contain
2246                          * pkt, bytes and wrong_if values
2247                          */
2248                         seq_printf(seq, " %8lu %8lu %8lu", 0ul, 0ul, 0ul);
2249                 }
2250                 seq_putc(seq, '\n');
2251         }
2252         return 0;
2253 }
2254
2255 static const struct seq_operations ipmr_mfc_seq_ops = {
2256         .start = ipmr_mfc_seq_start,
2257         .next  = ipmr_mfc_seq_next,
2258         .stop  = ipmr_mfc_seq_stop,
2259         .show  = ipmr_mfc_seq_show,
2260 };
2261
2262 static int ipmr_mfc_open(struct inode *inode, struct file *file)
2263 {
2264         return seq_open_net(inode, file, &ipmr_mfc_seq_ops,
2265                             sizeof(struct ipmr_mfc_iter));
2266 }
2267
2268 static const struct file_operations ipmr_mfc_fops = {
2269         .owner   = THIS_MODULE,
2270         .open    = ipmr_mfc_open,
2271         .read    = seq_read,
2272         .llseek  = seq_lseek,
2273         .release = seq_release_net,
2274 };
2275 #endif
2276
2277 #ifdef CONFIG_IP_PIMSM_V2
2278 static const struct net_protocol pim_protocol = {
2279         .handler        =       pim_rcv,
2280         .netns_ok       =       1,
2281 };
2282 #endif
2283
2284
2285 /*
2286  *      Setup for IP multicast routing
2287  */
2288 static int __net_init ipmr_net_init(struct net *net)
2289 {
2290         int err;
2291
2292         err = ipmr_rules_init(net);
2293         if (err < 0)
2294                 goto fail;
2295
2296 #ifdef CONFIG_PROC_FS
2297         err = -ENOMEM;
2298         if (!proc_net_fops_create(net, "ip_mr_vif", 0, &ipmr_vif_fops))
2299                 goto proc_vif_fail;
2300         if (!proc_net_fops_create(net, "ip_mr_cache", 0, &ipmr_mfc_fops))
2301                 goto proc_cache_fail;
2302 #endif
2303         return 0;
2304
2305 #ifdef CONFIG_PROC_FS
2306 proc_cache_fail:
2307         proc_net_remove(net, "ip_mr_vif");
2308 proc_vif_fail:
2309         ipmr_rules_exit(net);
2310 #endif
2311 fail:
2312         return err;
2313 }
2314
2315 static void __net_exit ipmr_net_exit(struct net *net)
2316 {
2317 #ifdef CONFIG_PROC_FS
2318         proc_net_remove(net, "ip_mr_cache");
2319         proc_net_remove(net, "ip_mr_vif");
2320 #endif
2321         ipmr_rules_exit(net);
2322 }
2323
2324 static struct pernet_operations ipmr_net_ops = {
2325         .init = ipmr_net_init,
2326         .exit = ipmr_net_exit,
2327 };
2328
2329 int __init ip_mr_init(void)
2330 {
2331         int err;
2332
2333         mrt_cachep = kmem_cache_create("ip_mrt_cache",
2334                                        sizeof(struct mfc_cache),
2335                                        0, SLAB_HWCACHE_ALIGN|SLAB_PANIC,
2336                                        NULL);
2337         if (!mrt_cachep)
2338                 return -ENOMEM;
2339
2340         err = register_pernet_subsys(&ipmr_net_ops);
2341         if (err)
2342                 goto reg_pernet_fail;
2343
2344         err = register_netdevice_notifier(&ip_mr_notifier);
2345         if (err)
2346                 goto reg_notif_fail;
2347 #ifdef CONFIG_IP_PIMSM_V2
2348         if (inet_add_protocol(&pim_protocol, IPPROTO_PIM) < 0) {
2349                 printk(KERN_ERR "ip_mr_init: can't add PIM protocol\n");
2350                 err = -EAGAIN;
2351                 goto add_proto_fail;
2352         }
2353 #endif
2354         return 0;
2355
2356 #ifdef CONFIG_IP_PIMSM_V2
2357 add_proto_fail:
2358         unregister_netdevice_notifier(&ip_mr_notifier);
2359 #endif
2360 reg_notif_fail:
2361         unregister_pernet_subsys(&ipmr_net_ops);
2362 reg_pernet_fail:
2363         kmem_cache_destroy(mrt_cachep);
2364         return err;
2365 }