 * IP multicast routing support for mrouted 3.6/3.8
 *
 * (c) 1995 Alan Cox, <alan@lxorguk.ukuu.org.uk>
 * Linux Consultancy and Custom Driver Development
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * Fixes:
 * Michael Chastain : Incorrect size of copying.
 * Alan Cox : Added the cache manager code
 * Alan Cox : Fixed the clone/copy bug and device race.
 * Mike McLagan : Routing by source
 * Malcolm Beattie : Buffer handling fixes.
 * Alexey Kuznetsov : Double buffer free and other fixes.
 * SVR Anand : Fixed several multicast bugs and problems.
 * Alexey Kuznetsov : Status, optimisations and more.
 * Brad Parker : Better behaviour on mrouted upcall
 *
 * Carlos Picoto : PIMv1 Support
 * Pavlin Ivanov Radoslavov: PIMv2 Registers must checksum only PIM header
 *                           Relax this requirement to work with older peers.
#include <asm/system.h>
#include <asm/uaccess.h>
#include <linux/types.h>
#include <linux/capability.h>
#include <linux/errno.h>
#include <linux/timer.h>
#include <linux/kernel.h>
#include <linux/fcntl.h>
#include <linux/stat.h>
#include <linux/socket.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/inetdevice.h>
#include <linux/igmp.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/mroute.h>
#include <linux/init.h>
#include <linux/if_ether.h>
#include <net/net_namespace.h>
#include <net/protocol.h>
#include <linux/skbuff.h>
#include <net/route.h>
#include <linux/notifier.h>
#include <linux/if_arp.h>
#include <linux/netfilter_ipv4.h>
#include <net/checksum.h>
#include <net/netlink.h>
#if defined(CONFIG_IP_PIMSM_V1) || defined(CONFIG_IP_PIMSM_V2)
#define CONFIG_IP_PIMSM 1
#endif
/* Big lock, protecting vif table, mrt cache and mroute socket state.
   Note that the changes are semaphored via rtnl_lock.
 */
static DEFINE_RWLOCK(mrt_lock);

/* Multicast router control variables */

#define VIF_EXISTS(_net, _idx) ((_net)->ipv4.vif_table[_idx].dev != NULL)

static int mroute_do_assert;    /* Set in PIM assert */
static int mroute_do_pim;

static struct mfc_cache *mfc_cache_array[MFC_LINES]; /* Forwarding cache */

static struct mfc_cache *mfc_unres_queue; /* Queue of unresolved entries */
static atomic_t cache_resolve_queue_len;  /* Size of unresolved */

/* Special spinlock for queue of unresolved entries */
static DEFINE_SPINLOCK(mfc_unres_lock);
/* We revert to Alan's original scheme. The hash table of resolved
   entries is changed only in process context and is protected
   with the weak lock mrt_lock. The queue of unresolved entries is
   protected with the strong spinlock mfc_unres_lock.

   This way the data path is entirely free of exclusive locks.
 */
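/* A minimal sketch (hypothetical caller) of the read-side pattern this
 * scheme enables; the real data-path users are ip_mr_input() and
 * ipmr_get_route() below:
 *
 *        read_lock(&mrt_lock);
 *        c = ipmr_cache_find(iph->saddr, iph->daddr);
 *        if (c != NULL)
 *                ip_mr_forward(skb, c, 0);
 *        read_unlock(&mrt_lock);
 */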
static struct kmem_cache *mrt_cachep __read_mostly;

static int ip_mr_forward(struct sk_buff *skb, struct mfc_cache *cache, int local);
static int ipmr_cache_report(struct sk_buff *pkt, vifi_t vifi, int assert);
static int ipmr_fill_mroute(struct sk_buff *skb, struct mfc_cache *c, struct rtmsg *rtm);
#ifdef CONFIG_IP_PIMSM_V2
static struct net_protocol pim_protocol;
#endif
static struct timer_list ipmr_expire_timer;

/* Service routines creating virtual interfaces: DVMRP tunnels and PIMREG */

static void ipmr_del_tunnel(struct net_device *dev, struct vifctl *v)
        dev = __dev_get_by_name(&init_net, "tunl0");
                const struct net_device_ops *ops = dev->netdev_ops;
                struct ip_tunnel_parm p;

                memset(&p, 0, sizeof(p));
                p.iph.daddr = v->vifc_rmt_addr.s_addr;
                p.iph.saddr = v->vifc_lcl_addr.s_addr;
                p.iph.protocol = IPPROTO_IPIP;
                sprintf(p.name, "dvmrp%d", v->vifc_vifi);
                ifr.ifr_ifru.ifru_data = (__force void __user *)&p;

                if (ops->ndo_do_ioctl) {
                        mm_segment_t oldfs = get_fs();

                        ops->ndo_do_ioctl(dev, &ifr, SIOCDELTUNNEL);

struct net_device *ipmr_new_tunnel(struct vifctl *v)
        struct net_device *dev;

        dev = __dev_get_by_name(&init_net, "tunl0");
                const struct net_device_ops *ops = dev->netdev_ops;
                struct ip_tunnel_parm p;
                struct in_device *in_dev;

                memset(&p, 0, sizeof(p));
                p.iph.daddr = v->vifc_rmt_addr.s_addr;
                p.iph.saddr = v->vifc_lcl_addr.s_addr;
                p.iph.protocol = IPPROTO_IPIP;
                sprintf(p.name, "dvmrp%d", v->vifc_vifi);
                ifr.ifr_ifru.ifru_data = (__force void __user *)&p;

                if (ops->ndo_do_ioctl) {
                        mm_segment_t oldfs = get_fs();

                        err = ops->ndo_do_ioctl(dev, &ifr, SIOCADDTUNNEL);

        if (err == 0 && (dev = __dev_get_by_name(&init_net, p.name)) != NULL) {
                dev->flags |= IFF_MULTICAST;

                in_dev = __in_dev_get_rtnl(dev);
                ipv4_devconf_setall(in_dev);
                IPV4_DEVCONF(in_dev->cnf, RP_FILTER) = 0;

        /* allow the register to be completed before unregistering. */
        unregister_netdevice(dev);

#ifdef CONFIG_IP_PIMSM

static int reg_vif_num = -1;

static int reg_vif_xmit(struct sk_buff *skb, struct net_device *dev)
        read_lock(&mrt_lock);
        dev->stats.tx_bytes += skb->len;
        dev->stats.tx_packets++;
        ipmr_cache_report(skb, reg_vif_num, IGMPMSG_WHOLEPKT);
        read_unlock(&mrt_lock);

static const struct net_device_ops reg_vif_netdev_ops = {
        .ndo_start_xmit = reg_vif_xmit,

static void reg_vif_setup(struct net_device *dev)
        dev->type = ARPHRD_PIMREG;
        dev->mtu = ETH_DATA_LEN - sizeof(struct iphdr) - 8;
        dev->flags = IFF_NOARP;
        dev->netdev_ops = &reg_vif_netdev_ops;
        dev->destructor = free_netdev;

static struct net_device *ipmr_reg_vif(void)
        struct net_device *dev;
        struct in_device *in_dev;

        dev = alloc_netdev(0, "pimreg", reg_vif_setup);

        if (register_netdevice(dev)) {

        if ((in_dev = __in_dev_get_rcu(dev)) == NULL) {

        ipv4_devconf_setall(in_dev);
        IPV4_DEVCONF(in_dev->cnf, RP_FILTER) = 0;

        /* allow the register to be completed before unregistering. */
        unregister_netdevice(dev);

 * @notify: Set to 1, if the caller is a notifier_call

static int vif_delete(int vifi, int notify)
        struct vif_device *v;
        struct net_device *dev;
        struct in_device *in_dev;

        if (vifi < 0 || vifi >= init_net.ipv4.maxvif)
                return -EADDRNOTAVAIL;

        v = &init_net.ipv4.vif_table[vifi];

        write_lock_bh(&mrt_lock);
                write_unlock_bh(&mrt_lock);
                return -EADDRNOTAVAIL;

#ifdef CONFIG_IP_PIMSM
        if (vifi == reg_vif_num)

        if (vifi+1 == init_net.ipv4.maxvif) {
                for (tmp=vifi-1; tmp>=0; tmp--) {
                        if (VIF_EXISTS(&init_net, tmp))
                init_net.ipv4.maxvif = tmp+1;

        write_unlock_bh(&mrt_lock);

        dev_set_allmulti(dev, -1);

        if ((in_dev = __in_dev_get_rtnl(dev)) != NULL) {
                IPV4_DEVCONF(in_dev->cnf, MC_FORWARDING)--;
                ip_rt_multicast_event(in_dev);

        if (v->flags&(VIFF_TUNNEL|VIFF_REGISTER) && !notify)
                unregister_netdevice(dev);

static inline void ipmr_cache_free(struct mfc_cache *c)
        release_net(mfc_net(c));
        kmem_cache_free(mrt_cachep, c);

/* Destroy an unresolved cache entry, killing queued skbs
   and reporting error to netlink readers.
 */

static void ipmr_destroy_unres(struct mfc_cache *c)
        atomic_dec(&cache_resolve_queue_len);

        while ((skb = skb_dequeue(&c->mfc_un.unres.unresolved))) {
                if (ip_hdr(skb)->version == 0) {
                        struct nlmsghdr *nlh = (struct nlmsghdr *)skb_pull(skb, sizeof(struct iphdr));
                        nlh->nlmsg_type = NLMSG_ERROR;
                        nlh->nlmsg_len = NLMSG_LENGTH(sizeof(struct nlmsgerr));
                        skb_trim(skb, nlh->nlmsg_len);
                        e->error = -ETIMEDOUT;
                        memset(&e->msg, 0, sizeof(e->msg));
                        rtnl_unicast(skb, &init_net, NETLINK_CB(skb).pid);

/* Single timer process for all the unresolved queue. */

static void ipmr_expire_process(unsigned long dummy)
        unsigned long expires;
        struct mfc_cache *c, **cp;

        if (!spin_trylock(&mfc_unres_lock)) {
                mod_timer(&ipmr_expire_timer, jiffies+HZ/10);

        if (atomic_read(&cache_resolve_queue_len) == 0)

        cp = &mfc_unres_queue;

        while ((c=*cp) != NULL) {
                if (time_after(c->mfc_un.unres.expires, now)) {
                        unsigned long interval = c->mfc_un.unres.expires - now;
                        if (interval < expires)

                ipmr_destroy_unres(c);

        if (atomic_read(&cache_resolve_queue_len))
                mod_timer(&ipmr_expire_timer, jiffies + expires);

        spin_unlock(&mfc_unres_lock);

/* Fill oifs list. It is called under write locked mrt_lock. */

static void ipmr_update_thresholds(struct mfc_cache *cache, unsigned char *ttls)
        cache->mfc_un.res.minvif = MAXVIFS;
        cache->mfc_un.res.maxvif = 0;
        memset(cache->mfc_un.res.ttls, 255, MAXVIFS);

        for (vifi = 0; vifi < init_net.ipv4.maxvif; vifi++) {
                if (VIF_EXISTS(&init_net, vifi) &&
                    ttls[vifi] && ttls[vifi] < 255) {
                        cache->mfc_un.res.ttls[vifi] = ttls[vifi];
                        if (cache->mfc_un.res.minvif > vifi)
                                cache->mfc_un.res.minvif = vifi;
                        if (cache->mfc_un.res.maxvif <= vifi)
                                cache->mfc_un.res.maxvif = vifi + 1;
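/* Worked example (made-up numbers): with ttls[] = { 0, 1, 64, 0, ... },
 * vifs 1 and 2 become output interfaces, so minvif = 1 and maxvif = 3;
 * ip_mr_forward() later transmits on vif N only when the packet TTL is
 * strictly greater than ttls[N].
 */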
static int vif_add(struct vifctl *vifc, int mrtsock)
        int vifi = vifc->vifc_vifi;
        struct vif_device *v = &init_net.ipv4.vif_table[vifi];
        struct net_device *dev;
        struct in_device *in_dev;

        if (VIF_EXISTS(&init_net, vifi))

        switch (vifc->vifc_flags) {
#ifdef CONFIG_IP_PIMSM
                /*
                 * Special Purpose VIF in PIM
                 * All the packets will be sent to the daemon
                 */
                if (reg_vif_num >= 0)
                dev = ipmr_reg_vif();

                err = dev_set_allmulti(dev, 1);
                        unregister_netdevice(dev);

                dev = ipmr_new_tunnel(vifc);

                err = dev_set_allmulti(dev, 1);
                        ipmr_del_tunnel(dev, vifc);

                dev = ip_dev_find(&init_net, vifc->vifc_lcl_addr.s_addr);
                        return -EADDRNOTAVAIL;
                err = dev_set_allmulti(dev, 1);

        if ((in_dev = __in_dev_get_rtnl(dev)) == NULL)
                return -EADDRNOTAVAIL;
        IPV4_DEVCONF(in_dev->cnf, MC_FORWARDING)++;
        ip_rt_multicast_event(in_dev);

        /*
         * Fill in the VIF structures
         */
        v->rate_limit = vifc->vifc_rate_limit;
        v->local = vifc->vifc_lcl_addr.s_addr;
        v->remote = vifc->vifc_rmt_addr.s_addr;
        v->flags = vifc->vifc_flags;
                v->flags |= VIFF_STATIC;
        v->threshold = vifc->vifc_threshold;
        v->link = dev->ifindex;
        if (v->flags&(VIFF_TUNNEL|VIFF_REGISTER))
                v->link = dev->iflink;

        /* And finish update writing critical data */
        write_lock_bh(&mrt_lock);
#ifdef CONFIG_IP_PIMSM
        if (v->flags&VIFF_REGISTER)
        if (vifi+1 > init_net.ipv4.maxvif)
                init_net.ipv4.maxvif = vifi+1;
        write_unlock_bh(&mrt_lock);

static struct mfc_cache *ipmr_cache_find(__be32 origin, __be32 mcastgrp)
        int line = MFC_HASH(mcastgrp, origin);

        for (c=mfc_cache_array[line]; c; c = c->next) {
                if (c->mfc_origin==origin && c->mfc_mcastgrp==mcastgrp)

/*
 * Allocate a multicast cache entry
 */
static struct mfc_cache *ipmr_cache_alloc(struct net *net)
        struct mfc_cache *c = kmem_cache_zalloc(mrt_cachep, GFP_KERNEL);
        c->mfc_un.res.minvif = MAXVIFS;

static struct mfc_cache *ipmr_cache_alloc_unres(struct net *net)
        struct mfc_cache *c = kmem_cache_zalloc(mrt_cachep, GFP_ATOMIC);
        skb_queue_head_init(&c->mfc_un.unres.unresolved);
        c->mfc_un.unres.expires = jiffies + 10*HZ;

/*
 * A cache entry has gone into a resolved state from queued
 */

static void ipmr_cache_resolve(struct mfc_cache *uc, struct mfc_cache *c)
        /*
         * Play the pending entries through our router
         */
        while ((skb = __skb_dequeue(&uc->mfc_un.unres.unresolved))) {
                if (ip_hdr(skb)->version == 0) {
                        struct nlmsghdr *nlh = (struct nlmsghdr *)skb_pull(skb, sizeof(struct iphdr));

                        if (ipmr_fill_mroute(skb, c, NLMSG_DATA(nlh)) > 0) {
                                nlh->nlmsg_len = (skb_tail_pointer(skb) -
                                nlh->nlmsg_type = NLMSG_ERROR;
                                nlh->nlmsg_len = NLMSG_LENGTH(sizeof(struct nlmsgerr));
                                skb_trim(skb, nlh->nlmsg_len);
                                e->error = -EMSGSIZE;
                                memset(&e->msg, 0, sizeof(e->msg));
                        rtnl_unicast(skb, &init_net, NETLINK_CB(skb).pid);
                        ip_mr_forward(skb, c, 0);

/*
 * Bounce a cache query up to mrouted. We could use netlink for this but mrouted
 * expects the following bizarre scheme.
 *
 * Called under mrt_lock.
 */
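/* A hedged sketch of how a routing daemon might consume these upcalls
 * from its raw IGMP socket (variable names are illustrative, not
 * mrouted's). A zero im_mbz byte, overlaying the IP protocol field the
 * kernel clears below, distinguishes an upcall from a real IGMP packet:
 *
 *        char buf[1500];
 *        ssize_t n = recv(igmp_sock, buf, sizeof(buf), 0);
 *        struct igmpmsg *m = (struct igmpmsg *)buf;
 *        if (n >= (ssize_t)sizeof(*m) && m->im_mbz == 0)
 *                handle_upcall(m->im_msgtype, m->im_vif,
 *                              m->im_src, m->im_dst);
 */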
static int ipmr_cache_report(struct sk_buff *pkt, vifi_t vifi, int assert)
        const int ihl = ip_hdrlen(pkt);
        struct igmphdr *igmp;
#ifdef CONFIG_IP_PIMSM
        if (assert == IGMPMSG_WHOLEPKT)
                skb = skb_realloc_headroom(pkt, sizeof(struct iphdr));
        else
#endif
                skb = alloc_skb(128, GFP_ATOMIC);
#ifdef CONFIG_IP_PIMSM
        if (assert == IGMPMSG_WHOLEPKT) {
                /* Ugly, but we have no choice with this interface.
                   Duplicate old header, fix ihl, length etc.
                   And all this only to mangle msg->im_msgtype and
                   to set msg->im_mbz to "mbz" :-)
                 */
                skb_push(skb, sizeof(struct iphdr));
                skb_reset_network_header(skb);
                skb_reset_transport_header(skb);
                msg = (struct igmpmsg *)skb_network_header(skb);
                memcpy(msg, skb_network_header(pkt), sizeof(struct iphdr));
                msg->im_msgtype = IGMPMSG_WHOLEPKT;
                msg->im_vif = reg_vif_num;
                ip_hdr(skb)->ihl = sizeof(struct iphdr) >> 2;
                ip_hdr(skb)->tot_len = htons(ntohs(ip_hdr(pkt)->tot_len) +
                                             sizeof(struct iphdr));

        skb->network_header = skb->tail;
        skb_copy_to_linear_data(skb, pkt->data, ihl);
        ip_hdr(skb)->protocol = 0; /* Flag to the kernel this is a route add */
        msg = (struct igmpmsg *)skb_network_header(skb);
        skb->dst = dst_clone(pkt->dst);

        igmp = (struct igmphdr *)skb_put(skb, sizeof(struct igmphdr));
        msg->im_msgtype = assert;
        ip_hdr(skb)->tot_len = htons(skb->len); /* Fix the length */
        skb->transport_header = skb->network_header;

        if (init_net.ipv4.mroute_sk == NULL) {

        ret = sock_queue_rcv_skb(init_net.ipv4.mroute_sk, skb);
                printk(KERN_WARNING "mroute: pending queue full, dropping entries.\n");
/*
 * Queue a packet for resolution; it is attached to a locked
 * unresolved cache entry.
 */
ipmr_cache_unresolved(vifi_t vifi, struct sk_buff *skb)
        const struct iphdr *iph = ip_hdr(skb);

        spin_lock_bh(&mfc_unres_lock);
        for (c=mfc_unres_queue; c; c=c->next) {
                if (c->mfc_mcastgrp == iph->daddr &&
                    c->mfc_origin == iph->saddr)

                /*
                 * Create a new entry if allowable
                 */
                if (atomic_read(&cache_resolve_queue_len) >= 10 ||
                    (c = ipmr_cache_alloc_unres(&init_net)) == NULL) {
                        spin_unlock_bh(&mfc_unres_lock);

                /*
                 * Fill in the new cache entry
                 */
                c->mfc_origin = iph->saddr;
                c->mfc_mcastgrp = iph->daddr;

                /*
                 * Reflect first query at mrouted.
                 */
                if ((err = ipmr_cache_report(skb, vifi, IGMPMSG_NOCACHE))<0) {
                        /* If the report failed throw the cache entry
                        spin_unlock_bh(&mfc_unres_lock);

                atomic_inc(&cache_resolve_queue_len);
                c->next = mfc_unres_queue;

                mod_timer(&ipmr_expire_timer, c->mfc_un.unres.expires);

        /*
         * See if we can append the packet
         */
        if (c->mfc_un.unres.unresolved.qlen>3) {
                skb_queue_tail(&c->mfc_un.unres.unresolved, skb);

        spin_unlock_bh(&mfc_unres_lock);

/*
 * MFC cache manipulation by user space mroute daemon
 */

static int ipmr_mfc_delete(struct mfcctl *mfc)
        struct mfc_cache *c, **cp;

        line = MFC_HASH(mfc->mfcc_mcastgrp.s_addr, mfc->mfcc_origin.s_addr);

        for (cp=&mfc_cache_array[line]; (c=*cp) != NULL; cp = &c->next) {
                if (c->mfc_origin == mfc->mfcc_origin.s_addr &&
                    c->mfc_mcastgrp == mfc->mfcc_mcastgrp.s_addr) {
                        write_lock_bh(&mrt_lock);
                        write_unlock_bh(&mrt_lock);

static int ipmr_mfc_add(struct mfcctl *mfc, int mrtsock)
        struct mfc_cache *uc, *c, **cp;

        line = MFC_HASH(mfc->mfcc_mcastgrp.s_addr, mfc->mfcc_origin.s_addr);

        for (cp=&mfc_cache_array[line]; (c=*cp) != NULL; cp = &c->next) {
                if (c->mfc_origin == mfc->mfcc_origin.s_addr &&
                    c->mfc_mcastgrp == mfc->mfcc_mcastgrp.s_addr)

                write_lock_bh(&mrt_lock);
                c->mfc_parent = mfc->mfcc_parent;
                ipmr_update_thresholds(c, mfc->mfcc_ttls);
                        c->mfc_flags |= MFC_STATIC;
                write_unlock_bh(&mrt_lock);

        if (!ipv4_is_multicast(mfc->mfcc_mcastgrp.s_addr))

        c = ipmr_cache_alloc(&init_net);

        c->mfc_origin = mfc->mfcc_origin.s_addr;
        c->mfc_mcastgrp = mfc->mfcc_mcastgrp.s_addr;
        c->mfc_parent = mfc->mfcc_parent;
        ipmr_update_thresholds(c, mfc->mfcc_ttls);
                c->mfc_flags |= MFC_STATIC;

        write_lock_bh(&mrt_lock);
        c->next = mfc_cache_array[line];
        mfc_cache_array[line] = c;
        write_unlock_bh(&mrt_lock);

        /*
         * Check to see if we resolved a queued list. If so we
         * need to send on the frames and tidy up.
         */
        spin_lock_bh(&mfc_unres_lock);
        for (cp = &mfc_unres_queue; (uc=*cp) != NULL;
                if (uc->mfc_origin == c->mfc_origin &&
                    uc->mfc_mcastgrp == c->mfc_mcastgrp) {
                        if (atomic_dec_and_test(&cache_resolve_queue_len))
                                del_timer(&ipmr_expire_timer);
        spin_unlock_bh(&mfc_unres_lock);

                ipmr_cache_resolve(uc, c);

/*
 * Close the multicast socket, and clear the vif tables etc
 */

static void mroute_clean_tables(struct sock *sk)
        /*
         * Shut down all active vif entries
         */
        for (i = 0; i < init_net.ipv4.maxvif; i++) {
                if (!(init_net.ipv4.vif_table[i].flags&VIFF_STATIC))

        for (i=0; i<MFC_LINES; i++) {
                struct mfc_cache *c, **cp;

                cp = &mfc_cache_array[i];
                while ((c = *cp) != NULL) {
                        if (c->mfc_flags&MFC_STATIC) {
                        write_lock_bh(&mrt_lock);
                        write_unlock_bh(&mrt_lock);

        if (atomic_read(&cache_resolve_queue_len) != 0) {
                spin_lock_bh(&mfc_unres_lock);
                while (mfc_unres_queue != NULL) {
                        mfc_unres_queue = c->next;
                        spin_unlock_bh(&mfc_unres_lock);

                        ipmr_destroy_unres(c);

                        spin_lock_bh(&mfc_unres_lock);
                spin_unlock_bh(&mfc_unres_lock);

static void mrtsock_destruct(struct sock *sk)
        if (sk == init_net.ipv4.mroute_sk) {
                IPV4_DEVCONF_ALL(sock_net(sk), MC_FORWARDING)--;

                write_lock_bh(&mrt_lock);
                init_net.ipv4.mroute_sk = NULL;
                write_unlock_bh(&mrt_lock);

                mroute_clean_tables(sk);

/*
 * Socket options and virtual interface manipulation. The whole
 * virtual interface system is a complete heap, but unfortunately
 * that's how BSD mrouted happens to think. Maybe one day with a proper
 * MOSPF/PIM router set up we can clean this up.
 */
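/* A hedged sketch of the expected userspace sequence (illustrative names
 * and addresses, no error handling). Only the socket that issued MRT_INIT,
 * or a CAP_NET_ADMIN holder, may issue the remaining options:
 *
 *        int s = socket(AF_INET, SOCK_RAW, IPPROTO_IGMP);
 *        int one = 1;
 *        struct vifctl vc = { .vifc_vifi = 0, .vifc_threshold = 1 };
 *
 *        vc.vifc_lcl_addr.s_addr = inet_addr("192.0.2.1");
 *        setsockopt(s, IPPROTO_IP, MRT_INIT, &one, sizeof(one));
 *        setsockopt(s, IPPROTO_IP, MRT_ADD_VIF, &vc, sizeof(vc));
 */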
int ip_mroute_setsockopt(struct sock *sk, int optname, char __user *optval, int optlen)
        if (optname != MRT_INIT) {
                if (sk != init_net.ipv4.mroute_sk && !capable(CAP_NET_ADMIN))

                if (sk->sk_type != SOCK_RAW ||
                    inet_sk(sk)->num != IPPROTO_IGMP)
                if (optlen != sizeof(int))

                if (init_net.ipv4.mroute_sk) {

                ret = ip_ra_control(sk, 1, mrtsock_destruct);
                        write_lock_bh(&mrt_lock);
                        init_net.ipv4.mroute_sk = sk;
                        write_unlock_bh(&mrt_lock);

                        IPV4_DEVCONF_ALL(sock_net(sk), MC_FORWARDING)++;

                if (sk != init_net.ipv4.mroute_sk)
                return ip_ra_control(sk, 0, NULL);

                if (optlen != sizeof(vif))
                if (copy_from_user(&vif, optval, sizeof(vif)))
                if (vif.vifc_vifi >= MAXVIFS)

                if (optname == MRT_ADD_VIF) {
                        ret = vif_add(&vif, sk == init_net.ipv4.mroute_sk);
                        ret = vif_delete(vif.vifc_vifi, 0);

                /*
                 * Manipulate the forwarding caches. These live
                 * in a sort of kernel/user symbiosis.
                 */
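                /* Illustrative MRT_ADD_MFC call matching the handling
                 * below ("s" is the MRT_INIT socket from the sketch above;
                 * addresses are examples; a ttls[] entry of 0 or 255 means
                 * "do not forward on that vif"):
                 *
                 *        struct mfcctl mc = { .mfcc_parent = 0 };
                 *
                 *        mc.mfcc_origin.s_addr   = inet_addr("192.0.2.2");
                 *        mc.mfcc_mcastgrp.s_addr = inet_addr("224.1.2.3");
                 *        mc.mfcc_ttls[1] = 1;
                 *        setsockopt(s, IPPROTO_IP, MRT_ADD_MFC, &mc, sizeof(mc));
                 */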
                if (optlen != sizeof(mfc))
                if (copy_from_user(&mfc, optval, sizeof(mfc)))

                if (optname == MRT_DEL_MFC)
                        ret = ipmr_mfc_delete(&mfc);
                        ret = ipmr_mfc_add(&mfc, sk == init_net.ipv4.mroute_sk);

                /*
                 * Control PIM assert.
                 */
                if (get_user(v, (int __user *)optval))
                mroute_do_assert = (v) ? 1 : 0;

#ifdef CONFIG_IP_PIMSM
                if (get_user(v, (int __user *)optval))

                if (v != mroute_do_pim) {
                        mroute_do_assert = v;
#ifdef CONFIG_IP_PIMSM_V2
                        ret = inet_add_protocol(&pim_protocol,
                        ret = inet_del_protocol(&pim_protocol,

                /*
                 * Spurious command, or MRT_VERSION which you cannot set.
                 */
                return -ENOPROTOOPT;
/*
 * Getsockopt support for the multicast routing system.
 */
int ip_mroute_getsockopt(struct sock *sk, int optname, char __user *optval, int __user *optlen)
        if (optname != MRT_VERSION &&
#ifdef CONFIG_IP_PIMSM
            optname != MRT_PIM &&
#endif
            optname != MRT_ASSERT)
                return -ENOPROTOOPT;

        if (get_user(olr, optlen))

        olr = min_t(unsigned int, olr, sizeof(int));

        if (put_user(olr, optlen))
        if (optname == MRT_VERSION)
#ifdef CONFIG_IP_PIMSM
        else if (optname == MRT_PIM)
                val = mroute_do_pim;
#endif
                val = mroute_do_assert;
        if (copy_to_user(optval, &val, olr))

/*
 * The IP multicast ioctl support routines.
 */
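/* Illustrative SIOCGETVIFCNT read on the mroute socket (hypothetical
 * variables; the kernel fills the counters as handled below):
 *
 *        struct sioc_vif_req vr = { .vifi = 0 };
 *
 *        if (ioctl(s, SIOCGETVIFCNT, &vr) == 0)
 *                printf("vif0: %lu pkts in, %lu pkts out\n",
 *                       vr.icount, vr.ocount);
 */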
int ipmr_ioctl(struct sock *sk, int cmd, void __user *arg)
        struct sioc_sg_req sr;
        struct sioc_vif_req vr;
        struct vif_device *vif;
        struct mfc_cache *c;

                if (copy_from_user(&vr, arg, sizeof(vr)))
                if (vr.vifi >= init_net.ipv4.maxvif)
                read_lock(&mrt_lock);
                vif = &init_net.ipv4.vif_table[vr.vifi];
                if (VIF_EXISTS(&init_net, vr.vifi)) {
                        vr.icount = vif->pkt_in;
                        vr.ocount = vif->pkt_out;
                        vr.ibytes = vif->bytes_in;
                        vr.obytes = vif->bytes_out;
                        read_unlock(&mrt_lock);

                        if (copy_to_user(arg, &vr, sizeof(vr)))
                read_unlock(&mrt_lock);
                return -EADDRNOTAVAIL;
                if (copy_from_user(&sr, arg, sizeof(sr)))

                read_lock(&mrt_lock);
                c = ipmr_cache_find(sr.src.s_addr, sr.grp.s_addr);
                        sr.pktcnt = c->mfc_un.res.pkt;
                        sr.bytecnt = c->mfc_un.res.bytes;
                        sr.wrong_if = c->mfc_un.res.wrong_if;
                        read_unlock(&mrt_lock);

                        if (copy_to_user(arg, &sr, sizeof(sr)))
                read_unlock(&mrt_lock);
                return -EADDRNOTAVAIL;
                return -ENOIOCTLCMD;

static int ipmr_device_event(struct notifier_block *this, unsigned long event, void *ptr)
        struct net_device *dev = ptr;
        struct vif_device *v;

        if (!net_eq(dev_net(dev), &init_net))

        if (event != NETDEV_UNREGISTER)

        v = &init_net.ipv4.vif_table[0];
        for (ct = 0; ct < init_net.ipv4.maxvif; ct++, v++) {

static struct notifier_block ip_mr_notifier = {
        .notifier_call = ipmr_device_event,

/*
 * Encapsulate a packet by attaching a valid IPIP header to it.
 * This avoids tunnel drivers and other mess and gives us the speed so
 * important for multicast video.
 */
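/* Resulting packet layout (sketch):
 *
 *        +-------------------+--------------------+---------+
 *        | outer IPIP header | original IP header | payload |
 *        +-------------------+--------------------+---------+
 *
 * The outer header copies tos and ttl from the inner one and carries
 * protocol IPPROTO_IPIP, as filled in below.
 */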
static void ip_encap(struct sk_buff *skb, __be32 saddr, __be32 daddr)
        struct iphdr *old_iph = ip_hdr(skb);

        skb_push(skb, sizeof(struct iphdr));
        skb->transport_header = skb->network_header;
        skb_reset_network_header(skb);

        iph->tos = old_iph->tos;
        iph->ttl = old_iph->ttl;
        iph->protocol = IPPROTO_IPIP;
        iph->tot_len = htons(skb->len);
        ip_select_ident(iph, skb->dst, NULL);

        memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));

static inline int ipmr_forward_finish(struct sk_buff *skb)
        struct ip_options *opt = &(IPCB(skb)->opt);

        IP_INC_STATS_BH(dev_net(skb->dst->dev), IPSTATS_MIB_OUTFORWDATAGRAMS);

        if (unlikely(opt->optlen))
                ip_forward_options(skb);

        return dst_output(skb);

/*
 * Processing handlers for ipmr_forward
 */

static void ipmr_queue_xmit(struct sk_buff *skb, struct mfc_cache *c, int vifi)
        const struct iphdr *iph = ip_hdr(skb);
        struct vif_device *vif = &init_net.ipv4.vif_table[vifi];
        struct net_device *dev;

        if (vif->dev == NULL)

#ifdef CONFIG_IP_PIMSM
        if (vif->flags & VIFF_REGISTER) {
                vif->bytes_out += skb->len;
                vif->dev->stats.tx_bytes += skb->len;
                vif->dev->stats.tx_packets++;
                ipmr_cache_report(skb, vifi, IGMPMSG_WHOLEPKT);

        if (vif->flags&VIFF_TUNNEL) {
                struct flowi fl = { .oif = vif->link,
                                    .nl_u = { .ip4_u =
                                              { .daddr = vif->remote,
                                                .saddr = vif->local,
                                                .tos = RT_TOS(iph->tos) } },
                                    .proto = IPPROTO_IPIP };

                if (ip_route_output_key(&init_net, &rt, &fl))
                encap = sizeof(struct iphdr);
        } else {
                struct flowi fl = { .oif = vif->link,
                                    .nl_u = { .ip4_u =
                                              { .daddr = iph->daddr,
                                                .tos = RT_TOS(iph->tos) } },
                                    .proto = IPPROTO_IPIP };

                if (ip_route_output_key(&init_net, &rt, &fl))

        dev = rt->u.dst.dev;

        if (skb->len+encap > dst_mtu(&rt->u.dst) && (ntohs(iph->frag_off) & IP_DF)) {
                /* Do not fragment multicasts. Alas, IPv4 does not
                   allow us to send ICMP here, so such packets will disappear
                IP_INC_STATS_BH(dev_net(dev), IPSTATS_MIB_FRAGFAILS);

        encap += LL_RESERVED_SPACE(dev) + rt->u.dst.header_len;

        if (skb_cow(skb, encap)) {

        vif->bytes_out += skb->len;
        dst_release(skb->dst);
        skb->dst = &rt->u.dst;
        ip_decrease_ttl(ip_hdr(skb));
        /* FIXME: forward and output firewalls used to be called here.
         * What do we do with netfilter? -- RR */
        if (vif->flags & VIFF_TUNNEL) {
                ip_encap(skb, vif->local, vif->remote);
                /* FIXME: extra output firewall step used to be here. --RR */
                vif->dev->stats.tx_packets++;
                vif->dev->stats.tx_bytes += skb->len;

        IPCB(skb)->flags |= IPSKB_FORWARDED;
        /*
         * RFC 1584 teaches that a DVMRP/PIM router must deliver packets locally
         * not only before forwarding, but after forwarding on all output
         * interfaces. Clearly, if the mrouter runs a multicasting
         * program, it should receive packets regardless of the interface
         * on which the program joined.
         * If we did not do this, the program would have to join on all
         * interfaces. On the other hand, a multihomed host (or router, but
         * not an mrouter) cannot join on more than one interface - it would
         * result in receiving multiple packets.
         */
        NF_HOOK(PF_INET, NF_INET_FORWARD, skb, skb->dev, dev,
                ipmr_forward_finish);

static int ipmr_find_vif(struct net_device *dev)
        for (ct = init_net.ipv4.maxvif-1; ct >= 0; ct--) {
                if (init_net.ipv4.vif_table[ct].dev == dev)

/* "local" means that we should preserve one skb (for local delivery) */

static int ip_mr_forward(struct sk_buff *skb, struct mfc_cache *cache, int local)
        vif = cache->mfc_parent;
        cache->mfc_un.res.pkt++;
        cache->mfc_un.res.bytes += skb->len;

        /*
         * Wrong interface: drop packet and (maybe) send PIM assert.
         */
        if (init_net.ipv4.vif_table[vif].dev != skb->dev) {
                if (skb->rtable->fl.iif == 0) {
                        /* It is our own packet, looped back.
                           A very complicated situation...

                           The best workaround until the routing daemons are
                           fixed is not to redistribute a packet if it was
                           sent through the wrong interface. It means that
                           multicast applications WILL NOT work for
                           (S,G) entries whose default multicast route points
                           to the wrong oif. In any case, it is not a good
                           idea to run multicast applications on a router.
                cache->mfc_un.res.wrong_if++;
                true_vifi = ipmr_find_vif(skb->dev);

                if (true_vifi >= 0 && mroute_do_assert &&
                    /* PIM-SM uses asserts when switching from the RPT to
                       the SPT, so we cannot check that a packet arrived
                       on an oif. That is bad, but otherwise we would need
                       to move a pretty large chunk of pimd into the
                       kernel. Ough... --ANK
                     */
                    (mroute_do_pim || cache->mfc_un.res.ttls[true_vifi] < 255) &&
                    time_after(jiffies,
                               cache->mfc_un.res.last_assert + MFC_ASSERT_THRESH)) {
                        cache->mfc_un.res.last_assert = jiffies;
                        ipmr_cache_report(skb, true_vifi, IGMPMSG_WRONGVIF);
        init_net.ipv4.vif_table[vif].pkt_in++;
        init_net.ipv4.vif_table[vif].bytes_in += skb->len;

        for (ct = cache->mfc_un.res.maxvif-1; ct >= cache->mfc_un.res.minvif; ct--) {
                if (ip_hdr(skb)->ttl > cache->mfc_un.res.ttls[ct]) {
                        struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
                                ipmr_queue_xmit(skb2, cache, psend);

                struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
                        ipmr_queue_xmit(skb2, cache, psend);
                        ipmr_queue_xmit(skb, cache, psend);

/*
 * Multicast packets for forwarding arrive here
 */

int ip_mr_input(struct sk_buff *skb)
        struct mfc_cache *cache;
        int local = skb->rtable->rt_flags&RTCF_LOCAL;
        /* The packet was looped back after forwarding; it must not be
           forwarded a second time, but it can still be delivered locally.
         */
        if (IPCB(skb)->flags&IPSKB_FORWARDED)

        if (IPCB(skb)->opt.router_alert) {
                if (ip_call_ra_chain(skb))
        } else if (ip_hdr(skb)->protocol == IPPROTO_IGMP) {
                /* IGMPv1 (and broken IGMPv2 implementations such as
                   Cisco IOS <= 11.2(8)) do not put the router alert
                   option into IGMP packets destined for routable
                   groups. That is very bad, because it means
                   we can forward NO IGMP messages.
                 */
                read_lock(&mrt_lock);
                if (init_net.ipv4.mroute_sk) {
                        raw_rcv(init_net.ipv4.mroute_sk, skb);
                        read_unlock(&mrt_lock);
                read_unlock(&mrt_lock);

        read_lock(&mrt_lock);
        cache = ipmr_cache_find(ip_hdr(skb)->saddr, ip_hdr(skb)->daddr);

        /*
         * No usable cache entry
         */
        if (cache == NULL) {
                        struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
                        ip_local_deliver(skb);
                                read_unlock(&mrt_lock);

                vif = ipmr_find_vif(skb->dev);
                        int err = ipmr_cache_unresolved(vif, skb);
                        read_unlock(&mrt_lock);
                read_unlock(&mrt_lock);

        ip_mr_forward(skb, cache, local);

        read_unlock(&mrt_lock);
                return ip_local_deliver(skb);

                return ip_local_deliver(skb);

#ifdef CONFIG_IP_PIMSM
static int __pim_rcv(struct sk_buff *skb, unsigned int pimlen)
        struct net_device *reg_dev = NULL;
        struct iphdr *encap;

        encap = (struct iphdr *)(skb_transport_header(skb) + pimlen);
        /*
           Check that:
           a. packet is really destined to a multicast group
           b. packet is not a NULL-REGISTER
           c. packet is not truncated
         */
        if (!ipv4_is_multicast(encap->daddr) ||
            encap->tot_len == 0 ||
            ntohs(encap->tot_len) + pimlen > skb->len)

        read_lock(&mrt_lock);
        if (reg_vif_num >= 0)
                reg_dev = init_net.ipv4.vif_table[reg_vif_num].dev;
        read_unlock(&mrt_lock);

        if (reg_dev == NULL)

        skb->mac_header = skb->network_header;
        skb_pull(skb, (u8 *)encap - skb->data);
        skb_reset_network_header(skb);
        skb->protocol = htons(ETH_P_IP);
        skb->pkt_type = PACKET_HOST;
        dst_release(skb->dst);
        reg_dev->stats.rx_bytes += skb->len;
        reg_dev->stats.rx_packets++;

#ifdef CONFIG_IP_PIMSM_V1
/*
 * Handle IGMP messages of PIMv1
 */

int pim_rcv_v1(struct sk_buff *skb)
        struct igmphdr *pim;

        if (!pskb_may_pull(skb, sizeof(*pim) + sizeof(struct iphdr)))

        pim = igmp_hdr(skb);

        if (!mroute_do_pim ||
            pim->group != PIM_V1_VERSION || pim->code != PIM_V1_REGISTER)

        if (__pim_rcv(skb, sizeof(*pim))) {

#ifdef CONFIG_IP_PIMSM_V2
static int pim_rcv(struct sk_buff *skb)
        struct pimreghdr *pim;

        if (!pskb_may_pull(skb, sizeof(*pim) + sizeof(struct iphdr)))

        pim = (struct pimreghdr *)skb_transport_header(skb);
        if (pim->type != ((PIM_VERSION<<4)|(PIM_REGISTER)) ||
            (pim->flags&PIM_NULL_REGISTER) ||
            (ip_compute_csum((void *)pim, sizeof(*pim)) != 0 &&
             csum_fold(skb_checksum(skb, 0, skb->len, 0))))

        if (__pim_rcv(skb, sizeof(*pim))) {

ipmr_fill_mroute(struct sk_buff *skb, struct mfc_cache *c, struct rtmsg *rtm)
        struct rtnexthop *nhp;
        struct net_device *dev = init_net.ipv4.vif_table[c->mfc_parent].dev;
        u8 *b = skb_tail_pointer(skb);
        struct rtattr *mp_head;

        RTA_PUT(skb, RTA_IIF, 4, &dev->ifindex);

        mp_head = (struct rtattr *)skb_put(skb, RTA_LENGTH(0));

        for (ct = c->mfc_un.res.minvif; ct < c->mfc_un.res.maxvif; ct++) {
                if (c->mfc_un.res.ttls[ct] < 255) {
                        if (skb_tailroom(skb) < RTA_ALIGN(RTA_ALIGN(sizeof(*nhp)) + 4))
                                goto rtattr_failure;
                        nhp = (struct rtnexthop *)skb_put(skb, RTA_ALIGN(sizeof(*nhp)));
                        nhp->rtnh_flags = 0;
                        nhp->rtnh_hops = c->mfc_un.res.ttls[ct];
                        nhp->rtnh_ifindex = init_net.ipv4.vif_table[ct].dev->ifindex;
                        nhp->rtnh_len = sizeof(*nhp);

        mp_head->rta_type = RTA_MULTIPATH;
        mp_head->rta_len = skb_tail_pointer(skb) - (u8 *)mp_head;
        rtm->rtm_type = RTN_MULTICAST;

int ipmr_get_route(struct sk_buff *skb, struct rtmsg *rtm, int nowait)
        struct mfc_cache *cache;
        struct rtable *rt = skb->rtable;

        read_lock(&mrt_lock);
        cache = ipmr_cache_find(rt->rt_src, rt->rt_dst);

        if (cache == NULL) {
                struct sk_buff *skb2;
                struct net_device *dev;

                        read_unlock(&mrt_lock);

                if (dev == NULL || (vif = ipmr_find_vif(dev)) < 0) {
                        read_unlock(&mrt_lock);
                skb2 = skb_clone(skb, GFP_ATOMIC);
                        read_unlock(&mrt_lock);

                skb_push(skb2, sizeof(struct iphdr));
                skb_reset_network_header(skb2);
                iph->ihl = sizeof(struct iphdr) >> 2;
                iph->saddr = rt->rt_src;
                iph->daddr = rt->rt_dst;
                err = ipmr_cache_unresolved(vif, skb2);
                read_unlock(&mrt_lock);

        if (!nowait && (rtm->rtm_flags&RTM_F_NOTIFY))
                cache->mfc_flags |= MFC_NOTIFY;
        err = ipmr_fill_mroute(skb, cache, rtm);
        read_unlock(&mrt_lock);

#ifdef CONFIG_PROC_FS
/*
 * The /proc interfaces to multicast routing:
 * /proc/net/ip_mr_cache and /proc/net/ip_mr_vif
 */
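/* Illustrative /proc/net/ip_mr_vif contents (made-up numbers), one line
 * per configured vif in the format produced by ipmr_vif_seq_show():
 *
 *        Interface      BytesIn  PktsIn  BytesOut PktsOut Flags Local    Remote
 *         0 eth0          123456     100    654321     200 00000 0100000A 00000000
 */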
struct ipmr_vif_iter {

static struct vif_device *ipmr_vif_seq_idx(struct ipmr_vif_iter *iter,
        for (iter->ct = 0; iter->ct < init_net.ipv4.maxvif; ++iter->ct) {
                if (!VIF_EXISTS(&init_net, iter->ct))
                return &init_net.ipv4.vif_table[iter->ct];

static void *ipmr_vif_seq_start(struct seq_file *seq, loff_t *pos)
        __acquires(mrt_lock)
        read_lock(&mrt_lock);
        return *pos ? ipmr_vif_seq_idx(seq->private, *pos - 1)

static void *ipmr_vif_seq_next(struct seq_file *seq, void *v, loff_t *pos)
        struct ipmr_vif_iter *iter = seq->private;

        if (v == SEQ_START_TOKEN)
                return ipmr_vif_seq_idx(iter, 0);

        while (++iter->ct < init_net.ipv4.maxvif) {
                if (!VIF_EXISTS(&init_net, iter->ct))
                return &init_net.ipv4.vif_table[iter->ct];

static void ipmr_vif_seq_stop(struct seq_file *seq, void *v)
        __releases(mrt_lock)
        read_unlock(&mrt_lock);

static int ipmr_vif_seq_show(struct seq_file *seq, void *v)
        if (v == SEQ_START_TOKEN) {
                         "Interface BytesIn PktsIn BytesOut PktsOut Flags Local Remote\n");
                const struct vif_device *vif = v;
                const char *name = vif->dev ? vif->dev->name : "none";

                           "%2Zd %-10s %8ld %7ld %8ld %7ld %05X %08X %08X\n",
                           vif - init_net.ipv4.vif_table,
                           name, vif->bytes_in, vif->pkt_in,
                           vif->bytes_out, vif->pkt_out,
                           vif->flags, vif->local, vif->remote);

static const struct seq_operations ipmr_vif_seq_ops = {
        .start = ipmr_vif_seq_start,
        .next  = ipmr_vif_seq_next,
        .stop  = ipmr_vif_seq_stop,
        .show  = ipmr_vif_seq_show,

static int ipmr_vif_open(struct inode *inode, struct file *file)
        return seq_open_private(file, &ipmr_vif_seq_ops,
                                sizeof(struct ipmr_vif_iter));

static const struct file_operations ipmr_vif_fops = {
        .owner   = THIS_MODULE,
        .open    = ipmr_vif_open,
        .llseek  = seq_lseek,
        .release = seq_release_private,

struct ipmr_mfc_iter {
        struct mfc_cache **cache;

static struct mfc_cache *ipmr_mfc_seq_idx(struct ipmr_mfc_iter *it, loff_t pos)
        struct mfc_cache *mfc;

        it->cache = mfc_cache_array;
        read_lock(&mrt_lock);
        for (it->ct = 0; it->ct < MFC_LINES; it->ct++)
                for (mfc = mfc_cache_array[it->ct]; mfc; mfc = mfc->next)
        read_unlock(&mrt_lock);

        it->cache = &mfc_unres_queue;
        spin_lock_bh(&mfc_unres_lock);
        for (mfc = mfc_unres_queue; mfc; mfc = mfc->next)
        spin_unlock_bh(&mfc_unres_lock);

static void *ipmr_mfc_seq_start(struct seq_file *seq, loff_t *pos)
        struct ipmr_mfc_iter *it = seq->private;
        return *pos ? ipmr_mfc_seq_idx(seq->private, *pos - 1)

static void *ipmr_mfc_seq_next(struct seq_file *seq, void *v, loff_t *pos)
        struct mfc_cache *mfc = v;
        struct ipmr_mfc_iter *it = seq->private;

        if (v == SEQ_START_TOKEN)
                return ipmr_mfc_seq_idx(seq->private, 0);

        if (it->cache == &mfc_unres_queue)

        BUG_ON(it->cache != mfc_cache_array);

        while (++it->ct < MFC_LINES) {
                mfc = mfc_cache_array[it->ct];

        /* exhausted cache_array, show unresolved */
        read_unlock(&mrt_lock);
        it->cache = &mfc_unres_queue;

        spin_lock_bh(&mfc_unres_lock);
        mfc = mfc_unres_queue;
        spin_unlock_bh(&mfc_unres_lock);

static void ipmr_mfc_seq_stop(struct seq_file *seq, void *v)
        struct ipmr_mfc_iter *it = seq->private;

        if (it->cache == &mfc_unres_queue)
                spin_unlock_bh(&mfc_unres_lock);
        else if (it->cache == mfc_cache_array)
                read_unlock(&mrt_lock);

static int ipmr_mfc_seq_show(struct seq_file *seq, void *v)
        if (v == SEQ_START_TOKEN) {
                         "Group Origin Iif Pkts Bytes Wrong Oifs\n");
                const struct mfc_cache *mfc = v;
                const struct ipmr_mfc_iter *it = seq->private;

                seq_printf(seq, "%08lX %08lX %-3hd",
                           (unsigned long) mfc->mfc_mcastgrp,
                           (unsigned long) mfc->mfc_origin,

                if (it->cache != &mfc_unres_queue) {
                        seq_printf(seq, " %8lu %8lu %8lu",
                                   mfc->mfc_un.res.pkt,
                                   mfc->mfc_un.res.bytes,
                                   mfc->mfc_un.res.wrong_if);
                        for (n = mfc->mfc_un.res.minvif;
                             n < mfc->mfc_un.res.maxvif; n++) {
                                if (VIF_EXISTS(&init_net, n) &&
                                    mfc->mfc_un.res.ttls[n] < 255)
                                           n, mfc->mfc_un.res.ttls[n]);
                        /* unresolved mfc_caches don't contain
                         * pkt, bytes and wrong_if values
                         */
                        seq_printf(seq, " %8lu %8lu %8lu", 0ul, 0ul, 0ul);
                seq_putc(seq, '\n');

static const struct seq_operations ipmr_mfc_seq_ops = {
        .start = ipmr_mfc_seq_start,
        .next  = ipmr_mfc_seq_next,
        .stop  = ipmr_mfc_seq_stop,
        .show  = ipmr_mfc_seq_show,

static int ipmr_mfc_open(struct inode *inode, struct file *file)
        return seq_open_private(file, &ipmr_mfc_seq_ops,
                                sizeof(struct ipmr_mfc_iter));

static const struct file_operations ipmr_mfc_fops = {
        .owner   = THIS_MODULE,
        .open    = ipmr_mfc_open,
        .llseek  = seq_lseek,
        .release = seq_release_private,
#ifdef CONFIG_IP_PIMSM_V2
static struct net_protocol pim_protocol = {

/*
 * Setup for IP multicast routing
 */
static int __net_init ipmr_net_init(struct net *net)
        net->ipv4.vif_table = kcalloc(MAXVIFS, sizeof(struct vif_device),
        if (!net->ipv4.vif_table) {

static void __net_exit ipmr_net_exit(struct net *net)
        kfree(net->ipv4.vif_table);

static struct pernet_operations ipmr_net_ops = {
        .init = ipmr_net_init,
        .exit = ipmr_net_exit,

int __init ip_mr_init(void)
        mrt_cachep = kmem_cache_create("ip_mrt_cache",
                                       sizeof(struct mfc_cache),
                                       0, SLAB_HWCACHE_ALIGN|SLAB_PANIC,

        err = register_pernet_subsys(&ipmr_net_ops);
                goto reg_pernet_fail;

        setup_timer(&ipmr_expire_timer, ipmr_expire_process, 0);
        err = register_netdevice_notifier(&ip_mr_notifier);
                goto reg_notif_fail;

#ifdef CONFIG_PROC_FS
        if (!proc_net_fops_create(&init_net, "ip_mr_vif", 0, &ipmr_vif_fops))
        if (!proc_net_fops_create(&init_net, "ip_mr_cache", 0, &ipmr_mfc_fops))
                goto proc_cache_fail;

#ifdef CONFIG_PROC_FS
        proc_net_remove(&init_net, "ip_mr_vif");
        unregister_netdevice_notifier(&ip_mr_notifier);
        del_timer(&ipmr_expire_timer);
        unregister_pernet_subsys(&ipmr_net_ops);
        kmem_cache_destroy(mrt_cachep);