/*
 *	Linux IPv6 multicast routing support for BSD pim6sd
 *	Based on net/ipv4/ipmr.c.
 *
 *	(c) 2004 Mickael Hoerdt, <hoerdt@clarinet.u-strasbg.fr>
 *		LSIIT Laboratory, Strasbourg, France
 *	(c) 2004 Jean-Philippe Andriot, <jean-philippe.andriot@6WIND.com>
 *	Copyright (C)2007,2008 USAGI/WIDE Project
 *		YOSHIFUJI Hideaki <yoshfuji@linux-ipv6.org>
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */
#include <asm/system.h>
#include <asm/uaccess.h>
#include <linux/types.h>
#include <linux/sched.h>
#include <linux/errno.h>
#include <linux/timer.h>
#include <linux/kernel.h>
#include <linux/fcntl.h>
#include <linux/stat.h>
#include <linux/socket.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/inetdevice.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/init.h>
#include <net/protocol.h>
#include <linux/skbuff.h>
#include <linux/notifier.h>
#include <linux/if_arp.h>
#include <net/checksum.h>
#include <net/netlink.h>
#include <net/ip6_route.h>
#include <linux/mroute6.h>
#include <linux/pim.h>
#include <net/addrconf.h>
#include <linux/netfilter_ipv6.h>
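
/* Socket owned by the user-space multicast routing daemon (e.g. pim6sd);
 * claimed via MRT6_INIT in ip6mr_sk_init() and released in ip6mr_sk_done(). */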
struct sock *mroute6_socket;

/* Big lock, protecting vif table, mrt cache and mroute socket state.
   Note that the changes are semaphored via rtnl_lock.
 */

static DEFINE_RWLOCK(mrt_lock);
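
/* mrt_lock is taken for reading on the packet forwarding and read-out paths
 * (/proc, ioctl, rtnetlink) and for writing, with bottom halves disabled,
 * whenever the vif table or the resolved MFC hash is modified. */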
/*
 *	Multicast router control variables
 */

static struct mif_device vif6_table[MAXMIFS];		/* Devices */
static int maxvif;

#define MIF_EXISTS(idx) (vif6_table[idx].dev != NULL)

static int mroute_do_assert;				/* Set in PIM assert */
#ifdef CONFIG_IPV6_PIMSM_V2
static int mroute_do_pim;
#else
#define mroute_do_pim 0
#endif

static struct mfc6_cache *mfc6_cache_array[MFC6_LINES];	/* Forwarding cache */

static struct mfc6_cache *mfc_unres_queue;		/* Queue of unresolved entries */
static atomic_t cache_resolve_queue_len;		/* Size of unresolved */

/* Special spinlock for queue of unresolved entries */
static DEFINE_SPINLOCK(mfc_unres_lock);

/* We return to original Alan's scheme. Hash table of resolved
   entries is changed only in process context and protected
   with weak lock mrt_lock. Queue of unresolved entries is protected
   with strong spinlock mfc_unres_lock.

   In this case data path is free of exclusive locks at all.
 */

static struct kmem_cache *mrt_cachep __read_mostly;

static int ip6_mr_forward(struct sk_buff *skb, struct mfc6_cache *cache);
static int ip6mr_cache_report(struct sk_buff *pkt, mifi_t mifi, int assert);
static int ip6mr_fill_mroute(struct sk_buff *skb, struct mfc6_cache *c, struct rtmsg *rtm);

#ifdef CONFIG_IPV6_PIMSM_V2
static struct inet6_protocol pim6_protocol;
#endif

static struct timer_list ipmr_expire_timer;
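
/* Timer driving ipmr_expire_process(): it is (re)armed while the unresolved
 * queue is non-empty and stopped once cache_resolve_queue_len drops to zero. */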

#ifdef CONFIG_PROC_FS

struct ipmr_mfc_iter {
	struct mfc6_cache **cache;

static struct mfc6_cache *ipmr_mfc_seq_idx(struct ipmr_mfc_iter *it, loff_t pos)
	struct mfc6_cache *mfc;

	it->cache = mfc6_cache_array;
	read_lock(&mrt_lock);
	for (it->ct = 0; it->ct < ARRAY_SIZE(mfc6_cache_array); it->ct++)
		for (mfc = mfc6_cache_array[it->ct]; mfc; mfc = mfc->next)
	read_unlock(&mrt_lock);

	it->cache = &mfc_unres_queue;
	spin_lock_bh(&mfc_unres_lock);
	for (mfc = mfc_unres_queue; mfc; mfc = mfc->next)
	spin_unlock_bh(&mfc_unres_lock);

/*
 *	The /proc interfaces to multicast routing /proc/ip6_mr_cache /proc/ip6_mr_vif
 */

struct ipmr_vif_iter {

static struct mif_device *ip6mr_vif_seq_idx(struct ipmr_vif_iter *iter,
	for (iter->ct = 0; iter->ct < maxvif; ++iter->ct) {
		if (!MIF_EXISTS(iter->ct))
			return &vif6_table[iter->ct];

static void *ip6mr_vif_seq_start(struct seq_file *seq, loff_t *pos)
	read_lock(&mrt_lock);
	return (*pos ? ip6mr_vif_seq_idx(seq->private, *pos - 1)

static void *ip6mr_vif_seq_next(struct seq_file *seq, void *v, loff_t *pos)
	struct ipmr_vif_iter *iter = seq->private;

	if (v == SEQ_START_TOKEN)
		return ip6mr_vif_seq_idx(iter, 0);

	while (++iter->ct < maxvif) {
		if (!MIF_EXISTS(iter->ct))
		return &vif6_table[iter->ct];

static void ip6mr_vif_seq_stop(struct seq_file *seq, void *v)
	read_unlock(&mrt_lock);

static int ip6mr_vif_seq_show(struct seq_file *seq, void *v)
	if (v == SEQ_START_TOKEN) {
			 "Interface BytesIn PktsIn BytesOut PktsOut Flags\n");
		const struct mif_device *vif = v;
		const char *name = vif->dev ? vif->dev->name : "none";

			   "%2td %-10s %8ld %7ld %8ld %7ld %05X\n",
			   name, vif->bytes_in, vif->pkt_in,
			   vif->bytes_out, vif->pkt_out,

static struct seq_operations ip6mr_vif_seq_ops = {
	.start = ip6mr_vif_seq_start,
	.next  = ip6mr_vif_seq_next,
	.stop  = ip6mr_vif_seq_stop,
	.show  = ip6mr_vif_seq_show,

static int ip6mr_vif_open(struct inode *inode, struct file *file)
	return seq_open_private(file, &ip6mr_vif_seq_ops,
				sizeof(struct ipmr_vif_iter));

static struct file_operations ip6mr_vif_fops = {
	.owner	 = THIS_MODULE,
	.open    = ip6mr_vif_open,
	.release = seq_release_private,

static void *ipmr_mfc_seq_start(struct seq_file *seq, loff_t *pos)
	return (*pos ? ipmr_mfc_seq_idx(seq->private, *pos - 1)

static void *ipmr_mfc_seq_next(struct seq_file *seq, void *v, loff_t *pos)
	struct mfc6_cache *mfc = v;
	struct ipmr_mfc_iter *it = seq->private;

	if (v == SEQ_START_TOKEN)
		return ipmr_mfc_seq_idx(seq->private, 0);

	if (it->cache == &mfc_unres_queue)

	BUG_ON(it->cache != mfc6_cache_array);

	while (++it->ct < ARRAY_SIZE(mfc6_cache_array)) {
		mfc = mfc6_cache_array[it->ct];

	/* exhausted cache_array, show unresolved */
	read_unlock(&mrt_lock);
	it->cache = &mfc_unres_queue;

	spin_lock_bh(&mfc_unres_lock);
	mfc = mfc_unres_queue;

	spin_unlock_bh(&mfc_unres_lock);

static void ipmr_mfc_seq_stop(struct seq_file *seq, void *v)
	struct ipmr_mfc_iter *it = seq->private;

	if (it->cache == &mfc_unres_queue)
		spin_unlock_bh(&mfc_unres_lock);
	else if (it->cache == mfc6_cache_array)
		read_unlock(&mrt_lock);

static int ipmr_mfc_seq_show(struct seq_file *seq, void *v)
	if (v == SEQ_START_TOKEN) {
			 "Iif Pkts Bytes Wrong Oifs\n");
		const struct mfc6_cache *mfc = v;
		const struct ipmr_mfc_iter *it = seq->private;

		seq_printf(seq, "%pI6 %pI6 %-3d",
			   &mfc->mf6c_mcastgrp, &mfc->mf6c_origin,

		if (it->cache != &mfc_unres_queue) {
			seq_printf(seq, " %8lu %8lu %8lu",
				   mfc->mfc_un.res.bytes,
				   mfc->mfc_un.res.wrong_if);
			for (n = mfc->mfc_un.res.minvif;
			     n < mfc->mfc_un.res.maxvif; n++) {
				    mfc->mfc_un.res.ttls[n] < 255)
					   n, mfc->mfc_un.res.ttls[n]);
			/* unresolved mfc_caches don't contain
			 * pkt, bytes and wrong_if values
			 */
			seq_printf(seq, " %8lu %8lu %8lu", 0ul, 0ul, 0ul);

static struct seq_operations ipmr_mfc_seq_ops = {
	.start = ipmr_mfc_seq_start,
	.next  = ipmr_mfc_seq_next,
	.stop  = ipmr_mfc_seq_stop,
	.show  = ipmr_mfc_seq_show,

static int ipmr_mfc_open(struct inode *inode, struct file *file)
	return seq_open_private(file, &ipmr_mfc_seq_ops,
				sizeof(struct ipmr_mfc_iter));

static struct file_operations ip6mr_mfc_fops = {
	.owner	 = THIS_MODULE,
	.open    = ipmr_mfc_open,
	.release = seq_release_private,
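
/*
 * Example of the exported format (illustrative values only, not taken from
 * a real system):
 *
 *	# cat /proc/net/ip6_mr_vif
 *	Interface BytesIn PktsIn BytesOut PktsOut Flags
 *	 0 eth0        1492      12        0       0 00000
 */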

#ifdef CONFIG_IPV6_PIMSM_V2
static int reg_vif_num = -1;

static int pim6_rcv(struct sk_buff *skb)
	struct pimreghdr *pim;
	struct ipv6hdr *encap;
	struct net_device *reg_dev = NULL;

	if (!pskb_may_pull(skb, sizeof(*pim) + sizeof(*encap)))

	pim = (struct pimreghdr *)skb_transport_header(skb);
	if (pim->type != ((PIM_VERSION << 4) | PIM_REGISTER) ||
	    (pim->flags & PIM_NULL_REGISTER) ||
	    (ip_compute_csum((void *)pim, sizeof(*pim)) != 0 &&
	     csum_fold(skb_checksum(skb, 0, skb->len, 0))))

	/* check if the inner packet is destined to mcast group */
	encap = (struct ipv6hdr *)(skb_transport_header(skb) +

	if (!ipv6_addr_is_multicast(&encap->daddr) ||
	    encap->payload_len == 0 ||
	    ntohs(encap->payload_len) + sizeof(*pim) > skb->len)

	read_lock(&mrt_lock);
	if (reg_vif_num >= 0)
		reg_dev = vif6_table[reg_vif_num].dev;
	read_unlock(&mrt_lock);

	skb->mac_header = skb->network_header;
	skb_pull(skb, (u8 *)encap - skb->data);
	skb_reset_network_header(skb);
	skb->protocol = htons(ETH_P_IP);
	skb->pkt_type = PACKET_HOST;
	dst_release(skb->dst);
	reg_dev->stats.rx_bytes += skb->len;
	reg_dev->stats.rx_packets++;

static struct inet6_protocol pim6_protocol = {

/* Service routines creating virtual interfaces: PIMREG */

static int reg_vif_xmit(struct sk_buff *skb, struct net_device *dev)
	read_lock(&mrt_lock);
	dev->stats.tx_bytes += skb->len;
	dev->stats.tx_packets++;
	ip6mr_cache_report(skb, reg_vif_num, MRT6MSG_WHOLEPKT);
	read_unlock(&mrt_lock);

static const struct net_device_ops reg_vif_netdev_ops = {
	.ndo_start_xmit	= reg_vif_xmit,

static void reg_vif_setup(struct net_device *dev)
	dev->type		= ARPHRD_PIMREG;
	dev->mtu		= 1500 - sizeof(struct ipv6hdr) - 8;
	dev->flags		= IFF_NOARP;
	dev->netdev_ops		= &reg_vif_netdev_ops;
	dev->destructor		= free_netdev;
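
/* Create the "pim6reg" pseudo-device used as the PIM register interface;
 * packets the stack transmits on it are bounced back to the daemon as
 * MRT6MSG_WHOLEPKT upcalls by reg_vif_xmit(). */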
static struct net_device *ip6mr_reg_vif(void)
	struct net_device *dev;

	dev = alloc_netdev(0, "pim6reg", reg_vif_setup);

	if (register_netdevice(dev)) {

	/* allow the register to be completed before unregistering. */

	unregister_netdevice(dev);

static int mif6_delete(int vifi)
	struct mif_device *v;
	struct net_device *dev;
	if (vifi < 0 || vifi >= maxvif)
		return -EADDRNOTAVAIL;

	v = &vif6_table[vifi];

	write_lock_bh(&mrt_lock);
		write_unlock_bh(&mrt_lock);
		return -EADDRNOTAVAIL;

#ifdef CONFIG_IPV6_PIMSM_V2
	if (vifi == reg_vif_num)

	if (vifi + 1 == maxvif) {
		for (tmp = vifi - 1; tmp >= 0; tmp--) {

	write_unlock_bh(&mrt_lock);

	dev_set_allmulti(dev, -1);

	if (v->flags & MIFF_REGISTER)
		unregister_netdevice(dev);

/* Destroy an unresolved cache entry, killing queued skbs
   and reporting error to netlink readers.
 */

static void ip6mr_destroy_unres(struct mfc6_cache *c)
	atomic_dec(&cache_resolve_queue_len);

	while((skb = skb_dequeue(&c->mfc_un.unres.unresolved)) != NULL) {
		if (ipv6_hdr(skb)->version == 0) {
			struct nlmsghdr *nlh = (struct nlmsghdr *)skb_pull(skb, sizeof(struct ipv6hdr));
			nlh->nlmsg_type = NLMSG_ERROR;
			nlh->nlmsg_len = NLMSG_LENGTH(sizeof(struct nlmsgerr));
			skb_trim(skb, nlh->nlmsg_len);
			((struct nlmsgerr *)NLMSG_DATA(nlh))->error = -ETIMEDOUT;
			rtnl_unicast(skb, &init_net, NETLINK_CB(skb).pid);

	kmem_cache_free(mrt_cachep, c);

/* Single timer process for all the unresolved queue. */

static void ipmr_do_expire_process(unsigned long dummy)
	unsigned long now = jiffies;
	unsigned long expires = 10 * HZ;
	struct mfc6_cache *c, **cp;

	cp = &mfc_unres_queue;

	while ((c = *cp) != NULL) {
		if (time_after(c->mfc_un.unres.expires, now)) {
			unsigned long interval = c->mfc_un.unres.expires - now;
			if (interval < expires)

		ip6mr_destroy_unres(c);

	if (atomic_read(&cache_resolve_queue_len))
		mod_timer(&ipmr_expire_timer, jiffies + expires);

static void ipmr_expire_process(unsigned long dummy)
	if (!spin_trylock(&mfc_unres_lock)) {
		mod_timer(&ipmr_expire_timer, jiffies + 1);

	if (atomic_read(&cache_resolve_queue_len))
		ipmr_do_expire_process(dummy);

	spin_unlock(&mfc_unres_lock);

/* Fill oifs list. It is called under write locked mrt_lock. */

static void ip6mr_update_thresholds(struct mfc6_cache *cache, unsigned char *ttls)
	cache->mfc_un.res.minvif = MAXMIFS;
	cache->mfc_un.res.maxvif = 0;
	memset(cache->mfc_un.res.ttls, 255, MAXMIFS);

	for (vifi = 0; vifi < maxvif; vifi++) {
		if (MIF_EXISTS(vifi) && ttls[vifi] && ttls[vifi] < 255) {
			cache->mfc_un.res.ttls[vifi] = ttls[vifi];
			if (cache->mfc_un.res.minvif > vifi)
				cache->mfc_un.res.minvif = vifi;
			if (cache->mfc_un.res.maxvif <= vifi)
				cache->mfc_un.res.maxvif = vifi + 1;
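
/* Register a new multicast interface in response to MRT6_ADD_MIF:
 * MIFF_REGISTER entries are backed by the pim6reg pseudo-device, everything
 * else is looked up by its physical interface index (mif6c_pifi). */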
static int mif6_add(struct mif6ctl *vifc, int mrtsock)
	int vifi = vifc->mif6c_mifi;
	struct mif_device *v = &vif6_table[vifi];
	struct net_device *dev;

	if (MIF_EXISTS(vifi))

	switch (vifc->mif6c_flags) {
#ifdef CONFIG_IPV6_PIMSM_V2
		/*
		 * Special Purpose VIF in PIM
		 * All the packets will be sent to the daemon
		 */
		if (reg_vif_num >= 0)
		dev = ip6mr_reg_vif();
		err = dev_set_allmulti(dev, 1);
			unregister_netdevice(dev);

		dev = dev_get_by_index(&init_net, vifc->mif6c_pifi);
			return -EADDRNOTAVAIL;
		err = dev_set_allmulti(dev, 1);

	/*
	 *	Fill in the VIF structures
	 */
	v->rate_limit = vifc->vifc_rate_limit;
	v->flags = vifc->mif6c_flags;
		v->flags |= VIFF_STATIC;
	v->threshold = vifc->vifc_threshold;
	v->link = dev->ifindex;
	if (v->flags & MIFF_REGISTER)
		v->link = dev->iflink;

	/* And finish update writing critical data */
	write_lock_bh(&mrt_lock);
#ifdef CONFIG_IPV6_PIMSM_V2
	if (v->flags & MIFF_REGISTER)
	if (vifi + 1 > maxvif)
	write_unlock_bh(&mrt_lock);

static struct mfc6_cache *ip6mr_cache_find(struct in6_addr *origin, struct in6_addr *mcastgrp)
	int line = MFC6_HASH(mcastgrp, origin);
	struct mfc6_cache *c;

	for (c = mfc6_cache_array[line]; c; c = c->next) {
		if (ipv6_addr_equal(&c->mf6c_origin, origin) &&
		    ipv6_addr_equal(&c->mf6c_mcastgrp, mcastgrp))

/*
 *	Allocate a multicast cache entry
 */
static struct mfc6_cache *ip6mr_cache_alloc(void)
	struct mfc6_cache *c = kmem_cache_alloc(mrt_cachep, GFP_KERNEL);
	memset(c, 0, sizeof(*c));
	c->mfc_un.res.minvif = MAXMIFS;

static struct mfc6_cache *ip6mr_cache_alloc_unres(void)
	struct mfc6_cache *c = kmem_cache_alloc(mrt_cachep, GFP_ATOMIC);
	memset(c, 0, sizeof(*c));
	skb_queue_head_init(&c->mfc_un.unres.unresolved);
	c->mfc_un.unres.expires = jiffies + 10 * HZ;

/*
 *	A cache entry has gone into a resolved state from queued
 */

static void ip6mr_cache_resolve(struct mfc6_cache *uc, struct mfc6_cache *c)
	/*
	 *	Play the pending entries through our router
	 */

	while((skb = __skb_dequeue(&uc->mfc_un.unres.unresolved))) {
		if (ipv6_hdr(skb)->version == 0) {
			struct nlmsghdr *nlh = (struct nlmsghdr *)skb_pull(skb, sizeof(struct ipv6hdr));

			if (ip6mr_fill_mroute(skb, c, NLMSG_DATA(nlh)) > 0) {
				nlh->nlmsg_len = skb_tail_pointer(skb) - (u8 *)nlh;
				nlh->nlmsg_type = NLMSG_ERROR;
				nlh->nlmsg_len = NLMSG_LENGTH(sizeof(struct nlmsgerr));
				skb_trim(skb, nlh->nlmsg_len);
				((struct nlmsgerr *)NLMSG_DATA(nlh))->error = -EMSGSIZE;
			err = rtnl_unicast(skb, &init_net, NETLINK_CB(skb).pid);
			ip6_mr_forward(skb, c);

/*
 *	Bounce a cache query up to pim6sd. We could use netlink for this but pim6sd
 *	expects the following bizarre scheme.
 *
 *	Called under mrt_lock.
 */
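
/*
 * For orientation, the daemon side of this scheme (a sketch only, not part
 * of the kernel) reads the upcall from its ICMPv6 raw socket and interprets
 * the leading bytes as struct mrt6msg, roughly:
 *
 *	n = read(mrt6_sock, buf, sizeof(buf));
 *	msg = (struct mrt6msg *)buf;
 *	switch (msg->im6_msgtype) {
 *	case MRT6MSG_NOCACHE:	// install an (S,G) entry via MRT6_ADD_MFC
 *	case MRT6MSG_WRONGMIF:	// PIM assert processing
 *	case MRT6MSG_WHOLEPKT:	// PIM register encapsulation
 *		...
 *	}
 */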

static int ip6mr_cache_report(struct sk_buff *pkt, mifi_t mifi, int assert)
#ifdef CONFIG_IPV6_PIMSM_V2
	if (assert == MRT6MSG_WHOLEPKT)
		skb = skb_realloc_headroom(pkt, -skb_network_offset(pkt)
		skb = alloc_skb(sizeof(struct ipv6hdr) + sizeof(*msg), GFP_ATOMIC);

	/* I suppose that internal messages
	 * do not require checksums */

	skb->ip_summed = CHECKSUM_UNNECESSARY;

#ifdef CONFIG_IPV6_PIMSM_V2
	if (assert == MRT6MSG_WHOLEPKT) {
		/* Ugly, but we have no choice with this interface.
		   Duplicate old header, fix length etc.
		   And all this only to mangle msg->im6_msgtype and
		   to set msg->im6_mbz to "mbz" :-)
		 */
		skb_push(skb, -skb_network_offset(pkt));

		skb_push(skb, sizeof(*msg));
		skb_reset_transport_header(skb);
		msg = (struct mrt6msg *)skb_transport_header(skb);
		msg->im6_msgtype = MRT6MSG_WHOLEPKT;
		msg->im6_mif = reg_vif_num;
		ipv6_addr_copy(&msg->im6_src, &ipv6_hdr(pkt)->saddr);
		ipv6_addr_copy(&msg->im6_dst, &ipv6_hdr(pkt)->daddr);

		skb->ip_summed = CHECKSUM_UNNECESSARY;

	skb_put(skb, sizeof(struct ipv6hdr));
	skb_reset_network_header(skb);
	skb_copy_to_linear_data(skb, ipv6_hdr(pkt), sizeof(struct ipv6hdr));

	skb_put(skb, sizeof(*msg));
	skb_reset_transport_header(skb);
	msg = (struct mrt6msg *)skb_transport_header(skb);

	msg->im6_msgtype = assert;
	ipv6_addr_copy(&msg->im6_src, &ipv6_hdr(pkt)->saddr);
	ipv6_addr_copy(&msg->im6_dst, &ipv6_hdr(pkt)->daddr);

	skb->dst = dst_clone(pkt->dst);
	skb->ip_summed = CHECKSUM_UNNECESSARY;

	skb_pull(skb, sizeof(struct ipv6hdr));

	if (mroute6_socket == NULL) {

	/*
	 *	Deliver to user space multicast routing algorithms
	 */
	if ((ret = sock_queue_rcv_skb(mroute6_socket, skb)) < 0) {
		printk(KERN_WARNING "mroute6: pending queue full, dropping entries.\n");

/*
 *	Queue a packet for resolution. It gets locked cache entry!
 */

static int
ip6mr_cache_unresolved(mifi_t mifi, struct sk_buff *skb)
	struct mfc6_cache *c;

	spin_lock_bh(&mfc_unres_lock);
	for (c = mfc_unres_queue; c; c = c->next) {
		if (ipv6_addr_equal(&c->mf6c_mcastgrp, &ipv6_hdr(skb)->daddr) &&
		    ipv6_addr_equal(&c->mf6c_origin, &ipv6_hdr(skb)->saddr))

		/*
		 *	Create a new entry if allowable
		 */
		if (atomic_read(&cache_resolve_queue_len) >= 10 ||
		    (c = ip6mr_cache_alloc_unres()) == NULL) {
			spin_unlock_bh(&mfc_unres_lock);

		/*
		 *	Fill in the new cache entry
		 */
		c->mf6c_origin = ipv6_hdr(skb)->saddr;
		c->mf6c_mcastgrp = ipv6_hdr(skb)->daddr;

		/*
		 *	Reflect first query at pim6sd
		 */
		if ((err = ip6mr_cache_report(skb, mifi, MRT6MSG_NOCACHE)) < 0) {
			/* If the report failed throw the cache entry out */
			spin_unlock_bh(&mfc_unres_lock);

			kmem_cache_free(mrt_cachep, c);

		atomic_inc(&cache_resolve_queue_len);
		c->next = mfc_unres_queue;

		ipmr_do_expire_process(1);

	/*
	 *	See if we can append the packet
	 */
	if (c->mfc_un.unres.unresolved.qlen > 3) {
		skb_queue_tail(&c->mfc_un.unres.unresolved, skb);

	spin_unlock_bh(&mfc_unres_lock);

/*
 *	MFC6 cache manipulation by user space
 */

static int ip6mr_mfc_delete(struct mf6cctl *mfc)
	struct mfc6_cache *c, **cp;

	line = MFC6_HASH(&mfc->mf6cc_mcastgrp.sin6_addr, &mfc->mf6cc_origin.sin6_addr);

	for (cp = &mfc6_cache_array[line]; (c = *cp) != NULL; cp = &c->next) {
		if (ipv6_addr_equal(&c->mf6c_origin, &mfc->mf6cc_origin.sin6_addr) &&
		    ipv6_addr_equal(&c->mf6c_mcastgrp, &mfc->mf6cc_mcastgrp.sin6_addr)) {
			write_lock_bh(&mrt_lock);
			write_unlock_bh(&mrt_lock);

			kmem_cache_free(mrt_cachep, c);

static int ip6mr_device_event(struct notifier_block *this,
			      unsigned long event, void *ptr)
	struct net_device *dev = ptr;
	struct mif_device *v;

	if (!net_eq(dev_net(dev), &init_net))

	if (event != NETDEV_UNREGISTER)

	for (ct = 0; ct < maxvif; ct++, v++) {

static struct notifier_block ip6_mr_notifier = {
	.notifier_call = ip6mr_device_event

/*
 *	Setup for IP multicast routing
 */

int __init ip6_mr_init(void)
	mrt_cachep = kmem_cache_create("ip6_mrt_cache",
				       sizeof(struct mfc6_cache),
				       0, SLAB_HWCACHE_ALIGN,

	setup_timer(&ipmr_expire_timer, ipmr_expire_process, 0);
	err = register_netdevice_notifier(&ip6_mr_notifier);
#ifdef CONFIG_PROC_FS
	if (!proc_net_fops_create(&init_net, "ip6_mr_vif", 0, &ip6mr_vif_fops))
	if (!proc_net_fops_create(&init_net, "ip6_mr_cache",
		goto proc_cache_fail;

#ifdef CONFIG_PROC_FS
	proc_net_remove(&init_net, "ip6_mr_vif");
	unregister_netdevice_notifier(&ip6_mr_notifier);
	del_timer(&ipmr_expire_timer);
	kmem_cache_destroy(mrt_cachep);

void ip6_mr_cleanup(void)
#ifdef CONFIG_PROC_FS
	proc_net_remove(&init_net, "ip6_mr_cache");
	proc_net_remove(&init_net, "ip6_mr_vif");
	unregister_netdevice_notifier(&ip6_mr_notifier);
	del_timer(&ipmr_expire_timer);
	kmem_cache_destroy(mrt_cachep);
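
/* Install or update an (S,G) forwarding entry (MRT6_ADD_MFC).  If a matching
 * entry was waiting on the unresolved queue, its queued packets are replayed
 * through ip6mr_cache_resolve(). */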
static int ip6mr_mfc_add(struct mf6cctl *mfc, int mrtsock)
	struct mfc6_cache *uc, *c, **cp;
	unsigned char ttls[MAXMIFS];

	memset(ttls, 255, MAXMIFS);
	for (i = 0; i < MAXMIFS; i++) {
		if (IF_ISSET(i, &mfc->mf6cc_ifset))

	line = MFC6_HASH(&mfc->mf6cc_mcastgrp.sin6_addr, &mfc->mf6cc_origin.sin6_addr);

	for (cp = &mfc6_cache_array[line]; (c = *cp) != NULL; cp = &c->next) {
		if (ipv6_addr_equal(&c->mf6c_origin, &mfc->mf6cc_origin.sin6_addr) &&
		    ipv6_addr_equal(&c->mf6c_mcastgrp, &mfc->mf6cc_mcastgrp.sin6_addr))

		write_lock_bh(&mrt_lock);
		c->mf6c_parent = mfc->mf6cc_parent;
		ip6mr_update_thresholds(c, ttls);
			c->mfc_flags |= MFC_STATIC;
		write_unlock_bh(&mrt_lock);

	if (!ipv6_addr_is_multicast(&mfc->mf6cc_mcastgrp.sin6_addr))

	c = ip6mr_cache_alloc();

	c->mf6c_origin = mfc->mf6cc_origin.sin6_addr;
	c->mf6c_mcastgrp = mfc->mf6cc_mcastgrp.sin6_addr;
	c->mf6c_parent = mfc->mf6cc_parent;
	ip6mr_update_thresholds(c, ttls);
		c->mfc_flags |= MFC_STATIC;

	write_lock_bh(&mrt_lock);
	c->next = mfc6_cache_array[line];
	mfc6_cache_array[line] = c;
	write_unlock_bh(&mrt_lock);

	/*
	 *	Check to see if we resolved a queued list. If so we
	 *	need to send on the frames and tidy up.
	 */
	spin_lock_bh(&mfc_unres_lock);
	for (cp = &mfc_unres_queue; (uc = *cp) != NULL;
		if (ipv6_addr_equal(&uc->mf6c_origin, &c->mf6c_origin) &&
		    ipv6_addr_equal(&uc->mf6c_mcastgrp, &c->mf6c_mcastgrp)) {
			if (atomic_dec_and_test(&cache_resolve_queue_len))
				del_timer(&ipmr_expire_timer);

	spin_unlock_bh(&mfc_unres_lock);

		ip6mr_cache_resolve(uc, c);
		kmem_cache_free(mrt_cachep, uc);

/*
 *	Close the multicast socket, and clear the vif tables etc
 */

static void mroute_clean_tables(struct sock *sk)
	/*
	 *	Shut down all active vif entries
	 */
	for (i = 0; i < maxvif; i++) {
		if (!(vif6_table[i].flags & VIFF_STATIC))

	for (i = 0; i < ARRAY_SIZE(mfc6_cache_array); i++) {
		struct mfc6_cache *c, **cp;

		cp = &mfc6_cache_array[i];
		while ((c = *cp) != NULL) {
			if (c->mfc_flags & MFC_STATIC) {
			write_lock_bh(&mrt_lock);
			write_unlock_bh(&mrt_lock);

			kmem_cache_free(mrt_cachep, c);

	if (atomic_read(&cache_resolve_queue_len) != 0) {
		struct mfc6_cache *c;

		spin_lock_bh(&mfc_unres_lock);
		while (mfc_unres_queue != NULL) {
			c = mfc_unres_queue;
			mfc_unres_queue = c->next;
			spin_unlock_bh(&mfc_unres_lock);

			ip6mr_destroy_unres(c);

			spin_lock_bh(&mfc_unres_lock);
		spin_unlock_bh(&mfc_unres_lock);
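
/* Claim the multicast-routing socket on MRT6_INIT.  Only one such socket can
 * exist at a time; ip6mr_sk_done() releases it and mroute_clean_tables()
 * then removes all non-static vif and MFC entries. */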
static int ip6mr_sk_init(struct sock *sk)
	write_lock_bh(&mrt_lock);
	if (likely(mroute6_socket == NULL))
		mroute6_socket = sk;
	write_unlock_bh(&mrt_lock);

int ip6mr_sk_done(struct sock *sk)
	if (sk == mroute6_socket) {
		write_lock_bh(&mrt_lock);
		mroute6_socket = NULL;
		write_unlock_bh(&mrt_lock);

		mroute_clean_tables(sk);

/*
 *	Socket options and virtual interface manipulation. The whole
 *	virtual interface system is a complete heap, but unfortunately
 *	that's how BSD mrouted happens to think. Maybe one day with a proper
 *	MOSPF/PIM router set up we can clean this up.
 */
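
/*
 * Rough sketch (illustrative only, not kernel code) of how a routing daemon
 * drives this interface from a raw ICMPv6 socket:
 *
 *	int s = socket(AF_INET6, SOCK_RAW, IPPROTO_ICMPV6);
 *	int one = 1;
 *	struct mif6ctl mif = { .mif6c_mifi = 0,
 *			       .mif6c_pifi = if_nametoindex("eth0") };
 *	struct mf6cctl mfc = { ... };	// origin, group, parent mif, oif set
 *
 *	setsockopt(s, IPPROTO_IPV6, MRT6_INIT, &one, sizeof(one));
 *	setsockopt(s, IPPROTO_IPV6, MRT6_ADD_MIF, &mif, sizeof(mif));
 *	setsockopt(s, IPPROTO_IPV6, MRT6_ADD_MFC, &mfc, sizeof(mfc));
 *	...
 *	setsockopt(s, IPPROTO_IPV6, MRT6_DONE, NULL, 0);
 */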

int ip6_mroute_setsockopt(struct sock *sk, int optname, char __user *optval, int optlen)
	if (optname != MRT6_INIT) {
		if (sk != mroute6_socket && !capable(CAP_NET_ADMIN))

		if (sk->sk_type != SOCK_RAW ||
		    inet_sk(sk)->num != IPPROTO_ICMPV6)
		if (optlen < sizeof(int))

		return ip6mr_sk_init(sk);

		return ip6mr_sk_done(sk);

		if (optlen < sizeof(vif))
		if (copy_from_user(&vif, optval, sizeof(vif)))
		if (vif.mif6c_mifi >= MAXMIFS)
		ret = mif6_add(&vif, sk == mroute6_socket);

		if (optlen < sizeof(mifi_t))
		if (copy_from_user(&mifi, optval, sizeof(mifi_t)))
		ret = mif6_delete(mifi);

	/*
	 *	Manipulate the forwarding caches. These live
	 *	in a sort of kernel/user symbiosis.
	 */
		if (optlen < sizeof(mfc))
		if (copy_from_user(&mfc, optval, sizeof(mfc)))
		if (optname == MRT6_DEL_MFC)
			ret = ip6mr_mfc_delete(&mfc);
			ret = ip6mr_mfc_add(&mfc, sk == mroute6_socket);

	/*
	 *	Control PIM assert (to activate pim will activate assert)
	 */
		if (get_user(v, (int __user *)optval))
		mroute_do_assert = !!v;

#ifdef CONFIG_IPV6_PIMSM_V2
		if (get_user(v, (int __user *)optval))
		if (v != mroute_do_pim) {
			mroute_do_assert = v;
				ret = inet6_add_protocol(&pim6_protocol,
				ret = inet6_del_protocol(&pim6_protocol,

	/*
	 * Spurious command, or MRT6_VERSION which you cannot
	 * set.
	 */
		return -ENOPROTOOPT;

/*
 *	Getsock opt support for the multicast routing system.
 */

int ip6_mroute_getsockopt(struct sock *sk, int optname, char __user *optval,
#ifdef CONFIG_IPV6_PIMSM_V2
		val = mroute_do_pim;
		val = mroute_do_assert;
		return -ENOPROTOOPT;

	if (get_user(olr, optlen))

	olr = min_t(int, olr, sizeof(int));

	if (put_user(olr, optlen))
	if (copy_to_user(optval, &val, olr))

/*
 *	The IP multicast ioctl support routines.
 */

int ip6mr_ioctl(struct sock *sk, int cmd, void __user *arg)
	struct sioc_sg_req6 sr;
	struct sioc_mif_req6 vr;
	struct mif_device *vif;
	struct mfc6_cache *c;

	case SIOCGETMIFCNT_IN6:
		if (copy_from_user(&vr, arg, sizeof(vr)))
		if (vr.mifi >= maxvif)
		read_lock(&mrt_lock);
		vif = &vif6_table[vr.mifi];
		if (MIF_EXISTS(vr.mifi)) {
			vr.icount = vif->pkt_in;
			vr.ocount = vif->pkt_out;
			vr.ibytes = vif->bytes_in;
			vr.obytes = vif->bytes_out;
			read_unlock(&mrt_lock);

			if (copy_to_user(arg, &vr, sizeof(vr)))
		read_unlock(&mrt_lock);
		return -EADDRNOTAVAIL;
	case SIOCGETSGCNT_IN6:
		if (copy_from_user(&sr, arg, sizeof(sr)))

		read_lock(&mrt_lock);
		c = ip6mr_cache_find(&sr.src.sin6_addr, &sr.grp.sin6_addr);
			sr.pktcnt = c->mfc_un.res.pkt;
			sr.bytecnt = c->mfc_un.res.bytes;
			sr.wrong_if = c->mfc_un.res.wrong_if;
			read_unlock(&mrt_lock);

			if (copy_to_user(arg, &sr, sizeof(sr)))
		read_unlock(&mrt_lock);
		return -EADDRNOTAVAIL;
		return -ENOIOCTLCMD;

static inline int ip6mr_forward2_finish(struct sk_buff *skb)
	IP6_INC_STATS_BH(dev_net(skb->dst->dev), ip6_dst_idev(skb->dst),
			 IPSTATS_MIB_OUTFORWDATAGRAMS);
	return dst_output(skb);

/*
 *	Processing handlers for ip6mr_forward
 */

static int ip6mr_forward2(struct sk_buff *skb, struct mfc6_cache *c, int vifi)
	struct ipv6hdr *ipv6h;
	struct mif_device *vif = &vif6_table[vifi];
	struct net_device *dev;
	struct dst_entry *dst;

	if (vif->dev == NULL)

#ifdef CONFIG_IPV6_PIMSM_V2
	if (vif->flags & MIFF_REGISTER) {
		vif->bytes_out += skb->len;
		vif->dev->stats.tx_bytes += skb->len;
		vif->dev->stats.tx_packets++;
		ip6mr_cache_report(skb, vifi, MRT6MSG_WHOLEPKT);

	ipv6h = ipv6_hdr(skb);

	fl = (struct flowi) {
				{ .daddr = ipv6h->daddr, }

	dst = ip6_route_output(&init_net, NULL, &fl);

	dst_release(skb->dst);

	/*
	 * RFC1584 teaches, that DVMRP/PIM router must deliver packets locally
	 * not only before forwarding, but after forwarding on all output
	 * interfaces. It is clear, if mrouter runs a multicasting
	 * program, it should receive packets not depending to what interface
	 * program is joined.
	 * If we will not make it, the program will have to join on all
	 * interfaces. On the other hand, multihoming host (or router, but
	 * not mrouter) cannot join to more than one interface - it will
	 * result in receiving multiple packets.
	 */
	vif->bytes_out += skb->len;

	/* We are about to write */
	/* XXX: extension headers? */
	if (skb_cow(skb, sizeof(*ipv6h) + LL_RESERVED_SPACE(dev)))

	ipv6h = ipv6_hdr(skb);

	IP6CB(skb)->flags |= IP6SKB_FORWARDED;

	return NF_HOOK(PF_INET6, NF_INET_FORWARD, skb, skb->dev, dev,
		       ip6mr_forward2_finish);
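
/* Map a net_device back to its mif index by scanning vif6_table; the result
 * is negative when the device is not a configured mif (callers check < 0). */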
static int ip6mr_find_vif(struct net_device *dev)
	for (ct = maxvif - 1; ct >= 0; ct--) {
		if (vif6_table[ct].dev == dev)

static int ip6_mr_forward(struct sk_buff *skb, struct mfc6_cache *cache)
	vif = cache->mf6c_parent;
	cache->mfc_un.res.pkt++;
	cache->mfc_un.res.bytes += skb->len;

	/*
	 * Wrong interface: drop packet and (maybe) send PIM assert.
	 */
	if (vif6_table[vif].dev != skb->dev) {
		cache->mfc_un.res.wrong_if++;
		true_vifi = ip6mr_find_vif(skb->dev);

		if (true_vifi >= 0 && mroute_do_assert &&
		    /* pimsm uses asserts, when switching from RPT to SPT,
		       so that we cannot check that packet arrived on an oif.
		       It is bad, but otherwise we would need to move pretty
		       large chunk of pimd to kernel. Ough... --ANK
		     */
		    (mroute_do_pim || cache->mfc_un.res.ttls[true_vifi] < 255) &&
			       cache->mfc_un.res.last_assert + MFC_ASSERT_THRESH)) {
			cache->mfc_un.res.last_assert = jiffies;
			ip6mr_cache_report(skb, true_vifi, MRT6MSG_WRONGMIF);

	vif6_table[vif].pkt_in++;
	vif6_table[vif].bytes_in += skb->len;

	for (ct = cache->mfc_un.res.maxvif - 1; ct >= cache->mfc_un.res.minvif; ct--) {
		if (ipv6_hdr(skb)->hop_limit > cache->mfc_un.res.ttls[ct]) {
			struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
				ip6mr_forward2(skb2, cache, psend);

		ip6mr_forward2(skb, cache, psend);

/*
 *	Multicast packets for forwarding arrive here
 */

int ip6_mr_input(struct sk_buff *skb)
	struct mfc6_cache *cache;

	read_lock(&mrt_lock);
	cache = ip6mr_cache_find(&ipv6_hdr(skb)->saddr, &ipv6_hdr(skb)->daddr);

	/*
	 *	No usable cache entry
	 */
	if (cache == NULL) {
		vif = ip6mr_find_vif(skb->dev);
			int err = ip6mr_cache_unresolved(vif, skb);
			read_unlock(&mrt_lock);
		read_unlock(&mrt_lock);

	ip6_mr_forward(skb, cache);

	read_unlock(&mrt_lock);
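
/* Translate an MFC entry into rtnetlink attributes: the incoming interface
 * is emitted as RTA_IIF and the outgoing interfaces as an RTA_MULTIPATH
 * nexthop list, with rtnh_hops carrying the configured TTL threshold. */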
static int
ip6mr_fill_mroute(struct sk_buff *skb, struct mfc6_cache *c, struct rtmsg *rtm)
	struct rtnexthop *nhp;
	struct net_device *dev = vif6_table[c->mf6c_parent].dev;
	u8 *b = skb_tail_pointer(skb);
	struct rtattr *mp_head;

		RTA_PUT(skb, RTA_IIF, 4, &dev->ifindex);

	mp_head = (struct rtattr *)skb_put(skb, RTA_LENGTH(0));

	for (ct = c->mfc_un.res.minvif; ct < c->mfc_un.res.maxvif; ct++) {
		if (c->mfc_un.res.ttls[ct] < 255) {
			if (skb_tailroom(skb) < RTA_ALIGN(RTA_ALIGN(sizeof(*nhp)) + 4))
				goto rtattr_failure;
			nhp = (struct rtnexthop *)skb_put(skb, RTA_ALIGN(sizeof(*nhp)));
			nhp->rtnh_flags = 0;
			nhp->rtnh_hops = c->mfc_un.res.ttls[ct];
			nhp->rtnh_ifindex = vif6_table[ct].dev->ifindex;
			nhp->rtnh_len = sizeof(*nhp);

	mp_head->rta_type = RTA_MULTIPATH;
	mp_head->rta_len = skb_tail_pointer(skb) - (u8 *)mp_head;
	rtm->rtm_type = RTN_MULTICAST;

int ip6mr_get_route(struct sk_buff *skb, struct rtmsg *rtm, int nowait)
	struct mfc6_cache *cache;
	struct rt6_info *rt = (struct rt6_info *)skb->dst;

	read_lock(&mrt_lock);
	cache = ip6mr_cache_find(&rt->rt6i_src.addr, &rt->rt6i_dst.addr);

		struct sk_buff *skb2;
		struct ipv6hdr *iph;
		struct net_device *dev;

			read_unlock(&mrt_lock);

		if (dev == NULL || (vif = ip6mr_find_vif(dev)) < 0) {
			read_unlock(&mrt_lock);

		/* really correct? */
		skb2 = alloc_skb(sizeof(struct ipv6hdr), GFP_ATOMIC);
			read_unlock(&mrt_lock);

		skb_reset_transport_header(skb2);

		skb_put(skb2, sizeof(struct ipv6hdr));
		skb_reset_network_header(skb2);

		iph = ipv6_hdr(skb2);
		iph->flow_lbl[0] = 0;
		iph->flow_lbl[1] = 0;
		iph->flow_lbl[2] = 0;
		iph->payload_len = 0;
		iph->nexthdr = IPPROTO_NONE;
		ipv6_addr_copy(&iph->saddr, &rt->rt6i_src.addr);
		ipv6_addr_copy(&iph->daddr, &rt->rt6i_dst.addr);

		err = ip6mr_cache_unresolved(vif, skb2);
		read_unlock(&mrt_lock);

	if (!nowait && (rtm->rtm_flags&RTM_F_NOTIFY))
		cache->mfc_flags |= MFC_NOTIFY;

	err = ip6mr_fill_mroute(skb, cache, rtm);
	read_unlock(&mrt_lock);