2 * Linux IPv6 multicast routing support for BSD pim6sd
3 * Based on net/ipv4/ipmr.c.
5 * (c) 2004 Mickael Hoerdt, <hoerdt@clarinet.u-strasbg.fr>
6 * LSIIT Laboratory, Strasbourg, France
7 * (c) 2004 Jean-Philippe Andriot, <jean-philippe.andriot@6WIND.com>
9 * Copyright (C)2007,2008 USAGI/WIDE Project
10 * YOSHIFUJI Hideaki <yoshfuji@linux-ipv6.org>
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License
14 * as published by the Free Software Foundation; either version
15 * 2 of the License, or (at your option) any later version.
19 #include <asm/system.h>
20 #include <asm/uaccess.h>
21 #include <linux/types.h>
22 #include <linux/sched.h>
23 #include <linux/errno.h>
24 #include <linux/timer.h>
26 #include <linux/kernel.h>
27 #include <linux/fcntl.h>
28 #include <linux/stat.h>
29 #include <linux/socket.h>
31 #include <linux/inet.h>
32 #include <linux/netdevice.h>
33 #include <linux/inetdevice.h>
34 #include <linux/igmp.h>
35 #include <linux/proc_fs.h>
36 #include <linux/seq_file.h>
37 #include <linux/mroute.h>
38 #include <linux/init.h>
40 #include <net/protocol.h>
41 #include <linux/skbuff.h>
46 #include <net/route.h>
47 #include <linux/notifier.h>
48 #include <linux/if_arp.h>
49 #include <linux/netfilter_ipv4.h>
51 #include <net/checksum.h>
52 #include <net/netlink.h>
55 #include <net/ip6_route.h>
56 #include <linux/mroute6.h>
57 #include <net/addrconf.h>
58 #include <linux/netfilter_ipv6.h>
/* NOTE(review): this chunk appears sampled — the embedded original line
 * numbers skip values, so statements between them are missing from view. */
/* The single socket (pim6sd's) that owns the multicast routing state. */
60 struct sock *mroute6_socket;
63 /* Big lock, protecting vif table, mrt cache and mroute socket state.
64 Note that the changes are semaphored via rtnl_lock.
67 static DEFINE_RWLOCK(mrt_lock);
70 * Multicast router control variables
/* Table of multicast interfaces (MIFs); slot is live iff .dev != NULL. */
73 static struct mif_device vif6_table[MAXMIFS]; /* Devices */
76 #define MIF_EXISTS(idx) (vif6_table[idx].dev != NULL)
/* Hash table of resolved (S,G) entries, chained via mfc6_cache.next. */
78 static struct mfc6_cache *mfc6_cache_array[MFC_LINES]; /* Forwarding cache */
80 static struct mfc6_cache *mfc_unres_queue; /* Queue of unresolved entries */
81 static atomic_t cache_resolve_queue_len; /* Size of unresolved */
83 /* Special spinlock for queue of unresolved entries */
84 static DEFINE_SPINLOCK(mfc_unres_lock);
86 /* We return to original Alan's scheme. Hash table of resolved
87 entries is changed only in process context and protected
88 with weak lock mrt_lock. Queue of unresolved entries is protected
89 with strong spinlock mfc_unres_lock.
91 In this case data path is free of exclusive locks at all.
/* Slab cache that backs all struct mfc6_cache allocations. */
94 static struct kmem_cache *mrt_cachep __read_mostly;
/* Forward declarations for functions defined later in this file. */
96 static int ip6_mr_forward(struct sk_buff *skb, struct mfc6_cache *cache);
97 static int ip6mr_cache_report(struct sk_buff *pkt, vifi_t vifi, int assert);
98 static int ip6mr_fill_mroute(struct sk_buff *skb, struct mfc6_cache *c, struct rtmsg *rtm);
/* Single timer shared by the whole unresolved queue (see ipmr_expire_process). */
100 static struct timer_list ipmr_expire_timer;
103 #ifdef CONFIG_PROC_FS
/* Iterator state for the /proc mfc cache seq_file walk.
 * (review: struct is truncated here — the position field is elided) */
105 struct ipmr_mfc_iter {
106 struct mfc6_cache **cache;
/* Return the pos'th cache entry, scanning the resolved hash table first
 * (under mrt_lock) and then the unresolved queue (under mfc_unres_lock).
 * The lock covering the returned entry is left held for the seq walk. */
111 static struct mfc6_cache *ipmr_mfc_seq_idx(struct ipmr_mfc_iter *it, loff_t pos)
113 struct mfc6_cache *mfc;
115 it->cache = mfc6_cache_array;
116 read_lock(&mrt_lock);
117 for (it->ct = 0; it->ct < ARRAY_SIZE(mfc6_cache_array); it->ct++)
118 for (mfc = mfc6_cache_array[it->ct]; mfc; mfc = mfc->next)
/* not found among resolved entries: drop mrt_lock, try unresolved queue */
121 read_unlock(&mrt_lock);
123 it->cache = &mfc_unres_queue;
124 spin_lock_bh(&mfc_unres_lock);
125 for (mfc = mfc_unres_queue; mfc; mfc = mfc->next)
128 spin_unlock_bh(&mfc_unres_lock);
138 * The /proc interfaces to multicast routing /proc/ip6_mr_cache /proc/ip6_mr_vif
/* Iterator state for the /proc vif table walk (counter field elided here). */
141 struct ipmr_vif_iter {
/* Return the pos'th existing MIF, skipping empty vif6_table slots. */
145 static struct mif_device *ip6mr_vif_seq_idx(struct ipmr_vif_iter *iter,
148 for (iter->ct = 0; iter->ct < maxvif; ++iter->ct) {
149 if (!MIF_EXISTS(iter->ct))
152 return &vif6_table[iter->ct];
/* seq_file .start: take mrt_lock for the duration of the walk;
 * pos 0 yields SEQ_START_TOKEN (header line), else the (pos-1)'th MIF. */
157 static void *ip6mr_vif_seq_start(struct seq_file *seq, loff_t *pos)
160 read_lock(&mrt_lock);
161 return (*pos ? ip6mr_vif_seq_idx(seq->private, *pos - 1)
/* seq_file .next: advance to the next live MIF slot. */
165 static void *ip6mr_vif_seq_next(struct seq_file *seq, void *v, loff_t *pos)
167 struct ipmr_vif_iter *iter = seq->private;
170 if (v == SEQ_START_TOKEN)
171 return ip6mr_vif_seq_idx(iter, 0);
173 while (++iter->ct < maxvif) {
174 if (!MIF_EXISTS(iter->ct))
176 return &vif6_table[iter->ct];
/* seq_file .stop: release the lock taken in .start. */
181 static void ip6mr_vif_seq_stop(struct seq_file *seq, void *v)
184 read_unlock(&mrt_lock);
/* Emit one /proc/ip6_mr_vif row: header for SEQ_START_TOKEN, otherwise
 * per-MIF packet/byte counters and flags. */
187 static int ip6mr_vif_seq_show(struct seq_file *seq, void *v)
189 if (v == SEQ_START_TOKEN) {
191 "Interface BytesIn PktsIn BytesOut PktsOut Flags\n")
192 "Interface BytesIn PktsIn BytesOut PktsOut Flags\n");
193 const struct mif_device *vif = v;
194 const char *name = vif->dev ? vif->dev->name : "none";
197 "%2Zd %-10s %8ld %7ld %8ld %7ld %05X\n",
199 name, vif->bytes_in, vif->pkt_in,
200 vif->bytes_out, vif->pkt_out,
206 static struct seq_operations ip6mr_vif_seq_ops = {
207 .start = ip6mr_vif_seq_start,
208 .next = ip6mr_vif_seq_next,
209 .stop = ip6mr_vif_seq_stop,
210 .show = ip6mr_vif_seq_show,
/* Open handler: allocate per-open iterator state alongside the seq_file. */
213 static int ip6mr_vif_open(struct inode *inode, struct file *file)
215 return seq_open_private(file, &ip6mr_vif_seq_ops,
216 sizeof(struct ipmr_vif_iter));
219 static struct file_operations ip6mr_vif_fops = {
220 .owner = THIS_MODULE,
221 .open = ip6mr_vif_open,
224 .release = seq_release,
/* seq_file .start for /proc/ip6_mr_cache: pos 0 is the header token,
 * otherwise position into the cache via ipmr_mfc_seq_idx (which takes
 * the lock appropriate to the list it lands in). */
227 static void *ipmr_mfc_seq_start(struct seq_file *seq, loff_t *pos)
229 return (*pos ? ipmr_mfc_seq_idx(seq->private, *pos - 1)
/* seq_file .next: follow the chain; when the resolved hash table is
 * exhausted, swap mrt_lock for mfc_unres_lock and continue with the
 * unresolved queue. */
233 static void *ipmr_mfc_seq_next(struct seq_file *seq, void *v, loff_t *pos)
235 struct mfc6_cache *mfc = v;
236 struct ipmr_mfc_iter *it = seq->private;
240 if (v == SEQ_START_TOKEN)
241 return ipmr_mfc_seq_idx(seq->private, 0);
/* already in the unresolved queue: nothing after it */
246 if (it->cache == &mfc_unres_queue)
249 BUG_ON(it->cache != mfc6_cache_array);
251 while (++it->ct < ARRAY_SIZE(mfc6_cache_array)) {
252 mfc = mfc6_cache_array[it->ct];
257 /* exhausted cache_array, show unresolved */
258 read_unlock(&mrt_lock);
259 it->cache = &mfc_unres_queue;
262 spin_lock_bh(&mfc_unres_lock);
263 mfc = mfc_unres_queue;
268 spin_unlock_bh(&mfc_unres_lock);
/* seq_file .stop: release whichever lock the walk currently holds,
 * determined by which list it->cache points into. */
274 static void ipmr_mfc_seq_stop(struct seq_file *seq, void *v)
276 struct ipmr_mfc_iter *it = seq->private;
278 if (it->cache == &mfc_unres_queue)
279 spin_unlock_bh(&mfc_unres_lock);
280 else if (it->cache == mfc6_cache_array)
281 read_unlock(&mrt_lock);
/* Emit one /proc/ip6_mr_cache row: group/origin addresses, input
 * interface and counters; for resolved entries also print the
 * oif:TTL pairs for interfaces with a real (<255) threshold. */
284 static int ipmr_mfc_seq_show(struct seq_file *seq, void *v)
288 if (v == SEQ_START_TOKEN) {
292 "Iif Pkts Bytes Wrong Oifs\n");
294 const struct mfc6_cache *mfc = v;
295 const struct ipmr_mfc_iter *it = seq->private;
298 NIP6_FMT " " NIP6_FMT " %-3d %8ld %8ld %8ld",
299 NIP6(mfc->mf6c_mcastgrp), NIP6(mfc->mf6c_origin),
302 mfc->mfc_un.res.bytes,
303 mfc->mfc_un.res.wrong_if);
/* unresolved entries have no res union data worth printing */
305 if (it->cache != &mfc_unres_queue) {
306 for (n = mfc->mfc_un.res.minvif;
307 n < mfc->mfc_un.res.maxvif; n++) {
309 mfc->mfc_un.res.ttls[n] < 255)
312 n, mfc->mfc_un.res.ttls[n]);
320 static struct seq_operations ipmr_mfc_seq_ops = {
321 .start = ipmr_mfc_seq_start,
322 .next = ipmr_mfc_seq_next,
323 .stop = ipmr_mfc_seq_stop,
324 .show = ipmr_mfc_seq_show,
/* Open handler: allocate per-open iterator state alongside the seq_file. */
327 static int ipmr_mfc_open(struct inode *inode, struct file *file)
329 return seq_open_private(file, &ipmr_mfc_seq_ops,
330 sizeof(struct ipmr_mfc_iter));
333 static struct file_operations ip6mr_mfc_fops = {
334 .owner = THIS_MODULE,
335 .open = ipmr_mfc_open,
338 .release = seq_release,
/* Tear down MIF slot @vifi: detach its device under mrt_lock, shrink
 * maxvif if the topmost slot was removed, drop the allmulti reference
 * and unregister register-type (MIFF_REGISTER) devices.
 * Returns 0 on success, -EADDRNOTAVAIL for a bad/empty slot. */
346 static int mif6_delete(int vifi)
348 struct mif_device *v;
349 struct net_device *dev;
350 if (vifi < 0 || vifi >= maxvif)
351 return -EADDRNOTAVAIL;
353 v = &vif6_table[vifi];
355 write_lock_bh(&mrt_lock);
/* (elided) slot was already empty — bail out */
360 write_unlock_bh(&mrt_lock);
361 return -EADDRNOTAVAIL;
/* removed the highest slot: scan down for the new maxvif */
364 if (vifi + 1 == maxvif) {
366 for (tmp = vifi - 1; tmp >= 0; tmp--) {
373 write_unlock_bh(&mrt_lock);
375 dev_set_allmulti(dev, -1);
377 if (v->flags & MIFF_REGISTER)
378 unregister_netdevice(dev);
384 /* Destroy an unresolved cache entry, killing queued skbs
385 and reporting error to netlink readers.
388 static void ip6mr_destroy_unres(struct mfc6_cache *c)
392 atomic_dec(&cache_resolve_queue_len);
394 while((skb = skb_dequeue(&c->mfc_un.unres.unresolved)) != NULL) {
/* version==0 marks a queued netlink request rather than a real
 * IPv6 packet: rewrite it into an NLMSG_ERROR(-ETIMEDOUT) reply */
395 if (ipv6_hdr(skb)->version == 0) {
396 struct nlmsghdr *nlh = (struct nlmsghdr *)skb_pull(skb, sizeof(struct ipv6hdr));
397 nlh->nlmsg_type = NLMSG_ERROR;
398 nlh->nlmsg_len = NLMSG_LENGTH(sizeof(struct nlmsgerr));
399 skb_trim(skb, nlh->nlmsg_len);
400 ((struct nlmsgerr *)NLMSG_DATA(nlh))->error = -ETIMEDOUT;
401 rtnl_unicast(skb, &init_net, NETLINK_CB(skb).pid);
/* (elided) non-netlink skbs are simply freed */
406 kmem_cache_free(mrt_cachep, c);
410 /* Single timer process for all the unresolved queue. */
/* Walk mfc_unres_queue (caller holds mfc_unres_lock): destroy entries
 * whose expiry has passed, track the soonest remaining expiry, and
 * re-arm the shared timer if anything is still queued. */
412 static void ipmr_do_expire_process(unsigned long dummy)
414 unsigned long now = jiffies;
415 unsigned long expires = 10 * HZ;
416 struct mfc6_cache *c, **cp;
418 cp = &mfc_unres_queue;
420 while ((c = *cp) != NULL) {
421 if (time_after(c->mfc_un.unres.expires, now)) {
/* not yet expired: remember the nearest deadline */
423 unsigned long interval = c->mfc_un.unres.expires - now;
424 if (interval < expires)
431 ip6mr_destroy_unres(c);
434 if (atomic_read(&cache_resolve_queue_len))
435 mod_timer(&ipmr_expire_timer, jiffies + expires);
/* Timer callback: runs in softirq context, so it cannot sleep on
 * mfc_unres_lock — if contended, retry one jiffy later. */
438 static void ipmr_expire_process(unsigned long dummy)
440 if (!spin_trylock(&mfc_unres_lock)) {
441 mod_timer(&ipmr_expire_timer, jiffies + 1);
445 if (atomic_read(&cache_resolve_queue_len))
446 ipmr_do_expire_process(dummy);
448 spin_unlock(&mfc_unres_lock);
451 /* Fill oifs list. It is called under write locked mrt_lock. */
/* Rebuild @cache's per-MIF TTL thresholds from @ttls: 255 means
 * "do not forward"; minvif/maxvif bracket the forwarding range so
 * the fast path only scans live slots. */
453 static void ip6mr_update_thresholds(struct mfc6_cache *cache, unsigned char *ttls)
457 cache->mfc_un.res.minvif = MAXVIFS;
458 cache->mfc_un.res.maxvif = 0;
459 memset(cache->mfc_un.res.ttls, 255, MAXVIFS);
461 for (vifi = 0; vifi < maxvif; vifi++) {
462 if (MIF_EXISTS(vifi) && ttls[vifi] && ttls[vifi] < 255) {
463 cache->mfc_un.res.ttls[vifi] = ttls[vifi];
464 if (cache->mfc_un.res.minvif > vifi)
465 cache->mfc_un.res.minvif = vifi;
466 if (cache->mfc_un.res.maxvif <= vifi)
467 cache->mfc_un.res.maxvif = vifi + 1;
/* Install a new MIF described by @vifc into vif6_table.
 * @mrtsock: true when the request came from the mroute6 control socket
 * (otherwise the entry is marked VIFF_STATIC — see flags below).
 * Fails if the slot is already in use or the device cannot be found. */
472 static int mif6_add(struct mif6ctl *vifc, int mrtsock)
474 int vifi = vifc->mif6c_mifi;
475 struct mif_device *v = &vif6_table[vifi];
476 struct net_device *dev;
479 if (MIF_EXISTS(vifi))
482 switch (vifc->mif6c_flags) {
484 dev = dev_get_by_index(&init_net, vifc->mif6c_pifi);
486 return -EADDRNOTAVAIL;
/* join all-multicast on the underlying device */
493 dev_set_allmulti(dev, 1);
496 * Fill in the VIF structures
498 v->rate_limit = vifc->vifc_rate_limit;
499 v->flags = vifc->mif6c_flags;
/* (elided) entries not added by the mroute socket are static */
501 v->flags |= VIFF_STATIC;
502 v->threshold = vifc->vifc_threshold;
507 v->link = dev->ifindex;
508 if (v->flags & MIFF_REGISTER)
509 v->link = dev->iflink;
511 /* And finish update writing critical data */
512 write_lock_bh(&mrt_lock);
515 if (vifi + 1 > maxvif)
517 write_unlock_bh(&mrt_lock);
/* Look up a resolved (origin, group) entry in the MFC hash table.
 * Caller must hold mrt_lock. Returns NULL when absent. */
521 static struct mfc6_cache *ip6mr_cache_find(struct in6_addr *origin, struct in6_addr *mcastgrp)
523 int line = MFC6_HASH(mcastgrp, origin);
524 struct mfc6_cache *c;
526 for (c = mfc6_cache_array[line]; c; c = c->next) {
527 if (ipv6_addr_equal(&c->mf6c_origin, origin) &&
528 ipv6_addr_equal(&c->mf6c_mcastgrp, mcastgrp))
535 * Allocate a multicast cache entry
/* GFP_KERNEL allocation for resolved entries (process context only). */
537 static struct mfc6_cache *ip6mr_cache_alloc(void)
539 struct mfc6_cache *c = kmem_cache_alloc(mrt_cachep, GFP_KERNEL);
542 memset(c, 0, sizeof(*c));
543 c->mfc_un.res.minvif = MAXVIFS;
/* GFP_ATOMIC variant used from the packet path for unresolved entries;
 * arms a 10 s expiry and an skb queue for packets awaiting resolution. */
547 static struct mfc6_cache *ip6mr_cache_alloc_unres(void)
549 struct mfc6_cache *c = kmem_cache_alloc(mrt_cachep, GFP_ATOMIC);
552 memset(c, 0, sizeof(*c));
553 skb_queue_head_init(&c->mfc_un.unres.unresolved);
554 c->mfc_un.unres.expires = jiffies + 10 * HZ;
559 * A cache entry has gone into a resolved state from queued
/* Replay everything queued on the unresolved entry @uc through the
 * newly resolved entry @c: queued netlink route requests are answered
 * via ip6mr_fill_mroute; real packets are forwarded. */
562 static void ip6mr_cache_resolve(struct mfc6_cache *uc, struct mfc6_cache *c)
567 * Play the pending entries through our router
570 while((skb = __skb_dequeue(&uc->mfc_un.unres.unresolved))) {
/* version==0 marks a queued netlink request, not a real packet */
571 if (ipv6_hdr(skb)->version == 0) {
573 struct nlmsghdr *nlh = (struct nlmsghdr *)skb_pull(skb, sizeof(struct ipv6hdr));
575 if (ip6mr_fill_mroute(skb, c, NLMSG_DATA(nlh)) > 0) {
576 nlh->nlmsg_len = skb->tail - (u8 *)nlh;
/* fill failed: answer with NLMSG_ERROR(-EMSGSIZE) instead */
578 nlh->nlmsg_type = NLMSG_ERROR;
579 nlh->nlmsg_len = NLMSG_LENGTH(sizeof(struct nlmsgerr));
580 skb_trim(skb, nlh->nlmsg_len);
581 ((struct nlmsgerr *)NLMSG_DATA(nlh))->error = -EMSGSIZE;
583 err = rtnl_unicast(skb, &init_net, NETLINK_CB(skb).pid);
585 ip6_mr_forward(skb, c);
590 * Bounce a cache query up to pim6sd. We could use netlink for this but pim6sd
591 * expects the following bizarre scheme.
593 * Called under mrt_lock.
/* Build an mrt6msg (preceded by a copy of the packet's IPv6 header) and
 * queue it on the mroute6 control socket so pim6sd learns about the
 * event (@assert is the MRT6MSG_* message type). */
596 static int ip6mr_cache_report(struct sk_buff *pkt, vifi_t vifi, int assert)
602 skb = alloc_skb(sizeof(struct ipv6hdr) + sizeof(*msg), GFP_ATOMIC);
607 /* I suppose that internal messages
608 * do not require checksums */
610 skb->ip_summed = CHECKSUM_UNNECESSARY;
/* copy the offending packet's IPv6 header in front of the message */
616 skb_put(skb, sizeof(struct ipv6hdr));
617 skb_reset_network_header(skb);
618 skb_copy_to_linear_data(skb, ipv6_hdr(pkt), sizeof(struct ipv6hdr));
623 skb_put(skb, sizeof(*msg));
624 skb_reset_transport_header(skb);
625 msg = (struct mrt6msg *)skb_transport_header(skb);
628 msg->im6_msgtype = assert;
631 ipv6_addr_copy(&msg->im6_src, &ipv6_hdr(pkt)->saddr);
632 ipv6_addr_copy(&msg->im6_dst, &ipv6_hdr(pkt)->daddr);
634 skb->dst = dst_clone(pkt->dst);
635 skb->ip_summed = CHECKSUM_UNNECESSARY;
637 skb_pull(skb, sizeof(struct ipv6hdr));
/* no daemon listening: nothing to report to */
639 if (mroute6_socket == NULL) {
645 * Deliver to user space multicast routing algorithms
647 if ((ret = sock_queue_rcv_skb(mroute6_socket, skb)) < 0) {
649 printk(KERN_WARNING "mroute6: pending queue full, dropping entries.\n");
657 * Queue a packet for resolution. It gets locked cache entry!
/* Park @skb on the unresolved entry for its (src, dst) pair, creating
 * the entry (and reporting MRT6MSG_NOCACHE to pim6sd) on first sight.
 * The unresolved queue is capped (len 10, 3 skbs per entry) to bound
 * memory used by unroutable traffic. */
661 ip6mr_cache_unresolved(vifi_t vifi, struct sk_buff *skb)
664 struct mfc6_cache *c;
666 spin_lock_bh(&mfc_unres_lock);
667 for (c = mfc_unres_queue; c; c = c->next) {
668 if (ipv6_addr_equal(&c->mf6c_mcastgrp, &ipv6_hdr(skb)->daddr) &&
669 ipv6_addr_equal(&c->mf6c_origin, &ipv6_hdr(skb)->saddr))
675 * Create a new entry if allowable
678 if (atomic_read(&cache_resolve_queue_len) >= 10 ||
679 (c = ip6mr_cache_alloc_unres()) == NULL) {
680 spin_unlock_bh(&mfc_unres_lock);
687 * Fill in the new cache entry
690 c->mf6c_origin = ipv6_hdr(skb)->saddr;
691 c->mf6c_mcastgrp = ipv6_hdr(skb)->daddr;
694 * Reflect first query at pim6sd
696 if ((err = ip6mr_cache_report(skb, vifi, MRT6MSG_NOCACHE)) < 0) {
697 /* If the report failed throw the cache entry
700 spin_unlock_bh(&mfc_unres_lock);
702 kmem_cache_free(mrt_cachep, c);
707 atomic_inc(&cache_resolve_queue_len);
708 c->next = mfc_unres_queue;
/* kick the shared expiry timer for the new entry */
711 ipmr_do_expire_process(1);
715 * See if we can append the packet
717 if (c->mfc_un.unres.unresolved.qlen > 3) {
721 skb_queue_tail(&c->mfc_un.unres.unresolved, skb);
725 spin_unlock_bh(&mfc_unres_lock);
730 * MFC6 cache manipulation by user space
/* MRT6_DEL_MFC: unlink and free the resolved entry matching @mfc's
 * (origin, group) pair; unlinking happens under write-locked mrt_lock. */
733 static int ip6mr_mfc_delete(struct mf6cctl *mfc)
736 struct mfc6_cache *c, **cp;
738 line = MFC6_HASH(&mfc->mf6cc_mcastgrp.sin6_addr, &mfc->mf6cc_origin.sin6_addr);
740 for (cp = &mfc6_cache_array[line]; (c = *cp) != NULL; cp = &c->next) {
741 if (ipv6_addr_equal(&c->mf6c_origin, &mfc->mf6cc_origin.sin6_addr) &&
742 ipv6_addr_equal(&c->mf6c_mcastgrp, &mfc->mf6cc_mcastgrp.sin6_addr)) {
743 write_lock_bh(&mrt_lock);
745 write_unlock_bh(&mrt_lock);
747 kmem_cache_free(mrt_cachep, c);
/* Netdevice notifier: on NETDEV_UNREGISTER, scan the MIF table and
 * tear down any MIF bound to the disappearing device (teardown call
 * elided in this view). Only init_net is handled. */
754 static int ip6mr_device_event(struct notifier_block *this,
755 unsigned long event, void *ptr)
757 struct net_device *dev = ptr;
758 struct mif_device *v;
761 if (dev_net(dev) != &init_net)
764 if (event != NETDEV_UNREGISTER)
768 for (ct = 0; ct < maxvif; ct++, v++) {
775 static struct notifier_block ip6_mr_notifier = {
776 .notifier_call = ip6mr_device_event
780 * Setup for IP multicast routing
/* Boot-time init: create the mfc6_cache slab (panics on failure, as is
 * conventional for core networking init), arm the expiry timer, hook
 * the netdevice notifier and create the /proc entries. */
783 void __init ip6_mr_init(void)
785 mrt_cachep = kmem_cache_create("ip6_mrt_cache",
786 sizeof(struct mfc6_cache),
787 0, SLAB_HWCACHE_ALIGN,
790 panic("cannot allocate ip6_mrt_cache");
792 setup_timer(&ipmr_expire_timer, ipmr_expire_process, 0);
793 register_netdevice_notifier(&ip6_mr_notifier);
794 #ifdef CONFIG_PROC_FS
795 proc_net_fops_create(&init_net, "ip6_mr_vif", 0, &ip6mr_vif_fops);
796 proc_net_fops_create(&init_net, "ip6_mr_cache", 0, &ip6mr_mfc_fops);
/* MRT6_ADD_MFC: install or update a resolved forwarding entry.
 * If the (origin, group) pair already exists, just refresh its parent
 * and thresholds; otherwise allocate a new entry, link it into the
 * hash table and, if a matching unresolved entry was queued, resolve
 * it (replaying its pending packets) and free it.
 * @mrtsock: request came from the mroute6 socket (else MFC_STATIC). */
801 static int ip6mr_mfc_add(struct mf6cctl *mfc, int mrtsock)
804 struct mfc6_cache *uc, *c, **cp;
805 unsigned char ttls[MAXVIFS];
/* translate the user's interface set into a per-MIF TTL array */
808 memset(ttls, 255, MAXVIFS);
809 for (i = 0; i < MAXVIFS; i++) {
810 if (IF_ISSET(i, &mfc->mf6cc_ifset))
815 line = MFC6_HASH(&mfc->mf6cc_mcastgrp.sin6_addr, &mfc->mf6cc_origin.sin6_addr);
817 for (cp = &mfc6_cache_array[line]; (c = *cp) != NULL; cp = &c->next) {
818 if (ipv6_addr_equal(&c->mf6c_origin, &mfc->mf6cc_origin.sin6_addr) &&
819 ipv6_addr_equal(&c->mf6c_mcastgrp, &mfc->mf6cc_mcastgrp.sin6_addr))
/* existing entry: update in place under the write lock */
824 write_lock_bh(&mrt_lock);
825 c->mf6c_parent = mfc->mf6cc_parent;
826 ip6mr_update_thresholds(c, ttls);
828 c->mfc_flags |= MFC_STATIC;
829 write_unlock_bh(&mrt_lock);
833 if (!ipv6_addr_is_multicast(&mfc->mf6cc_mcastgrp.sin6_addr))
836 c = ip6mr_cache_alloc();
840 c->mf6c_origin = mfc->mf6cc_origin.sin6_addr;
841 c->mf6c_mcastgrp = mfc->mf6cc_mcastgrp.sin6_addr;
842 c->mf6c_parent = mfc->mf6cc_parent;
843 ip6mr_update_thresholds(c, ttls);
845 c->mfc_flags |= MFC_STATIC;
847 write_lock_bh(&mrt_lock);
848 c->next = mfc6_cache_array[line];
849 mfc6_cache_array[line] = c;
850 write_unlock_bh(&mrt_lock);
853 * Check to see if we resolved a queued list. If so we
854 * need to send on the frames and tidy up.
856 spin_lock_bh(&mfc_unres_lock);
857 for (cp = &mfc_unres_queue; (uc = *cp) != NULL;
859 if (ipv6_addr_equal(&uc->mf6c_origin, &c->mf6c_origin) &&
860 ipv6_addr_equal(&uc->mf6c_mcastgrp, &c->mf6c_mcastgrp)) {
/* last unresolved entry gone: stop the expiry timer */
862 if (atomic_dec_and_test(&cache_resolve_queue_len))
863 del_timer(&ipmr_expire_timer);
867 spin_unlock_bh(&mfc_unres_lock);
870 ip6mr_cache_resolve(uc, c);
871 kmem_cache_free(mrt_cachep, uc);
877 * Close the multicast socket, and clear the vif tables etc
/* Flush all non-static MIFs and MFC entries, then drain the
 * unresolved queue. Called when the mroute6 socket goes away. */
880 static void mroute_clean_tables(struct sock *sk)
885 * Shut down all active vif entries
887 for (i = 0; i < maxvif; i++) {
888 if (!(vif6_table[i].flags & VIFF_STATIC))
895 for (i = 0; i < ARRAY_SIZE(mfc6_cache_array); i++) {
896 struct mfc6_cache *c, **cp;
898 cp = &mfc6_cache_array[i];
899 while ((c = *cp) != NULL) {
/* static entries survive socket teardown */
900 if (c->mfc_flags & MFC_STATIC) {
904 write_lock_bh(&mrt_lock);
906 write_unlock_bh(&mrt_lock);
908 kmem_cache_free(mrt_cachep, c);
912 if (atomic_read(&cache_resolve_queue_len) != 0) {
913 struct mfc6_cache *c;
915 spin_lock_bh(&mfc_unres_lock);
916 while (mfc_unres_queue != NULL) {
918 mfc_unres_queue = c->next;
/* drop the lock around ip6mr_destroy_unres, which may send netlink */
919 spin_unlock_bh(&mfc_unres_lock);
921 ip6mr_destroy_unres(c);
923 spin_lock_bh(&mfc_unres_lock);
925 spin_unlock_bh(&mfc_unres_lock);
/* MRT6_INIT: claim @sk as the single mroute6 control socket.
 * (The assignment itself and the -EADDRINUSE path are elided here.) */
929 static int ip6mr_sk_init(struct sock *sk)
934 write_lock_bh(&mrt_lock);
935 if (likely(mroute6_socket == NULL))
939 write_unlock_bh(&mrt_lock);
/* MRT6_DONE / socket close: release ownership and flush the tables. */
946 int ip6mr_sk_done(struct sock *sk)
951 if (sk == mroute6_socket) {
952 write_lock_bh(&mrt_lock);
953 mroute6_socket = NULL;
954 write_unlock_bh(&mrt_lock);
956 mroute_clean_tables(sk);
965 * Socket options and virtual interface manipulation. The whole
966 * virtual interface system is a complete heap, but unfortunately
967 * that's how BSD mrouted happens to think. Maybe one day with a proper
968 * MOSPF/PIM router set up we can clean this up.
/* MRT6_* setsockopt dispatcher: INIT/DONE manage socket ownership,
 * ADD/DEL_MIF manage interfaces, ADD/DEL_MFC manage forwarding
 * entries. Only the owning socket or CAP_NET_ADMIN may issue
 * commands other than MRT6_INIT. */
971 int ip6_mroute_setsockopt(struct sock *sk, int optname, char __user *optval, int optlen)
978 if (optname != MRT6_INIT) {
979 if (sk != mroute6_socket && !capable(CAP_NET_ADMIN))
/* MRT6_INIT is only valid on a raw ICMPv6 socket */
985 if (sk->sk_type != SOCK_RAW ||
986 inet_sk(sk)->num != IPPROTO_ICMPV6)
988 if (optlen < sizeof(int))
991 return ip6mr_sk_init(sk);
994 return ip6mr_sk_done(sk);
997 if (optlen < sizeof(vif))
999 if (copy_from_user(&vif, optval, sizeof(vif)))
1001 if (vif.mif6c_mifi >= MAXVIFS)
1004 ret = mif6_add(&vif, sk == mroute6_socket);
1009 if (optlen < sizeof(mifi_t))
1011 if (copy_from_user(&mifi, optval, sizeof(mifi_t)))
1014 ret = mif6_delete(mifi);
1019 * Manipulate the forwarding caches. These live
1020 * in a sort of kernel/user symbiosis.
1024 if (optlen < sizeof(mfc))
1026 if (copy_from_user(&mfc, optval, sizeof(mfc)))
1029 if (optname == MRT6_DEL_MFC)
1030 ret = ip6mr_mfc_delete(&mfc);
1032 ret = ip6mr_mfc_add(&mfc, sk == mroute6_socket);
1037 * Spurious command, or MRT_VERSION which you cannot
1041 return -ENOPROTOOPT;
1046 * Getsock opt support for the multicast routing system.
/* Copy out an int-sized option value, clamped to the caller's buffer
 * length (the optname switch filling `val` is elided in this view). */
1049 int ip6_mroute_getsockopt(struct sock *sk, int optname, char __user *optval,
1060 return -ENOPROTOOPT;
1063 if (get_user(olr, optlen))
1066 olr = min_t(int, olr, sizeof(int));
1070 if (put_user(olr, optlen))
1072 if (copy_to_user(optval, &val, olr))
1078 * The IP multicast ioctl support routines.
/* ioctl handler: SIOCGETMIFCNT_IN6 returns per-MIF counters,
 * SIOCGETSGCNT_IN6 returns per-(S,G) counters; both read under
 * mrt_lock and copy the result back to user space. */
1081 int ip6mr_ioctl(struct sock *sk, int cmd, void __user *arg)
1083 struct sioc_sg_req6 sr;
1084 struct sioc_mif_req6 vr;
1085 struct mif_device *vif;
1086 struct mfc6_cache *c;
1089 case SIOCGETMIFCNT_IN6:
1090 if (copy_from_user(&vr, arg, sizeof(vr)))
1092 if (vr.mifi >= maxvif)
1094 read_lock(&mrt_lock);
1095 vif = &vif6_table[vr.mifi];
1096 if (MIF_EXISTS(vr.mifi)) {
1097 vr.icount = vif->pkt_in;
1098 vr.ocount = vif->pkt_out;
1099 vr.ibytes = vif->bytes_in;
1100 vr.obytes = vif->bytes_out;
/* drop the lock before touching user memory */
1101 read_unlock(&mrt_lock);
1103 if (copy_to_user(arg, &vr, sizeof(vr)))
1107 read_unlock(&mrt_lock);
1108 return -EADDRNOTAVAIL;
1109 case SIOCGETSGCNT_IN6:
1110 if (copy_from_user(&sr, arg, sizeof(sr)))
1113 read_lock(&mrt_lock);
1114 c = ip6mr_cache_find(&sr.src.sin6_addr, &sr.grp.sin6_addr);
1116 sr.pktcnt = c->mfc_un.res.pkt;
1117 sr.bytecnt = c->mfc_un.res.bytes;
1118 sr.wrong_if = c->mfc_un.res.wrong_if;
1119 read_unlock(&mrt_lock);
1121 if (copy_to_user(arg, &sr, sizeof(sr)))
1125 read_unlock(&mrt_lock);
1126 return -EADDRNOTAVAIL;
1128 return -ENOIOCTLCMD;
/* Netfilter okfn for the FORWARD hook below: hand the skb to dst_output. */
1133 static inline int ip6mr_forward2_finish(struct sk_buff *skb)
1136 return dst_output(skb);
1140 * Processing handlers for ip6mr_forward
/* Transmit one copy of @skb out of MIF @vifi: route toward the group
 * address, swap in the new dst, account output bytes/packets, ensure
 * headroom, and run the NF_INET_FORWARD hook. */
1143 static int ip6mr_forward2(struct sk_buff *skb, struct mfc6_cache *c, int vifi)
1145 struct ipv6hdr *ipv6h;
1146 struct mif_device *vif = &vif6_table[vifi];
1147 struct net_device *dev;
1148 struct dst_entry *dst;
1151 if (vif->dev == NULL)
1154 ipv6h = ipv6_hdr(skb);
1156 fl = (struct flowi) {
1159 { .daddr = ipv6h->daddr, }
1163 dst = ip6_route_output(&init_net, NULL, &fl);
1167 dst_release(skb->dst);
1171 * RFC1584 teaches, that DVMRP/PIM router must deliver packets locally
1172 * not only before forwarding, but after forwarding on all output
1173 * interfaces. It is clear, if mrouter runs a multicasting
1174 * program, it should receive packets not depending to what interface
1175 * program is joined.
1176 * If we will not make it, the program will have to join on all
1177 * interfaces. On the other hand, multihoming host (or router, but
1178 * not mrouter) cannot join to more than one interface - it will
1179 * result in receiving multiple packets.
1184 vif->bytes_out += skb->len;
1186 /* We are about to write */
1187 /* XXX: extension headers? */
1188 if (skb_cow(skb, sizeof(*ipv6h) + LL_RESERVED_SPACE(dev)))
1191 ipv6h = ipv6_hdr(skb);
1194 IP6CB(skb)->flags |= IP6SKB_FORWARDED;
1196 return NF_HOOK(PF_INET6, NF_INET_FORWARD, skb, skb->dev, dev,
1197 ip6mr_forward2_finish);
/* Reverse-map a net_device to its MIF index; -1 style "not found" is
 * presumably returned after the loop (elided) — callers test < 0. */
1204 static int ip6mr_find_vif(struct net_device *dev)
1207 for (ct = maxvif - 1; ct >= 0; ct--) {
1208 if (vif6_table[ct].dev == dev)
/* Forward @skb according to resolved entry @cache: account input on
 * the parent MIF, then emit one clone per output MIF whose TTL
 * threshold the packet's hop limit exceeds; the original skb is used
 * for the last output interface to avoid one clone. */
1214 static int ip6_mr_forward(struct sk_buff *skb, struct mfc6_cache *cache)
1219 vif = cache->mf6c_parent;
1220 cache->mfc_un.res.pkt++;
1221 cache->mfc_un.res.bytes += skb->len;
1223 vif6_table[vif].pkt_in++;
1224 vif6_table[vif].bytes_in += skb->len;
1229 for (ct = cache->mfc_un.res.maxvif - 1; ct >= cache->mfc_un.res.minvif; ct--) {
1230 if (ipv6_hdr(skb)->hop_limit > cache->mfc_un.res.ttls[ct]) {
1232 struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
1234 ip6mr_forward2(skb2, cache, psend);
1240 ip6mr_forward2(skb, cache, psend);
1250 * Multicast packets for forwarding arrive here
/* Entry point from the IPv6 receive path: look up the (src, dst)
 * forwarding entry under mrt_lock; queue unresolved traffic via
 * ip6mr_cache_unresolved, otherwise forward it. */
1253 int ip6_mr_input(struct sk_buff *skb)
1255 struct mfc6_cache *cache;
1257 read_lock(&mrt_lock);
1258 cache = ip6mr_cache_find(&ipv6_hdr(skb)->saddr, &ipv6_hdr(skb)->daddr);
1261 * No usable cache entry
1263 if (cache == NULL) {
1266 vif = ip6mr_find_vif(skb->dev);
1268 int err = ip6mr_cache_unresolved(vif, skb);
1269 read_unlock(&mrt_lock);
1273 read_unlock(&mrt_lock);
1278 ip6_mr_forward(skb, cache);
1280 read_unlock(&mrt_lock);
/* Fill an rtnetlink route message for MFC entry @c: RTA_IIF with the
 * parent device's ifindex plus an RTA_MULTIPATH attribute holding one
 * rtnexthop per output MIF with a real (<255) TTL threshold.
 * Called under mrt_lock (dereferences vif6_table device pointers). */
1287 ip6mr_fill_mroute(struct sk_buff *skb, struct mfc6_cache *c, struct rtmsg *rtm)
1290 struct rtnexthop *nhp;
1291 struct net_device *dev = vif6_table[c->mf6c_parent].dev;
1293 struct rtattr *mp_head;
1296 RTA_PUT(skb, RTA_IIF, 4, &dev->ifindex);
1298 mp_head = (struct rtattr *)skb_put(skb, RTA_LENGTH(0));
1300 for (ct = c->mfc_un.res.minvif; ct < c->mfc_un.res.maxvif; ct++) {
1301 if (c->mfc_un.res.ttls[ct] < 255) {
1302 if (skb_tailroom(skb) < RTA_ALIGN(RTA_ALIGN(sizeof(*nhp)) + 4))
1303 goto rtattr_failure;
1304 nhp = (struct rtnexthop *)skb_put(skb, RTA_ALIGN(sizeof(*nhp)));
1305 nhp->rtnh_flags = 0;
/* reuse the hop field to export the TTL threshold */
1306 nhp->rtnh_hops = c->mfc_un.res.ttls[ct];
1307 nhp->rtnh_ifindex = vif6_table[ct].dev->ifindex;
1308 nhp->rtnh_len = sizeof(*nhp);
/* patch the multipath header now that its total length is known */
1311 mp_head->rta_type = RTA_MULTIPATH;
1312 mp_head->rta_len = skb->tail - (u8 *)mp_head;
1313 rtm->rtm_type = RTN_MULTICAST;
1321 int ip6mr_get_route(struct sk_buff *skb, struct rtmsg *rtm, int nowait)
1324 struct mfc6_cache *cache;
1325 struct rt6_info *rt = (struct rt6_info *)skb->dst;
1327 read_lock(&mrt_lock);
1328 cache = ip6mr_cache_find(&rt->rt6i_src.addr, &rt->rt6i_dst.addr);
1331 struct sk_buff *skb2;
1332 struct ipv6hdr *iph;
1333 struct net_device *dev;
1337 read_unlock(&mrt_lock);
1342 if (dev == NULL || (vif = ip6mr_find_vif(dev)) < 0) {
1343 read_unlock(&mrt_lock);
1347 /* really correct? */
1348 skb2 = alloc_skb(sizeof(struct ipv6hdr), GFP_ATOMIC);
1350 read_unlock(&mrt_lock);
1354 skb_reset_transport_header(skb2);
1356 skb_put(skb2, sizeof(struct ipv6hdr));
1357 skb_reset_network_header(skb2);
1359 iph = ipv6_hdr(skb2);
1362 iph->flow_lbl[0] = 0;
1363 iph->flow_lbl[1] = 0;
1364 iph->flow_lbl[2] = 0;
1365 iph->payload_len = 0;
1366 iph->nexthdr = IPPROTO_NONE;
1368 ipv6_addr_copy(&iph->saddr, &rt->rt6i_src.addr);
1369 ipv6_addr_copy(&iph->daddr, &rt->rt6i_dst.addr);
1371 err = ip6mr_cache_unresolved(vif, skb2);
1372 read_unlock(&mrt_lock);
1377 if (!nowait && (rtm->rtm_flags&RTM_F_NOTIFY))
1378 cache->mfc_flags |= MFC_NOTIFY;
1380 err = ip6mr_fill_mroute(skb, cache, rtm);
1381 read_unlock(&mrt_lock);