/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		ROUTE - implementation of the IP router.
 *
 * Version:	$Id: route.c,v 1.103 2002/01/12 07:44:09 davem Exp $
 *
 * Authors:	Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Alan Cox, <gw4pts@gw4pts.ampr.org>
 *		Linus Torvalds, <Linus.Torvalds@helsinki.fi>
 *		Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *
 * Fixes:
 *		Alan Cox	:	Verify area fixes.
 *		Alan Cox	:	cli() protects routing changes
 *		Rui Oliveira	:	ICMP routing table updates
 *		(rco@di.uminho.pt)	Routing table insertion and update
 *		Linus Torvalds	:	Rewrote bits to be sensible
 *		Alan Cox	:	Added BSD route gw semantics
 *		Alan Cox	:	Super /proc >4K
 *		Alan Cox	:	MTU in route table
 *		Alan Cox	:	MSS actually. Also added the window
 *					clamper.
 *		Sam Lantinga	:	Fixed route matching in rt_del()
 *		Alan Cox	:	Routing cache support.
 *		Alan Cox	:	Removed compatibility cruft.
 *		Alan Cox	:	RTF_REJECT support.
 *		Alan Cox	:	TCP irtt support.
 *		Jonathan Naylor	:	Added Metric support.
 *	Miquel van Smoorenburg	:	BSD API fixes.
 *	Miquel van Smoorenburg	:	Metrics.
 *		Alan Cox	:	Use __u32 properly
 *		Alan Cox	:	Aligned routing errors more closely with BSD;
 *					our system is still very different.
 *		Alan Cox	:	Faster /proc handling
 *	Alexey Kuznetsov	:	Massive rework to support tree based routing,
 *					routing caches and better behaviour.
 *		Olaf Erb	:	irtt wasn't being copied right.
 *		Bjorn Ekwall	:	Kerneld route support.
 *		Alan Cox	:	Multicast fixed (I hope)
 *		Pavel Krauz	:	Limited broadcast fixed
 *		Mike McLagan	:	Routing by source
 *	Alexey Kuznetsov	:	End of old history. Split to fib.c and
 *					route.c and rewritten from scratch.
 *		Andi Kleen	:	Load-limit warning messages.
 *	Vitaly E. Lavrov	:	Transparent proxy revived after year coma.
 *	Vitaly E. Lavrov	:	Race condition in ip_route_input_slow.
 *	Tobias Ringstrom	:	Uninitialized res.type in ip_route_output_slow.
 *	Vladimir V. Ivanov	:	IP rule info (flowid) is really useful.
 *		Marc Boucher	:	routing by fwmark
 *	Robert Olsson		:	Added rt_cache statistics
 *	Arnaldo C. Melo		:	Convert proc stuff to seq_file
 *	Eric Dumazet		:	hashed spinlocks and rt_check_expire() fixes.
 *	Ilia Sotnikov		:	Ignore TOS on PMTUD and Redirect
 *	Ilia Sotnikov		:	Removed TOS from hash calculations
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 */
#include <linux/module.h>
#include <asm/uaccess.h>
#include <asm/system.h>
#include <linux/bitops.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/bootmem.h>
#include <linux/string.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/errno.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/proc_fs.h>
#include <linux/init.h>
#include <linux/workqueue.h>
#include <linux/skbuff.h>
#include <linux/inetdevice.h>
#include <linux/igmp.h>
#include <linux/pkt_sched.h>
#include <linux/mroute.h>
#include <linux/netfilter_ipv4.h>
#include <linux/random.h>
#include <linux/jhash.h>
#include <linux/rcupdate.h>
#include <linux/times.h>
#include <net/net_namespace.h>
#include <net/protocol.h>
#include <net/route.h>
#include <net/inetpeer.h>
#include <net/sock.h>
#include <net/ip_fib.h>
#include <net/icmp.h>
#include <net/xfrm.h>
#include <net/netevent.h>
#include <net/rtnetlink.h>
#include <linux/sysctl.h>
#define RT_FL_TOS(oldflp) \
    ((u32)(oldflp->fl4_tos & (IPTOS_RT_MASK | RTO_ONLINK)))

#define IP_MAX_MTU	0xFFF0

#define RT_GC_TIMEOUT (300*HZ)

static int ip_rt_min_delay		= 2 * HZ;
static int ip_rt_max_delay		= 10 * HZ;
static int ip_rt_max_size;
static int ip_rt_gc_timeout		= RT_GC_TIMEOUT;
static int ip_rt_gc_interval		= 60 * HZ;
static int ip_rt_gc_min_interval	= HZ / 2;
static int ip_rt_redirect_number	= 9;
static int ip_rt_redirect_load		= HZ / 50;
static int ip_rt_redirect_silence	= ((HZ / 50) << (9 + 1));
static int ip_rt_error_cost		= HZ;
static int ip_rt_error_burst		= 5 * HZ;
static int ip_rt_gc_elasticity		= 8;
static int ip_rt_mtu_expires		= 10 * 60 * HZ;
static int ip_rt_min_pmtu		= 512 + 20 + 20;
static int ip_rt_min_advmss		= 256;
static int ip_rt_secret_interval	= 10 * 60 * HZ;
static int ip_rt_flush_expected;
static unsigned long rt_deadline;

#define RTprint(a...)	printk(KERN_DEBUG a)

static struct timer_list rt_flush_timer;
static void rt_worker_func(struct work_struct *work);
static DECLARE_DELAYED_WORK(expires_work, rt_worker_func);
static struct timer_list rt_secret_timer;
/*
 *	Interface to generic destination cache.
 */

static struct dst_entry *ipv4_dst_check(struct dst_entry *dst, u32 cookie);
static void		 ipv4_dst_destroy(struct dst_entry *dst);
static void		 ipv4_dst_ifdown(struct dst_entry *dst,
					 struct net_device *dev, int how);
static struct dst_entry *ipv4_negative_advice(struct dst_entry *dst);
static void		 ipv4_link_failure(struct sk_buff *skb);
static void		 ip_rt_update_pmtu(struct dst_entry *dst, u32 mtu);
static int rt_garbage_collect(void);
static struct dst_ops ipv4_dst_ops = {
	.family =		AF_INET,
	.protocol =		__constant_htons(ETH_P_IP),
	.gc =			rt_garbage_collect,
	.check =		ipv4_dst_check,
	.destroy =		ipv4_dst_destroy,
	.ifdown =		ipv4_dst_ifdown,
	.negative_advice =	ipv4_negative_advice,
	.link_failure =		ipv4_link_failure,
	.update_pmtu =		ip_rt_update_pmtu,
	.local_out =		ip_local_out,
	.entry_size =		sizeof(struct rtable),
};
#define ECN_OR_COST(class)	TC_PRIO_##class

const __u8 ip_tos2prio[16] = {
	ECN_OR_COST(BESTEFFORT),
	ECN_OR_COST(INTERACTIVE),
	ECN_OR_COST(INTERACTIVE),
	TC_PRIO_INTERACTIVE_BULK,
	ECN_OR_COST(INTERACTIVE_BULK),
	TC_PRIO_INTERACTIVE_BULK,
	ECN_OR_COST(INTERACTIVE_BULK)
};
/* The locking scheme is rather straightforward:
 *
 * 1) Read-Copy Update protects the buckets of the central route hash.
 * 2) Only writers remove entries, and they hold the lock
 *    as they look at rtable reference counts.
 * 3) Only readers acquire references to rtable entries,
 *    they do so with atomic increments and with the
 *    protection of RCU.
 */
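/*
 * Illustrative sketch (not part of the original file): the reader/writer
 * pattern described above, written out using the rt_hash_table,
 * rt_hash_lock_addr(), compare_keys() and rt_free() helpers defined
 * further down in this file.  Readers walk a bucket under
 * rcu_read_lock_bh() and never take the bucket spinlock; only writers
 * that unlink entries do.  The helper names are hypothetical.
 */
#if 0
static struct rtable *example_lookup(unsigned hash, struct flowi *key)
{
	struct rtable *rth;

	rcu_read_lock_bh();
	for (rth = rcu_dereference(rt_hash_table[hash].chain); rth;
	     rth = rcu_dereference(rth->u.dst.rt_next)) {
		if (compare_keys(&rth->fl, key)) {
			dst_hold(&rth->u.dst);	/* atomic refcount increment */
			rcu_read_unlock_bh();
			return rth;
		}
	}
	rcu_read_unlock_bh();
	return NULL;
}

static void example_unlink(unsigned hash, struct rtable *rt)
{
	struct rtable **rthp;

	spin_lock_bh(rt_hash_lock_addr(hash));	/* writers serialize here */
	for (rthp = &rt_hash_table[hash].chain; *rthp;
	     rthp = &(*rthp)->u.dst.rt_next) {
		if (*rthp == rt) {
			*rthp = rt->u.dst.rt_next;
			rt_free(rt);		/* freed after an RCU grace period */
			break;
		}
	}
	spin_unlock_bh(rt_hash_lock_addr(hash));
}
#endif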
struct rt_hash_bucket {
	struct rtable	*chain;
};

#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK) || \
	defined(CONFIG_PROVE_LOCKING)
/*
 * Instead of using one spinlock for each rt_hash_bucket, we use a table of spinlocks
 * The size of this table is a power of two and depends on the number of CPUS.
 * (on lockdep we have a quite big spinlock_t, so keep the size down there)
 */
#ifdef CONFIG_LOCKDEP
# define RT_HASH_LOCK_SZ	256
#else
# if NR_CPUS >= 32
#  define RT_HASH_LOCK_SZ	4096
# elif NR_CPUS >= 16
#  define RT_HASH_LOCK_SZ	2048
# elif NR_CPUS >= 8
#  define RT_HASH_LOCK_SZ	1024
# elif NR_CPUS >= 4
#  define RT_HASH_LOCK_SZ	512
# else
#  define RT_HASH_LOCK_SZ	256
# endif
#endif

static spinlock_t	*rt_hash_locks;
# define rt_hash_lock_addr(slot) &rt_hash_locks[(slot) & (RT_HASH_LOCK_SZ - 1)]
# define rt_hash_lock_init()	{ \
		int i; \
		rt_hash_locks = kmalloc(sizeof(spinlock_t) * RT_HASH_LOCK_SZ, GFP_KERNEL); \
		if (!rt_hash_locks) panic("IP: failed to allocate rt_hash_locks\n"); \
		for (i = 0; i < RT_HASH_LOCK_SZ; i++) \
			spin_lock_init(&rt_hash_locks[i]); \
		}
#else
# define rt_hash_lock_addr(slot) NULL
# define rt_hash_lock_init()
#endif
static struct rt_hash_bucket	*rt_hash_table;
static unsigned			rt_hash_mask;
static unsigned int		rt_hash_log;
static unsigned int		rt_hash_rnd;

static DEFINE_PER_CPU(struct rt_cache_stat, rt_cache_stat);
#define RT_CACHE_STAT_INC(field) \
	(__raw_get_cpu_var(rt_cache_stat).field++)

static int rt_intern_hash(unsigned hash, struct rtable *rth,
				struct rtable **res);

static unsigned int rt_hash_code(u32 daddr, u32 saddr)
{
	return (jhash_2words(daddr, saddr, rt_hash_rnd)
		& rt_hash_mask);
}

#define rt_hash(daddr, saddr, idx) \
	rt_hash_code((__force u32)(__be32)(daddr),\
		(__force u32)(__be32)(saddr) ^ ((idx) << 5))
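/*
 * Note (not in the original source): rt_hash() folds the destination
 * address, the source address and the interface index into a jhash seeded
 * with rt_hash_rnd.  The seed is periodically regenerated (see
 * rt_run_flush() and rt_secret_rebuild() below), so remote hosts cannot
 * predict which bucket a flow lands in and deliberately build
 * pathologically long hash chains.
 */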
272 #ifdef CONFIG_PROC_FS
273 struct rt_cache_iter_state {
277 static struct rtable *rt_cache_get_first(struct seq_file *seq)
279 struct rtable *r = NULL;
280 struct rt_cache_iter_state *st = seq->private;
282 for (st->bucket = rt_hash_mask; st->bucket >= 0; --st->bucket) {
284 r = rt_hash_table[st->bucket].chain;
287 rcu_read_unlock_bh();
289 return rcu_dereference(r);
292 static struct rtable *rt_cache_get_next(struct seq_file *seq, struct rtable *r)
294 struct rt_cache_iter_state *st = seq->private;
296 r = r->u.dst.rt_next;
298 rcu_read_unlock_bh();
299 if (--st->bucket < 0)
302 r = rt_hash_table[st->bucket].chain;
304 return rcu_dereference(r);
307 static struct rtable *rt_cache_get_idx(struct seq_file *seq, loff_t pos)
309 struct rtable *r = rt_cache_get_first(seq);
312 while (pos && (r = rt_cache_get_next(seq, r)))
314 return pos ? NULL : r;
317 static void *rt_cache_seq_start(struct seq_file *seq, loff_t *pos)
319 return *pos ? rt_cache_get_idx(seq, *pos - 1) : SEQ_START_TOKEN;
322 static void *rt_cache_seq_next(struct seq_file *seq, void *v, loff_t *pos)
324 struct rtable *r = NULL;
326 if (v == SEQ_START_TOKEN)
327 r = rt_cache_get_first(seq);
329 r = rt_cache_get_next(seq, v);
334 static void rt_cache_seq_stop(struct seq_file *seq, void *v)
336 if (v && v != SEQ_START_TOKEN)
337 rcu_read_unlock_bh();
340 static int rt_cache_seq_show(struct seq_file *seq, void *v)
342 if (v == SEQ_START_TOKEN)
343 seq_printf(seq, "%-127s\n",
344 "Iface\tDestination\tGateway \tFlags\t\tRefCnt\tUse\t"
345 "Metric\tSource\t\tMTU\tWindow\tIRTT\tTOS\tHHRef\t"
348 struct rtable *r = v;
351 sprintf(temp, "%s\t%08lX\t%08lX\t%8X\t%d\t%u\t%d\t"
352 "%08lX\t%d\t%u\t%u\t%02X\t%d\t%1d\t%08X",
353 r->u.dst.dev ? r->u.dst.dev->name : "*",
354 (unsigned long)r->rt_dst, (unsigned long)r->rt_gateway,
355 r->rt_flags, atomic_read(&r->u.dst.__refcnt),
356 r->u.dst.__use, 0, (unsigned long)r->rt_src,
357 (dst_metric(&r->u.dst, RTAX_ADVMSS) ?
358 (int)dst_metric(&r->u.dst, RTAX_ADVMSS) + 40 : 0),
359 dst_metric(&r->u.dst, RTAX_WINDOW),
360 (int)((dst_metric(&r->u.dst, RTAX_RTT) >> 3) +
361 dst_metric(&r->u.dst, RTAX_RTTVAR)),
363 r->u.dst.hh ? atomic_read(&r->u.dst.hh->hh_refcnt) : -1,
364 r->u.dst.hh ? (r->u.dst.hh->hh_output ==
367 seq_printf(seq, "%-127s\n", temp);
372 static const struct seq_operations rt_cache_seq_ops = {
373 .start = rt_cache_seq_start,
374 .next = rt_cache_seq_next,
375 .stop = rt_cache_seq_stop,
376 .show = rt_cache_seq_show,
379 static int rt_cache_seq_open(struct inode *inode, struct file *file)
381 return seq_open_private(file, &rt_cache_seq_ops,
382 sizeof(struct rt_cache_iter_state));
385 static const struct file_operations rt_cache_seq_fops = {
386 .owner = THIS_MODULE,
387 .open = rt_cache_seq_open,
390 .release = seq_release_private,
394 static void *rt_cpu_seq_start(struct seq_file *seq, loff_t *pos)
399 return SEQ_START_TOKEN;
401 for (cpu = *pos-1; cpu < NR_CPUS; ++cpu) {
402 if (!cpu_possible(cpu))
405 return &per_cpu(rt_cache_stat, cpu);
410 static void *rt_cpu_seq_next(struct seq_file *seq, void *v, loff_t *pos)
414 for (cpu = *pos; cpu < NR_CPUS; ++cpu) {
415 if (!cpu_possible(cpu))
418 return &per_cpu(rt_cache_stat, cpu);
424 static void rt_cpu_seq_stop(struct seq_file *seq, void *v)
429 static int rt_cpu_seq_show(struct seq_file *seq, void *v)
431 struct rt_cache_stat *st = v;
433 if (v == SEQ_START_TOKEN) {
434 seq_printf(seq, "entries in_hit in_slow_tot in_slow_mc in_no_route in_brd in_martian_dst in_martian_src out_hit out_slow_tot out_slow_mc gc_total gc_ignored gc_goal_miss gc_dst_overflow in_hlist_search out_hlist_search\n");
438 seq_printf(seq,"%08x %08x %08x %08x %08x %08x %08x %08x "
439 " %08x %08x %08x %08x %08x %08x %08x %08x %08x \n",
440 atomic_read(&ipv4_dst_ops.entries),
463 static const struct seq_operations rt_cpu_seq_ops = {
464 .start = rt_cpu_seq_start,
465 .next = rt_cpu_seq_next,
466 .stop = rt_cpu_seq_stop,
467 .show = rt_cpu_seq_show,
471 static int rt_cpu_seq_open(struct inode *inode, struct file *file)
473 return seq_open(file, &rt_cpu_seq_ops);
476 static const struct file_operations rt_cpu_seq_fops = {
477 .owner = THIS_MODULE,
478 .open = rt_cpu_seq_open,
481 .release = seq_release,
484 #ifdef CONFIG_NET_CLS_ROUTE
485 static int ip_rt_acct_read(char *buffer, char **start, off_t offset,
486 int length, int *eof, void *data)
490 if ((offset & 3) || (length & 3))
493 if (offset >= sizeof(struct ip_rt_acct) * 256) {
498 if (offset + length >= sizeof(struct ip_rt_acct) * 256) {
499 length = sizeof(struct ip_rt_acct) * 256 - offset;
503 offset /= sizeof(u32);
506 u32 *dst = (u32 *) buffer;
509 memset(dst, 0, length);
511 for_each_possible_cpu(i) {
515 src = ((u32 *) per_cpu_ptr(ip_rt_acct, i)) + offset;
516 for (j = 0; j < length/4; j++)
524 static __init int ip_rt_proc_init(struct net *net)
526 struct proc_dir_entry *pde;
528 pde = proc_net_fops_create(net, "rt_cache", S_IRUGO,
533 pde = create_proc_entry("rt_cache", S_IRUGO, net->proc_net_stat);
537 pde->proc_fops = &rt_cpu_seq_fops;
539 #ifdef CONFIG_NET_CLS_ROUTE
540 pde = create_proc_read_entry("rt_acct", 0, net->proc_net,
541 ip_rt_acct_read, NULL);
547 #ifdef CONFIG_NET_CLS_ROUTE
549 remove_proc_entry("rt_cache", net->proc_net_stat);
552 remove_proc_entry("rt_cache", net->proc_net);
557 static inline int ip_rt_proc_init(struct net *net)
561 #endif /* CONFIG_PROC_FS */
563 static __inline__ void rt_free(struct rtable *rt)
565 call_rcu_bh(&rt->u.dst.rcu_head, dst_rcu_free);
568 static __inline__ void rt_drop(struct rtable *rt)
571 call_rcu_bh(&rt->u.dst.rcu_head, dst_rcu_free);
static __inline__ int rt_fast_clean(struct rtable *rth)
{
	/* Kill broadcast/multicast entries very aggressively, if they
	   collide in hash table with more useful entries */
	return (rth->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST)) &&
		rth->fl.iif && rth->u.dst.rt_next;
}
582 static __inline__ int rt_valuable(struct rtable *rth)
584 return (rth->rt_flags & (RTCF_REDIRECTED | RTCF_NOTIFY)) ||
588 static int rt_may_expire(struct rtable *rth, unsigned long tmo1, unsigned long tmo2)
593 if (atomic_read(&rth->u.dst.__refcnt))
597 if (rth->u.dst.expires &&
598 time_after_eq(jiffies, rth->u.dst.expires))
601 age = jiffies - rth->u.dst.lastuse;
603 if ((age <= tmo1 && !rt_fast_clean(rth)) ||
604 (age <= tmo2 && rt_valuable(rth)))
/* Bits of score are:
 * 31: very valuable
 * 30: not quite useless
 * 29..0: usage counter
 */
615 static inline u32 rt_score(struct rtable *rt)
617 u32 score = jiffies - rt->u.dst.lastuse;
619 score = ~score & ~(3<<30);
625 !(rt->rt_flags & (RTCF_BROADCAST|RTCF_MULTICAST|RTCF_LOCAL)))
631 static inline int compare_keys(struct flowi *fl1, struct flowi *fl2)
633 return ((__force u32)((fl1->nl_u.ip4_u.daddr ^ fl2->nl_u.ip4_u.daddr) |
634 (fl1->nl_u.ip4_u.saddr ^ fl2->nl_u.ip4_u.saddr)) |
635 (fl1->mark ^ fl2->mark) |
636 (*(u16 *)&fl1->nl_u.ip4_u.tos ^
637 *(u16 *)&fl2->nl_u.ip4_u.tos) |
638 (fl1->oif ^ fl2->oif) |
639 (fl1->iif ^ fl2->iif)) == 0;
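/*
 * Note (not in the original source): compare_keys() relies on the XOR of
 * two equal fields being zero; OR-ing the per-field XORs together and
 * testing the result against zero lets the whole flow-key comparison be
 * done with a handful of XOR/OR operations instead of a chain of
 * conditional branches on the lookup fast path.
 */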
/*
 * Perform a full scan of the hash table and free all entries.
 * Can be called by a softirq or a process.
 * In the latter case, we want to reschedule if necessary.
 */
647 static void rt_do_flush(int process_context)
650 struct rtable *rth, *next;
652 for (i = 0; i <= rt_hash_mask; i++) {
653 if (process_context && need_resched())
655 rth = rt_hash_table[i].chain;
659 spin_lock_bh(rt_hash_lock_addr(i));
660 rth = rt_hash_table[i].chain;
661 rt_hash_table[i].chain = NULL;
662 spin_unlock_bh(rt_hash_lock_addr(i));
664 for (; rth; rth = next) {
665 next = rth->u.dst.rt_next;
static void rt_check_expire(void)
{
	static unsigned int rover;
	unsigned int i = rover, goal;
	struct rtable *rth, **rthp;
	u64 mult;

	mult = ((u64)ip_rt_gc_interval) << rt_hash_log;
	if (ip_rt_gc_timeout > 1)
		do_div(mult, ip_rt_gc_timeout);
	goal = (unsigned int)mult;
	if (goal > rt_hash_mask)
		goal = rt_hash_mask + 1;
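	/*
	 * Worked example (not in the original source): with the defaults
	 * above, ip_rt_gc_interval = 60*HZ and ip_rt_gc_timeout = 300*HZ,
	 * so goal = (buckets * 60*HZ) / (300*HZ) = one fifth of the hash
	 * table per run.  Since rt_worker_func() reschedules this scan
	 * roughly every ip_rt_gc_interval, the whole table is walked about
	 * once per ip_rt_gc_timeout.
	 */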
684 for (; goal > 0; goal--) {
685 unsigned long tmo = ip_rt_gc_timeout;
687 i = (i + 1) & rt_hash_mask;
688 rthp = &rt_hash_table[i].chain;
695 spin_lock_bh(rt_hash_lock_addr(i));
696 while ((rth = *rthp) != NULL) {
697 if (rth->u.dst.expires) {
698 /* Entry is expired even if it is in use */
699 if (time_before_eq(jiffies, rth->u.dst.expires)) {
701 rthp = &rth->u.dst.rt_next;
704 } else if (!rt_may_expire(rth, tmo, ip_rt_gc_timeout)) {
706 rthp = &rth->u.dst.rt_next;
710 /* Cleanup aged off entries. */
711 *rthp = rth->u.dst.rt_next;
714 spin_unlock_bh(rt_hash_lock_addr(i));
/*
 * rt_worker_func() is run in process context.
 * If a whole flush was scheduled, it is done.
 * Otherwise, rt_check_expire() is called to scan part of the hash table.
 */
724 static void rt_worker_func(struct work_struct *work)
726 if (ip_rt_flush_expected) {
727 ip_rt_flush_expected = 0;
731 schedule_delayed_work(&expires_work, ip_rt_gc_interval);
/* This can run from both BH and non-BH contexts, the latter
 * in the case of a forced flush event.
 */
737 static void rt_run_flush(unsigned long process_context)
741 get_random_bytes(&rt_hash_rnd, 4);
743 rt_do_flush(process_context);
746 static DEFINE_SPINLOCK(rt_flush_lock);
748 void rt_cache_flush(int delay)
750 unsigned long now = jiffies;
751 int user_mode = !in_softirq();
754 delay = ip_rt_min_delay;
756 spin_lock_bh(&rt_flush_lock);
758 if (del_timer(&rt_flush_timer) && delay > 0 && rt_deadline) {
759 long tmo = (long)(rt_deadline - now);
		/* If flush timer is already running
		   and flush request is not immediate (delay > 0):

		   if the deadline has not been reached, prolong the timer to "delay",
		   otherwise fire it at deadline time.
		 */
768 if (user_mode && tmo < ip_rt_max_delay-ip_rt_min_delay)
776 spin_unlock_bh(&rt_flush_lock);
777 rt_run_flush(user_mode);
781 if (rt_deadline == 0)
782 rt_deadline = now + ip_rt_max_delay;
784 mod_timer(&rt_flush_timer, now+delay);
785 spin_unlock_bh(&rt_flush_lock);
/*
 * We change rt_hash_rnd and ask the next rt_worker_func() invocation
 * to perform a flush in process context.
 */
792 static void rt_secret_rebuild(unsigned long dummy)
794 get_random_bytes(&rt_hash_rnd, 4);
795 ip_rt_flush_expected = 1;
796 cancel_delayed_work(&expires_work);
797 schedule_delayed_work(&expires_work, HZ/10);
798 mod_timer(&rt_secret_timer, jiffies + ip_rt_secret_interval);
/*
   Short description of GC goals.

   We want to build an algorithm that keeps the routing cache at some
   equilibrium point, where the number of aged-off entries is kept
   approximately equal to the number of newly generated ones.

   Current expiration strength is the variable "expire".
   We try to adjust it dynamically, so that when the network is idle
   "expire" is large enough to keep enough warm entries, and when load
   increases it is reduced to limit the cache size.
 */
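/*
 * Illustrative sketch (not part of the original file): the adaptive
 * "expire" control loop implemented by rt_garbage_collect() below,
 * reduced to its core.  Only the ip_rt_* sysctls are real; the helper
 * name and parameters are hypothetical.
 */
#if 0
static unsigned long example_adjust_expire(unsigned long expire,
					    int freed_enough, int cache_small)
{
	if (!freed_enough && !cache_small) {
		/* Under pressure: become more aggressive by halving the
		 * age threshold so that younger entries become eligible. */
		return expire >> 1;
	}
	/* Relaxed again: slowly raise the threshold back towards the
	 * configured maximum, keeping more warm entries around. */
	expire += ip_rt_gc_min_interval;
	if (expire > ip_rt_gc_timeout)
		expire = ip_rt_gc_timeout;
	return expire;
}
#endif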
814 static int rt_garbage_collect(void)
816 static unsigned long expire = RT_GC_TIMEOUT;
817 static unsigned long last_gc;
819 static int equilibrium;
820 struct rtable *rth, **rthp;
821 unsigned long now = jiffies;
	/*
	 * Garbage collection is pretty expensive,
	 * do not run it too frequently.
	 */
829 RT_CACHE_STAT_INC(gc_total);
831 if (now - last_gc < ip_rt_gc_min_interval &&
832 atomic_read(&ipv4_dst_ops.entries) < ip_rt_max_size) {
833 RT_CACHE_STAT_INC(gc_ignored);
	/* Calculate the number of entries we want to expire now. */
838 goal = atomic_read(&ipv4_dst_ops.entries) -
839 (ip_rt_gc_elasticity << rt_hash_log);
841 if (equilibrium < ipv4_dst_ops.gc_thresh)
842 equilibrium = ipv4_dst_ops.gc_thresh;
843 goal = atomic_read(&ipv4_dst_ops.entries) - equilibrium;
845 equilibrium += min_t(unsigned int, goal / 2, rt_hash_mask + 1);
846 goal = atomic_read(&ipv4_dst_ops.entries) - equilibrium;
		/* We are in a dangerous area. Try to reduce cache really
		 * aggressively.
		 */
852 goal = max_t(unsigned int, goal / 2, rt_hash_mask + 1);
853 equilibrium = atomic_read(&ipv4_dst_ops.entries) - goal;
856 if (now - last_gc >= ip_rt_gc_min_interval)
867 for (i = rt_hash_mask, k = rover; i >= 0; i--) {
868 unsigned long tmo = expire;
870 k = (k + 1) & rt_hash_mask;
871 rthp = &rt_hash_table[k].chain;
872 spin_lock_bh(rt_hash_lock_addr(k));
873 while ((rth = *rthp) != NULL) {
874 if (!rt_may_expire(rth, tmo, expire)) {
876 rthp = &rth->u.dst.rt_next;
879 *rthp = rth->u.dst.rt_next;
883 spin_unlock_bh(rt_hash_lock_addr(k));
	/* Goal is not achieved. We stop the process if:

	   - expire has been reduced to zero; otherwise, expire is halved.
	   - the table is not full.
	   - we are called from interrupt context.
	   - the jiffies check is just a fallback/debug loop breaker.
	     We will not spin here for a long time in any case.
	 */
901 RT_CACHE_STAT_INC(gc_goal_miss);
907 #if RT_CACHE_DEBUG >= 2
908 printk(KERN_DEBUG "expire>> %u %d %d %d\n", expire,
909 atomic_read(&ipv4_dst_ops.entries), goal, i);
912 if (atomic_read(&ipv4_dst_ops.entries) < ip_rt_max_size)
914 } while (!in_softirq() && time_before_eq(jiffies, now));
916 if (atomic_read(&ipv4_dst_ops.entries) < ip_rt_max_size)
919 printk(KERN_WARNING "dst cache overflow\n");
920 RT_CACHE_STAT_INC(gc_dst_overflow);
924 expire += ip_rt_gc_min_interval;
925 if (expire > ip_rt_gc_timeout ||
926 atomic_read(&ipv4_dst_ops.entries) < ipv4_dst_ops.gc_thresh)
927 expire = ip_rt_gc_timeout;
928 #if RT_CACHE_DEBUG >= 2
929 printk(KERN_DEBUG "expire++ %u %d %d %d\n", expire,
930 atomic_read(&ipv4_dst_ops.entries), goal, rover);
935 static int rt_intern_hash(unsigned hash, struct rtable *rt, struct rtable **rp)
937 struct rtable *rth, **rthp;
939 struct rtable *cand, **candp;
942 int attempts = !in_softirq();
951 rthp = &rt_hash_table[hash].chain;
953 spin_lock_bh(rt_hash_lock_addr(hash));
954 while ((rth = *rthp) != NULL) {
955 if (compare_keys(&rth->fl, &rt->fl)) {
957 *rthp = rth->u.dst.rt_next;
959 * Since lookup is lockfree, the deletion
960 * must be visible to another weakly ordered CPU before
961 * the insertion at the start of the hash chain.
963 rcu_assign_pointer(rth->u.dst.rt_next,
964 rt_hash_table[hash].chain);
966 * Since lookup is lockfree, the update writes
967 * must be ordered for consistency on SMP.
969 rcu_assign_pointer(rt_hash_table[hash].chain, rth);
971 dst_use(&rth->u.dst, now);
972 spin_unlock_bh(rt_hash_lock_addr(hash));
979 if (!atomic_read(&rth->u.dst.__refcnt)) {
980 u32 score = rt_score(rth);
982 if (score <= min_score) {
991 rthp = &rth->u.dst.rt_next;
		/* ip_rt_gc_elasticity used to be the average chain length;
		 * when it is exceeded, gc becomes really aggressive.
		 *
		 * The second limit is less certain. At the moment it allows
		 * only 2 entries per bucket. We will see.
		 */
1001 if (chain_length > ip_rt_gc_elasticity) {
1002 *candp = cand->u.dst.rt_next;
	/* Try to bind route to arp only if it is an output
	   route or a unicast forwarding path.
	 */
1010 if (rt->rt_type == RTN_UNICAST || rt->fl.iif == 0) {
1011 int err = arp_bind_neighbour(&rt->u.dst);
1013 spin_unlock_bh(rt_hash_lock_addr(hash));
1015 if (err != -ENOBUFS) {
			/* Neighbour tables are full and nothing
			   can be released. Try to shrink the route cache;
			   most likely it holds some neighbour records.
			 */
1024 if (attempts-- > 0) {
1025 int saved_elasticity = ip_rt_gc_elasticity;
1026 int saved_int = ip_rt_gc_min_interval;
1027 ip_rt_gc_elasticity = 1;
1028 ip_rt_gc_min_interval = 0;
1029 rt_garbage_collect();
1030 ip_rt_gc_min_interval = saved_int;
1031 ip_rt_gc_elasticity = saved_elasticity;
1035 if (net_ratelimit())
1036 printk(KERN_WARNING "Neighbour table overflow.\n");
1042 rt->u.dst.rt_next = rt_hash_table[hash].chain;
1043 #if RT_CACHE_DEBUG >= 2
1044 if (rt->u.dst.rt_next) {
1046 printk(KERN_DEBUG "rt_cache @%02x: %u.%u.%u.%u", hash,
1047 NIPQUAD(rt->rt_dst));
1048 for (trt = rt->u.dst.rt_next; trt; trt = trt->u.dst.rt_next)
1049 printk(" . %u.%u.%u.%u", NIPQUAD(trt->rt_dst));
1053 rt_hash_table[hash].chain = rt;
1054 spin_unlock_bh(rt_hash_lock_addr(hash));
1059 void rt_bind_peer(struct rtable *rt, int create)
1061 static DEFINE_SPINLOCK(rt_peer_lock);
1062 struct inet_peer *peer;
1064 peer = inet_getpeer(rt->rt_dst, create);
1066 spin_lock_bh(&rt_peer_lock);
1067 if (rt->peer == NULL) {
1071 spin_unlock_bh(&rt_peer_lock);
/*
 * Peer allocation may fail only in serious out-of-memory conditions.  However
 * we can still generate some output.
 * Random ID selection looks a bit dangerous because we have no chance to
 * select an ID that is unique over a reasonable period of time.
 * But a broken packet identifier may be better than no packet at all.
 */
1083 static void ip_select_fb_ident(struct iphdr *iph)
1085 static DEFINE_SPINLOCK(ip_fb_id_lock);
1086 static u32 ip_fallback_id;
1089 spin_lock_bh(&ip_fb_id_lock);
1090 salt = secure_ip_id((__force __be32)ip_fallback_id ^ iph->daddr);
1091 iph->id = htons(salt & 0xFFFF);
1092 ip_fallback_id = salt;
1093 spin_unlock_bh(&ip_fb_id_lock);
1096 void __ip_select_ident(struct iphdr *iph, struct dst_entry *dst, int more)
1098 struct rtable *rt = (struct rtable *) dst;
1101 if (rt->peer == NULL)
1102 rt_bind_peer(rt, 1);
		/* If peer is attached to destination, it is never detached,
		   so we do not need to grab a lock to dereference it.
		 */
1108 iph->id = htons(inet_getid(rt->peer, more));
1112 printk(KERN_DEBUG "rt_bind_peer(0) @%p\n",
1113 __builtin_return_address(0));
1115 ip_select_fb_ident(iph);
1118 static void rt_del(unsigned hash, struct rtable *rt)
1120 struct rtable **rthp;
1122 spin_lock_bh(rt_hash_lock_addr(hash));
1124 for (rthp = &rt_hash_table[hash].chain; *rthp;
1125 rthp = &(*rthp)->u.dst.rt_next)
1127 *rthp = rt->u.dst.rt_next;
1131 spin_unlock_bh(rt_hash_lock_addr(hash));
1134 void ip_rt_redirect(__be32 old_gw, __be32 daddr, __be32 new_gw,
1135 __be32 saddr, struct net_device *dev)
1138 struct in_device *in_dev = in_dev_get(dev);
1139 struct rtable *rth, **rthp;
1140 __be32 skeys[2] = { saddr, 0 };
1141 int ikeys[2] = { dev->ifindex, 0 };
1142 struct netevent_redirect netevent;
1147 if (new_gw == old_gw || !IN_DEV_RX_REDIRECTS(in_dev)
1148 || MULTICAST(new_gw) || BADCLASS(new_gw) || ZERONET(new_gw))
1149 goto reject_redirect;
1151 if (!IN_DEV_SHARED_MEDIA(in_dev)) {
1152 if (!inet_addr_onlink(in_dev, new_gw, old_gw))
1153 goto reject_redirect;
1154 if (IN_DEV_SEC_REDIRECTS(in_dev) && ip_fib_check_default(new_gw, dev))
1155 goto reject_redirect;
1157 if (inet_addr_type(new_gw) != RTN_UNICAST)
1158 goto reject_redirect;
1161 for (i = 0; i < 2; i++) {
1162 for (k = 0; k < 2; k++) {
1163 unsigned hash = rt_hash(daddr, skeys[i], ikeys[k]);
1165 rthp=&rt_hash_table[hash].chain;
1168 while ((rth = rcu_dereference(*rthp)) != NULL) {
1171 if (rth->fl.fl4_dst != daddr ||
1172 rth->fl.fl4_src != skeys[i] ||
1173 rth->fl.oif != ikeys[k] ||
1175 rthp = &rth->u.dst.rt_next;
1179 if (rth->rt_dst != daddr ||
1180 rth->rt_src != saddr ||
1182 rth->rt_gateway != old_gw ||
1183 rth->u.dst.dev != dev)
1186 dst_hold(&rth->u.dst);
1189 rt = dst_alloc(&ipv4_dst_ops);
1196 /* Copy all the information. */
1198 INIT_RCU_HEAD(&rt->u.dst.rcu_head);
1199 rt->u.dst.__use = 1;
1200 atomic_set(&rt->u.dst.__refcnt, 1);
1201 rt->u.dst.child = NULL;
1203 dev_hold(rt->u.dst.dev);
1205 in_dev_hold(rt->idev);
1206 rt->u.dst.obsolete = 0;
1207 rt->u.dst.lastuse = jiffies;
1208 rt->u.dst.path = &rt->u.dst;
1209 rt->u.dst.neighbour = NULL;
1210 rt->u.dst.hh = NULL;
1211 rt->u.dst.xfrm = NULL;
1213 rt->rt_flags |= RTCF_REDIRECTED;
1215 /* Gateway is different ... */
1216 rt->rt_gateway = new_gw;
1218 /* Redirect received -> path was valid */
1219 dst_confirm(&rth->u.dst);
1222 atomic_inc(&rt->peer->refcnt);
1224 if (arp_bind_neighbour(&rt->u.dst) ||
1225 !(rt->u.dst.neighbour->nud_state &
1227 if (rt->u.dst.neighbour)
1228 neigh_event_send(rt->u.dst.neighbour, NULL);
1234 netevent.old = &rth->u.dst;
1235 netevent.new = &rt->u.dst;
1236 call_netevent_notifiers(NETEVENT_REDIRECT,
1240 if (!rt_intern_hash(hash, rt, &rt))
1253 #ifdef CONFIG_IP_ROUTE_VERBOSE
1254 if (IN_DEV_LOG_MARTIANS(in_dev) && net_ratelimit())
1255 printk(KERN_INFO "Redirect from %u.%u.%u.%u on %s about "
1256 "%u.%u.%u.%u ignored.\n"
1257 " Advised path = %u.%u.%u.%u -> %u.%u.%u.%u\n",
1258 NIPQUAD(old_gw), dev->name, NIPQUAD(new_gw),
1259 NIPQUAD(saddr), NIPQUAD(daddr));
1264 static struct dst_entry *ipv4_negative_advice(struct dst_entry *dst)
1266 struct rtable *rt = (struct rtable*)dst;
1267 struct dst_entry *ret = dst;
1270 if (dst->obsolete) {
1273 } else if ((rt->rt_flags & RTCF_REDIRECTED) ||
1274 rt->u.dst.expires) {
1275 unsigned hash = rt_hash(rt->fl.fl4_dst, rt->fl.fl4_src,
1277 #if RT_CACHE_DEBUG >= 1
1278 printk(KERN_DEBUG "ipv4_negative_advice: redirect to "
1279 "%u.%u.%u.%u/%02x dropped\n",
1280 NIPQUAD(rt->rt_dst), rt->fl.fl4_tos);
/*
 *	1. The first ip_rt_redirect_number redirects are sent
 *	   with exponential backoff, then we stop sending them at all,
 *	   assuming that the host ignores our redirects.
 *	2. If we did not see packets requiring redirects
 *	   during ip_rt_redirect_silence, we assume that the host
 *	   forgot the redirected route and start sending redirects again.
 *
 *	This algorithm is much cheaper and more intelligent than dumb load limiting
 *	in icmp.c.
 *
 *	NOTE. Do not forget to inhibit load limiting for redirects (redundant)
 *	and "frag. need" (breaks PMTU discovery) in icmp.c.
 */
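/*
 * Illustrative sketch (not part of the original file): the exponential
 * backoff test used by ip_rt_send_redirect() below, pulled out into a
 * hypothetical helper.  The minimum gap before the next redirect doubles
 * after every redirect sent (ip_rt_redirect_load << rate_tokens); once
 * ip_rt_redirect_number redirects have been ignored we stay silent until
 * ip_rt_redirect_silence has elapsed.
 */
#if 0
static int example_may_send_redirect(struct dst_entry *dst)
{
	if (time_after(jiffies, dst->rate_last + ip_rt_redirect_silence))
		dst->rate_tokens = 0;		/* silence period elapsed: restart */

	if (dst->rate_tokens >= ip_rt_redirect_number) {
		dst->rate_last = jiffies;	/* host keeps ignoring us, stay quiet */
		return 0;
	}

	return dst->rate_tokens == 0 ||
	       time_after(jiffies,
			  dst->rate_last +
			  (ip_rt_redirect_load << dst->rate_tokens));
}
#endif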
1305 void ip_rt_send_redirect(struct sk_buff *skb)
1307 struct rtable *rt = (struct rtable*)skb->dst;
1308 struct in_device *in_dev = in_dev_get(rt->u.dst.dev);
1313 if (!IN_DEV_TX_REDIRECTS(in_dev))
	/* No redirected packets during ip_rt_redirect_silence;
	 * reset the algorithm.
	 */
1319 if (time_after(jiffies, rt->u.dst.rate_last + ip_rt_redirect_silence))
1320 rt->u.dst.rate_tokens = 0;
	/* Too many ignored redirects; do not send anything.
	 * Set u.dst.rate_last to the last seen redirected packet.
	 */
1325 if (rt->u.dst.rate_tokens >= ip_rt_redirect_number) {
1326 rt->u.dst.rate_last = jiffies;
	/* Check for load limit; set rate_last to the latest sent
	 * redirect.
	 */
1333 if (rt->u.dst.rate_tokens == 0 ||
1335 (rt->u.dst.rate_last +
1336 (ip_rt_redirect_load << rt->u.dst.rate_tokens)))) {
1337 icmp_send(skb, ICMP_REDIRECT, ICMP_REDIR_HOST, rt->rt_gateway);
1338 rt->u.dst.rate_last = jiffies;
1339 ++rt->u.dst.rate_tokens;
1340 #ifdef CONFIG_IP_ROUTE_VERBOSE
1341 if (IN_DEV_LOG_MARTIANS(in_dev) &&
1342 rt->u.dst.rate_tokens == ip_rt_redirect_number &&
1344 printk(KERN_WARNING "host %u.%u.%u.%u/if%d ignores "
1345 "redirects for %u.%u.%u.%u to %u.%u.%u.%u.\n",
1346 NIPQUAD(rt->rt_src), rt->rt_iif,
1347 NIPQUAD(rt->rt_dst), NIPQUAD(rt->rt_gateway));
1354 static int ip_error(struct sk_buff *skb)
1356 struct rtable *rt = (struct rtable*)skb->dst;
1360 switch (rt->u.dst.error) {
1365 code = ICMP_HOST_UNREACH;
1368 code = ICMP_NET_UNREACH;
1369 IP_INC_STATS_BH(IPSTATS_MIB_INNOROUTES);
1372 code = ICMP_PKT_FILTERED;
1377 rt->u.dst.rate_tokens += now - rt->u.dst.rate_last;
1378 if (rt->u.dst.rate_tokens > ip_rt_error_burst)
1379 rt->u.dst.rate_tokens = ip_rt_error_burst;
1380 rt->u.dst.rate_last = now;
1381 if (rt->u.dst.rate_tokens >= ip_rt_error_cost) {
1382 rt->u.dst.rate_tokens -= ip_rt_error_cost;
1383 icmp_send(skb, ICMP_DEST_UNREACH, code, 0);
1386 out: kfree_skb(skb);
/*
 *	The last two values are not from the RFC but
 *	are needed for AMPRnet AX.25 paths.
 */

static const unsigned short mtu_plateau[] =
{32000, 17914, 8166, 4352, 2002, 1492, 576, 296, 216, 128 };
static __inline__ unsigned short guess_mtu(unsigned short old_mtu)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(mtu_plateau); i++)
		if (old_mtu > mtu_plateau[i])
			return mtu_plateau[i];
	return 68;
}
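/*
 * Worked example (not in the original source): guess_mtu(1500) walks the
 * plateau table until it finds the first value strictly below the old MTU
 * and returns 1492; guess_mtu(296) returns 216.  If old_mtu is not above
 * any plateau value, the IPv4 minimum of 68 is returned.
 */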
1408 unsigned short ip_rt_frag_needed(struct iphdr *iph, unsigned short new_mtu)
1411 unsigned short old_mtu = ntohs(iph->tot_len);
1413 __be32 skeys[2] = { iph->saddr, 0, };
1414 __be32 daddr = iph->daddr;
1415 unsigned short est_mtu = 0;
1417 if (ipv4_config.no_pmtu_disc)
1420 for (i = 0; i < 2; i++) {
1421 unsigned hash = rt_hash(daddr, skeys[i], 0);
1424 for (rth = rcu_dereference(rt_hash_table[hash].chain); rth;
1425 rth = rcu_dereference(rth->u.dst.rt_next)) {
1426 if (rth->fl.fl4_dst == daddr &&
1427 rth->fl.fl4_src == skeys[i] &&
1428 rth->rt_dst == daddr &&
1429 rth->rt_src == iph->saddr &&
1431 !(dst_metric_locked(&rth->u.dst, RTAX_MTU))) {
1432 unsigned short mtu = new_mtu;
1434 if (new_mtu < 68 || new_mtu >= old_mtu) {
1436 /* BSD 4.2 compatibility hack :-( */
1438 old_mtu >= rth->u.dst.metrics[RTAX_MTU-1] &&
1439 old_mtu >= 68 + (iph->ihl << 2))
1440 old_mtu -= iph->ihl << 2;
1442 mtu = guess_mtu(old_mtu);
1444 if (mtu <= rth->u.dst.metrics[RTAX_MTU-1]) {
1445 if (mtu < rth->u.dst.metrics[RTAX_MTU-1]) {
1446 dst_confirm(&rth->u.dst);
1447 if (mtu < ip_rt_min_pmtu) {
1448 mtu = ip_rt_min_pmtu;
1449 rth->u.dst.metrics[RTAX_LOCK-1] |=
1452 rth->u.dst.metrics[RTAX_MTU-1] = mtu;
1453 dst_set_expires(&rth->u.dst,
1462 return est_mtu ? : new_mtu;
1465 static void ip_rt_update_pmtu(struct dst_entry *dst, u32 mtu)
1467 if (dst->metrics[RTAX_MTU-1] > mtu && mtu >= 68 &&
1468 !(dst_metric_locked(dst, RTAX_MTU))) {
1469 if (mtu < ip_rt_min_pmtu) {
1470 mtu = ip_rt_min_pmtu;
1471 dst->metrics[RTAX_LOCK-1] |= (1 << RTAX_MTU);
1473 dst->metrics[RTAX_MTU-1] = mtu;
1474 dst_set_expires(dst, ip_rt_mtu_expires);
1475 call_netevent_notifiers(NETEVENT_PMTU_UPDATE, dst);
1479 static struct dst_entry *ipv4_dst_check(struct dst_entry *dst, u32 cookie)
1484 static void ipv4_dst_destroy(struct dst_entry *dst)
1486 struct rtable *rt = (struct rtable *) dst;
1487 struct inet_peer *peer = rt->peer;
1488 struct in_device *idev = rt->idev;
1501 static void ipv4_dst_ifdown(struct dst_entry *dst, struct net_device *dev,
1504 struct rtable *rt = (struct rtable *) dst;
1505 struct in_device *idev = rt->idev;
1506 if (dev != init_net.loopback_dev && idev && idev->dev == dev) {
1507 struct in_device *loopback_idev = in_dev_get(init_net.loopback_dev);
1508 if (loopback_idev) {
1509 rt->idev = loopback_idev;
1515 static void ipv4_link_failure(struct sk_buff *skb)
1519 icmp_send(skb, ICMP_DEST_UNREACH, ICMP_HOST_UNREACH, 0);
1521 rt = (struct rtable *) skb->dst;
1523 dst_set_expires(&rt->u.dst, 0);
1526 static int ip_rt_bug(struct sk_buff *skb)
1528 printk(KERN_DEBUG "ip_rt_bug: %u.%u.%u.%u -> %u.%u.%u.%u, %s\n",
1529 NIPQUAD(ip_hdr(skb)->saddr), NIPQUAD(ip_hdr(skb)->daddr),
1530 skb->dev ? skb->dev->name : "?");
/*
   We do not cache the source address of the outgoing interface,
   because it is used only by IP RR, TS and SRR options,
   so it is out of the fast path.

   BTW remember: "addr" is allowed to be unaligned
   in IP options!
 */
1544 void ip_rt_get_source(u8 *addr, struct rtable *rt)
1547 struct fib_result res;
1549 if (rt->fl.iif == 0)
1551 else if (fib_lookup(&rt->fl, &res) == 0) {
1552 src = FIB_RES_PREFSRC(res);
1555 src = inet_select_addr(rt->u.dst.dev, rt->rt_gateway,
1557 memcpy(addr, &src, 4);
1560 #ifdef CONFIG_NET_CLS_ROUTE
1561 static void set_class_tag(struct rtable *rt, u32 tag)
1563 if (!(rt->u.dst.tclassid & 0xFFFF))
1564 rt->u.dst.tclassid |= tag & 0xFFFF;
1565 if (!(rt->u.dst.tclassid & 0xFFFF0000))
1566 rt->u.dst.tclassid |= tag & 0xFFFF0000;
1570 static void rt_set_nexthop(struct rtable *rt, struct fib_result *res, u32 itag)
1572 struct fib_info *fi = res->fi;
1575 if (FIB_RES_GW(*res) &&
1576 FIB_RES_NH(*res).nh_scope == RT_SCOPE_LINK)
1577 rt->rt_gateway = FIB_RES_GW(*res);
1578 memcpy(rt->u.dst.metrics, fi->fib_metrics,
1579 sizeof(rt->u.dst.metrics));
1580 if (fi->fib_mtu == 0) {
1581 rt->u.dst.metrics[RTAX_MTU-1] = rt->u.dst.dev->mtu;
1582 if (rt->u.dst.metrics[RTAX_LOCK-1] & (1 << RTAX_MTU) &&
1583 rt->rt_gateway != rt->rt_dst &&
1584 rt->u.dst.dev->mtu > 576)
1585 rt->u.dst.metrics[RTAX_MTU-1] = 576;
1587 #ifdef CONFIG_NET_CLS_ROUTE
1588 rt->u.dst.tclassid = FIB_RES_NH(*res).nh_tclassid;
1591 rt->u.dst.metrics[RTAX_MTU-1]= rt->u.dst.dev->mtu;
1593 if (rt->u.dst.metrics[RTAX_HOPLIMIT-1] == 0)
1594 rt->u.dst.metrics[RTAX_HOPLIMIT-1] = sysctl_ip_default_ttl;
1595 if (rt->u.dst.metrics[RTAX_MTU-1] > IP_MAX_MTU)
1596 rt->u.dst.metrics[RTAX_MTU-1] = IP_MAX_MTU;
1597 if (rt->u.dst.metrics[RTAX_ADVMSS-1] == 0)
1598 rt->u.dst.metrics[RTAX_ADVMSS-1] = max_t(unsigned int, rt->u.dst.dev->mtu - 40,
1600 if (rt->u.dst.metrics[RTAX_ADVMSS-1] > 65535 - 40)
1601 rt->u.dst.metrics[RTAX_ADVMSS-1] = 65535 - 40;
1603 #ifdef CONFIG_NET_CLS_ROUTE
1604 #ifdef CONFIG_IP_MULTIPLE_TABLES
1605 set_class_tag(rt, fib_rules_tclass(res));
1607 set_class_tag(rt, itag);
1609 rt->rt_type = res->type;
1612 static int ip_route_input_mc(struct sk_buff *skb, __be32 daddr, __be32 saddr,
1613 u8 tos, struct net_device *dev, int our)
1618 struct in_device *in_dev = in_dev_get(dev);
1621 /* Primary sanity checks. */
1626 if (MULTICAST(saddr) || BADCLASS(saddr) || LOOPBACK(saddr) ||
1627 skb->protocol != htons(ETH_P_IP))
1630 if (ZERONET(saddr)) {
1631 if (!LOCAL_MCAST(daddr))
1633 spec_dst = inet_select_addr(dev, 0, RT_SCOPE_LINK);
1634 } else if (fib_validate_source(saddr, 0, tos, 0,
1635 dev, &spec_dst, &itag) < 0)
1638 rth = dst_alloc(&ipv4_dst_ops);
1642 rth->u.dst.output= ip_rt_bug;
1644 atomic_set(&rth->u.dst.__refcnt, 1);
1645 rth->u.dst.flags= DST_HOST;
1646 if (IN_DEV_CONF_GET(in_dev, NOPOLICY))
1647 rth->u.dst.flags |= DST_NOPOLICY;
1648 rth->fl.fl4_dst = daddr;
1649 rth->rt_dst = daddr;
1650 rth->fl.fl4_tos = tos;
1651 rth->fl.mark = skb->mark;
1652 rth->fl.fl4_src = saddr;
1653 rth->rt_src = saddr;
1654 #ifdef CONFIG_NET_CLS_ROUTE
1655 rth->u.dst.tclassid = itag;
1658 rth->fl.iif = dev->ifindex;
1659 rth->u.dst.dev = init_net.loopback_dev;
1660 dev_hold(rth->u.dst.dev);
1661 rth->idev = in_dev_get(rth->u.dst.dev);
1663 rth->rt_gateway = daddr;
1664 rth->rt_spec_dst= spec_dst;
1665 rth->rt_type = RTN_MULTICAST;
1666 rth->rt_flags = RTCF_MULTICAST;
1668 rth->u.dst.input= ip_local_deliver;
1669 rth->rt_flags |= RTCF_LOCAL;
1672 #ifdef CONFIG_IP_MROUTE
1673 if (!LOCAL_MCAST(daddr) && IN_DEV_MFORWARD(in_dev))
1674 rth->u.dst.input = ip_mr_input;
1676 RT_CACHE_STAT_INC(in_slow_mc);
1679 hash = rt_hash(daddr, saddr, dev->ifindex);
1680 return rt_intern_hash(hash, rth, (struct rtable**) &skb->dst);
1692 static void ip_handle_martian_source(struct net_device *dev,
1693 struct in_device *in_dev,
1694 struct sk_buff *skb,
1698 RT_CACHE_STAT_INC(in_martian_src);
1699 #ifdef CONFIG_IP_ROUTE_VERBOSE
1700 if (IN_DEV_LOG_MARTIANS(in_dev) && net_ratelimit()) {
		/*
		 *	RFC1812 recommendation: if the source is martian,
		 *	the only hint we have is the MAC header.
		 */
1705 printk(KERN_WARNING "martian source %u.%u.%u.%u from "
1706 "%u.%u.%u.%u, on dev %s\n",
1707 NIPQUAD(daddr), NIPQUAD(saddr), dev->name);
1708 if (dev->hard_header_len && skb_mac_header_was_set(skb)) {
1710 const unsigned char *p = skb_mac_header(skb);
1711 printk(KERN_WARNING "ll header: ");
1712 for (i = 0; i < dev->hard_header_len; i++, p++) {
1714 if (i < (dev->hard_header_len - 1))
1723 static inline int __mkroute_input(struct sk_buff *skb,
1724 struct fib_result* res,
1725 struct in_device *in_dev,
1726 __be32 daddr, __be32 saddr, u32 tos,
1727 struct rtable **result)
1732 struct in_device *out_dev;
1737 /* get a working reference to the output device */
1738 out_dev = in_dev_get(FIB_RES_DEV(*res));
1739 if (out_dev == NULL) {
1740 if (net_ratelimit())
1741 printk(KERN_CRIT "Bug in ip_route_input" \
1742 "_slow(). Please, report\n");
1747 err = fib_validate_source(saddr, daddr, tos, FIB_RES_OIF(*res),
1748 in_dev->dev, &spec_dst, &itag);
1750 ip_handle_martian_source(in_dev->dev, in_dev, skb, daddr,
1758 flags |= RTCF_DIRECTSRC;
1760 if (out_dev == in_dev && err && !(flags & (RTCF_NAT | RTCF_MASQ)) &&
1761 (IN_DEV_SHARED_MEDIA(out_dev) ||
1762 inet_addr_onlink(out_dev, saddr, FIB_RES_GW(*res))))
1763 flags |= RTCF_DOREDIRECT;
1765 if (skb->protocol != htons(ETH_P_IP)) {
		/* Not IP (i.e. ARP). Do not create a route if it is
		 * invalid for proxy arp.  DNAT routes are always valid.
		 */
1769 if (out_dev == in_dev && !(flags & RTCF_DNAT)) {
1776 rth = dst_alloc(&ipv4_dst_ops);
1782 atomic_set(&rth->u.dst.__refcnt, 1);
1783 rth->u.dst.flags= DST_HOST;
1784 if (IN_DEV_CONF_GET(in_dev, NOPOLICY))
1785 rth->u.dst.flags |= DST_NOPOLICY;
1786 if (IN_DEV_CONF_GET(out_dev, NOXFRM))
1787 rth->u.dst.flags |= DST_NOXFRM;
1788 rth->fl.fl4_dst = daddr;
1789 rth->rt_dst = daddr;
1790 rth->fl.fl4_tos = tos;
1791 rth->fl.mark = skb->mark;
1792 rth->fl.fl4_src = saddr;
1793 rth->rt_src = saddr;
1794 rth->rt_gateway = daddr;
1796 rth->fl.iif = in_dev->dev->ifindex;
1797 rth->u.dst.dev = (out_dev)->dev;
1798 dev_hold(rth->u.dst.dev);
1799 rth->idev = in_dev_get(rth->u.dst.dev);
1801 rth->rt_spec_dst= spec_dst;
1803 rth->u.dst.input = ip_forward;
1804 rth->u.dst.output = ip_output;
1806 rt_set_nexthop(rth, res, itag);
1808 rth->rt_flags = flags;
1813 /* release the working reference to the output device */
1814 in_dev_put(out_dev);
1818 static inline int ip_mkroute_input(struct sk_buff *skb,
1819 struct fib_result* res,
1820 const struct flowi *fl,
1821 struct in_device *in_dev,
1822 __be32 daddr, __be32 saddr, u32 tos)
1824 struct rtable* rth = NULL;
1828 #ifdef CONFIG_IP_ROUTE_MULTIPATH
1829 if (res->fi && res->fi->fib_nhs > 1 && fl->oif == 0)
1830 fib_select_multipath(fl, res);
1833 /* create a routing cache entry */
1834 err = __mkroute_input(skb, res, in_dev, daddr, saddr, tos, &rth);
1838 /* put it into the cache */
1839 hash = rt_hash(daddr, saddr, fl->iif);
1840 return rt_intern_hash(hash, rth, (struct rtable**)&skb->dst);
/*
 *	NOTE. We drop all packets that have a local source
 *	address, because every properly looped-back packet
 *	must have the correct destination already attached by the output routine.
 *
 *	Such an approach solves two big problems:
 *	1. Non-simplex devices are handled properly.
 *	2. IP spoofing attempts are filtered with a 100% guarantee.
 */
1853 static int ip_route_input_slow(struct sk_buff *skb, __be32 daddr, __be32 saddr,
1854 u8 tos, struct net_device *dev)
1856 struct fib_result res;
1857 struct in_device *in_dev = in_dev_get(dev);
1858 struct flowi fl = { .nl_u = { .ip4_u =
1862 .scope = RT_SCOPE_UNIVERSE,
1865 .iif = dev->ifindex };
1868 struct rtable * rth;
1874 /* IP on this device is disabled. */
	/* Check for the most weird martians, which cannot be detected
	   by fib_lookup.
	 */
1883 if (MULTICAST(saddr) || BADCLASS(saddr) || LOOPBACK(saddr))
1884 goto martian_source;
1886 if (daddr == htonl(0xFFFFFFFF) || (saddr == 0 && daddr == 0))
	/* Accept zero addresses only to limited broadcast;
	 * I even do not know whether to fix it or not. Waiting for complaints :-)
	 */
1893 goto martian_source;
1895 if (BADCLASS(daddr) || ZERONET(daddr) || LOOPBACK(daddr))
1896 goto martian_destination;
	/*
	 *	Now we are ready to route the packet.
	 */
1901 if ((err = fib_lookup(&fl, &res)) != 0) {
1902 if (!IN_DEV_FORWARD(in_dev))
1908 RT_CACHE_STAT_INC(in_slow_tot);
1910 if (res.type == RTN_BROADCAST)
1913 if (res.type == RTN_LOCAL) {
1915 result = fib_validate_source(saddr, daddr, tos,
1916 init_net.loopback_dev->ifindex,
1917 dev, &spec_dst, &itag);
1919 goto martian_source;
1921 flags |= RTCF_DIRECTSRC;
1926 if (!IN_DEV_FORWARD(in_dev))
1928 if (res.type != RTN_UNICAST)
1929 goto martian_destination;
1931 err = ip_mkroute_input(skb, &res, &fl, in_dev, daddr, saddr, tos);
1939 if (skb->protocol != htons(ETH_P_IP))
1943 spec_dst = inet_select_addr(dev, 0, RT_SCOPE_LINK);
1945 err = fib_validate_source(saddr, 0, tos, 0, dev, &spec_dst,
1948 goto martian_source;
1950 flags |= RTCF_DIRECTSRC;
1952 flags |= RTCF_BROADCAST;
1953 res.type = RTN_BROADCAST;
1954 RT_CACHE_STAT_INC(in_brd);
1957 rth = dst_alloc(&ipv4_dst_ops);
1961 rth->u.dst.output= ip_rt_bug;
1963 atomic_set(&rth->u.dst.__refcnt, 1);
1964 rth->u.dst.flags= DST_HOST;
1965 if (IN_DEV_CONF_GET(in_dev, NOPOLICY))
1966 rth->u.dst.flags |= DST_NOPOLICY;
1967 rth->fl.fl4_dst = daddr;
1968 rth->rt_dst = daddr;
1969 rth->fl.fl4_tos = tos;
1970 rth->fl.mark = skb->mark;
1971 rth->fl.fl4_src = saddr;
1972 rth->rt_src = saddr;
1973 #ifdef CONFIG_NET_CLS_ROUTE
1974 rth->u.dst.tclassid = itag;
1977 rth->fl.iif = dev->ifindex;
1978 rth->u.dst.dev = init_net.loopback_dev;
1979 dev_hold(rth->u.dst.dev);
1980 rth->idev = in_dev_get(rth->u.dst.dev);
1981 rth->rt_gateway = daddr;
1982 rth->rt_spec_dst= spec_dst;
1983 rth->u.dst.input= ip_local_deliver;
1984 rth->rt_flags = flags|RTCF_LOCAL;
1985 if (res.type == RTN_UNREACHABLE) {
1986 rth->u.dst.input= ip_error;
1987 rth->u.dst.error= -err;
1988 rth->rt_flags &= ~RTCF_LOCAL;
1990 rth->rt_type = res.type;
1991 hash = rt_hash(daddr, saddr, fl.iif);
1992 err = rt_intern_hash(hash, rth, (struct rtable**)&skb->dst);
1996 RT_CACHE_STAT_INC(in_no_route);
1997 spec_dst = inet_select_addr(dev, 0, RT_SCOPE_UNIVERSE);
1998 res.type = RTN_UNREACHABLE;
2004 * Do not cache martian addresses: they should be logged (RFC1812)
2006 martian_destination:
2007 RT_CACHE_STAT_INC(in_martian_dst);
2008 #ifdef CONFIG_IP_ROUTE_VERBOSE
2009 if (IN_DEV_LOG_MARTIANS(in_dev) && net_ratelimit())
2010 printk(KERN_WARNING "martian destination %u.%u.%u.%u from "
2011 "%u.%u.%u.%u, dev %s\n",
2012 NIPQUAD(daddr), NIPQUAD(saddr), dev->name);
2016 err = -EHOSTUNREACH;
2028 ip_handle_martian_source(dev, in_dev, skb, daddr, saddr);
2032 int ip_route_input(struct sk_buff *skb, __be32 daddr, __be32 saddr,
2033 u8 tos, struct net_device *dev)
2035 struct rtable * rth;
2037 int iif = dev->ifindex;
2039 tos &= IPTOS_RT_MASK;
2040 hash = rt_hash(daddr, saddr, iif);
2043 for (rth = rcu_dereference(rt_hash_table[hash].chain); rth;
2044 rth = rcu_dereference(rth->u.dst.rt_next)) {
2045 if (rth->fl.fl4_dst == daddr &&
2046 rth->fl.fl4_src == saddr &&
2047 rth->fl.iif == iif &&
2049 rth->fl.mark == skb->mark &&
2050 rth->fl.fl4_tos == tos) {
2051 dst_use(&rth->u.dst, jiffies);
2052 RT_CACHE_STAT_INC(in_hit);
2054 skb->dst = (struct dst_entry*)rth;
2057 RT_CACHE_STAT_INC(in_hlist_search);
	/* Multicast recognition logic is moved from the route cache to here.
	   The problem was that too many Ethernet cards have broken/missing
	   hardware multicast filters :-( As a result, a host on a multicast
	   network acquires a lot of useless route cache entries, e.g. from
	   SDR messages from all over the world. Now we try to get rid of them.
	   Really, provided the software IP multicast filter is organized
	   reasonably (at least, hashed), it does not result in a slowdown
	   compared with route cache reject entries.
	   Note that multicast routers are not affected, because a route
	   cache entry is created eventually.
	 */
2072 if (MULTICAST(daddr)) {
2073 struct in_device *in_dev;
2076 if ((in_dev = __in_dev_get_rcu(dev)) != NULL) {
2077 int our = ip_check_mc(in_dev, daddr, saddr,
2078 ip_hdr(skb)->protocol);
2080 #ifdef CONFIG_IP_MROUTE
2081 || (!LOCAL_MCAST(daddr) && IN_DEV_MFORWARD(in_dev))
2085 return ip_route_input_mc(skb, daddr, saddr,
2092 return ip_route_input_slow(skb, daddr, saddr, tos, dev);
2095 static inline int __mkroute_output(struct rtable **result,
2096 struct fib_result* res,
2097 const struct flowi *fl,
2098 const struct flowi *oldflp,
2099 struct net_device *dev_out,
2103 struct in_device *in_dev;
2104 u32 tos = RT_FL_TOS(oldflp);
2107 if (LOOPBACK(fl->fl4_src) && !(dev_out->flags&IFF_LOOPBACK))
2110 if (fl->fl4_dst == htonl(0xFFFFFFFF))
2111 res->type = RTN_BROADCAST;
2112 else if (MULTICAST(fl->fl4_dst))
2113 res->type = RTN_MULTICAST;
2114 else if (BADCLASS(fl->fl4_dst) || ZERONET(fl->fl4_dst))
2117 if (dev_out->flags & IFF_LOOPBACK)
2118 flags |= RTCF_LOCAL;
2120 /* get work reference to inet device */
2121 in_dev = in_dev_get(dev_out);
2125 if (res->type == RTN_BROADCAST) {
2126 flags |= RTCF_BROADCAST | RTCF_LOCAL;
2128 fib_info_put(res->fi);
2131 } else if (res->type == RTN_MULTICAST) {
2132 flags |= RTCF_MULTICAST|RTCF_LOCAL;
2133 if (!ip_check_mc(in_dev, oldflp->fl4_dst, oldflp->fl4_src,
2135 flags &= ~RTCF_LOCAL;
			/* If a multicast route does not exist, use the
			   default one, but do not gateway in this case.
			 */
2140 if (res->fi && res->prefixlen < 4) {
2141 fib_info_put(res->fi);
2147 rth = dst_alloc(&ipv4_dst_ops);
2153 atomic_set(&rth->u.dst.__refcnt, 1);
2154 rth->u.dst.flags= DST_HOST;
2155 if (IN_DEV_CONF_GET(in_dev, NOXFRM))
2156 rth->u.dst.flags |= DST_NOXFRM;
2157 if (IN_DEV_CONF_GET(in_dev, NOPOLICY))
2158 rth->u.dst.flags |= DST_NOPOLICY;
2160 rth->fl.fl4_dst = oldflp->fl4_dst;
2161 rth->fl.fl4_tos = tos;
2162 rth->fl.fl4_src = oldflp->fl4_src;
2163 rth->fl.oif = oldflp->oif;
2164 rth->fl.mark = oldflp->mark;
2165 rth->rt_dst = fl->fl4_dst;
2166 rth->rt_src = fl->fl4_src;
2167 rth->rt_iif = oldflp->oif ? : dev_out->ifindex;
	/* get references to the devices that are to be held by the routing
	   cache entry */
2170 rth->u.dst.dev = dev_out;
2172 rth->idev = in_dev_get(dev_out);
2173 rth->rt_gateway = fl->fl4_dst;
2174 rth->rt_spec_dst= fl->fl4_src;
2176 rth->u.dst.output=ip_output;
2178 RT_CACHE_STAT_INC(out_slow_tot);
2180 if (flags & RTCF_LOCAL) {
2181 rth->u.dst.input = ip_local_deliver;
2182 rth->rt_spec_dst = fl->fl4_dst;
2184 if (flags & (RTCF_BROADCAST | RTCF_MULTICAST)) {
2185 rth->rt_spec_dst = fl->fl4_src;
2186 if (flags & RTCF_LOCAL &&
2187 !(dev_out->flags & IFF_LOOPBACK)) {
2188 rth->u.dst.output = ip_mc_output;
2189 RT_CACHE_STAT_INC(out_slow_mc);
2191 #ifdef CONFIG_IP_MROUTE
2192 if (res->type == RTN_MULTICAST) {
2193 if (IN_DEV_MFORWARD(in_dev) &&
2194 !LOCAL_MCAST(oldflp->fl4_dst)) {
2195 rth->u.dst.input = ip_mr_input;
2196 rth->u.dst.output = ip_mc_output;
2202 rt_set_nexthop(rth, res, 0);
2204 rth->rt_flags = flags;
2208 /* release work reference to inet device */
2214 static inline int ip_mkroute_output(struct rtable **rp,
2215 struct fib_result* res,
2216 const struct flowi *fl,
2217 const struct flowi *oldflp,
2218 struct net_device *dev_out,
2221 struct rtable *rth = NULL;
2222 int err = __mkroute_output(&rth, res, fl, oldflp, dev_out, flags);
2225 hash = rt_hash(oldflp->fl4_dst, oldflp->fl4_src, oldflp->oif);
2226 err = rt_intern_hash(hash, rth, rp);
2233 * Major route resolver routine.
2236 static int ip_route_output_slow(struct rtable **rp, const struct flowi *oldflp)
2238 u32 tos = RT_FL_TOS(oldflp);
2239 struct flowi fl = { .nl_u = { .ip4_u =
2240 { .daddr = oldflp->fl4_dst,
2241 .saddr = oldflp->fl4_src,
2242 .tos = tos & IPTOS_RT_MASK,
2243 .scope = ((tos & RTO_ONLINK) ?
2247 .mark = oldflp->mark,
2248 .iif = init_net.loopback_dev->ifindex,
2249 .oif = oldflp->oif };
2250 struct fib_result res;
2252 struct net_device *dev_out = NULL;
2258 #ifdef CONFIG_IP_MULTIPLE_TABLES
2262 if (oldflp->fl4_src) {
2264 if (MULTICAST(oldflp->fl4_src) ||
2265 BADCLASS(oldflp->fl4_src) ||
2266 ZERONET(oldflp->fl4_src))
2269 /* It is equivalent to inet_addr_type(saddr) == RTN_LOCAL */
2270 dev_out = ip_dev_find(oldflp->fl4_src);
2271 if (dev_out == NULL)
		/* I removed the check for oif == dev_out->oif here.
		   It was wrong for two reasons:
		   1. ip_dev_find(saddr) can return the wrong iface, if saddr
		      is assigned to multiple interfaces.
		   2. Moreover, we are allowed to send packets with saddr
		      of another iface. --ANK
		 */
2282 if (oldflp->oif == 0
2283 && (MULTICAST(oldflp->fl4_dst) || oldflp->fl4_dst == htonl(0xFFFFFFFF))) {
			/* Special hack: user can direct multicasts
			   and limited broadcast via the necessary interface
			   without fiddling with IP_MULTICAST_IF or IP_PKTINFO.
			   This hack is not just for fun, it allows
			   vic, vat and friends to work.
			   They bind a socket to loopback, set ttl to zero
			   and expect that it will work.
			   From the viewpoint of the routing cache they are broken,
			   because we are not allowed to build a multicast path
			   with a loopback source addr (look: the routing cache
			   cannot know that ttl is zero, so the packet
			   will not leave this host and the route is valid).
			   Luckily, this hack is a good workaround.
			 */
2299 fl.oif = dev_out->ifindex;
2309 dev_out = dev_get_by_index(&init_net, oldflp->oif);
2311 if (dev_out == NULL)
2314 /* RACE: Check return value of inet_select_addr instead. */
2315 if (__in_dev_get_rtnl(dev_out) == NULL) {
2317 goto out; /* Wrong error code */
2320 if (LOCAL_MCAST(oldflp->fl4_dst) || oldflp->fl4_dst == htonl(0xFFFFFFFF)) {
2322 fl.fl4_src = inet_select_addr(dev_out, 0,
2327 if (MULTICAST(oldflp->fl4_dst))
2328 fl.fl4_src = inet_select_addr(dev_out, 0,
2330 else if (!oldflp->fl4_dst)
2331 fl.fl4_src = inet_select_addr(dev_out, 0,
2337 fl.fl4_dst = fl.fl4_src;
2339 fl.fl4_dst = fl.fl4_src = htonl(INADDR_LOOPBACK);
2342 dev_out = init_net.loopback_dev;
2344 fl.oif = init_net.loopback_dev->ifindex;
2345 res.type = RTN_LOCAL;
2346 flags |= RTCF_LOCAL;
2350 if (fib_lookup(&fl, &res)) {
			/* Apparently, routing tables are wrong. Assume
			   that the destination is on link.

			   Because we are allowed to send to an iface
			   even if it has NO routes and NO assigned
			   addresses. When oif is specified, routing
			   tables are looked up with only one purpose:
			   to catch if the destination is gatewayed, rather than
			   direct. Moreover, if MSG_DONTROUTE is set,
			   we send a packet, ignoring both routing tables
			   and ifaddr state. --ANK

			   We could make it even if oif is unknown,
			   likely IPv6, but we do not.
			 */
2371 if (fl.fl4_src == 0)
2372 fl.fl4_src = inet_select_addr(dev_out, 0,
2374 res.type = RTN_UNICAST;
2384 if (res.type == RTN_LOCAL) {
2386 fl.fl4_src = fl.fl4_dst;
2389 dev_out = init_net.loopback_dev;
2391 fl.oif = dev_out->ifindex;
2393 fib_info_put(res.fi);
2395 flags |= RTCF_LOCAL;
2399 #ifdef CONFIG_IP_ROUTE_MULTIPATH
2400 if (res.fi->fib_nhs > 1 && fl.oif == 0)
2401 fib_select_multipath(&fl, &res);
2404 if (!res.prefixlen && res.type == RTN_UNICAST && !fl.oif)
2405 fib_select_default(&fl, &res);
2408 fl.fl4_src = FIB_RES_PREFSRC(res);
2412 dev_out = FIB_RES_DEV(res);
2414 fl.oif = dev_out->ifindex;
2418 err = ip_mkroute_output(rp, &res, &fl, oldflp, dev_out, flags);
2428 int __ip_route_output_key(struct rtable **rp, const struct flowi *flp)
2433 hash = rt_hash(flp->fl4_dst, flp->fl4_src, flp->oif);
2436 for (rth = rcu_dereference(rt_hash_table[hash].chain); rth;
2437 rth = rcu_dereference(rth->u.dst.rt_next)) {
2438 if (rth->fl.fl4_dst == flp->fl4_dst &&
2439 rth->fl.fl4_src == flp->fl4_src &&
2441 rth->fl.oif == flp->oif &&
2442 rth->fl.mark == flp->mark &&
2443 !((rth->fl.fl4_tos ^ flp->fl4_tos) &
2444 (IPTOS_RT_MASK | RTO_ONLINK))) {
2445 dst_use(&rth->u.dst, jiffies);
2446 RT_CACHE_STAT_INC(out_hit);
2447 rcu_read_unlock_bh();
2451 RT_CACHE_STAT_INC(out_hlist_search);
2453 rcu_read_unlock_bh();
2455 return ip_route_output_slow(rp, flp);
2458 EXPORT_SYMBOL_GPL(__ip_route_output_key);
2460 static void ipv4_rt_blackhole_update_pmtu(struct dst_entry *dst, u32 mtu)
2464 static struct dst_ops ipv4_dst_blackhole_ops = {
2466 .protocol = __constant_htons(ETH_P_IP),
2467 .destroy = ipv4_dst_destroy,
2468 .check = ipv4_dst_check,
2469 .update_pmtu = ipv4_rt_blackhole_update_pmtu,
2470 .entry_size = sizeof(struct rtable),
2474 static int ipv4_dst_blackhole(struct rtable **rp, struct flowi *flp, struct sock *sk)
2476 struct rtable *ort = *rp;
2477 struct rtable *rt = (struct rtable *)
2478 dst_alloc(&ipv4_dst_blackhole_ops);
2481 struct dst_entry *new = &rt->u.dst;
2483 atomic_set(&new->__refcnt, 1);
2485 new->input = dst_discard;
2486 new->output = dst_discard;
2487 memcpy(new->metrics, ort->u.dst.metrics, RTAX_MAX*sizeof(u32));
2489 new->dev = ort->u.dst.dev;
2495 rt->idev = ort->idev;
2497 in_dev_hold(rt->idev);
2498 rt->rt_flags = ort->rt_flags;
2499 rt->rt_type = ort->rt_type;
2500 rt->rt_dst = ort->rt_dst;
2501 rt->rt_src = ort->rt_src;
2502 rt->rt_iif = ort->rt_iif;
2503 rt->rt_gateway = ort->rt_gateway;
2504 rt->rt_spec_dst = ort->rt_spec_dst;
2505 rt->peer = ort->peer;
2507 atomic_inc(&rt->peer->refcnt);
2512 dst_release(&(*rp)->u.dst);
2514 return (rt ? 0 : -ENOMEM);
2517 int ip_route_output_flow(struct rtable **rp, struct flowi *flp, struct sock *sk, int flags)
2521 if ((err = __ip_route_output_key(rp, flp)) != 0)
2526 flp->fl4_src = (*rp)->rt_src;
2528 flp->fl4_dst = (*rp)->rt_dst;
2529 err = __xfrm_lookup((struct dst_entry **)rp, flp, sk, flags);
2530 if (err == -EREMOTE)
2531 err = ipv4_dst_blackhole(rp, flp, sk);
2539 EXPORT_SYMBOL_GPL(ip_route_output_flow);
2541 int ip_route_output_key(struct rtable **rp, struct flowi *flp)
2543 return ip_route_output_flow(rp, flp, NULL, 0);
static int rt_fill_info(struct sk_buff *skb, u32 pid, u32 seq, int event,
			int nowait, unsigned int flags)
{
	struct rtable *rt = (struct rtable*)skb->dst;
	struct rtmsg *r;
	struct nlmsghdr *nlh;
	long expires;
	u32 id = 0, ts = 0, tsage = 0, error;

	nlh = nlmsg_put(skb, pid, seq, event, sizeof(*r), flags);
	if (nlh == NULL)
		return -EMSGSIZE;

	r = nlmsg_data(nlh);
	r->rtm_family = AF_INET;
	r->rtm_dst_len = 32;
	r->rtm_src_len = 0;
	r->rtm_tos = rt->fl.fl4_tos;
	r->rtm_table = RT_TABLE_MAIN;
	NLA_PUT_U32(skb, RTA_TABLE, RT_TABLE_MAIN);
	r->rtm_type = rt->rt_type;
	r->rtm_scope = RT_SCOPE_UNIVERSE;
	r->rtm_protocol = RTPROT_UNSPEC;
	r->rtm_flags = (rt->rt_flags & ~0xFFFF) | RTM_F_CLONED;
	if (rt->rt_flags & RTCF_NOTIFY)
		r->rtm_flags |= RTM_F_NOTIFY;

	NLA_PUT_BE32(skb, RTA_DST, rt->rt_dst);

	if (rt->fl.fl4_src) {
		r->rtm_src_len = 32;
		NLA_PUT_BE32(skb, RTA_SRC, rt->fl.fl4_src);
	}
	if (rt->u.dst.dev)
		NLA_PUT_U32(skb, RTA_OIF, rt->u.dst.dev->ifindex);
#ifdef CONFIG_NET_CLS_ROUTE
	if (rt->u.dst.tclassid)
		NLA_PUT_U32(skb, RTA_FLOW, rt->u.dst.tclassid);
#endif
	if (rt->fl.iif)
		NLA_PUT_BE32(skb, RTA_PREFSRC, rt->rt_spec_dst);
	else if (rt->rt_src != rt->fl.fl4_src)
		NLA_PUT_BE32(skb, RTA_PREFSRC, rt->rt_src);

	if (rt->rt_dst != rt->rt_gateway)
		NLA_PUT_BE32(skb, RTA_GATEWAY, rt->rt_gateway);

	if (rtnetlink_put_metrics(skb, rt->u.dst.metrics) < 0)
		goto nla_put_failure;

	error = rt->u.dst.error;
	expires = rt->u.dst.expires ? rt->u.dst.expires - jiffies : 0;
	if (rt->peer) {
		id = rt->peer->ip_id_count;
		if (rt->peer->tcp_ts_stamp) {
			ts = rt->peer->tcp_ts;
			tsage = get_seconds() - rt->peer->tcp_ts_stamp;
		}
	}

	if (rt->fl.iif) {
#ifdef CONFIG_IP_MROUTE
		__be32 dst = rt->rt_dst;

		if (MULTICAST(dst) && !LOCAL_MCAST(dst) &&
		    IPV4_DEVCONF_ALL(MC_FORWARDING)) {
			int err = ipmr_get_route(skb, r, nowait);
			if (err <= 0) {
				if (!nowait) {
					if (err == 0)
						return 0;
					goto nla_put_failure;
				} else {
					if (err == -EMSGSIZE)
						goto nla_put_failure;
					error = err;
				}
			}
		} else
#endif
			NLA_PUT_U32(skb, RTA_IIF, rt->fl.iif);
	}

	if (rtnl_put_cacheinfo(skb, &rt->u.dst, id, ts, tsage,
			       expires, error) < 0)
		goto nla_put_failure;

	return nlmsg_end(skb, nlh);

nla_put_failure:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}
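/*
 * RTM_GETROUTE handler (what "ip route get" uses): build a throwaway
 * skb, resolve the requested flow through ip_route_input() or
 * ip_route_output_key(), and answer with a single RTM_NEWROUTE message
 * formatted by rt_fill_info() above.
 */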
static int inet_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr* nlh, void *arg)
{
	struct net *net = in_skb->sk->sk_net;
	struct rtmsg *rtm;
	struct nlattr *tb[RTA_MAX+1];
	struct rtable *rt = NULL;
	__be32 dst = 0;
	__be32 src = 0;
	u32 iif;
	int err;
	struct sk_buff *skb;

	if (net != &init_net)
		return -EINVAL;

	err = nlmsg_parse(nlh, sizeof(*rtm), tb, RTA_MAX, rtm_ipv4_policy);
	if (err < 0)
		goto errout;

	rtm = nlmsg_data(nlh);

	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
	if (skb == NULL) {
		err = -ENOBUFS;
		goto errout;
	}

	/* Reserve room for dummy headers, this skb can pass
	   through a good chunk of the routing engine.
	 */
	skb_reset_mac_header(skb);
	skb_reset_network_header(skb);

	/* Bugfix: need to give ip_route_input enough of an IP header to not gag. */
	ip_hdr(skb)->protocol = IPPROTO_ICMP;
	skb_reserve(skb, MAX_HEADER + sizeof(struct iphdr));

	src = tb[RTA_SRC] ? nla_get_be32(tb[RTA_SRC]) : 0;
	dst = tb[RTA_DST] ? nla_get_be32(tb[RTA_DST]) : 0;
	iif = tb[RTA_IIF] ? nla_get_u32(tb[RTA_IIF]) : 0;

	if (iif) {
		struct net_device *dev;

		dev = __dev_get_by_index(&init_net, iif);
		if (dev == NULL) {
			err = -ENODEV;
			goto errout_free;
		}

		skb->protocol = htons(ETH_P_IP);
		skb->dev = dev;
		local_bh_disable();
		err = ip_route_input(skb, dst, src, rtm->rtm_tos, dev);
		local_bh_enable();

		rt = (struct rtable*) skb->dst;
		if (err == 0 && rt->u.dst.error)
			err = -rt->u.dst.error;
	} else {
		struct flowi fl = {
			.nl_u = {
				.ip4_u = {
					.daddr = dst,
					.saddr = src,
					.tos = rtm->rtm_tos,
				},
			},
			.oif = tb[RTA_OIF] ? nla_get_u32(tb[RTA_OIF]) : 0,
		};
		err = ip_route_output_key(&rt, &fl);
	}
	if (err)
		goto errout_free;

	skb->dst = &rt->u.dst;
	if (rtm->rtm_flags & RTM_F_NOTIFY)
		rt->rt_flags |= RTCF_NOTIFY;

	err = rt_fill_info(skb, NETLINK_CB(in_skb).pid, nlh->nlmsg_seq,
			   RTM_NEWROUTE, 0, 0);
	if (err <= 0)
		goto errout_free;

	err = rtnl_unicast(skb, &init_net, NETLINK_CB(in_skb).pid);
errout:
	return err;

errout_free:
	kfree_skb(skb);
	goto errout;
}
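/*
 * Dump the current contents of the route hash cache as a multi-part
 * netlink message; cb->args[] carries the bucket and chain position
 * between calls so a large cache can be dumped across several skbs.
 */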
int ip_rt_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct rtable *rt;
	int h, s_h;
	int idx, s_idx;

	s_h = cb->args[0];
	s_idx = idx = cb->args[1];
	for (h = s_h; h <= rt_hash_mask; h++) {
		rcu_read_lock_bh();
		for (rt = rcu_dereference(rt_hash_table[h].chain), idx = 0; rt;
		     rt = rcu_dereference(rt->u.dst.rt_next), idx++) {
			if (idx < s_idx)
				continue;
			skb->dst = dst_clone(&rt->u.dst);
			if (rt_fill_info(skb, NETLINK_CB(cb->skb).pid,
					 cb->nlh->nlmsg_seq, RTM_NEWROUTE,
					 1, NLM_F_MULTI) <= 0) {
				dst_release(xchg(&skb->dst, NULL));
				rcu_read_unlock_bh();
				goto done;
			}
			dst_release(xchg(&skb->dst, NULL));
		}
		rcu_read_unlock_bh();
	}

done:
	cb->args[0] = h;
	cb->args[1] = idx;
	return skb->len;
}
void ip_rt_multicast_event(struct in_device *in_dev)
{
	rt_cache_flush(0);
}
#ifdef CONFIG_SYSCTL
static int flush_delay;

static int ipv4_sysctl_rtcache_flush(ctl_table *ctl, int write,
					struct file *filp, void __user *buffer,
					size_t *lenp, loff_t *ppos)
{
	if (write) {
		proc_dointvec(ctl, write, filp, buffer, lenp, ppos);
		rt_cache_flush(flush_delay);
		return 0;
	}

	return -EINVAL;
}

static int ipv4_sysctl_rtcache_flush_strategy(ctl_table *table,
						int __user *name,
						int nlen,
						void __user *oldval,
						size_t __user *oldlenp,
						void __user *newval,
						size_t newlen)
{
	int delay;

	if (newlen != sizeof(int))
		return -EINVAL;
	if (get_user(delay, (int __user *)newval))
		return -EFAULT;
	rt_cache_flush(delay);
	return 0;
}
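/*
 * The two handlers above back /proc/sys/net/ipv4/route/flush: writing
 * an integer (the delay handed to rt_cache_flush()) forces a route
 * cache flush, e.g. "echo -1 > /proc/sys/net/ipv4/route/flush", while
 * reads are rejected with -EINVAL.
 */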
ctl_table ipv4_route_table[] = {
	{
		.ctl_name	= NET_IPV4_ROUTE_FLUSH,
		.procname	= "flush",
		.data		= &flush_delay,
		.maxlen		= sizeof(int),
		.mode		= 0200,
		.proc_handler	= &ipv4_sysctl_rtcache_flush,
		.strategy	= &ipv4_sysctl_rtcache_flush_strategy,
	},
	{
		.ctl_name	= NET_IPV4_ROUTE_MIN_DELAY,
		.procname	= "min_delay",
		.data		= &ip_rt_min_delay,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= &proc_dointvec_jiffies,
		.strategy	= &sysctl_jiffies,
	},
	{
		.ctl_name	= NET_IPV4_ROUTE_MAX_DELAY,
		.procname	= "max_delay",
		.data		= &ip_rt_max_delay,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= &proc_dointvec_jiffies,
		.strategy	= &sysctl_jiffies,
	},
	{
		.ctl_name	= NET_IPV4_ROUTE_GC_THRESH,
		.procname	= "gc_thresh",
		.data		= &ipv4_dst_ops.gc_thresh,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= &proc_dointvec,
	},
	{
		.ctl_name	= NET_IPV4_ROUTE_MAX_SIZE,
		.procname	= "max_size",
		.data		= &ip_rt_max_size,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= &proc_dointvec,
	},
	{
		/* Deprecated. Use gc_min_interval_ms */
		.ctl_name	= NET_IPV4_ROUTE_GC_MIN_INTERVAL,
		.procname	= "gc_min_interval",
		.data		= &ip_rt_gc_min_interval,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= &proc_dointvec_jiffies,
		.strategy	= &sysctl_jiffies,
	},
	{
		.ctl_name	= NET_IPV4_ROUTE_GC_MIN_INTERVAL_MS,
		.procname	= "gc_min_interval_ms",
		.data		= &ip_rt_gc_min_interval,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= &proc_dointvec_ms_jiffies,
		.strategy	= &sysctl_ms_jiffies,
	},
	{
		.ctl_name	= NET_IPV4_ROUTE_GC_TIMEOUT,
		.procname	= "gc_timeout",
		.data		= &ip_rt_gc_timeout,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= &proc_dointvec_jiffies,
		.strategy	= &sysctl_jiffies,
	},
	{
		.ctl_name	= NET_IPV4_ROUTE_GC_INTERVAL,
		.procname	= "gc_interval",
		.data		= &ip_rt_gc_interval,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= &proc_dointvec_jiffies,
		.strategy	= &sysctl_jiffies,
	},
	{
		.ctl_name	= NET_IPV4_ROUTE_REDIRECT_LOAD,
		.procname	= "redirect_load",
		.data		= &ip_rt_redirect_load,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= &proc_dointvec,
	},
	{
		.ctl_name	= NET_IPV4_ROUTE_REDIRECT_NUMBER,
		.procname	= "redirect_number",
		.data		= &ip_rt_redirect_number,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= &proc_dointvec,
	},
	{
		.ctl_name	= NET_IPV4_ROUTE_REDIRECT_SILENCE,
		.procname	= "redirect_silence",
		.data		= &ip_rt_redirect_silence,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= &proc_dointvec,
	},
	{
		.ctl_name	= NET_IPV4_ROUTE_ERROR_COST,
		.procname	= "error_cost",
		.data		= &ip_rt_error_cost,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= &proc_dointvec,
	},
	{
		.ctl_name	= NET_IPV4_ROUTE_ERROR_BURST,
		.procname	= "error_burst",
		.data		= &ip_rt_error_burst,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= &proc_dointvec,
	},
	{
		.ctl_name	= NET_IPV4_ROUTE_GC_ELASTICITY,
		.procname	= "gc_elasticity",
		.data		= &ip_rt_gc_elasticity,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= &proc_dointvec,
	},
	{
		.ctl_name	= NET_IPV4_ROUTE_MTU_EXPIRES,
		.procname	= "mtu_expires",
		.data		= &ip_rt_mtu_expires,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= &proc_dointvec_jiffies,
		.strategy	= &sysctl_jiffies,
	},
	{
		.ctl_name	= NET_IPV4_ROUTE_MIN_PMTU,
		.procname	= "min_pmtu",
		.data		= &ip_rt_min_pmtu,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= &proc_dointvec,
	},
	{
		.ctl_name	= NET_IPV4_ROUTE_MIN_ADVMSS,
		.procname	= "min_adv_mss",
		.data		= &ip_rt_min_advmss,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= &proc_dointvec,
	},
	{
		.ctl_name	= NET_IPV4_ROUTE_SECRET_INTERVAL,
		.procname	= "secret_interval",
		.data		= &ip_rt_secret_interval,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= &proc_dointvec_jiffies,
		.strategy	= &sysctl_jiffies,
	},
	{ .ctl_name = 0 }
};
#endif
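/*
 * Note: the gc_thresh and max_size defaults declared with the other
 * tunables are overridden in ip_rt_init() below, which scales them to
 * the size of the route hash table chosen at boot.
 */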
#ifdef CONFIG_NET_CLS_ROUTE
struct ip_rt_acct *ip_rt_acct __read_mostly;
#endif /* CONFIG_NET_CLS_ROUTE */

static __initdata unsigned long rhash_entries;
static int __init set_rhash_entries(char *str)
{
	if (!str)
		return 0;
	rhash_entries = simple_strtoul(str, &str, 0);
	return 1;
}
__setup("rhash_entries=", set_rhash_entries);
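/*
 * The "rhash_entries=N" handler above lets the kernel command line
 * override the automatically sized route cache hash table; the value
 * is passed to alloc_large_system_hash() in ip_rt_init() below.
 */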
int __init ip_rt_init(void)
{
	int rc = 0;

	rt_hash_rnd = (int) ((num_physpages ^ (num_physpages>>8)) ^
			     (jiffies ^ (jiffies >> 7)));

#ifdef CONFIG_NET_CLS_ROUTE
	ip_rt_acct = __alloc_percpu(256 * sizeof(struct ip_rt_acct));
	if (!ip_rt_acct)
		panic("IP: failed to allocate ip_rt_acct\n");
#endif

	ipv4_dst_ops.kmem_cachep =
		kmem_cache_create("ip_dst_cache", sizeof(struct rtable), 0,
				  SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);

	ipv4_dst_blackhole_ops.kmem_cachep = ipv4_dst_ops.kmem_cachep;

	rt_hash_table = (struct rt_hash_bucket *)
		alloc_large_system_hash("IP route cache",
					sizeof(struct rt_hash_bucket),
					rhash_entries,
					(num_physpages >= 128 * 1024) ?
					15 : 17,
					0,
					&rt_hash_log,
					&rt_hash_mask,
					0);
	memset(rt_hash_table, 0, (rt_hash_mask + 1) * sizeof(struct rt_hash_bucket));
	rt_hash_lock_init();

	ipv4_dst_ops.gc_thresh = (rt_hash_mask + 1);
	ip_rt_max_size = (rt_hash_mask + 1) * 16;

	devinet_init();
	ip_fib_init();

	setup_timer(&rt_flush_timer, rt_run_flush, 0);
	setup_timer(&rt_secret_timer, rt_secret_rebuild, 0);

	/* All the timers started at system startup tend
	   to synchronize. Perturb it a bit.
	 */
	schedule_delayed_work(&expires_work,
			net_random() % ip_rt_gc_interval + ip_rt_gc_interval);

	rt_secret_timer.expires = jiffies + net_random() % ip_rt_secret_interval +
		ip_rt_secret_interval;
	add_timer(&rt_secret_timer);

	if (ip_rt_proc_init(&init_net))
		printk(KERN_ERR "Unable to create route proc files\n");
#ifdef CONFIG_XFRM
	xfrm_init();
	xfrm4_init();
#endif
	rtnl_register(PF_INET, RTM_GETROUTE, inet_rtm_getroute, NULL);

	return rc;
}
EXPORT_SYMBOL(__ip_select_ident);
EXPORT_SYMBOL(ip_route_input);
EXPORT_SYMBOL(ip_route_output_key);