[NET]: Eliminate duplicate copies of dst_discard
[safe/jmp/linux-2.6] net/ipv4/route.c
1 /*
2  * INET         An implementation of the TCP/IP protocol suite for the LINUX
3  *              operating system.  INET is implemented using the  BSD Socket
4  *              interface as the means of communication with the user level.
5  *
6  *              ROUTE - implementation of the IP router.
7  *
8  * Version:     $Id: route.c,v 1.103 2002/01/12 07:44:09 davem Exp $
9  *
10  * Authors:     Ross Biro
11  *              Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
12  *              Alan Cox, <gw4pts@gw4pts.ampr.org>
13  *              Linus Torvalds, <Linus.Torvalds@helsinki.fi>
14  *              Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
15  *
16  * Fixes:
17  *              Alan Cox        :       Verify area fixes.
18  *              Alan Cox        :       cli() protects routing changes
19  *              Rui Oliveira    :       ICMP routing table updates
20  *              (rco@di.uminho.pt)      Routing table insertion and update
21  *              Linus Torvalds  :       Rewrote bits to be sensible
22  *              Alan Cox        :       Added BSD route gw semantics
23  *              Alan Cox        :       Super /proc >4K
24  *              Alan Cox        :       MTU in route table
25  *              Alan Cox        :       MSS actually. Also added the window
26  *                                      clamper.
27  *              Sam Lantinga    :       Fixed route matching in rt_del()
28  *              Alan Cox        :       Routing cache support.
29  *              Alan Cox        :       Removed compatibility cruft.
30  *              Alan Cox        :       RTF_REJECT support.
31  *              Alan Cox        :       TCP irtt support.
32  *              Jonathan Naylor :       Added Metric support.
33  *      Miquel van Smoorenburg  :       BSD API fixes.
34  *      Miquel van Smoorenburg  :       Metrics.
35  *              Alan Cox        :       Use __u32 properly
36  *              Alan Cox        :       Aligned routing errors more closely with BSD
37  *                                      our system is still very different.
38  *              Alan Cox        :       Faster /proc handling
39  *      Alexey Kuznetsov        :       Massive rework to support tree based routing,
40  *                                      routing caches and better behaviour.
41  *
42  *              Olaf Erb        :       irtt wasn't being copied right.
43  *              Bjorn Ekwall    :       Kerneld route support.
44  *              Alan Cox        :       Multicast fixed (I hope)
45  *              Pavel Krauz     :       Limited broadcast fixed
46  *              Mike McLagan    :       Routing by source
47  *      Alexey Kuznetsov        :       End of old history. Split to fib.c and
48  *                                      route.c and rewritten from scratch.
49  *              Andi Kleen      :       Load-limit warning messages.
50  *      Vitaly E. Lavrov        :       Transparent proxy revived after year coma.
51  *      Vitaly E. Lavrov        :       Race condition in ip_route_input_slow.
52  *      Tobias Ringstrom        :       Uninitialized res.type in ip_route_output_slow.
53  *      Vladimir V. Ivanov      :       IP rule info (flowid) is really useful.
54  *              Marc Boucher    :       routing by fwmark
55  *      Robert Olsson           :       Added rt_cache statistics
56  *      Arnaldo C. Melo         :       Convert proc stuff to seq_file
57  *      Eric Dumazet            :       hashed spinlocks and rt_check_expire() fixes.
58  *      Ilia Sotnikov           :       Ignore TOS on PMTUD and Redirect
59  *      Ilia Sotnikov           :       Removed TOS from hash calculations
60  *
61  *              This program is free software; you can redistribute it and/or
62  *              modify it under the terms of the GNU General Public License
63  *              as published by the Free Software Foundation; either version
64  *              2 of the License, or (at your option) any later version.
65  */
66
67 #include <linux/module.h>
68 #include <asm/uaccess.h>
69 #include <asm/system.h>
70 #include <linux/bitops.h>
71 #include <linux/types.h>
72 #include <linux/kernel.h>
73 #include <linux/mm.h>
74 #include <linux/bootmem.h>
75 #include <linux/string.h>
76 #include <linux/socket.h>
77 #include <linux/sockios.h>
78 #include <linux/errno.h>
79 #include <linux/in.h>
80 #include <linux/inet.h>
81 #include <linux/netdevice.h>
82 #include <linux/proc_fs.h>
83 #include <linux/init.h>
84 #include <linux/workqueue.h>
85 #include <linux/skbuff.h>
86 #include <linux/inetdevice.h>
87 #include <linux/igmp.h>
88 #include <linux/pkt_sched.h>
89 #include <linux/mroute.h>
90 #include <linux/netfilter_ipv4.h>
91 #include <linux/random.h>
92 #include <linux/jhash.h>
93 #include <linux/rcupdate.h>
94 #include <linux/times.h>
95 #include <net/dst.h>
96 #include <net/net_namespace.h>
97 #include <net/protocol.h>
98 #include <net/ip.h>
99 #include <net/route.h>
100 #include <net/inetpeer.h>
101 #include <net/sock.h>
102 #include <net/ip_fib.h>
103 #include <net/arp.h>
104 #include <net/tcp.h>
105 #include <net/icmp.h>
106 #include <net/xfrm.h>
107 #include <net/netevent.h>
108 #include <net/rtnetlink.h>
109 #ifdef CONFIG_SYSCTL
110 #include <linux/sysctl.h>
111 #endif
112
113 #define RT_FL_TOS(oldflp) \
114     ((u32)(oldflp->fl4_tos & (IPTOS_RT_MASK | RTO_ONLINK)))
115
116 #define IP_MAX_MTU      0xFFF0
117
118 #define RT_GC_TIMEOUT (300*HZ)
119
120 static int ip_rt_min_delay              = 2 * HZ;
121 static int ip_rt_max_delay              = 10 * HZ;
122 static int ip_rt_max_size;
123 static int ip_rt_gc_timeout             = RT_GC_TIMEOUT;
124 static int ip_rt_gc_interval            = 60 * HZ;
125 static int ip_rt_gc_min_interval        = HZ / 2;
126 static int ip_rt_redirect_number        = 9;
127 static int ip_rt_redirect_load          = HZ / 50;
128 static int ip_rt_redirect_silence       = ((HZ / 50) << (9 + 1));
129 static int ip_rt_error_cost             = HZ;
130 static int ip_rt_error_burst            = 5 * HZ;
131 static int ip_rt_gc_elasticity          = 8;
132 static int ip_rt_mtu_expires            = 10 * 60 * HZ;
133 static int ip_rt_min_pmtu               = 512 + 20 + 20;
134 static int ip_rt_min_advmss             = 256;
135 static int ip_rt_secret_interval        = 10 * 60 * HZ;
136 static unsigned long rt_deadline;
137
138 #define RTprint(a...)   printk(KERN_DEBUG a)
139
140 static struct timer_list rt_flush_timer;
141 static void rt_check_expire(struct work_struct *work);
142 static DECLARE_DELAYED_WORK(expires_work, rt_check_expire);
143 static struct timer_list rt_secret_timer;
144
145 /*
146  *      Interface to generic destination cache.
147  */
148
149 static struct dst_entry *ipv4_dst_check(struct dst_entry *dst, u32 cookie);
150 static void              ipv4_dst_destroy(struct dst_entry *dst);
151 static void              ipv4_dst_ifdown(struct dst_entry *dst,
152                                          struct net_device *dev, int how);
153 static struct dst_entry *ipv4_negative_advice(struct dst_entry *dst);
154 static void              ipv4_link_failure(struct sk_buff *skb);
155 static void              ip_rt_update_pmtu(struct dst_entry *dst, u32 mtu);
156 static int rt_garbage_collect(void);
157
158
159 static struct dst_ops ipv4_dst_ops = {
160         .family =               AF_INET,
161         .protocol =             __constant_htons(ETH_P_IP),
162         .gc =                   rt_garbage_collect,
163         .check =                ipv4_dst_check,
164         .destroy =              ipv4_dst_destroy,
165         .ifdown =               ipv4_dst_ifdown,
166         .negative_advice =      ipv4_negative_advice,
167         .link_failure =         ipv4_link_failure,
168         .update_pmtu =          ip_rt_update_pmtu,
169         .entry_size =           sizeof(struct rtable),
170 };
171
172 #define ECN_OR_COST(class)      TC_PRIO_##class
173
174 const __u8 ip_tos2prio[16] = {
175         TC_PRIO_BESTEFFORT,
176         ECN_OR_COST(FILLER),
177         TC_PRIO_BESTEFFORT,
178         ECN_OR_COST(BESTEFFORT),
179         TC_PRIO_BULK,
180         ECN_OR_COST(BULK),
181         TC_PRIO_BULK,
182         ECN_OR_COST(BULK),
183         TC_PRIO_INTERACTIVE,
184         ECN_OR_COST(INTERACTIVE),
185         TC_PRIO_INTERACTIVE,
186         ECN_OR_COST(INTERACTIVE),
187         TC_PRIO_INTERACTIVE_BULK,
188         ECN_OR_COST(INTERACTIVE_BULK),
189         TC_PRIO_INTERACTIVE_BULK,
190         ECN_OR_COST(INTERACTIVE_BULK)
191 };
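
/*
 * Illustrative note (not part of the original comment): rt_tos2priority()
 * in include/net/route.h indexes this table with the TOS byte shifted
 * right by one, i.e. ip_tos2prio[IPTOS_TOS(tos) >> 1].  For example, a
 * packet carrying IPTOS_LOWDELAY (0x10) maps to index 8, which is
 * TC_PRIO_INTERACTIVE.
 */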
192
193
194 /*
195  * Route cache.
196  */
197
198 /* The locking scheme is rather straightforward:
199  *
200  * 1) Read-Copy Update protects the buckets of the central route hash.
201  * 2) Only writers remove entries, and they hold the lock
202  *    as they look at rtable reference counts.
203  * 3) Only readers acquire references to rtable entries,
204  *    they do so with atomic increments and with the
205  *    lock held.
206  */
207
208 struct rt_hash_bucket {
209         struct rtable   *chain;
210 };
211 #if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK) || \
212         defined(CONFIG_PROVE_LOCKING)
213 /*
214  * Instead of using one spinlock for each rt_hash_bucket, we use a table of spinlocks.
215  * The size of this table is a power of two and depends on the number of CPUs.
216  * (On lockdep, spinlock_t is quite big, so keep the table size down there.)
217  */
218 #ifdef CONFIG_LOCKDEP
219 # define RT_HASH_LOCK_SZ        256
220 #else
221 # if NR_CPUS >= 32
222 #  define RT_HASH_LOCK_SZ       4096
223 # elif NR_CPUS >= 16
224 #  define RT_HASH_LOCK_SZ       2048
225 # elif NR_CPUS >= 8
226 #  define RT_HASH_LOCK_SZ       1024
227 # elif NR_CPUS >= 4
228 #  define RT_HASH_LOCK_SZ       512
229 # else
230 #  define RT_HASH_LOCK_SZ       256
231 # endif
232 #endif
233
234 static spinlock_t       *rt_hash_locks;
235 # define rt_hash_lock_addr(slot) &rt_hash_locks[(slot) & (RT_HASH_LOCK_SZ - 1)]
236 # define rt_hash_lock_init()    { \
237                 int i; \
238                 rt_hash_locks = kmalloc(sizeof(spinlock_t) * RT_HASH_LOCK_SZ, GFP_KERNEL); \
239                 if (!rt_hash_locks) panic("IP: failed to allocate rt_hash_locks\n"); \
240                 for (i = 0; i < RT_HASH_LOCK_SZ; i++) \
241                         spin_lock_init(&rt_hash_locks[i]); \
242                 }
243 #else
244 # define rt_hash_lock_addr(slot) NULL
245 # define rt_hash_lock_init()
246 #endif
247
248 static struct rt_hash_bucket    *rt_hash_table;
249 static unsigned                 rt_hash_mask;
250 static unsigned int             rt_hash_log;
251 static unsigned int             rt_hash_rnd;
252
253 static DEFINE_PER_CPU(struct rt_cache_stat, rt_cache_stat);
254 #define RT_CACHE_STAT_INC(field) \
255         (__raw_get_cpu_var(rt_cache_stat).field++)
256
257 static int rt_intern_hash(unsigned hash, struct rtable *rth,
258                                 struct rtable **res);
259
260 static unsigned int rt_hash_code(u32 daddr, u32 saddr)
261 {
262         return (jhash_2words(daddr, saddr, rt_hash_rnd)
263                 & rt_hash_mask);
264 }
265
266 #define rt_hash(daddr, saddr, idx) \
267         rt_hash_code((__force u32)(__be32)(daddr),\
268                      (__force u32)(__be32)(saddr) ^ ((idx) << 5))
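
/*
 * Illustrative sketch (not used anywhere in this file, and the helper name
 * is purely illustrative): how a lockless reader typically walks one hash
 * chain under the scheme described above.  Readers only need
 * rcu_read_lock_bh(); writers additionally take
 * spin_lock_bh(rt_hash_lock_addr(hash)) before unlinking entries, as
 * rt_check_expire() and rt_intern_hash() do below.  The flow-key match
 * here is simplified compared to the real lookup paths.
 */
static inline struct rtable *rt_hash_lookup_sketch(__be32 daddr, __be32 saddr,
						   int iif)
{
	unsigned hash = rt_hash(daddr, saddr, iif);
	struct rtable *rth;

	rcu_read_lock_bh();
	for (rth = rcu_dereference(rt_hash_table[hash].chain); rth;
	     rth = rcu_dereference(rth->u.dst.rt_next)) {
		if (rth->fl.fl4_dst == daddr &&
		    rth->fl.fl4_src == saddr &&
		    rth->fl.iif == iif) {
			/* Take a reference before leaving the RCU section. */
			dst_hold(&rth->u.dst);
			break;
		}
	}
	rcu_read_unlock_bh();
	return rth;
}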
269
270 #ifdef CONFIG_PROC_FS
271 struct rt_cache_iter_state {
272         int bucket;
273 };
274
275 static struct rtable *rt_cache_get_first(struct seq_file *seq)
276 {
277         struct rtable *r = NULL;
278         struct rt_cache_iter_state *st = seq->private;
279
280         for (st->bucket = rt_hash_mask; st->bucket >= 0; --st->bucket) {
281                 rcu_read_lock_bh();
282                 r = rt_hash_table[st->bucket].chain;
283                 if (r)
284                         break;
285                 rcu_read_unlock_bh();
286         }
287         return rcu_dereference(r);
288 }
289
290 static struct rtable *rt_cache_get_next(struct seq_file *seq, struct rtable *r)
291 {
292         struct rt_cache_iter_state *st = seq->private;
293
294         r = r->u.dst.rt_next;
295         while (!r) {
296                 rcu_read_unlock_bh();
297                 if (--st->bucket < 0)
298                         break;
299                 rcu_read_lock_bh();
300                 r = rt_hash_table[st->bucket].chain;
301         }
302         return rcu_dereference(r);
303 }
304
305 static struct rtable *rt_cache_get_idx(struct seq_file *seq, loff_t pos)
306 {
307         struct rtable *r = rt_cache_get_first(seq);
308
309         if (r)
310                 while (pos && (r = rt_cache_get_next(seq, r)))
311                         --pos;
312         return pos ? NULL : r;
313 }
314
315 static void *rt_cache_seq_start(struct seq_file *seq, loff_t *pos)
316 {
317         return *pos ? rt_cache_get_idx(seq, *pos - 1) : SEQ_START_TOKEN;
318 }
319
320 static void *rt_cache_seq_next(struct seq_file *seq, void *v, loff_t *pos)
321 {
322         struct rtable *r = NULL;
323
324         if (v == SEQ_START_TOKEN)
325                 r = rt_cache_get_first(seq);
326         else
327                 r = rt_cache_get_next(seq, v);
328         ++*pos;
329         return r;
330 }
331
332 static void rt_cache_seq_stop(struct seq_file *seq, void *v)
333 {
334         if (v && v != SEQ_START_TOKEN)
335                 rcu_read_unlock_bh();
336 }
337
338 static int rt_cache_seq_show(struct seq_file *seq, void *v)
339 {
340         if (v == SEQ_START_TOKEN)
341                 seq_printf(seq, "%-127s\n",
342                            "Iface\tDestination\tGateway \tFlags\t\tRefCnt\tUse\t"
343                            "Metric\tSource\t\tMTU\tWindow\tIRTT\tTOS\tHHRef\t"
344                            "HHUptod\tSpecDst");
345         else {
346                 struct rtable *r = v;
347                 char temp[256];
348
349                 sprintf(temp, "%s\t%08lX\t%08lX\t%8X\t%d\t%u\t%d\t"
350                               "%08lX\t%d\t%u\t%u\t%02X\t%d\t%1d\t%08X",
351                         r->u.dst.dev ? r->u.dst.dev->name : "*",
352                         (unsigned long)r->rt_dst, (unsigned long)r->rt_gateway,
353                         r->rt_flags, atomic_read(&r->u.dst.__refcnt),
354                         r->u.dst.__use, 0, (unsigned long)r->rt_src,
355                         (dst_metric(&r->u.dst, RTAX_ADVMSS) ?
356                              (int)dst_metric(&r->u.dst, RTAX_ADVMSS) + 40 : 0),
357                         dst_metric(&r->u.dst, RTAX_WINDOW),
358                         (int)((dst_metric(&r->u.dst, RTAX_RTT) >> 3) +
359                               dst_metric(&r->u.dst, RTAX_RTTVAR)),
360                         r->fl.fl4_tos,
361                         r->u.dst.hh ? atomic_read(&r->u.dst.hh->hh_refcnt) : -1,
362                         r->u.dst.hh ? (r->u.dst.hh->hh_output ==
363                                        dev_queue_xmit) : 0,
364                         r->rt_spec_dst);
365                 seq_printf(seq, "%-127s\n", temp);
366         }
367         return 0;
368 }
369
370 static const struct seq_operations rt_cache_seq_ops = {
371         .start  = rt_cache_seq_start,
372         .next   = rt_cache_seq_next,
373         .stop   = rt_cache_seq_stop,
374         .show   = rt_cache_seq_show,
375 };
376
377 static int rt_cache_seq_open(struct inode *inode, struct file *file)
378 {
379         return seq_open_private(file, &rt_cache_seq_ops,
380                         sizeof(struct rt_cache_iter_state));
381 }
382
383 static const struct file_operations rt_cache_seq_fops = {
384         .owner   = THIS_MODULE,
385         .open    = rt_cache_seq_open,
386         .read    = seq_read,
387         .llseek  = seq_lseek,
388         .release = seq_release_private,
389 };
390
391
392 static void *rt_cpu_seq_start(struct seq_file *seq, loff_t *pos)
393 {
394         int cpu;
395
396         if (*pos == 0)
397                 return SEQ_START_TOKEN;
398
399         for (cpu = *pos-1; cpu < NR_CPUS; ++cpu) {
400                 if (!cpu_possible(cpu))
401                         continue;
402                 *pos = cpu+1;
403                 return &per_cpu(rt_cache_stat, cpu);
404         }
405         return NULL;
406 }
407
408 static void *rt_cpu_seq_next(struct seq_file *seq, void *v, loff_t *pos)
409 {
410         int cpu;
411
412         for (cpu = *pos; cpu < NR_CPUS; ++cpu) {
413                 if (!cpu_possible(cpu))
414                         continue;
415                 *pos = cpu+1;
416                 return &per_cpu(rt_cache_stat, cpu);
417         }
418         return NULL;
419
420 }
421
422 static void rt_cpu_seq_stop(struct seq_file *seq, void *v)
423 {
424
425 }
426
427 static int rt_cpu_seq_show(struct seq_file *seq, void *v)
428 {
429         struct rt_cache_stat *st = v;
430
431         if (v == SEQ_START_TOKEN) {
432                 seq_printf(seq, "entries  in_hit in_slow_tot in_slow_mc in_no_route in_brd in_martian_dst in_martian_src  out_hit out_slow_tot out_slow_mc  gc_total gc_ignored gc_goal_miss gc_dst_overflow in_hlist_search out_hlist_search\n");
433                 return 0;
434         }
435
436         seq_printf(seq,"%08x  %08x %08x %08x %08x %08x %08x %08x "
437                    " %08x %08x %08x %08x %08x %08x %08x %08x %08x \n",
438                    atomic_read(&ipv4_dst_ops.entries),
439                    st->in_hit,
440                    st->in_slow_tot,
441                    st->in_slow_mc,
442                    st->in_no_route,
443                    st->in_brd,
444                    st->in_martian_dst,
445                    st->in_martian_src,
446
447                    st->out_hit,
448                    st->out_slow_tot,
449                    st->out_slow_mc,
450
451                    st->gc_total,
452                    st->gc_ignored,
453                    st->gc_goal_miss,
454                    st->gc_dst_overflow,
455                    st->in_hlist_search,
456                    st->out_hlist_search
457                 );
458         return 0;
459 }
460
461 static const struct seq_operations rt_cpu_seq_ops = {
462         .start  = rt_cpu_seq_start,
463         .next   = rt_cpu_seq_next,
464         .stop   = rt_cpu_seq_stop,
465         .show   = rt_cpu_seq_show,
466 };
467
468
469 static int rt_cpu_seq_open(struct inode *inode, struct file *file)
470 {
471         return seq_open(file, &rt_cpu_seq_ops);
472 }
473
474 static const struct file_operations rt_cpu_seq_fops = {
475         .owner   = THIS_MODULE,
476         .open    = rt_cpu_seq_open,
477         .read    = seq_read,
478         .llseek  = seq_lseek,
479         .release = seq_release,
480 };
481
482 #endif /* CONFIG_PROC_FS */
483
484 static __inline__ void rt_free(struct rtable *rt)
485 {
486         call_rcu_bh(&rt->u.dst.rcu_head, dst_rcu_free);
487 }
488
489 static __inline__ void rt_drop(struct rtable *rt)
490 {
491         ip_rt_put(rt);
492         call_rcu_bh(&rt->u.dst.rcu_head, dst_rcu_free);
493 }
494
495 static __inline__ int rt_fast_clean(struct rtable *rth)
496 {
497         /* Kill broadcast/multicast entries very aggressively, if they
498            collide in the hash table with more useful entries */
499         return (rth->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST)) &&
500                 rth->fl.iif && rth->u.dst.rt_next;
501 }
502
503 static __inline__ int rt_valuable(struct rtable *rth)
504 {
505         return (rth->rt_flags & (RTCF_REDIRECTED | RTCF_NOTIFY)) ||
506                 rth->u.dst.expires;
507 }
508
509 static int rt_may_expire(struct rtable *rth, unsigned long tmo1, unsigned long tmo2)
510 {
511         unsigned long age;
512         int ret = 0;
513
514         if (atomic_read(&rth->u.dst.__refcnt))
515                 goto out;
516
517         ret = 1;
518         if (rth->u.dst.expires &&
519             time_after_eq(jiffies, rth->u.dst.expires))
520                 goto out;
521
522         age = jiffies - rth->u.dst.lastuse;
523         ret = 0;
524         if ((age <= tmo1 && !rt_fast_clean(rth)) ||
525             (age <= tmo2 && rt_valuable(rth)))
526                 goto out;
527         ret = 1;
528 out:    return ret;
529 }
530
531 /* Bits of score are:
532  * 31: very valuable
533  * 30: not quite useless
534  * 29..0: usage counter
535  */
536 static inline u32 rt_score(struct rtable *rt)
537 {
538         u32 score = jiffies - rt->u.dst.lastuse;
539
540         score = ~score & ~(3<<30);
541
542         if (rt_valuable(rt))
543                 score |= (1<<31);
544
545         if (!rt->fl.iif ||
546             !(rt->rt_flags & (RTCF_BROADCAST|RTCF_MULTICAST|RTCF_LOCAL)))
547                 score |= (1<<30);
548
549         return score;
550 }
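
/*
 * Worked example (for illustration only): an unreferenced entry that has
 * been idle for 100 jiffies, is "valuable" (redirected or carrying an
 * expiry) and is an output route scores
 *   ((~100UL) & ~(3 << 30)) | (1 << 31) | (1 << 30) = 0xffffff9b,
 * so fresher, more valuable entries sort above stale broadcast/multicast
 * input routes when rt_intern_hash() picks its lowest-score eviction
 * candidate.
 */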
551
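/*
 * Note: compare_keys() below XORs each field of the two flow keys and ORs
 * the results together, so the keys are equal exactly when the accumulated
 * value is zero; this replaces a chain of separate comparisons with a
 * single branch.
 */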
552 static inline int compare_keys(struct flowi *fl1, struct flowi *fl2)
553 {
554         return ((__force u32)((fl1->nl_u.ip4_u.daddr ^ fl2->nl_u.ip4_u.daddr) |
555                 (fl1->nl_u.ip4_u.saddr ^ fl2->nl_u.ip4_u.saddr)) |
556                 (fl1->mark ^ fl2->mark) |
557                 (*(u16 *)&fl1->nl_u.ip4_u.tos ^
558                  *(u16 *)&fl2->nl_u.ip4_u.tos) |
559                 (fl1->oif ^ fl2->oif) |
560                 (fl1->iif ^ fl2->iif)) == 0;
561 }
562
563 static void rt_check_expire(struct work_struct *work)
564 {
565         static unsigned int rover;
566         unsigned int i = rover, goal;
567         struct rtable *rth, **rthp;
568         u64 mult;
569
570         mult = ((u64)ip_rt_gc_interval) << rt_hash_log;
571         if (ip_rt_gc_timeout > 1)
572                 do_div(mult, ip_rt_gc_timeout);
573         goal = (unsigned int)mult;
574         if (goal > rt_hash_mask)
575                 goal = rt_hash_mask + 1;
576         for (; goal > 0; goal--) {
577                 unsigned long tmo = ip_rt_gc_timeout;
578
579                 i = (i + 1) & rt_hash_mask;
580                 rthp = &rt_hash_table[i].chain;
581
582                 if (need_resched())
583                         cond_resched();
584
585                 if (*rthp == NULL)
586                         continue;
587                 spin_lock_bh(rt_hash_lock_addr(i));
588                 while ((rth = *rthp) != NULL) {
589                         if (rth->u.dst.expires) {
590                                 /* Entry is expired even if it is in use */
591                                 if (time_before_eq(jiffies, rth->u.dst.expires)) {
592                                         tmo >>= 1;
593                                         rthp = &rth->u.dst.rt_next;
594                                         continue;
595                                 }
596                         } else if (!rt_may_expire(rth, tmo, ip_rt_gc_timeout)) {
597                                 tmo >>= 1;
598                                 rthp = &rth->u.dst.rt_next;
599                                 continue;
600                         }
601
602                         /* Cleanup aged off entries. */
603                         *rthp = rth->u.dst.rt_next;
604                         rt_free(rth);
605                 }
606                 spin_unlock_bh(rt_hash_lock_addr(i));
607         }
608         rover = i;
609         schedule_delayed_work(&expires_work, ip_rt_gc_interval);
610 }
611
612 /* This can run from both BH and non-BH contexts, the latter
613  * in the case of a forced flush event.
614  */
615 static void rt_run_flush(unsigned long dummy)
616 {
617         int i;
618         struct rtable *rth, *next;
619
620         rt_deadline = 0;
621
622         get_random_bytes(&rt_hash_rnd, 4);
623
624         for (i = rt_hash_mask; i >= 0; i--) {
625                 spin_lock_bh(rt_hash_lock_addr(i));
626                 rth = rt_hash_table[i].chain;
627                 if (rth)
628                         rt_hash_table[i].chain = NULL;
629                 spin_unlock_bh(rt_hash_lock_addr(i));
630
631                 for (; rth; rth = next) {
632                         next = rth->u.dst.rt_next;
633                         rt_free(rth);
634                 }
635         }
636 }
637
638 static DEFINE_SPINLOCK(rt_flush_lock);
639
640 void rt_cache_flush(int delay)
641 {
642         unsigned long now = jiffies;
643         int user_mode = !in_softirq();
644
645         if (delay < 0)
646                 delay = ip_rt_min_delay;
647
648         spin_lock_bh(&rt_flush_lock);
649
650         if (del_timer(&rt_flush_timer) && delay > 0 && rt_deadline) {
651                 long tmo = (long)(rt_deadline - now);
652
653                 /* If the flush timer is already running
654                    and the flush request is not immediate (delay > 0):
655
656                    if the deadline has not been reached yet, extend the timer to "delay",
657                    otherwise fire it at the deadline time.
658                  */
659
660                 if (user_mode && tmo < ip_rt_max_delay-ip_rt_min_delay)
661                         tmo = 0;
662
663                 if (delay > tmo)
664                         delay = tmo;
665         }
666
667         if (delay <= 0) {
668                 spin_unlock_bh(&rt_flush_lock);
669                 rt_run_flush(0);
670                 return;
671         }
672
673         if (rt_deadline == 0)
674                 rt_deadline = now + ip_rt_max_delay;
675
676         mod_timer(&rt_flush_timer, now+delay);
677         spin_unlock_bh(&rt_flush_lock);
678 }
679
680 static void rt_secret_rebuild(unsigned long dummy)
681 {
682         unsigned long now = jiffies;
683
684         rt_cache_flush(0);
685         mod_timer(&rt_secret_timer, now + ip_rt_secret_interval);
686 }
687
688 /*
689    Short description of GC goals.
690
691    We want an algorithm that keeps the routing cache at an
692    equilibrium point, where the number of aged-off entries stays
693    approximately equal to the number of newly generated ones.
694
695    The current expiration strength is the variable "expire".
696    We try to adjust it dynamically, so that when the network is
697    idle "expire" is large enough to keep plenty of warm entries,
698    and when the load increases it shrinks to limit the cache size.
699  */
700
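/*
 * Worked example (for illustration only): with the defaults above,
 * ip_rt_gc_elasticity = 8, so if the hash table has 2^rt_hash_log = 65536
 * buckets the cache may grow to roughly 8 * 65536 = 524288 entries before
 * "goal" in rt_garbage_collect() turns positive and trimming starts; the
 * actual table size is chosen at boot time.
 */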
701 static int rt_garbage_collect(void)
702 {
703         static unsigned long expire = RT_GC_TIMEOUT;
704         static unsigned long last_gc;
705         static int rover;
706         static int equilibrium;
707         struct rtable *rth, **rthp;
708         unsigned long now = jiffies;
709         int goal;
710
711         /*
712          * Garbage collection is pretty expensive,
713          * do not run it too frequently.
714          */
715
716         RT_CACHE_STAT_INC(gc_total);
717
718         if (now - last_gc < ip_rt_gc_min_interval &&
719             atomic_read(&ipv4_dst_ops.entries) < ip_rt_max_size) {
720                 RT_CACHE_STAT_INC(gc_ignored);
721                 goto out;
722         }
723
724         /* Calculate the number of entries we want to expire now. */
725         goal = atomic_read(&ipv4_dst_ops.entries) -
726                 (ip_rt_gc_elasticity << rt_hash_log);
727         if (goal <= 0) {
728                 if (equilibrium < ipv4_dst_ops.gc_thresh)
729                         equilibrium = ipv4_dst_ops.gc_thresh;
730                 goal = atomic_read(&ipv4_dst_ops.entries) - equilibrium;
731                 if (goal > 0) {
732                         equilibrium += min_t(unsigned int, goal / 2, rt_hash_mask + 1);
733                         goal = atomic_read(&ipv4_dst_ops.entries) - equilibrium;
734                 }
735         } else {
736                 /* We are in a dangerous area. Try to shrink the cache
737                  * really aggressively.
738                  */
739                 goal = max_t(unsigned int, goal / 2, rt_hash_mask + 1);
740                 equilibrium = atomic_read(&ipv4_dst_ops.entries) - goal;
741         }
742
743         if (now - last_gc >= ip_rt_gc_min_interval)
744                 last_gc = now;
745
746         if (goal <= 0) {
747                 equilibrium += goal;
748                 goto work_done;
749         }
750
751         do {
752                 int i, k;
753
754                 for (i = rt_hash_mask, k = rover; i >= 0; i--) {
755                         unsigned long tmo = expire;
756
757                         k = (k + 1) & rt_hash_mask;
758                         rthp = &rt_hash_table[k].chain;
759                         spin_lock_bh(rt_hash_lock_addr(k));
760                         while ((rth = *rthp) != NULL) {
761                                 if (!rt_may_expire(rth, tmo, expire)) {
762                                         tmo >>= 1;
763                                         rthp = &rth->u.dst.rt_next;
764                                         continue;
765                                 }
766                                 *rthp = rth->u.dst.rt_next;
767                                 rt_free(rth);
768                                 goal--;
769                         }
770                         spin_unlock_bh(rt_hash_lock_addr(k));
771                         if (goal <= 0)
772                                 break;
773                 }
774                 rover = k;
775
776                 if (goal <= 0)
777                         goto work_done;
778
779                 /* The goal has not been achieved. We stop the process if:
780
781                    - expire was reduced to zero; otherwise expire is halved.
782                    - the table is not full.
783                    - we are called from interrupt (softirq) context.
784                    - the jiffies check is just a fallback/debug loop breaker;
785                      we will not spin here for a long time in any case.
786                  */
787
788                 RT_CACHE_STAT_INC(gc_goal_miss);
789
790                 if (expire == 0)
791                         break;
792
793                 expire >>= 1;
794 #if RT_CACHE_DEBUG >= 2
795                 printk(KERN_DEBUG "expire>> %u %d %d %d\n", expire,
796                                 atomic_read(&ipv4_dst_ops.entries), goal, i);
797 #endif
798
799                 if (atomic_read(&ipv4_dst_ops.entries) < ip_rt_max_size)
800                         goto out;
801         } while (!in_softirq() && time_before_eq(jiffies, now));
802
803         if (atomic_read(&ipv4_dst_ops.entries) < ip_rt_max_size)
804                 goto out;
805         if (net_ratelimit())
806                 printk(KERN_WARNING "dst cache overflow\n");
807         RT_CACHE_STAT_INC(gc_dst_overflow);
808         return 1;
809
810 work_done:
811         expire += ip_rt_gc_min_interval;
812         if (expire > ip_rt_gc_timeout ||
813             atomic_read(&ipv4_dst_ops.entries) < ipv4_dst_ops.gc_thresh)
814                 expire = ip_rt_gc_timeout;
815 #if RT_CACHE_DEBUG >= 2
816         printk(KERN_DEBUG "expire++ %u %d %d %d\n", expire,
817                         atomic_read(&ipv4_dst_ops.entries), goal, rover);
818 #endif
819 out:    return 0;
820 }
821
822 static int rt_intern_hash(unsigned hash, struct rtable *rt, struct rtable **rp)
823 {
824         struct rtable   *rth, **rthp;
825         unsigned long   now;
826         struct rtable *cand, **candp;
827         u32             min_score;
828         int             chain_length;
829         int attempts = !in_softirq();
830
831 restart:
832         chain_length = 0;
833         min_score = ~(u32)0;
834         cand = NULL;
835         candp = NULL;
836         now = jiffies;
837
838         rthp = &rt_hash_table[hash].chain;
839
840         spin_lock_bh(rt_hash_lock_addr(hash));
841         while ((rth = *rthp) != NULL) {
842                 if (compare_keys(&rth->fl, &rt->fl)) {
843                         /* Put it first */
844                         *rthp = rth->u.dst.rt_next;
845                         /*
846                          * Since lookup is lockfree, the deletion
847                          * must be visible to another weakly ordered CPU before
848                          * the insertion at the start of the hash chain.
849                          */
850                         rcu_assign_pointer(rth->u.dst.rt_next,
851                                            rt_hash_table[hash].chain);
852                         /*
853                          * Since lookup is lockfree, the update writes
854                          * must be ordered for consistency on SMP.
855                          */
856                         rcu_assign_pointer(rt_hash_table[hash].chain, rth);
857
858                         dst_use(&rth->u.dst, now);
859                         spin_unlock_bh(rt_hash_lock_addr(hash));
860
861                         rt_drop(rt);
862                         *rp = rth;
863                         return 0;
864                 }
865
866                 if (!atomic_read(&rth->u.dst.__refcnt)) {
867                         u32 score = rt_score(rth);
868
869                         if (score <= min_score) {
870                                 cand = rth;
871                                 candp = rthp;
872                                 min_score = score;
873                         }
874                 }
875
876                 chain_length++;
877
878                 rthp = &rth->u.dst.rt_next;
879         }
880
881         if (cand) {
882                 /* ip_rt_gc_elasticity used to be the average chain length;
883                  * when it is exceeded, gc becomes really aggressive.
884                  *
885                  * The second limit is less certain. At the moment it allows
886                  * only 2 entries per bucket. We will see.
887                  */
888                 if (chain_length > ip_rt_gc_elasticity) {
889                         *candp = cand->u.dst.rt_next;
890                         rt_free(cand);
891                 }
892         }
893
894         /* Try to bind the route to an ARP neighbour only if it is an
895            output route or on the unicast forwarding path.
896          */
897         if (rt->rt_type == RTN_UNICAST || rt->fl.iif == 0) {
898                 int err = arp_bind_neighbour(&rt->u.dst);
899                 if (err) {
900                         spin_unlock_bh(rt_hash_lock_addr(hash));
901
902                         if (err != -ENOBUFS) {
903                                 rt_drop(rt);
904                                 return err;
905                         }
906
907                         /* The neighbour tables are full and nothing
908                            can be released. Try to shrink the route cache;
909                            it most likely holds some neighbour records.
910                          */
911                         if (attempts-- > 0) {
912                                 int saved_elasticity = ip_rt_gc_elasticity;
913                                 int saved_int = ip_rt_gc_min_interval;
914                                 ip_rt_gc_elasticity     = 1;
915                                 ip_rt_gc_min_interval   = 0;
916                                 rt_garbage_collect();
917                                 ip_rt_gc_min_interval   = saved_int;
918                                 ip_rt_gc_elasticity     = saved_elasticity;
919                                 goto restart;
920                         }
921
922                         if (net_ratelimit())
923                                 printk(KERN_WARNING "Neighbour table overflow.\n");
924                         rt_drop(rt);
925                         return -ENOBUFS;
926                 }
927         }
928
929         rt->u.dst.rt_next = rt_hash_table[hash].chain;
930 #if RT_CACHE_DEBUG >= 2
931         if (rt->u.dst.rt_next) {
932                 struct rtable *trt;
933                 printk(KERN_DEBUG "rt_cache @%02x: %u.%u.%u.%u", hash,
934                        NIPQUAD(rt->rt_dst));
935                 for (trt = rt->u.dst.rt_next; trt; trt = trt->u.dst.rt_next)
936                         printk(" . %u.%u.%u.%u", NIPQUAD(trt->rt_dst));
937                 printk("\n");
938         }
939 #endif
940         rt_hash_table[hash].chain = rt;
941         spin_unlock_bh(rt_hash_lock_addr(hash));
942         *rp = rt;
943         return 0;
944 }
945
946 void rt_bind_peer(struct rtable *rt, int create)
947 {
948         static DEFINE_SPINLOCK(rt_peer_lock);
949         struct inet_peer *peer;
950
951         peer = inet_getpeer(rt->rt_dst, create);
952
953         spin_lock_bh(&rt_peer_lock);
954         if (rt->peer == NULL) {
955                 rt->peer = peer;
956                 peer = NULL;
957         }
958         spin_unlock_bh(&rt_peer_lock);
959         if (peer)
960                 inet_putpeer(peer);
961 }
962
963 /*
964  * Peer allocation may fail only in serious out-of-memory conditions.  However,
965  * we can still generate some output.
966  * Random ID selection looks a bit dangerous because we have no chance of
967  * selecting an ID that stays unique for a reasonable period of time.
968  * But a broken packet identifier may be better than no packet at all.
969  */
970 static void ip_select_fb_ident(struct iphdr *iph)
971 {
972         static DEFINE_SPINLOCK(ip_fb_id_lock);
973         static u32 ip_fallback_id;
974         u32 salt;
975
976         spin_lock_bh(&ip_fb_id_lock);
977         salt = secure_ip_id((__force __be32)ip_fallback_id ^ iph->daddr);
978         iph->id = htons(salt & 0xFFFF);
979         ip_fallback_id = salt;
980         spin_unlock_bh(&ip_fb_id_lock);
981 }
982
983 void __ip_select_ident(struct iphdr *iph, struct dst_entry *dst, int more)
984 {
985         struct rtable *rt = (struct rtable *) dst;
986
987         if (rt) {
988                 if (rt->peer == NULL)
989                         rt_bind_peer(rt, 1);
990
991                 /* If a peer is attached to a destination, it is never detached,
992                    so we do not need to grab a lock to dereference it.
993                  */
994                 if (rt->peer) {
995                         iph->id = htons(inet_getid(rt->peer, more));
996                         return;
997                 }
998         } else
999                 printk(KERN_DEBUG "rt_bind_peer(0) @%p\n",
1000                        __builtin_return_address(0));
1001
1002         ip_select_fb_ident(iph);
1003 }
1004
1005 static void rt_del(unsigned hash, struct rtable *rt)
1006 {
1007         struct rtable **rthp;
1008
1009         spin_lock_bh(rt_hash_lock_addr(hash));
1010         ip_rt_put(rt);
1011         for (rthp = &rt_hash_table[hash].chain; *rthp;
1012              rthp = &(*rthp)->u.dst.rt_next)
1013                 if (*rthp == rt) {
1014                         *rthp = rt->u.dst.rt_next;
1015                         rt_free(rt);
1016                         break;
1017                 }
1018         spin_unlock_bh(rt_hash_lock_addr(hash));
1019 }
1020
1021 void ip_rt_redirect(__be32 old_gw, __be32 daddr, __be32 new_gw,
1022                     __be32 saddr, struct net_device *dev)
1023 {
1024         int i, k;
1025         struct in_device *in_dev = in_dev_get(dev);
1026         struct rtable *rth, **rthp;
1027         __be32  skeys[2] = { saddr, 0 };
1028         int  ikeys[2] = { dev->ifindex, 0 };
1029         struct netevent_redirect netevent;
1030
1031         if (!in_dev)
1032                 return;
1033
1034         if (new_gw == old_gw || !IN_DEV_RX_REDIRECTS(in_dev)
1035             || MULTICAST(new_gw) || BADCLASS(new_gw) || ZERONET(new_gw))
1036                 goto reject_redirect;
1037
1038         if (!IN_DEV_SHARED_MEDIA(in_dev)) {
1039                 if (!inet_addr_onlink(in_dev, new_gw, old_gw))
1040                         goto reject_redirect;
1041                 if (IN_DEV_SEC_REDIRECTS(in_dev) && ip_fib_check_default(new_gw, dev))
1042                         goto reject_redirect;
1043         } else {
1044                 if (inet_addr_type(new_gw) != RTN_UNICAST)
1045                         goto reject_redirect;
1046         }
1047
1048         for (i = 0; i < 2; i++) {
1049                 for (k = 0; k < 2; k++) {
1050                         unsigned hash = rt_hash(daddr, skeys[i], ikeys[k]);
1051
1052                         rthp=&rt_hash_table[hash].chain;
1053
1054                         rcu_read_lock();
1055                         while ((rth = rcu_dereference(*rthp)) != NULL) {
1056                                 struct rtable *rt;
1057
1058                                 if (rth->fl.fl4_dst != daddr ||
1059                                     rth->fl.fl4_src != skeys[i] ||
1060                                     rth->fl.oif != ikeys[k] ||
1061                                     rth->fl.iif != 0) {
1062                                         rthp = &rth->u.dst.rt_next;
1063                                         continue;
1064                                 }
1065
1066                                 if (rth->rt_dst != daddr ||
1067                                     rth->rt_src != saddr ||
1068                                     rth->u.dst.error ||
1069                                     rth->rt_gateway != old_gw ||
1070                                     rth->u.dst.dev != dev)
1071                                         break;
1072
1073                                 dst_hold(&rth->u.dst);
1074                                 rcu_read_unlock();
1075
1076                                 rt = dst_alloc(&ipv4_dst_ops);
1077                                 if (rt == NULL) {
1078                                         ip_rt_put(rth);
1079                                         in_dev_put(in_dev);
1080                                         return;
1081                                 }
1082
1083                                 /* Copy all the information. */
1084                                 *rt = *rth;
1085                                 INIT_RCU_HEAD(&rt->u.dst.rcu_head);
1086                                 rt->u.dst.__use         = 1;
1087                                 atomic_set(&rt->u.dst.__refcnt, 1);
1088                                 rt->u.dst.child         = NULL;
1089                                 if (rt->u.dst.dev)
1090                                         dev_hold(rt->u.dst.dev);
1091                                 if (rt->idev)
1092                                         in_dev_hold(rt->idev);
1093                                 rt->u.dst.obsolete      = 0;
1094                                 rt->u.dst.lastuse       = jiffies;
1095                                 rt->u.dst.path          = &rt->u.dst;
1096                                 rt->u.dst.neighbour     = NULL;
1097                                 rt->u.dst.hh            = NULL;
1098                                 rt->u.dst.xfrm          = NULL;
1099
1100                                 rt->rt_flags            |= RTCF_REDIRECTED;
1101
1102                                 /* Gateway is different ... */
1103                                 rt->rt_gateway          = new_gw;
1104
1105                                 /* Redirect received -> path was valid */
1106                                 dst_confirm(&rth->u.dst);
1107
1108                                 if (rt->peer)
1109                                         atomic_inc(&rt->peer->refcnt);
1110
1111                                 if (arp_bind_neighbour(&rt->u.dst) ||
1112                                     !(rt->u.dst.neighbour->nud_state &
1113                                             NUD_VALID)) {
1114                                         if (rt->u.dst.neighbour)
1115                                                 neigh_event_send(rt->u.dst.neighbour, NULL);
1116                                         ip_rt_put(rth);
1117                                         rt_drop(rt);
1118                                         goto do_next;
1119                                 }
1120
1121                                 netevent.old = &rth->u.dst;
1122                                 netevent.new = &rt->u.dst;
1123                                 call_netevent_notifiers(NETEVENT_REDIRECT,
1124                                                         &netevent);
1125
1126                                 rt_del(hash, rth);
1127                                 if (!rt_intern_hash(hash, rt, &rt))
1128                                         ip_rt_put(rt);
1129                                 goto do_next;
1130                         }
1131                         rcu_read_unlock();
1132                 do_next:
1133                         ;
1134                 }
1135         }
1136         in_dev_put(in_dev);
1137         return;
1138
1139 reject_redirect:
1140 #ifdef CONFIG_IP_ROUTE_VERBOSE
1141         if (IN_DEV_LOG_MARTIANS(in_dev) && net_ratelimit())
1142                 printk(KERN_INFO "Redirect from %u.%u.%u.%u on %s about "
1143                         "%u.%u.%u.%u ignored.\n"
1144                         "  Advised path = %u.%u.%u.%u -> %u.%u.%u.%u\n",
1145                        NIPQUAD(old_gw), dev->name, NIPQUAD(new_gw),
1146                        NIPQUAD(saddr), NIPQUAD(daddr));
1147 #endif
1148         in_dev_put(in_dev);
1149 }
1150
1151 static struct dst_entry *ipv4_negative_advice(struct dst_entry *dst)
1152 {
1153         struct rtable *rt = (struct rtable*)dst;
1154         struct dst_entry *ret = dst;
1155
1156         if (rt) {
1157                 if (dst->obsolete) {
1158                         ip_rt_put(rt);
1159                         ret = NULL;
1160                 } else if ((rt->rt_flags & RTCF_REDIRECTED) ||
1161                            rt->u.dst.expires) {
1162                         unsigned hash = rt_hash(rt->fl.fl4_dst, rt->fl.fl4_src,
1163                                                 rt->fl.oif);
1164 #if RT_CACHE_DEBUG >= 1
1165                         printk(KERN_DEBUG "ipv4_negative_advice: redirect to "
1166                                           "%u.%u.%u.%u/%02x dropped\n",
1167                                 NIPQUAD(rt->rt_dst), rt->fl.fl4_tos);
1168 #endif
1169                         rt_del(hash, rt);
1170                         ret = NULL;
1171                 }
1172         }
1173         return ret;
1174 }
1175
1176 /*
1177  * Algorithm:
1178  *      1. The first ip_rt_redirect_number redirects are sent
1179  *         with exponential backoff, then we stop sending them at all,
1180  *         assuming that the host ignores our redirects.
1181  *      2. If we did not see packets requiring redirects
1182  *         during ip_rt_redirect_silence, we assume that the host
1183  *         has forgotten the redirected route, and start sending redirects again.
1184  *
1185  * This algorithm is much cheaper and more intelligent than dumb load limiting
1186  * in icmp.c.
1187  *
1188  * NOTE. Do not forget to inhibit load limiting for redirects (redundant)
1189  * and "frag. need" (breaks PMTU discovery) in icmp.c.
1190  */
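
/*
 * Worked example (for illustration only): with the defaults above,
 * ip_rt_redirect_load = HZ/50 (20 ms), so after rate_tokens redirects the
 * next one is sent no sooner than rate_last + (HZ/50 << rate_tokens);
 * with rate_tokens == 3 that is 160 ms.  Once ip_rt_redirect_number (9)
 * redirects have been ignored we stay silent until ip_rt_redirect_silence,
 * (HZ/50) << 10, i.e. roughly 20 seconds, has elapsed.
 */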
1191
1192 void ip_rt_send_redirect(struct sk_buff *skb)
1193 {
1194         struct rtable *rt = (struct rtable*)skb->dst;
1195         struct in_device *in_dev = in_dev_get(rt->u.dst.dev);
1196
1197         if (!in_dev)
1198                 return;
1199
1200         if (!IN_DEV_TX_REDIRECTS(in_dev))
1201                 goto out;
1202
1203         /* No redirected packets during ip_rt_redirect_silence;
1204          * reset the algorithm.
1205          */
1206         if (time_after(jiffies, rt->u.dst.rate_last + ip_rt_redirect_silence))
1207                 rt->u.dst.rate_tokens = 0;
1208
1209         /* Too many ignored redirects; do not send anything.
1210          * Set u.dst.rate_last to the last seen redirected packet.
1211          */
1212         if (rt->u.dst.rate_tokens >= ip_rt_redirect_number) {
1213                 rt->u.dst.rate_last = jiffies;
1214                 goto out;
1215         }
1216
1217         /* Check for load limit; set rate_last to the latest sent
1218          * redirect.
1219          */
1220         if (rt->u.dst.rate_tokens == 0 ||
1221             time_after(jiffies,
1222                        (rt->u.dst.rate_last +
1223                         (ip_rt_redirect_load << rt->u.dst.rate_tokens)))) {
1224                 icmp_send(skb, ICMP_REDIRECT, ICMP_REDIR_HOST, rt->rt_gateway);
1225                 rt->u.dst.rate_last = jiffies;
1226                 ++rt->u.dst.rate_tokens;
1227 #ifdef CONFIG_IP_ROUTE_VERBOSE
1228                 if (IN_DEV_LOG_MARTIANS(in_dev) &&
1229                     rt->u.dst.rate_tokens == ip_rt_redirect_number &&
1230                     net_ratelimit())
1231                         printk(KERN_WARNING "host %u.%u.%u.%u/if%d ignores "
1232                                 "redirects for %u.%u.%u.%u to %u.%u.%u.%u.\n",
1233                                 NIPQUAD(rt->rt_src), rt->rt_iif,
1234                                 NIPQUAD(rt->rt_dst), NIPQUAD(rt->rt_gateway));
1235 #endif
1236         }
1237 out:
1238         in_dev_put(in_dev);
1239 }
1240
1241 static int ip_error(struct sk_buff *skb)
1242 {
1243         struct rtable *rt = (struct rtable*)skb->dst;
1244         unsigned long now;
1245         int code;
1246
1247         switch (rt->u.dst.error) {
1248                 case EINVAL:
1249                 default:
1250                         goto out;
1251                 case EHOSTUNREACH:
1252                         code = ICMP_HOST_UNREACH;
1253                         break;
1254                 case ENETUNREACH:
1255                         code = ICMP_NET_UNREACH;
1256                         IP_INC_STATS_BH(IPSTATS_MIB_INNOROUTES);
1257                         break;
1258                 case EACCES:
1259                         code = ICMP_PKT_FILTERED;
1260                         break;
1261         }
1262
1263         now = jiffies;
1264         rt->u.dst.rate_tokens += now - rt->u.dst.rate_last;
1265         if (rt->u.dst.rate_tokens > ip_rt_error_burst)
1266                 rt->u.dst.rate_tokens = ip_rt_error_burst;
1267         rt->u.dst.rate_last = now;
1268         if (rt->u.dst.rate_tokens >= ip_rt_error_cost) {
1269                 rt->u.dst.rate_tokens -= ip_rt_error_cost;
1270                 icmp_send(skb, ICMP_DEST_UNREACH, code, 0);
1271         }
1272
1273 out:    kfree_skb(skb);
1274         return 0;
1275 }
1276
1277 /*
1278  *      The last two values are not from the RFC but
1279  *      are needed for AMPRnet AX.25 paths.
1280  */
1281
1282 static const unsigned short mtu_plateau[] =
1283 {32000, 17914, 8166, 4352, 2002, 1492, 576, 296, 216, 128 };
1284
1285 static __inline__ unsigned short guess_mtu(unsigned short old_mtu)
1286 {
1287         int i;
1288
1289         for (i = 0; i < ARRAY_SIZE(mtu_plateau); i++)
1290                 if (old_mtu > mtu_plateau[i])
1291                         return mtu_plateau[i];
1292         return 68;
1293 }
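
/*
 * Worked example (for illustration only): guess_mtu(1500) walks the
 * plateau table and returns 1492, the first plateau strictly below the
 * old MTU; anything at or below the smallest plateau (128) falls back to
 * 68, the minimum IPv4 MTU.
 */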
1294
1295 unsigned short ip_rt_frag_needed(struct iphdr *iph, unsigned short new_mtu)
1296 {
1297         int i;
1298         unsigned short old_mtu = ntohs(iph->tot_len);
1299         struct rtable *rth;
1300         __be32  skeys[2] = { iph->saddr, 0, };
1301         __be32  daddr = iph->daddr;
1302         unsigned short est_mtu = 0;
1303
1304         if (ipv4_config.no_pmtu_disc)
1305                 return 0;
1306
1307         for (i = 0; i < 2; i++) {
1308                 unsigned hash = rt_hash(daddr, skeys[i], 0);
1309
1310                 rcu_read_lock();
1311                 for (rth = rcu_dereference(rt_hash_table[hash].chain); rth;
1312                      rth = rcu_dereference(rth->u.dst.rt_next)) {
1313                         if (rth->fl.fl4_dst == daddr &&
1314                             rth->fl.fl4_src == skeys[i] &&
1315                             rth->rt_dst  == daddr &&
1316                             rth->rt_src  == iph->saddr &&
1317                             rth->fl.iif == 0 &&
1318                             !(dst_metric_locked(&rth->u.dst, RTAX_MTU))) {
1319                                 unsigned short mtu = new_mtu;
1320
1321                                 if (new_mtu < 68 || new_mtu >= old_mtu) {
1322
1323                                         /* BSD 4.2 compatibility hack :-( */
1324                                         if (mtu == 0 &&
1325                                             old_mtu >= rth->u.dst.metrics[RTAX_MTU-1] &&
1326                                             old_mtu >= 68 + (iph->ihl << 2))
1327                                                 old_mtu -= iph->ihl << 2;
1328
1329                                         mtu = guess_mtu(old_mtu);
1330                                 }
1331                                 if (mtu <= rth->u.dst.metrics[RTAX_MTU-1]) {
1332                                         if (mtu < rth->u.dst.metrics[RTAX_MTU-1]) {
1333                                                 dst_confirm(&rth->u.dst);
1334                                                 if (mtu < ip_rt_min_pmtu) {
1335                                                         mtu = ip_rt_min_pmtu;
1336                                                         rth->u.dst.metrics[RTAX_LOCK-1] |=
1337                                                                 (1 << RTAX_MTU);
1338                                                 }
1339                                                 rth->u.dst.metrics[RTAX_MTU-1] = mtu;
1340                                                 dst_set_expires(&rth->u.dst,
1341                                                         ip_rt_mtu_expires);
1342                                         }
1343                                         est_mtu = mtu;
1344                                 }
1345                         }
1346                 }
1347                 rcu_read_unlock();
1348         }
1349         return est_mtu ? : new_mtu;
1350 }
1351
1352 static void ip_rt_update_pmtu(struct dst_entry *dst, u32 mtu)
1353 {
1354         if (dst->metrics[RTAX_MTU-1] > mtu && mtu >= 68 &&
1355             !(dst_metric_locked(dst, RTAX_MTU))) {
1356                 if (mtu < ip_rt_min_pmtu) {
1357                         mtu = ip_rt_min_pmtu;
1358                         dst->metrics[RTAX_LOCK-1] |= (1 << RTAX_MTU);
1359                 }
1360                 dst->metrics[RTAX_MTU-1] = mtu;
1361                 dst_set_expires(dst, ip_rt_mtu_expires);
1362                 call_netevent_notifiers(NETEVENT_PMTU_UPDATE, dst);
1363         }
1364 }
1365
1366 static struct dst_entry *ipv4_dst_check(struct dst_entry *dst, u32 cookie)
1367 {
1368         return NULL;
1369 }
1370
1371 static void ipv4_dst_destroy(struct dst_entry *dst)
1372 {
1373         struct rtable *rt = (struct rtable *) dst;
1374         struct inet_peer *peer = rt->peer;
1375         struct in_device *idev = rt->idev;
1376
1377         if (peer) {
1378                 rt->peer = NULL;
1379                 inet_putpeer(peer);
1380         }
1381
1382         if (idev) {
1383                 rt->idev = NULL;
1384                 in_dev_put(idev);
1385         }
1386 }
1387
1388 static void ipv4_dst_ifdown(struct dst_entry *dst, struct net_device *dev,
1389                             int how)
1390 {
1391         struct rtable *rt = (struct rtable *) dst;
1392         struct in_device *idev = rt->idev;
1393         if (dev != init_net.loopback_dev && idev && idev->dev == dev) {
1394                 struct in_device *loopback_idev = in_dev_get(init_net.loopback_dev);
1395                 if (loopback_idev) {
1396                         rt->idev = loopback_idev;
1397                         in_dev_put(idev);
1398                 }
1399         }
1400 }
1401
1402 static void ipv4_link_failure(struct sk_buff *skb)
1403 {
1404         struct rtable *rt;
1405
1406         icmp_send(skb, ICMP_DEST_UNREACH, ICMP_HOST_UNREACH, 0);
1407
1408         rt = (struct rtable *) skb->dst;
1409         if (rt)
1410                 dst_set_expires(&rt->u.dst, 0);
1411 }
1412
1413 static int ip_rt_bug(struct sk_buff *skb)
1414 {
1415         printk(KERN_DEBUG "ip_rt_bug: %u.%u.%u.%u -> %u.%u.%u.%u, %s\n",
1416                 NIPQUAD(ip_hdr(skb)->saddr), NIPQUAD(ip_hdr(skb)->daddr),
1417                 skb->dev ? skb->dev->name : "?");
1418         kfree_skb(skb);
1419         return 0;
1420 }
1421
1422 /*
1423    We do not cache the source address of the outgoing interface,
1424    because it is used only by the IP RR, TS and SRR options,
1425    so it is out of the fast path.
1426
1427    BTW remember: "addr" is allowed to be unaligned
1428    in IP options!
1429  */
1430
1431 void ip_rt_get_source(u8 *addr, struct rtable *rt)
1432 {
1433         __be32 src;
1434         struct fib_result res;
1435
1436         if (rt->fl.iif == 0)
1437                 src = rt->rt_src;
1438         else if (fib_lookup(&rt->fl, &res) == 0) {
1439                 src = FIB_RES_PREFSRC(res);
1440                 fib_res_put(&res);
1441         } else
1442                 src = inet_select_addr(rt->u.dst.dev, rt->rt_gateway,
1443                                         RT_SCOPE_UNIVERSE);
1444         memcpy(addr, &src, 4);
1445 }
1446
1447 #ifdef CONFIG_NET_CLS_ROUTE
1448 static void set_class_tag(struct rtable *rt, u32 tag)
1449 {
1450         if (!(rt->u.dst.tclassid & 0xFFFF))
1451                 rt->u.dst.tclassid |= tag & 0xFFFF;
1452         if (!(rt->u.dst.tclassid & 0xFFFF0000))
1453                 rt->u.dst.tclassid |= tag & 0xFFFF0000;
1454 }
1455 #endif
1456
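/*
 * Fill in the nexthop-derived fields of a new cache entry: the gateway,
 * the metrics copied from the FIB info (with defaults for MTU, hop limit
 * and advmss when the route does not specify them, and upper clamps on
 * MTU and advmss), the traffic-class tag and the route type.
 */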
1457 static void rt_set_nexthop(struct rtable *rt, struct fib_result *res, u32 itag)
1458 {
1459         struct fib_info *fi = res->fi;
1460
1461         if (fi) {
1462                 if (FIB_RES_GW(*res) &&
1463                     FIB_RES_NH(*res).nh_scope == RT_SCOPE_LINK)
1464                         rt->rt_gateway = FIB_RES_GW(*res);
1465                 memcpy(rt->u.dst.metrics, fi->fib_metrics,
1466                        sizeof(rt->u.dst.metrics));
1467                 if (fi->fib_mtu == 0) {
1468                         rt->u.dst.metrics[RTAX_MTU-1] = rt->u.dst.dev->mtu;
1469                         if (rt->u.dst.metrics[RTAX_LOCK-1] & (1 << RTAX_MTU) &&
1470                             rt->rt_gateway != rt->rt_dst &&
1471                             rt->u.dst.dev->mtu > 576)
1472                                 rt->u.dst.metrics[RTAX_MTU-1] = 576;
1473                 }
1474 #ifdef CONFIG_NET_CLS_ROUTE
1475                 rt->u.dst.tclassid = FIB_RES_NH(*res).nh_tclassid;
1476 #endif
1477         } else
1478                 rt->u.dst.metrics[RTAX_MTU-1]= rt->u.dst.dev->mtu;
1479
1480         if (rt->u.dst.metrics[RTAX_HOPLIMIT-1] == 0)
1481                 rt->u.dst.metrics[RTAX_HOPLIMIT-1] = sysctl_ip_default_ttl;
1482         if (rt->u.dst.metrics[RTAX_MTU-1] > IP_MAX_MTU)
1483                 rt->u.dst.metrics[RTAX_MTU-1] = IP_MAX_MTU;
1484         if (rt->u.dst.metrics[RTAX_ADVMSS-1] == 0)
1485                 rt->u.dst.metrics[RTAX_ADVMSS-1] = max_t(unsigned int, rt->u.dst.dev->mtu - 40,
1486                                        ip_rt_min_advmss);
1487         if (rt->u.dst.metrics[RTAX_ADVMSS-1] > 65535 - 40)
1488                 rt->u.dst.metrics[RTAX_ADVMSS-1] = 65535 - 40;
1489
1490 #ifdef CONFIG_NET_CLS_ROUTE
1491 #ifdef CONFIG_IP_MULTIPLE_TABLES
1492         set_class_tag(rt, fib_rules_tclass(res));
1493 #endif
1494         set_class_tag(rt, itag);
1495 #endif
1496         rt->rt_type = res->type;
1497 }
1498
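/*
 * Build and hash a cache entry for a multicast packet received on @dev.
 * @our is true when the local host has joined the group, in which case
 * the packet is delivered locally via ip_local_deliver().
 */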
1499 static int ip_route_input_mc(struct sk_buff *skb, __be32 daddr, __be32 saddr,
1500                                 u8 tos, struct net_device *dev, int our)
1501 {
1502         unsigned hash;
1503         struct rtable *rth;
1504         __be32 spec_dst;
1505         struct in_device *in_dev = in_dev_get(dev);
1506         u32 itag = 0;
1507
1508         /* Primary sanity checks. */
1509
1510         if (in_dev == NULL)
1511                 return -EINVAL;
1512
1513         if (MULTICAST(saddr) || BADCLASS(saddr) || LOOPBACK(saddr) ||
1514             skb->protocol != htons(ETH_P_IP))
1515                 goto e_inval;
1516
1517         if (ZERONET(saddr)) {
1518                 if (!LOCAL_MCAST(daddr))
1519                         goto e_inval;
1520                 spec_dst = inet_select_addr(dev, 0, RT_SCOPE_LINK);
1521         } else if (fib_validate_source(saddr, 0, tos, 0,
1522                                         dev, &spec_dst, &itag) < 0)
1523                 goto e_inval;
1524
1525         rth = dst_alloc(&ipv4_dst_ops);
1526         if (!rth)
1527                 goto e_nobufs;
1528
1529         rth->u.dst.output= ip_rt_bug;
1530
1531         atomic_set(&rth->u.dst.__refcnt, 1);
1532         rth->u.dst.flags= DST_HOST;
1533         if (IN_DEV_CONF_GET(in_dev, NOPOLICY))
1534                 rth->u.dst.flags |= DST_NOPOLICY;
1535         rth->fl.fl4_dst = daddr;
1536         rth->rt_dst     = daddr;
1537         rth->fl.fl4_tos = tos;
1538         rth->fl.mark    = skb->mark;
1539         rth->fl.fl4_src = saddr;
1540         rth->rt_src     = saddr;
1541 #ifdef CONFIG_NET_CLS_ROUTE
1542         rth->u.dst.tclassid = itag;
1543 #endif
1544         rth->rt_iif     =
1545         rth->fl.iif     = dev->ifindex;
1546         rth->u.dst.dev  = init_net.loopback_dev;
1547         dev_hold(rth->u.dst.dev);
1548         rth->idev       = in_dev_get(rth->u.dst.dev);
1549         rth->fl.oif     = 0;
1550         rth->rt_gateway = daddr;
1551         rth->rt_spec_dst= spec_dst;
1552         rth->rt_type    = RTN_MULTICAST;
1553         rth->rt_flags   = RTCF_MULTICAST;
1554         if (our) {
1555                 rth->u.dst.input= ip_local_deliver;
1556                 rth->rt_flags |= RTCF_LOCAL;
1557         }
1558
1559 #ifdef CONFIG_IP_MROUTE
1560         if (!LOCAL_MCAST(daddr) && IN_DEV_MFORWARD(in_dev))
1561                 rth->u.dst.input = ip_mr_input;
1562 #endif
1563         RT_CACHE_STAT_INC(in_slow_mc);
1564
1565         in_dev_put(in_dev);
1566         hash = rt_hash(daddr, saddr, dev->ifindex);
1567         return rt_intern_hash(hash, rth, (struct rtable**) &skb->dst);
1568
1569 e_nobufs:
1570         in_dev_put(in_dev);
1571         return -ENOBUFS;
1572
1573 e_inval:
1574         in_dev_put(in_dev);
1575         return -EINVAL;
1576 }
1577
1578
1579 static void ip_handle_martian_source(struct net_device *dev,
1580                                      struct in_device *in_dev,
1581                                      struct sk_buff *skb,
1582                                      __be32 daddr,
1583                                      __be32 saddr)
1584 {
1585         RT_CACHE_STAT_INC(in_martian_src);
1586 #ifdef CONFIG_IP_ROUTE_VERBOSE
1587         if (IN_DEV_LOG_MARTIANS(in_dev) && net_ratelimit()) {
1588                 /*
1589                  *      RFC1812 recommendation: if the source is martian,
1590                  *      the only hint is the MAC header.
1591                  */
1592                 printk(KERN_WARNING "martian source %u.%u.%u.%u from "
1593                         "%u.%u.%u.%u, on dev %s\n",
1594                         NIPQUAD(daddr), NIPQUAD(saddr), dev->name);
1595                 if (dev->hard_header_len && skb_mac_header_was_set(skb)) {
1596                         int i;
1597                         const unsigned char *p = skb_mac_header(skb);
1598                         printk(KERN_WARNING "ll header: ");
1599                         for (i = 0; i < dev->hard_header_len; i++, p++) {
1600                                 printk("%02x", *p);
1601                                 if (i < (dev->hard_header_len - 1))
1602                                         printk(":");
1603                         }
1604                         printk("\n");
1605                 }
1606         }
1607 #endif
1608 }
1609
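/*
 * Build a forwarding cache entry for an input route: validate the source
 * address against the FIB (logging martians), decide whether an ICMP
 * redirect should be suggested (RTCF_DOREDIRECT), and wire the entry to
 * ip_forward()/ip_output().
 */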
1610 static inline int __mkroute_input(struct sk_buff *skb,
1611                                   struct fib_result* res,
1612                                   struct in_device *in_dev,
1613                                   __be32 daddr, __be32 saddr, u32 tos,
1614                                   struct rtable **result)
1615 {
1616
1617         struct rtable *rth;
1618         int err;
1619         struct in_device *out_dev;
1620         unsigned flags = 0;
1621         __be32 spec_dst;
1622         u32 itag;
1623
1624         /* get a working reference to the output device */
1625         out_dev = in_dev_get(FIB_RES_DEV(*res));
1626         if (out_dev == NULL) {
1627                 if (net_ratelimit())
1628                         printk(KERN_CRIT "Bug in ip_route_input" \
1629                                "_slow(). Please, report\n");
1630                 return -EINVAL;
1631         }
1632
1633
1634         err = fib_validate_source(saddr, daddr, tos, FIB_RES_OIF(*res),
1635                                   in_dev->dev, &spec_dst, &itag);
1636         if (err < 0) {
1637                 ip_handle_martian_source(in_dev->dev, in_dev, skb, daddr,
1638                                          saddr);
1639
1640                 err = -EINVAL;
1641                 goto cleanup;
1642         }
1643
1644         if (err)
1645                 flags |= RTCF_DIRECTSRC;
1646
1647         if (out_dev == in_dev && err && !(flags & (RTCF_NAT | RTCF_MASQ)) &&
1648             (IN_DEV_SHARED_MEDIA(out_dev) ||
1649              inet_addr_onlink(out_dev, saddr, FIB_RES_GW(*res))))
1650                 flags |= RTCF_DOREDIRECT;
1651
1652         if (skb->protocol != htons(ETH_P_IP)) {
1653                 /* Not IP (i.e. ARP). Do not create a route if it is
1654                  * invalid for proxy arp. DNAT routes are always valid.
1655                  */
1656                 if (out_dev == in_dev && !(flags & RTCF_DNAT)) {
1657                         err = -EINVAL;
1658                         goto cleanup;
1659                 }
1660         }
1661
1662
1663         rth = dst_alloc(&ipv4_dst_ops);
1664         if (!rth) {
1665                 err = -ENOBUFS;
1666                 goto cleanup;
1667         }
1668
1669         atomic_set(&rth->u.dst.__refcnt, 1);
1670         rth->u.dst.flags= DST_HOST;
1671         if (IN_DEV_CONF_GET(in_dev, NOPOLICY))
1672                 rth->u.dst.flags |= DST_NOPOLICY;
1673         if (IN_DEV_CONF_GET(out_dev, NOXFRM))
1674                 rth->u.dst.flags |= DST_NOXFRM;
1675         rth->fl.fl4_dst = daddr;
1676         rth->rt_dst     = daddr;
1677         rth->fl.fl4_tos = tos;
1678         rth->fl.mark    = skb->mark;
1679         rth->fl.fl4_src = saddr;
1680         rth->rt_src     = saddr;
1681         rth->rt_gateway = daddr;
1682         rth->rt_iif     =
1683                 rth->fl.iif     = in_dev->dev->ifindex;
1684         rth->u.dst.dev  = (out_dev)->dev;
1685         dev_hold(rth->u.dst.dev);
1686         rth->idev       = in_dev_get(rth->u.dst.dev);
1687         rth->fl.oif     = 0;
1688         rth->rt_spec_dst= spec_dst;
1689
1690         rth->u.dst.input = ip_forward;
1691         rth->u.dst.output = ip_output;
1692
1693         rt_set_nexthop(rth, res, itag);
1694
1695         rth->rt_flags = flags;
1696
1697         *result = rth;
1698         err = 0;
1699  cleanup:
1700         /* release the working reference to the output device */
1701         in_dev_put(out_dev);
1702         return err;
1703 }
1704
1705 static inline int ip_mkroute_input(struct sk_buff *skb,
1706                                    struct fib_result* res,
1707                                    const struct flowi *fl,
1708                                    struct in_device *in_dev,
1709                                    __be32 daddr, __be32 saddr, u32 tos)
1710 {
1711         struct rtable* rth = NULL;
1712         int err;
1713         unsigned hash;
1714
1715 #ifdef CONFIG_IP_ROUTE_MULTIPATH
1716         if (res->fi && res->fi->fib_nhs > 1 && fl->oif == 0)
1717                 fib_select_multipath(fl, res);
1718 #endif
1719
1720         /* create a routing cache entry */
1721         err = __mkroute_input(skb, res, in_dev, daddr, saddr, tos, &rth);
1722         if (err)
1723                 return err;
1724
1725         /* put it into the cache */
1726         hash = rt_hash(daddr, saddr, fl->iif);
1727         return rt_intern_hash(hash, rth, (struct rtable**)&skb->dst);
1728 }
1729
1730 /*
1731  *      NOTE. We drop all packets that have local source
1732  *      addresses, because every properly looped-back packet
1733  *      must already have the correct destination attached by the output routine.
1734  *
1735  *      Such an approach solves two big problems:
1736  *      1. Non-simplex devices are handled properly.
1737  *      2. IP spoofing attempts are filtered with a 100% guarantee.
1738  */
1739
1740 static int ip_route_input_slow(struct sk_buff *skb, __be32 daddr, __be32 saddr,
1741                                u8 tos, struct net_device *dev)
1742 {
1743         struct fib_result res;
1744         struct in_device *in_dev = in_dev_get(dev);
1745         struct flowi fl = { .nl_u = { .ip4_u =
1746                                       { .daddr = daddr,
1747                                         .saddr = saddr,
1748                                         .tos = tos,
1749                                         .scope = RT_SCOPE_UNIVERSE,
1750                                       } },
1751                             .mark = skb->mark,
1752                             .iif = dev->ifindex };
1753         unsigned        flags = 0;
1754         u32             itag = 0;
1755         struct rtable * rth;
1756         unsigned        hash;
1757         __be32          spec_dst;
1758         int             err = -EINVAL;
1759         int             free_res = 0;
1760
1761         /* IP on this device is disabled. */
1762
1763         if (!in_dev)
1764                 goto out;
1765
1766         /* Check for the most weird martians, which cannot be detected
1767            by fib_lookup.
1768          */
1769
1770         if (MULTICAST(saddr) || BADCLASS(saddr) || LOOPBACK(saddr))
1771                 goto martian_source;
1772
1773         if (daddr == htonl(0xFFFFFFFF) || (saddr == 0 && daddr == 0))
1774                 goto brd_input;
1775
1776         /* Accept zero addresses only for limited broadcast;
1777          * I do not even know whether to fix this or not. Waiting for complaints :-)
1778          */
1779         if (ZERONET(saddr))
1780                 goto martian_source;
1781
1782         if (BADCLASS(daddr) || ZERONET(daddr) || LOOPBACK(daddr))
1783                 goto martian_destination;
1784
1785         /*
1786          *      Now we are ready to route the packet.
1787          */
1788         if ((err = fib_lookup(&fl, &res)) != 0) {
1789                 if (!IN_DEV_FORWARD(in_dev))
1790                         goto e_hostunreach;
1791                 goto no_route;
1792         }
1793         free_res = 1;
1794
1795         RT_CACHE_STAT_INC(in_slow_tot);
1796
1797         if (res.type == RTN_BROADCAST)
1798                 goto brd_input;
1799
1800         if (res.type == RTN_LOCAL) {
1801                 int result;
1802                 result = fib_validate_source(saddr, daddr, tos,
1803                                              init_net.loopback_dev->ifindex,
1804                                              dev, &spec_dst, &itag);
1805                 if (result < 0)
1806                         goto martian_source;
1807                 if (result)
1808                         flags |= RTCF_DIRECTSRC;
1809                 spec_dst = daddr;
1810                 goto local_input;
1811         }
1812
1813         if (!IN_DEV_FORWARD(in_dev))
1814                 goto e_hostunreach;
1815         if (res.type != RTN_UNICAST)
1816                 goto martian_destination;
1817
1818         err = ip_mkroute_input(skb, &res, &fl, in_dev, daddr, saddr, tos);
1819 done:
1820         in_dev_put(in_dev);
1821         if (free_res)
1822                 fib_res_put(&res);
1823 out:    return err;
1824
1825 brd_input:
1826         if (skb->protocol != htons(ETH_P_IP))
1827                 goto e_inval;
1828
1829         if (ZERONET(saddr))
1830                 spec_dst = inet_select_addr(dev, 0, RT_SCOPE_LINK);
1831         else {
1832                 err = fib_validate_source(saddr, 0, tos, 0, dev, &spec_dst,
1833                                           &itag);
1834                 if (err < 0)
1835                         goto martian_source;
1836                 if (err)
1837                         flags |= RTCF_DIRECTSRC;
1838         }
1839         flags |= RTCF_BROADCAST;
1840         res.type = RTN_BROADCAST;
1841         RT_CACHE_STAT_INC(in_brd);
1842
1843 local_input:
1844         rth = dst_alloc(&ipv4_dst_ops);
1845         if (!rth)
1846                 goto e_nobufs;
1847
1848         rth->u.dst.output= ip_rt_bug;
1849
1850         atomic_set(&rth->u.dst.__refcnt, 1);
1851         rth->u.dst.flags= DST_HOST;
1852         if (IN_DEV_CONF_GET(in_dev, NOPOLICY))
1853                 rth->u.dst.flags |= DST_NOPOLICY;
1854         rth->fl.fl4_dst = daddr;
1855         rth->rt_dst     = daddr;
1856         rth->fl.fl4_tos = tos;
1857         rth->fl.mark    = skb->mark;
1858         rth->fl.fl4_src = saddr;
1859         rth->rt_src     = saddr;
1860 #ifdef CONFIG_NET_CLS_ROUTE
1861         rth->u.dst.tclassid = itag;
1862 #endif
1863         rth->rt_iif     =
1864         rth->fl.iif     = dev->ifindex;
1865         rth->u.dst.dev  = init_net.loopback_dev;
1866         dev_hold(rth->u.dst.dev);
1867         rth->idev       = in_dev_get(rth->u.dst.dev);
1868         rth->rt_gateway = daddr;
1869         rth->rt_spec_dst= spec_dst;
1870         rth->u.dst.input= ip_local_deliver;
1871         rth->rt_flags   = flags|RTCF_LOCAL;
1872         if (res.type == RTN_UNREACHABLE) {
1873                 rth->u.dst.input= ip_error;
1874                 rth->u.dst.error= -err;
1875                 rth->rt_flags   &= ~RTCF_LOCAL;
1876         }
1877         rth->rt_type    = res.type;
1878         hash = rt_hash(daddr, saddr, fl.iif);
1879         err = rt_intern_hash(hash, rth, (struct rtable**)&skb->dst);
1880         goto done;
1881
1882 no_route:
1883         RT_CACHE_STAT_INC(in_no_route);
1884         spec_dst = inet_select_addr(dev, 0, RT_SCOPE_UNIVERSE);
1885         res.type = RTN_UNREACHABLE;
1886         if (err == -ESRCH)
1887                 err = -ENETUNREACH;
1888         goto local_input;
1889
1890         /*
1891          *      Do not cache martian addresses: they should be logged (RFC1812)
1892          */
1893 martian_destination:
1894         RT_CACHE_STAT_INC(in_martian_dst);
1895 #ifdef CONFIG_IP_ROUTE_VERBOSE
1896         if (IN_DEV_LOG_MARTIANS(in_dev) && net_ratelimit())
1897                 printk(KERN_WARNING "martian destination %u.%u.%u.%u from "
1898                         "%u.%u.%u.%u, dev %s\n",
1899                         NIPQUAD(daddr), NIPQUAD(saddr), dev->name);
1900 #endif
1901
1902 e_hostunreach:
1903         err = -EHOSTUNREACH;
1904         goto done;
1905
1906 e_inval:
1907         err = -EINVAL;
1908         goto done;
1909
1910 e_nobufs:
1911         err = -ENOBUFS;
1912         goto done;
1913
1914 martian_source:
1915         ip_handle_martian_source(dev, in_dev, skb, daddr, saddr);
1916         goto e_inval;
1917 }
1918
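/*
 * Route an incoming packet: try the cache first, then fall back to the
 * slow path.  A sketch of the typical caller (e.g. ip_rcv_finish(); iph
 * here stands for ip_hdr(skb)):
 *
 *	if (ip_route_input(skb, iph->daddr, iph->saddr, iph->tos, skb->dev))
 *		goto drop;
 *	return dst_input(skb);
 *
 * i.e. on success skb->dst is set and dst_input() dispatches to the input
 * handler installed on the route (ip_local_deliver, ip_forward, ...).
 */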
1919 int ip_route_input(struct sk_buff *skb, __be32 daddr, __be32 saddr,
1920                    u8 tos, struct net_device *dev)
1921 {
1922         struct rtable * rth;
1923         unsigned        hash;
1924         int iif = dev->ifindex;
1925
1926         tos &= IPTOS_RT_MASK;
1927         hash = rt_hash(daddr, saddr, iif);
1928
1929         rcu_read_lock();
1930         for (rth = rcu_dereference(rt_hash_table[hash].chain); rth;
1931              rth = rcu_dereference(rth->u.dst.rt_next)) {
1932                 if (rth->fl.fl4_dst == daddr &&
1933                     rth->fl.fl4_src == saddr &&
1934                     rth->fl.iif == iif &&
1935                     rth->fl.oif == 0 &&
1936                     rth->fl.mark == skb->mark &&
1937                     rth->fl.fl4_tos == tos) {
1938                         dst_use(&rth->u.dst, jiffies);
1939                         RT_CACHE_STAT_INC(in_hit);
1940                         rcu_read_unlock();
1941                         skb->dst = (struct dst_entry*)rth;
1942                         return 0;
1943                 }
1944                 RT_CACHE_STAT_INC(in_hlist_search);
1945         }
1946         rcu_read_unlock();
1947
1948         /* Multicast recognition logic is moved from the route cache to here.
1949            The problem was that too many Ethernet cards have broken/missing
1950            hardware multicast filters :-( As a result, a host on a multicast
1951            network acquires a lot of useless route cache entries, e.g. for
1952            SDR messages from all over the world. Now we try to get rid of them.
1953            Really, provided the software IP multicast filter is organized
1954            reasonably (at least, hashed), it does not result in a slowdown
1955            compared with route cache reject entries.
1956            Note that multicast routers are not affected, because
1957            a route cache entry is created eventually.
1958          */
1959         if (MULTICAST(daddr)) {
1960                 struct in_device *in_dev;
1961
1962                 rcu_read_lock();
1963                 if ((in_dev = __in_dev_get_rcu(dev)) != NULL) {
1964                         int our = ip_check_mc(in_dev, daddr, saddr,
1965                                 ip_hdr(skb)->protocol);
1966                         if (our
1967 #ifdef CONFIG_IP_MROUTE
1968                             || (!LOCAL_MCAST(daddr) && IN_DEV_MFORWARD(in_dev))
1969 #endif
1970                             ) {
1971                                 rcu_read_unlock();
1972                                 return ip_route_input_mc(skb, daddr, saddr,
1973                                                          tos, dev, our);
1974                         }
1975                 }
1976                 rcu_read_unlock();
1977                 return -EINVAL;
1978         }
1979         return ip_route_input_slow(skb, daddr, saddr, tos, dev);
1980 }
1981
1982 static inline int __mkroute_output(struct rtable **result,
1983                                    struct fib_result* res,
1984                                    const struct flowi *fl,
1985                                    const struct flowi *oldflp,
1986                                    struct net_device *dev_out,
1987                                    unsigned flags)
1988 {
1989         struct rtable *rth;
1990         struct in_device *in_dev;
1991         u32 tos = RT_FL_TOS(oldflp);
1992         int err = 0;
1993
1994         if (LOOPBACK(fl->fl4_src) && !(dev_out->flags&IFF_LOOPBACK))
1995                 return -EINVAL;
1996
1997         if (fl->fl4_dst == htonl(0xFFFFFFFF))
1998                 res->type = RTN_BROADCAST;
1999         else if (MULTICAST(fl->fl4_dst))
2000                 res->type = RTN_MULTICAST;
2001         else if (BADCLASS(fl->fl4_dst) || ZERONET(fl->fl4_dst))
2002                 return -EINVAL;
2003
2004         if (dev_out->flags & IFF_LOOPBACK)
2005                 flags |= RTCF_LOCAL;
2006
2007         /* get a working reference to the inet device */
2008         in_dev = in_dev_get(dev_out);
2009         if (!in_dev)
2010                 return -EINVAL;
2011
2012         if (res->type == RTN_BROADCAST) {
2013                 flags |= RTCF_BROADCAST | RTCF_LOCAL;
2014                 if (res->fi) {
2015                         fib_info_put(res->fi);
2016                         res->fi = NULL;
2017                 }
2018         } else if (res->type == RTN_MULTICAST) {
2019                 flags |= RTCF_MULTICAST|RTCF_LOCAL;
2020                 if (!ip_check_mc(in_dev, oldflp->fl4_dst, oldflp->fl4_src,
2021                                  oldflp->proto))
2022                         flags &= ~RTCF_LOCAL;
2023                 /* If a multicast route does not exist, use
2024                    the default one, but do not gateway in this case.
2025                    Yes, it is a hack.
2026                  */
2027                 if (res->fi && res->prefixlen < 4) {
2028                         fib_info_put(res->fi);
2029                         res->fi = NULL;
2030                 }
2031         }
2032
2033
2034         rth = dst_alloc(&ipv4_dst_ops);
2035         if (!rth) {
2036                 err = -ENOBUFS;
2037                 goto cleanup;
2038         }
2039
2040         atomic_set(&rth->u.dst.__refcnt, 1);
2041         rth->u.dst.flags= DST_HOST;
2042         if (IN_DEV_CONF_GET(in_dev, NOXFRM))
2043                 rth->u.dst.flags |= DST_NOXFRM;
2044         if (IN_DEV_CONF_GET(in_dev, NOPOLICY))
2045                 rth->u.dst.flags |= DST_NOPOLICY;
2046
2047         rth->fl.fl4_dst = oldflp->fl4_dst;
2048         rth->fl.fl4_tos = tos;
2049         rth->fl.fl4_src = oldflp->fl4_src;
2050         rth->fl.oif     = oldflp->oif;
2051         rth->fl.mark    = oldflp->mark;
2052         rth->rt_dst     = fl->fl4_dst;
2053         rth->rt_src     = fl->fl4_src;
2054         rth->rt_iif     = oldflp->oif ? : dev_out->ifindex;
2055         /* get references to the devices that are to be held by the routing
2056            cache entry */
2057         rth->u.dst.dev  = dev_out;
2058         dev_hold(dev_out);
2059         rth->idev       = in_dev_get(dev_out);
2060         rth->rt_gateway = fl->fl4_dst;
2061         rth->rt_spec_dst= fl->fl4_src;
2062
2063         rth->u.dst.output=ip_output;
2064
2065         RT_CACHE_STAT_INC(out_slow_tot);
2066
2067         if (flags & RTCF_LOCAL) {
2068                 rth->u.dst.input = ip_local_deliver;
2069                 rth->rt_spec_dst = fl->fl4_dst;
2070         }
2071         if (flags & (RTCF_BROADCAST | RTCF_MULTICAST)) {
2072                 rth->rt_spec_dst = fl->fl4_src;
2073                 if (flags & RTCF_LOCAL &&
2074                     !(dev_out->flags & IFF_LOOPBACK)) {
2075                         rth->u.dst.output = ip_mc_output;
2076                         RT_CACHE_STAT_INC(out_slow_mc);
2077                 }
2078 #ifdef CONFIG_IP_MROUTE
2079                 if (res->type == RTN_MULTICAST) {
2080                         if (IN_DEV_MFORWARD(in_dev) &&
2081                             !LOCAL_MCAST(oldflp->fl4_dst)) {
2082                                 rth->u.dst.input = ip_mr_input;
2083                                 rth->u.dst.output = ip_mc_output;
2084                         }
2085                 }
2086 #endif
2087         }
2088
2089         rt_set_nexthop(rth, res, 0);
2090
2091         rth->rt_flags = flags;
2092
2093         *result = rth;
2094  cleanup:
2095         /* release the working reference to the inet device */
2096         in_dev_put(in_dev);
2097
2098         return err;
2099 }
2100
2101 static inline int ip_mkroute_output(struct rtable **rp,
2102                                     struct fib_result* res,
2103                                     const struct flowi *fl,
2104                                     const struct flowi *oldflp,
2105                                     struct net_device *dev_out,
2106                                     unsigned flags)
2107 {
2108         struct rtable *rth = NULL;
2109         int err = __mkroute_output(&rth, res, fl, oldflp, dev_out, flags);
2110         unsigned hash;
2111         if (err == 0) {
2112                 hash = rt_hash(oldflp->fl4_dst, oldflp->fl4_src, oldflp->oif);
2113                 err = rt_intern_hash(hash, rth, rp);
2114         }
2115
2116         return err;
2117 }
2118
2119 /*
2120  * Major route resolver routine.
2121  */
2122
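/*
 * Roughly: sanity-check any requested source address, resolve the output
 * device from saddr/oif where possible, consult the FIB, special-case
 * local/broadcast/multicast destinations, pick a nexthop (multipath or
 * default route), then build and hash the cache entry in
 * ip_mkroute_output().
 */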
2123 static int ip_route_output_slow(struct rtable **rp, const struct flowi *oldflp)
2124 {
2125         u32 tos = RT_FL_TOS(oldflp);
2126         struct flowi fl = { .nl_u = { .ip4_u =
2127                                       { .daddr = oldflp->fl4_dst,
2128                                         .saddr = oldflp->fl4_src,
2129                                         .tos = tos & IPTOS_RT_MASK,
2130                                         .scope = ((tos & RTO_ONLINK) ?
2131                                                   RT_SCOPE_LINK :
2132                                                   RT_SCOPE_UNIVERSE),
2133                                       } },
2134                             .mark = oldflp->mark,
2135                             .iif = init_net.loopback_dev->ifindex,
2136                             .oif = oldflp->oif };
2137         struct fib_result res;
2138         unsigned flags = 0;
2139         struct net_device *dev_out = NULL;
2140         int free_res = 0;
2141         int err;
2142
2143
2144         res.fi          = NULL;
2145 #ifdef CONFIG_IP_MULTIPLE_TABLES
2146         res.r           = NULL;
2147 #endif
2148
2149         if (oldflp->fl4_src) {
2150                 err = -EINVAL;
2151                 if (MULTICAST(oldflp->fl4_src) ||
2152                     BADCLASS(oldflp->fl4_src) ||
2153                     ZERONET(oldflp->fl4_src))
2154                         goto out;
2155
2156                 /* It is equivalent to inet_addr_type(saddr) == RTN_LOCAL */
2157                 dev_out = ip_dev_find(oldflp->fl4_src);
2158                 if (dev_out == NULL)
2159                         goto out;
2160
2161                 /* I removed the check for oif == dev_out->oif here.
2162                    It was wrong for two reasons:
2163                    1. ip_dev_find(saddr) can return the wrong iface if saddr is
2164                       assigned to multiple interfaces.
2165                    2. Moreover, we are allowed to send packets with the saddr
2166                       of another iface. --ANK
2167                  */
2168
2169                 if (oldflp->oif == 0
2170                     && (MULTICAST(oldflp->fl4_dst) || oldflp->fl4_dst == htonl(0xFFFFFFFF))) {
2171                         /* Special hack: the user can direct multicasts
2172                            and limited broadcast via the necessary interface
2173                            without fiddling with IP_MULTICAST_IF or IP_PKTINFO.
2174                            This hack is not just for fun; it allows
2175                            vic, vat and friends to work.
2176                            They bind the socket to loopback, set the ttl to zero
2177                            and expect that it will work.
2178                            From the viewpoint of the routing cache they are broken,
2179                            because we are not allowed to build a multicast path
2180                            with a loopback source addr (look, the routing cache
2181                            cannot know that the ttl is zero, so the packet
2182                            will not leave this host and the route is valid).
2183                            Luckily, this hack is a good workaround.
2184                          */
2185
2186                         fl.oif = dev_out->ifindex;
2187                         goto make_route;
2188                 }
2189                 if (dev_out)
2190                         dev_put(dev_out);
2191                 dev_out = NULL;
2192         }
2193
2194
2195         if (oldflp->oif) {
2196                 dev_out = dev_get_by_index(&init_net, oldflp->oif);
2197                 err = -ENODEV;
2198                 if (dev_out == NULL)
2199                         goto out;
2200
2201                 /* RACE: Check return value of inet_select_addr instead. */
2202                 if (__in_dev_get_rtnl(dev_out) == NULL) {
2203                         dev_put(dev_out);
2204                         goto out;       /* Wrong error code */
2205                 }
2206
2207                 if (LOCAL_MCAST(oldflp->fl4_dst) || oldflp->fl4_dst == htonl(0xFFFFFFFF)) {
2208                         if (!fl.fl4_src)
2209                                 fl.fl4_src = inet_select_addr(dev_out, 0,
2210                                                               RT_SCOPE_LINK);
2211                         goto make_route;
2212                 }
2213                 if (!fl.fl4_src) {
2214                         if (MULTICAST(oldflp->fl4_dst))
2215                                 fl.fl4_src = inet_select_addr(dev_out, 0,
2216                                                               fl.fl4_scope);
2217                         else if (!oldflp->fl4_dst)
2218                                 fl.fl4_src = inet_select_addr(dev_out, 0,
2219                                                               RT_SCOPE_HOST);
2220                 }
2221         }
2222
2223         if (!fl.fl4_dst) {
2224                 fl.fl4_dst = fl.fl4_src;
2225                 if (!fl.fl4_dst)
2226                         fl.fl4_dst = fl.fl4_src = htonl(INADDR_LOOPBACK);
2227                 if (dev_out)
2228                         dev_put(dev_out);
2229                 dev_out = init_net.loopback_dev;
2230                 dev_hold(dev_out);
2231                 fl.oif = init_net.loopback_dev->ifindex;
2232                 res.type = RTN_LOCAL;
2233                 flags |= RTCF_LOCAL;
2234                 goto make_route;
2235         }
2236
2237         if (fib_lookup(&fl, &res)) {
2238                 res.fi = NULL;
2239                 if (oldflp->oif) {
2240                         /* Apparently, the routing tables are wrong. Assume
2241                            that the destination is on-link.
2242
2243                            WHY? DW.
2244                            Because we are allowed to send to an iface
2245                            even if it has NO routes and NO assigned
2246                            addresses. When oif is specified, the routing
2247                            tables are looked up with only one purpose:
2248                            to catch whether the destination is gatewayed, rather
2249                            than direct. Moreover, if MSG_DONTROUTE is set,
2250                            we send the packet, ignoring both routing tables
2251                            and ifaddr state. --ANK
2252
2253
2254                            We could do it even if oif is unknown
2255                            (likely IPv6 does), but we do not.
2256                          */
2257
2258                         if (fl.fl4_src == 0)
2259                                 fl.fl4_src = inet_select_addr(dev_out, 0,
2260                                                               RT_SCOPE_LINK);
2261                         res.type = RTN_UNICAST;
2262                         goto make_route;
2263                 }
2264                 if (dev_out)
2265                         dev_put(dev_out);
2266                 err = -ENETUNREACH;
2267                 goto out;
2268         }
2269         free_res = 1;
2270
2271         if (res.type == RTN_LOCAL) {
2272                 if (!fl.fl4_src)
2273                         fl.fl4_src = fl.fl4_dst;
2274                 if (dev_out)
2275                         dev_put(dev_out);
2276                 dev_out = init_net.loopback_dev;
2277                 dev_hold(dev_out);
2278                 fl.oif = dev_out->ifindex;
2279                 if (res.fi)
2280                         fib_info_put(res.fi);
2281                 res.fi = NULL;
2282                 flags |= RTCF_LOCAL;
2283                 goto make_route;
2284         }
2285
2286 #ifdef CONFIG_IP_ROUTE_MULTIPATH
2287         if (res.fi->fib_nhs > 1 && fl.oif == 0)
2288                 fib_select_multipath(&fl, &res);
2289         else
2290 #endif
2291         if (!res.prefixlen && res.type == RTN_UNICAST && !fl.oif)
2292                 fib_select_default(&fl, &res);
2293
2294         if (!fl.fl4_src)
2295                 fl.fl4_src = FIB_RES_PREFSRC(res);
2296
2297         if (dev_out)
2298                 dev_put(dev_out);
2299         dev_out = FIB_RES_DEV(res);
2300         dev_hold(dev_out);
2301         fl.oif = dev_out->ifindex;
2302
2303
2304 make_route:
2305         err = ip_mkroute_output(rp, &res, &fl, oldflp, dev_out, flags);
2306
2307
2308         if (free_res)
2309                 fib_res_put(&res);
2310         if (dev_out)
2311                 dev_put(dev_out);
2312 out:    return err;
2313 }
2314
2315 int __ip_route_output_key(struct rtable **rp, const struct flowi *flp)
2316 {
2317         unsigned hash;
2318         struct rtable *rth;
2319
2320         hash = rt_hash(flp->fl4_dst, flp->fl4_src, flp->oif);
2321
2322         rcu_read_lock_bh();
2323         for (rth = rcu_dereference(rt_hash_table[hash].chain); rth;
2324                 rth = rcu_dereference(rth->u.dst.rt_next)) {
2325                 if (rth->fl.fl4_dst == flp->fl4_dst &&
2326                     rth->fl.fl4_src == flp->fl4_src &&
2327                     rth->fl.iif == 0 &&
2328                     rth->fl.oif == flp->oif &&
2329                     rth->fl.mark == flp->mark &&
2330                     !((rth->fl.fl4_tos ^ flp->fl4_tos) &
2331                             (IPTOS_RT_MASK | RTO_ONLINK))) {
2332                         dst_use(&rth->u.dst, jiffies);
2333                         RT_CACHE_STAT_INC(out_hit);
2334                         rcu_read_unlock_bh();
2335                         *rp = rth;
2336                         return 0;
2337                 }
2338                 RT_CACHE_STAT_INC(out_hlist_search);
2339         }
2340         rcu_read_unlock_bh();
2341
2342         return ip_route_output_slow(rp, flp);
2343 }
2344
2345 EXPORT_SYMBOL_GPL(__ip_route_output_key);
2346
2347 static void ipv4_rt_blackhole_update_pmtu(struct dst_entry *dst, u32 mtu)
2348 {
2349 }
2350
2351 static struct dst_ops ipv4_dst_blackhole_ops = {
2352         .family                 =       AF_INET,
2353         .protocol               =       __constant_htons(ETH_P_IP),
2354         .destroy                =       ipv4_dst_destroy,
2355         .check                  =       ipv4_dst_check,
2356         .update_pmtu            =       ipv4_rt_blackhole_update_pmtu,
2357         .entry_size             =       sizeof(struct rtable),
2358 };
2359
2360
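/*
 * Replace *rp with a standalone copy whose input/output handlers simply
 * discard packets; used below when __xfrm_lookup() returns -EREMOTE.
 */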
2361 static int ipv4_dst_blackhole(struct rtable **rp, struct flowi *flp, struct sock *sk)
2362 {
2363         struct rtable *ort = *rp;
2364         struct rtable *rt = (struct rtable *)
2365                 dst_alloc(&ipv4_dst_blackhole_ops);
2366
2367         if (rt) {
2368                 struct dst_entry *new = &rt->u.dst;
2369
2370                 atomic_set(&new->__refcnt, 1);
2371                 new->__use = 1;
2372                 new->input = dst_discard;
2373                 new->output = dst_discard;
2374                 memcpy(new->metrics, ort->u.dst.metrics, RTAX_MAX*sizeof(u32));
2375
2376                 new->dev = ort->u.dst.dev;
2377                 if (new->dev)
2378                         dev_hold(new->dev);
2379
2380                 rt->fl = ort->fl;
2381
2382                 rt->idev = ort->idev;
2383                 if (rt->idev)
2384                         in_dev_hold(rt->idev);
2385                 rt->rt_flags = ort->rt_flags;
2386                 rt->rt_type = ort->rt_type;
2387                 rt->rt_dst = ort->rt_dst;
2388                 rt->rt_src = ort->rt_src;
2389                 rt->rt_iif = ort->rt_iif;
2390                 rt->rt_gateway = ort->rt_gateway;
2391                 rt->rt_spec_dst = ort->rt_spec_dst;
2392                 rt->peer = ort->peer;
2393                 if (rt->peer)
2394                         atomic_inc(&rt->peer->refcnt);
2395
2396                 dst_free(new);
2397         }
2398
2399         dst_release(&(*rp)->u.dst);
2400         *rp = rt;
2401         return (rt ? 0 : -ENOMEM);
2402 }
2403
2404 int ip_route_output_flow(struct rtable **rp, struct flowi *flp, struct sock *sk, int flags)
2405 {
2406         int err;
2407
2408         if ((err = __ip_route_output_key(rp, flp)) != 0)
2409                 return err;
2410
2411         if (flp->proto) {
2412                 if (!flp->fl4_src)
2413                         flp->fl4_src = (*rp)->rt_src;
2414                 if (!flp->fl4_dst)
2415                         flp->fl4_dst = (*rp)->rt_dst;
2416                 err = __xfrm_lookup((struct dst_entry **)rp, flp, sk, flags);
2417                 if (err == -EREMOTE)
2418                         err = ipv4_dst_blackhole(rp, flp, sk);
2419
2420                 return err;
2421         }
2422
2423         return 0;
2424 }
2425
2426 EXPORT_SYMBOL_GPL(ip_route_output_flow);
2427
2428 int ip_route_output_key(struct rtable **rp, struct flowi *flp)
2429 {
2430         return ip_route_output_flow(rp, flp, NULL, 0);
2431 }
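
/*
 * A minimal usage sketch (daddr/tos stand for the caller's values): fill
 * a flow key, ask for an output route, and drop the reference with
 * ip_rt_put() when done:
 *
 *	struct flowi fl = { .nl_u = { .ip4_u = { .daddr = daddr,
 *						 .tos = RT_TOS(tos) } } };
 *	struct rtable *rt;
 *
 *	if (ip_route_output_key(&rt, &fl))
 *		return -EHOSTUNREACH;
 *	...
 *	ip_rt_put(rt);
 */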
2432
2433 static int rt_fill_info(struct sk_buff *skb, u32 pid, u32 seq, int event,
2434                         int nowait, unsigned int flags)
2435 {
2436         struct rtable *rt = (struct rtable*)skb->dst;
2437         struct rtmsg *r;
2438         struct nlmsghdr *nlh;
2439         long expires;
2440         u32 id = 0, ts = 0, tsage = 0, error;
2441
2442         nlh = nlmsg_put(skb, pid, seq, event, sizeof(*r), flags);
2443         if (nlh == NULL)
2444                 return -EMSGSIZE;
2445
2446         r = nlmsg_data(nlh);
2447         r->rtm_family    = AF_INET;
2448         r->rtm_dst_len  = 32;
2449         r->rtm_src_len  = 0;
2450         r->rtm_tos      = rt->fl.fl4_tos;
2451         r->rtm_table    = RT_TABLE_MAIN;
2452         NLA_PUT_U32(skb, RTA_TABLE, RT_TABLE_MAIN);
2453         r->rtm_type     = rt->rt_type;
2454         r->rtm_scope    = RT_SCOPE_UNIVERSE;
2455         r->rtm_protocol = RTPROT_UNSPEC;
2456         r->rtm_flags    = (rt->rt_flags & ~0xFFFF) | RTM_F_CLONED;
2457         if (rt->rt_flags & RTCF_NOTIFY)
2458                 r->rtm_flags |= RTM_F_NOTIFY;
2459
2460         NLA_PUT_BE32(skb, RTA_DST, rt->rt_dst);
2461
2462         if (rt->fl.fl4_src) {
2463                 r->rtm_src_len = 32;
2464                 NLA_PUT_BE32(skb, RTA_SRC, rt->fl.fl4_src);
2465         }
2466         if (rt->u.dst.dev)
2467                 NLA_PUT_U32(skb, RTA_OIF, rt->u.dst.dev->ifindex);
2468 #ifdef CONFIG_NET_CLS_ROUTE
2469         if (rt->u.dst.tclassid)
2470                 NLA_PUT_U32(skb, RTA_FLOW, rt->u.dst.tclassid);
2471 #endif
2472         if (rt->fl.iif)
2473                 NLA_PUT_BE32(skb, RTA_PREFSRC, rt->rt_spec_dst);
2474         else if (rt->rt_src != rt->fl.fl4_src)
2475                 NLA_PUT_BE32(skb, RTA_PREFSRC, rt->rt_src);
2476
2477         if (rt->rt_dst != rt->rt_gateway)
2478                 NLA_PUT_BE32(skb, RTA_GATEWAY, rt->rt_gateway);
2479
2480         if (rtnetlink_put_metrics(skb, rt->u.dst.metrics) < 0)
2481                 goto nla_put_failure;
2482
2483         error = rt->u.dst.error;
2484         expires = rt->u.dst.expires ? rt->u.dst.expires - jiffies : 0;
2485         if (rt->peer) {
2486                 id = rt->peer->ip_id_count;
2487                 if (rt->peer->tcp_ts_stamp) {
2488                         ts = rt->peer->tcp_ts;
2489                         tsage = get_seconds() - rt->peer->tcp_ts_stamp;
2490                 }
2491         }
2492
2493         if (rt->fl.iif) {
2494 #ifdef CONFIG_IP_MROUTE
2495                 __be32 dst = rt->rt_dst;
2496
2497                 if (MULTICAST(dst) && !LOCAL_MCAST(dst) &&
2498                     IPV4_DEVCONF_ALL(MC_FORWARDING)) {
2499                         int err = ipmr_get_route(skb, r, nowait);
2500                         if (err <= 0) {
2501                                 if (!nowait) {
2502                                         if (err == 0)
2503                                                 return 0;
2504                                         goto nla_put_failure;
2505                                 } else {
2506                                         if (err == -EMSGSIZE)
2507                                                 goto nla_put_failure;
2508                                         error = err;
2509                                 }
2510                         }
2511                 } else
2512 #endif
2513                         NLA_PUT_U32(skb, RTA_IIF, rt->fl.iif);
2514         }
2515
2516         if (rtnl_put_cacheinfo(skb, &rt->u.dst, id, ts, tsage,
2517                                expires, error) < 0)
2518                 goto nla_put_failure;
2519
2520         return nlmsg_end(skb, nlh);
2521
2522 nla_put_failure:
2523         nlmsg_cancel(skb, nlh);
2524         return -EMSGSIZE;
2525 }
2526
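/*
 * RTM_GETROUTE handler: resolve the route for the requested src/dst
 * (an input route if an iif is given, an output route otherwise) and
 * reply with an RTM_NEWROUTE message built by rt_fill_info().
 */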
2527 static int inet_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr* nlh, void *arg)
2528 {
2529         struct rtmsg *rtm;
2530         struct nlattr *tb[RTA_MAX+1];
2531         struct rtable *rt = NULL;
2532         __be32 dst = 0;
2533         __be32 src = 0;
2534         u32 iif;
2535         int err;
2536         struct sk_buff *skb;
2537
2538         err = nlmsg_parse(nlh, sizeof(*rtm), tb, RTA_MAX, rtm_ipv4_policy);
2539         if (err < 0)
2540                 goto errout;
2541
2542         rtm = nlmsg_data(nlh);
2543
2544         skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
2545         if (skb == NULL) {
2546                 err = -ENOBUFS;
2547                 goto errout;
2548         }
2549
2550         /* Reserve room for dummy headers; this skb can pass
2551            through a good chunk of the routing engine.
2552          */
2553         skb_reset_mac_header(skb);
2554         skb_reset_network_header(skb);
2555
2556         /* Bugfix: need to give ip_route_input enough of an IP header to not gag. */
2557         ip_hdr(skb)->protocol = IPPROTO_ICMP;
2558         skb_reserve(skb, MAX_HEADER + sizeof(struct iphdr));
2559
2560         src = tb[RTA_SRC] ? nla_get_be32(tb[RTA_SRC]) : 0;
2561         dst = tb[RTA_DST] ? nla_get_be32(tb[RTA_DST]) : 0;
2562         iif = tb[RTA_IIF] ? nla_get_u32(tb[RTA_IIF]) : 0;
2563
2564         if (iif) {
2565                 struct net_device *dev;
2566
2567                 dev = __dev_get_by_index(&init_net, iif);
2568                 if (dev == NULL) {
2569                         err = -ENODEV;
2570                         goto errout_free;
2571                 }
2572
2573                 skb->protocol   = htons(ETH_P_IP);
2574                 skb->dev        = dev;
2575                 local_bh_disable();
2576                 err = ip_route_input(skb, dst, src, rtm->rtm_tos, dev);
2577                 local_bh_enable();
2578
2579                 rt = (struct rtable*) skb->dst;
2580                 if (err == 0 && rt->u.dst.error)
2581                         err = -rt->u.dst.error;
2582         } else {
2583                 struct flowi fl = {
2584                         .nl_u = {
2585                                 .ip4_u = {
2586                                         .daddr = dst,
2587                                         .saddr = src,
2588                                         .tos = rtm->rtm_tos,
2589                                 },
2590                         },
2591                         .oif = tb[RTA_OIF] ? nla_get_u32(tb[RTA_OIF]) : 0,
2592                 };
2593                 err = ip_route_output_key(&rt, &fl);
2594         }
2595
2596         if (err)
2597                 goto errout_free;
2598
2599         skb->dst = &rt->u.dst;
2600         if (rtm->rtm_flags & RTM_F_NOTIFY)
2601                 rt->rt_flags |= RTCF_NOTIFY;
2602
2603         err = rt_fill_info(skb, NETLINK_CB(in_skb).pid, nlh->nlmsg_seq,
2604                                 RTM_NEWROUTE, 0, 0);
2605         if (err <= 0)
2606                 goto errout_free;
2607
2608         err = rtnl_unicast(skb, NETLINK_CB(in_skb).pid);
2609 errout:
2610         return err;
2611
2612 errout_free:
2613         kfree_skb(skb);
2614         goto errout;
2615 }
2616
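/*
 * Netlink dump callback: walk the whole route cache hash table and emit
 * one RTM_NEWROUTE message per entry, resuming from cb->args[].
 */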
2617 int ip_rt_dump(struct sk_buff *skb,  struct netlink_callback *cb)
2618 {
2619         struct rtable *rt;
2620         int h, s_h;
2621         int idx, s_idx;
2622
2623         s_h = cb->args[0];
2624         if (s_h < 0)
2625                 s_h = 0;
2626         s_idx = idx = cb->args[1];
2627         for (h = s_h; h <= rt_hash_mask; h++) {
2628                 rcu_read_lock_bh();
2629                 for (rt = rcu_dereference(rt_hash_table[h].chain), idx = 0; rt;
2630                      rt = rcu_dereference(rt->u.dst.rt_next), idx++) {
2631                         if (idx < s_idx)
2632                                 continue;
2633                         skb->dst = dst_clone(&rt->u.dst);
2634                         if (rt_fill_info(skb, NETLINK_CB(cb->skb).pid,
2635                                          cb->nlh->nlmsg_seq, RTM_NEWROUTE,
2636                                          1, NLM_F_MULTI) <= 0) {
2637                                 dst_release(xchg(&skb->dst, NULL));
2638                                 rcu_read_unlock_bh();
2639                                 goto done;
2640                         }
2641                         dst_release(xchg(&skb->dst, NULL));
2642                 }
2643                 rcu_read_unlock_bh();
2644                 s_idx = 0;
2645         }
2646
2647 done:
2648         cb->args[0] = h;
2649         cb->args[1] = idx;
2650         return skb->len;
2651 }
2652
2653 void ip_rt_multicast_event(struct in_device *in_dev)
2654 {
2655         rt_cache_flush(0);
2656 }
2657
2658 #ifdef CONFIG_SYSCTL
2659 static int flush_delay;
2660
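/*
 * net.ipv4.route.flush is write-only: writing an integer delay (e.g.
 * "echo 0 > /proc/sys/net/ipv4/route/flush" for an immediate flush)
 * invokes rt_cache_flush() with that delay.
 */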
2661 static int ipv4_sysctl_rtcache_flush(ctl_table *ctl, int write,
2662                                         struct file *filp, void __user *buffer,
2663                                         size_t *lenp, loff_t *ppos)
2664 {
2665         if (write) {
2666                 proc_dointvec(ctl, write, filp, buffer, lenp, ppos);
2667                 rt_cache_flush(flush_delay);
2668                 return 0;
2669         }
2670
2671         return -EINVAL;
2672 }
2673
2674 static int ipv4_sysctl_rtcache_flush_strategy(ctl_table *table,
2675                                                 int __user *name,
2676                                                 int nlen,
2677                                                 void __user *oldval,
2678                                                 size_t __user *oldlenp,
2679                                                 void __user *newval,
2680                                                 size_t newlen)
2681 {
2682         int delay;
2683         if (newlen != sizeof(int))
2684                 return -EINVAL;
2685         if (get_user(delay, (int __user *)newval))
2686                 return -EFAULT;
2687         rt_cache_flush(delay);
2688         return 0;
2689 }
2690
2691 ctl_table ipv4_route_table[] = {
2692         {
2693                 .ctl_name       = NET_IPV4_ROUTE_FLUSH,
2694                 .procname       = "flush",
2695                 .data           = &flush_delay,
2696                 .maxlen         = sizeof(int),
2697                 .mode           = 0200,
2698                 .proc_handler   = &ipv4_sysctl_rtcache_flush,
2699                 .strategy       = &ipv4_sysctl_rtcache_flush_strategy,
2700         },
2701         {
2702                 .ctl_name       = NET_IPV4_ROUTE_MIN_DELAY,
2703                 .procname       = "min_delay",
2704                 .data           = &ip_rt_min_delay,
2705                 .maxlen         = sizeof(int),
2706                 .mode           = 0644,
2707                 .proc_handler   = &proc_dointvec_jiffies,
2708                 .strategy       = &sysctl_jiffies,
2709         },
2710         {
2711                 .ctl_name       = NET_IPV4_ROUTE_MAX_DELAY,
2712                 .procname       = "max_delay",
2713                 .data           = &ip_rt_max_delay,
2714                 .maxlen         = sizeof(int),
2715                 .mode           = 0644,
2716                 .proc_handler   = &proc_dointvec_jiffies,
2717                 .strategy       = &sysctl_jiffies,
2718         },
2719         {
2720                 .ctl_name       = NET_IPV4_ROUTE_GC_THRESH,
2721                 .procname       = "gc_thresh",
2722                 .data           = &ipv4_dst_ops.gc_thresh,
2723                 .maxlen         = sizeof(int),
2724                 .mode           = 0644,
2725                 .proc_handler   = &proc_dointvec,
2726         },
2727         {
2728                 .ctl_name       = NET_IPV4_ROUTE_MAX_SIZE,
2729                 .procname       = "max_size",
2730                 .data           = &ip_rt_max_size,
2731                 .maxlen         = sizeof(int),
2732                 .mode           = 0644,
2733                 .proc_handler   = &proc_dointvec,
2734         },
2735         {
2736                 /*  Deprecated. Use gc_min_interval_ms */
2737
2738                 .ctl_name       = NET_IPV4_ROUTE_GC_MIN_INTERVAL,
2739                 .procname       = "gc_min_interval",
2740                 .data           = &ip_rt_gc_min_interval,
2741                 .maxlen         = sizeof(int),
2742                 .mode           = 0644,
2743                 .proc_handler   = &proc_dointvec_jiffies,
2744                 .strategy       = &sysctl_jiffies,
2745         },
2746         {
2747                 .ctl_name       = NET_IPV4_ROUTE_GC_MIN_INTERVAL_MS,
2748                 .procname       = "gc_min_interval_ms",
2749                 .data           = &ip_rt_gc_min_interval,
2750                 .maxlen         = sizeof(int),
2751                 .mode           = 0644,
2752                 .proc_handler   = &proc_dointvec_ms_jiffies,
2753                 .strategy       = &sysctl_ms_jiffies,
2754         },
2755         {
2756                 .ctl_name       = NET_IPV4_ROUTE_GC_TIMEOUT,
2757                 .procname       = "gc_timeout",
2758                 .data           = &ip_rt_gc_timeout,
2759                 .maxlen         = sizeof(int),
2760                 .mode           = 0644,
2761                 .proc_handler   = &proc_dointvec_jiffies,
2762                 .strategy       = &sysctl_jiffies,
2763         },
2764         {
2765                 .ctl_name       = NET_IPV4_ROUTE_GC_INTERVAL,
2766                 .procname       = "gc_interval",
2767                 .data           = &ip_rt_gc_interval,
                .maxlen         = sizeof(int),
                .mode           = 0644,
                .proc_handler   = &proc_dointvec_jiffies,
                .strategy       = &sysctl_jiffies,
        },
        {
                .ctl_name       = NET_IPV4_ROUTE_REDIRECT_LOAD,
                .procname       = "redirect_load",
                .data           = &ip_rt_redirect_load,
                .maxlen         = sizeof(int),
                .mode           = 0644,
                .proc_handler   = &proc_dointvec,
        },
        {
                .ctl_name       = NET_IPV4_ROUTE_REDIRECT_NUMBER,
                .procname       = "redirect_number",
                .data           = &ip_rt_redirect_number,
                .maxlen         = sizeof(int),
                .mode           = 0644,
                .proc_handler   = &proc_dointvec,
        },
        {
                .ctl_name       = NET_IPV4_ROUTE_REDIRECT_SILENCE,
                .procname       = "redirect_silence",
                .data           = &ip_rt_redirect_silence,
                .maxlen         = sizeof(int),
                .mode           = 0644,
                .proc_handler   = &proc_dointvec,
        },
        {
                .ctl_name       = NET_IPV4_ROUTE_ERROR_COST,
                .procname       = "error_cost",
                .data           = &ip_rt_error_cost,
                .maxlen         = sizeof(int),
                .mode           = 0644,
                .proc_handler   = &proc_dointvec,
        },
        {
                .ctl_name       = NET_IPV4_ROUTE_ERROR_BURST,
                .procname       = "error_burst",
                .data           = &ip_rt_error_burst,
                .maxlen         = sizeof(int),
                .mode           = 0644,
                .proc_handler   = &proc_dointvec,
        },
        {
                .ctl_name       = NET_IPV4_ROUTE_GC_ELASTICITY,
                .procname       = "gc_elasticity",
                .data           = &ip_rt_gc_elasticity,
                .maxlen         = sizeof(int),
                .mode           = 0644,
                .proc_handler   = &proc_dointvec,
        },
        {
                .ctl_name       = NET_IPV4_ROUTE_MTU_EXPIRES,
                .procname       = "mtu_expires",
                .data           = &ip_rt_mtu_expires,
                .maxlen         = sizeof(int),
                .mode           = 0644,
                .proc_handler   = &proc_dointvec_jiffies,
                .strategy       = &sysctl_jiffies,
        },
        {
                .ctl_name       = NET_IPV4_ROUTE_MIN_PMTU,
                .procname       = "min_pmtu",
                .data           = &ip_rt_min_pmtu,
                .maxlen         = sizeof(int),
                .mode           = 0644,
                .proc_handler   = &proc_dointvec,
        },
        {
                .ctl_name       = NET_IPV4_ROUTE_MIN_ADVMSS,
                .procname       = "min_adv_mss",
                .data           = &ip_rt_min_advmss,
                .maxlen         = sizeof(int),
                .mode           = 0644,
                .proc_handler   = &proc_dointvec,
        },
        {
                .ctl_name       = NET_IPV4_ROUTE_SECRET_INTERVAL,
                .procname       = "secret_interval",
                .data           = &ip_rt_secret_interval,
                .maxlen         = sizeof(int),
                .mode           = 0644,
                .proc_handler   = &proc_dointvec_jiffies,
                .strategy       = &sysctl_jiffies,
        },
        { .ctl_name = 0 }
};
#endif

#ifdef CONFIG_NET_CLS_ROUTE
struct ip_rt_acct *ip_rt_acct;

/* This code sucks.  But you should have seen it before! --RR */

/* IP route accounting ptr for this logical cpu number. */
#define IP_RT_ACCT_CPU(i) (ip_rt_acct + i * 256)
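
/*
 * Layout note: ip_rt_acct appears to be a single flat allocation holding
 * 256 accounting slots per CPU, so IP_RT_ACCT_CPU(i) simply offsets into
 * that block by 256 entries per logical cpu (see the allocation in
 * ip_rt_init() below).
 */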

#ifdef CONFIG_PROC_FS
static int ip_rt_acct_read(char *buffer, char **start, off_t offset,
                           int length, int *eof, void *data)
{
        unsigned int i;

        if ((offset & 3) || (length & 3))
                return -EIO;

        if (offset >= sizeof(struct ip_rt_acct) * 256) {
                *eof = 1;
                return 0;
        }

        if (offset + length >= sizeof(struct ip_rt_acct) * 256) {
                length = sizeof(struct ip_rt_acct) * 256 - offset;
                *eof = 1;
        }

        offset /= sizeof(u32);

        if (length > 0) {
                u32 *dst = (u32 *) buffer;

                *start = buffer;
                memset(dst, 0, length);

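                /*
                 * Sum the counters from every possible CPU into the output
                 * buffer; iterating over possible (not just online) cpus
                 * keeps the totals from CPUs that have since gone offline.
                 */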
                for_each_possible_cpu(i) {
                        unsigned int j;
                        u32 *src = ((u32 *) IP_RT_ACCT_CPU(i)) + offset;

                        for (j = 0; j < length/4; j++)
                                dst[j] += src[j];
                }
        }
        return length;
}
#endif /* CONFIG_PROC_FS */
#endif /* CONFIG_NET_CLS_ROUTE */

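/*
 * The route cache hash table is normally sized automatically at boot, but
 * it can be overridden from the kernel command line, e.g. booting with
 * "rhash_entries=65536" (an illustrative value, not a recommendation).
 */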
static __initdata unsigned long rhash_entries;
static int __init set_rhash_entries(char *str)
{
        if (!str)
                return 0;
        rhash_entries = simple_strtoul(str, &str, 0);
        return 1;
}
__setup("rhash_entries=", set_rhash_entries);

int __init ip_rt_init(void)
{
        int rc = 0;

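        /*
         * Initial hash salt, derived from boot-time values only; it is a
         * weak seed, but the cache flush path appears to re-randomize
         * rt_hash_rnd later on, driven by rt_secret_timer below.
         */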
        rt_hash_rnd = (int) ((num_physpages ^ (num_physpages>>8)) ^
                             (jiffies ^ (jiffies >> 7)));

#ifdef CONFIG_NET_CLS_ROUTE
        {
        int order;
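        /*
         * Find the smallest page order large enough for 256 accounting
         * slots per CPU, for all NR_CPUS possible cpus.
         */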
        for (order = 0;
             (PAGE_SIZE << order) < 256 * sizeof(struct ip_rt_acct) * NR_CPUS; order++)
                /* NOTHING */;
        ip_rt_acct = (struct ip_rt_acct *)__get_free_pages(GFP_KERNEL, order);
        if (!ip_rt_acct)
                panic("IP: failed to allocate ip_rt_acct\n");
        memset(ip_rt_acct, 0, PAGE_SIZE << order);
        }
#endif

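        /*
         * Slab cache backing struct rtable allocations; SLAB_PANIC means a
         * failure here stops the boot, so the return value needs no check.
         */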
        ipv4_dst_ops.kmem_cachep =
                kmem_cache_create("ip_dst_cache", sizeof(struct rtable), 0,
                                  SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);

        ipv4_dst_blackhole_ops.kmem_cachep = ipv4_dst_ops.kmem_cachep;

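        /*
         * Allocate and size the route cache hash table from available
         * memory, or from the rhash_entries= boot parameter if given;
         * alloc_large_system_hash() fills in rt_hash_log and rt_hash_mask
         * for the lookup code.
         */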
        rt_hash_table = (struct rt_hash_bucket *)
                alloc_large_system_hash("IP route cache",
                                        sizeof(struct rt_hash_bucket),
                                        rhash_entries,
                                        (num_physpages >= 128 * 1024) ?
                                        15 : 17,
                                        0,
                                        &rt_hash_log,
                                        &rt_hash_mask,
                                        0);
        memset(rt_hash_table, 0, (rt_hash_mask + 1) * sizeof(struct rt_hash_bucket));
        rt_hash_lock_init();

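        /*
         * Tie garbage collection to the table size: start GC at roughly
         * one entry per bucket and cap the cache at 16 entries per bucket.
         */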
        ipv4_dst_ops.gc_thresh = (rt_hash_mask + 1);
        ip_rt_max_size = (rt_hash_mask + 1) * 16;

        devinet_init();
        ip_fib_init();

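        /*
         * rt_flush_timer drives deferred cache flushes; rt_secret_timer
         * periodically forces a rebuild of the cache (it is armed a few
         * lines below).
         */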
        setup_timer(&rt_flush_timer, rt_run_flush, 0);
        setup_timer(&rt_secret_timer, rt_secret_rebuild, 0);

        /* All the timers started at system startup tend to
           synchronize.  Perturb them a bit.
         */
        schedule_delayed_work(&expires_work,
                net_random() % ip_rt_gc_interval + ip_rt_gc_interval);

        rt_secret_timer.expires = jiffies + net_random() % ip_rt_secret_interval +
                ip_rt_secret_interval;
        add_timer(&rt_secret_timer);

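        /*
         * Export the cache through procfs: /proc/net/rt_cache dumps the
         * cached routes, /proc/net/stat/rt_cache exposes the per-CPU
         * statistics, and (with CONFIG_NET_CLS_ROUTE) /proc/net/rt_acct
         * exports the accounting data read by ip_rt_acct_read() above.
         */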
#ifdef CONFIG_PROC_FS
        {
        struct proc_dir_entry *rtstat_pde = NULL; /* keep gcc happy */
        if (!proc_net_fops_create(&init_net, "rt_cache", S_IRUGO, &rt_cache_seq_fops) ||
            !(rtstat_pde = create_proc_entry("rt_cache", S_IRUGO,
                                             init_net.proc_net_stat))) {
                return -ENOMEM;
        }
        rtstat_pde->proc_fops = &rt_cpu_seq_fops;
        }
#ifdef CONFIG_NET_CLS_ROUTE
        create_proc_read_entry("rt_acct", 0, init_net.proc_net, ip_rt_acct_read, NULL);
#endif
#endif
#ifdef CONFIG_XFRM
        xfrm_init();
        xfrm4_init();
#endif
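        /*
         * Register the RTM_GETROUTE handler so userspace route queries
         * (e.g. "ip route get") are answered by inet_rtm_getroute().
         */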
        rtnl_register(PF_INET, RTM_GETROUTE, inet_rtm_getroute, NULL);

        return rc;
}

EXPORT_SYMBOL(__ip_select_ident);
EXPORT_SYMBOL(ip_route_input);
EXPORT_SYMBOL(ip_route_output_key);