*
* ROUTE - implementation of the IP router.
*
- * Version: $Id: route.c,v 1.103 2002/01/12 07:44:09 davem Exp $
- *
* Authors: Ross Biro
* Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
* Alan Cox, <gw4pts@gw4pts.ampr.org>
* (rco@di.uminho.pt) Routing table insertion and update
* Linus Torvalds : Rewrote bits to be sensible
* Alan Cox : Added BSD route gw semantics
- * Alan Cox : Super /proc >4K
+ * Alan Cox : Super /proc >4K
* Alan Cox : MTU in route table
* Alan Cox : MSS actually. Also added the window
* clamper.
* Alan Cox : Faster /proc handling
* Alexey Kuznetsov : Massive rework to support tree based routing,
* routing caches and better behaviour.
- *
+ *
* Olaf Erb : irtt wasn't being copied right.
* Bjorn Ekwall : Kerneld route support.
* Alan Cox : Multicast fixed (I hope)
* Marc Boucher : routing by fwmark
* Robert Olsson : Added rt_cache statistics
* Arnaldo C. Melo : Convert proc stuff to seq_file
- * Eric Dumazet : hashed spinlocks
+ * Eric Dumazet : hashed spinlocks and rt_check_expire() fixes.
+ * Ilia Sotnikov : Ignore TOS on PMTUD and Redirect
+ * Ilia Sotnikov : Removed TOS from hash calculations
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* 2 of the License, or (at your option) any later version.
*/
-#include <linux/config.h>
#include <linux/module.h>
#include <asm/uaccess.h>
#include <asm/system.h>
#include <linux/bitops.h>
#include <linux/types.h>
#include <linux/kernel.h>
-#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/bootmem.h>
#include <linux/string.h>
#include <linux/netdevice.h>
#include <linux/proc_fs.h>
#include <linux/init.h>
+#include <linux/workqueue.h>
#include <linux/skbuff.h>
-#include <linux/rtnetlink.h>
#include <linux/inetdevice.h>
#include <linux/igmp.h>
#include <linux/pkt_sched.h>
#include <linux/jhash.h>
#include <linux/rcupdate.h>
#include <linux/times.h>
+#include <net/dst.h>
+#include <net/net_namespace.h>
#include <net/protocol.h>
#include <net/ip.h>
#include <net/route.h>
#include <net/tcp.h>
#include <net/icmp.h>
#include <net/xfrm.h>
-#include <net/ip_mp_alg.h>
+#include <net/netevent.h>
+#include <net/rtnetlink.h>
#ifdef CONFIG_SYSCTL
#include <linux/sysctl.h>
#endif
#define RT_GC_TIMEOUT (300*HZ)
-static int ip_rt_min_delay = 2 * HZ;
-static int ip_rt_max_delay = 10 * HZ;
static int ip_rt_max_size;
-static int ip_rt_gc_timeout = RT_GC_TIMEOUT;
-static int ip_rt_gc_interval = 60 * HZ;
-static int ip_rt_gc_min_interval = HZ / 2;
-static int ip_rt_redirect_number = 9;
-static int ip_rt_redirect_load = HZ / 50;
-static int ip_rt_redirect_silence = ((HZ / 50) << (9 + 1));
-static int ip_rt_error_cost = HZ;
-static int ip_rt_error_burst = 5 * HZ;
-static int ip_rt_gc_elasticity = 8;
-static int ip_rt_mtu_expires = 10 * 60 * HZ;
-static int ip_rt_min_pmtu = 512 + 20 + 20;
-static int ip_rt_min_advmss = 256;
-static int ip_rt_secret_interval = 10 * 60 * HZ;
-static unsigned long rt_deadline;
-
-#define RTprint(a...) printk(KERN_DEBUG a)
-
-static struct timer_list rt_flush_timer;
-static struct timer_list rt_periodic_timer;
-static struct timer_list rt_secret_timer;
+static int ip_rt_gc_timeout __read_mostly = RT_GC_TIMEOUT;
+static int ip_rt_gc_interval __read_mostly = 60 * HZ;
+static int ip_rt_gc_min_interval __read_mostly = HZ / 2;
+static int ip_rt_redirect_number __read_mostly = 9;
+static int ip_rt_redirect_load __read_mostly = HZ / 50;
+static int ip_rt_redirect_silence __read_mostly = ((HZ / 50) << (9 + 1));
+static int ip_rt_error_cost __read_mostly = HZ;
+static int ip_rt_error_burst __read_mostly = 5 * HZ;
+static int ip_rt_gc_elasticity __read_mostly = 8;
+static int ip_rt_mtu_expires __read_mostly = 10 * 60 * HZ;
+static int ip_rt_min_pmtu __read_mostly = 512 + 20 + 20;
+static int ip_rt_min_advmss __read_mostly = 256;
+static int ip_rt_secret_interval __read_mostly = 10 * 60 * HZ;
+static int rt_chain_length_max __read_mostly = 20;
+
+static void rt_worker_func(struct work_struct *work);
+static DECLARE_DELAYED_WORK(expires_work, rt_worker_func);
/*
 * Interface to generic destination cache.
 */
static struct dst_entry *ipv4_negative_advice(struct dst_entry *dst);
static void ipv4_link_failure(struct sk_buff *skb);
static void ip_rt_update_pmtu(struct dst_entry *dst, u32 mtu);
-static int rt_garbage_collect(void);
+static int rt_garbage_collect(struct dst_ops *ops);
+static void rt_emergency_hash_rebuild(struct net *net);
static struct dst_ops ipv4_dst_ops = {
.negative_advice = ipv4_negative_advice,
.link_failure = ipv4_link_failure,
.update_pmtu = ip_rt_update_pmtu,
- .entry_size = sizeof(struct rtable),
+ .local_out = __ip_local_out,
+ .entries = ATOMIC_INIT(0),
};
#define ECN_OR_COST(class) TC_PRIO_##class
-__u8 ip_tos2prio[16] = {
+const __u8 ip_tos2prio[16] = {
TC_PRIO_BESTEFFORT,
ECN_OR_COST(FILLER),
TC_PRIO_BESTEFFORT,
struct rt_hash_bucket {
struct rtable *chain;
};
-#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
+
+#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK) || \
+ defined(CONFIG_PROVE_LOCKING)
/*
 * Instead of using one spinlock for each rt_hash_bucket, we use a table of spinlocks.
 * The size of this table is a power of two and depends on the number of CPUs.
+ * (on lockdep we have quite a big spinlock_t, so keep the size down there)
*/
-#if NR_CPUS >= 32
-#define RT_HASH_LOCK_SZ 4096
-#elif NR_CPUS >= 16
-#define RT_HASH_LOCK_SZ 2048
-#elif NR_CPUS >= 8
-#define RT_HASH_LOCK_SZ 1024
-#elif NR_CPUS >= 4
-#define RT_HASH_LOCK_SZ 512
+#ifdef CONFIG_LOCKDEP
+# define RT_HASH_LOCK_SZ 256
#else
-#define RT_HASH_LOCK_SZ 256
+# if NR_CPUS >= 32
+# define RT_HASH_LOCK_SZ 4096
+# elif NR_CPUS >= 16
+# define RT_HASH_LOCK_SZ 2048
+# elif NR_CPUS >= 8
+# define RT_HASH_LOCK_SZ 1024
+# elif NR_CPUS >= 4
+# define RT_HASH_LOCK_SZ 512
+# else
+# define RT_HASH_LOCK_SZ 256
+# endif
#endif
static spinlock_t *rt_hash_locks;
# define rt_hash_lock_addr(slot) &rt_hash_locks[(slot) & (RT_HASH_LOCK_SZ - 1)]
-# define rt_hash_lock_init() { \
- int i; \
- rt_hash_locks = kmalloc(sizeof(spinlock_t) * RT_HASH_LOCK_SZ, GFP_KERNEL); \
- if (!rt_hash_locks) panic("IP: failed to allocate rt_hash_locks\n"); \
- for (i = 0; i < RT_HASH_LOCK_SZ; i++) \
- spin_lock_init(&rt_hash_locks[i]); \
- }
+
+static __init void rt_hash_lock_init(void)
+{
+ int i;
+
+ rt_hash_locks = kmalloc(sizeof(spinlock_t) * RT_HASH_LOCK_SZ,
+ GFP_KERNEL);
+ if (!rt_hash_locks)
+ panic("IP: failed to allocate rt_hash_locks\n");
+
+ for (i = 0; i < RT_HASH_LOCK_SZ; i++)
+ spin_lock_init(&rt_hash_locks[i]);
+}
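+
+/*
+ * Illustrative note (not part of the original patch): locks are shared
+ * by masking the bucket index, so with RT_HASH_LOCK_SZ == 256 the
+ * buckets 0, 256, 512, ... all map to rt_hash_locks[0]:
+ *
+ *	spin_lock_bh(rt_hash_lock_addr(hash));
+ *	... modify rt_hash_table[hash].chain ...
+ *	spin_unlock_bh(rt_hash_lock_addr(hash));
+ *
+ * Memory cost stays bounded by RT_HASH_LOCK_SZ rather than by the
+ * (much larger) number of hash buckets.
+ */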
#else
# define rt_hash_lock_addr(slot) NULL
-# define rt_hash_lock_init()
+
+static inline void rt_hash_lock_init(void)
+{
+}
#endif
-static struct rt_hash_bucket *rt_hash_table;
-static unsigned rt_hash_mask;
-static int rt_hash_log;
-static unsigned int rt_hash_rnd;
+static struct rt_hash_bucket *rt_hash_table __read_mostly;
+static unsigned rt_hash_mask __read_mostly;
+static unsigned int rt_hash_log __read_mostly;
-struct rt_cache_stat *rt_cache_stat;
+static DEFINE_PER_CPU(struct rt_cache_stat, rt_cache_stat);
+#define RT_CACHE_STAT_INC(field) \
+ (__raw_get_cpu_var(rt_cache_stat).field++)
-static int rt_intern_hash(unsigned hash, struct rtable *rth,
- struct rtable **res);
+static inline unsigned int rt_hash(__be32 daddr, __be32 saddr, int idx,
+ int genid)
+{
+ return jhash_3words((__force u32)(__be32)(daddr),
+ (__force u32)(__be32)(saddr),
+ idx, genid)
+ & rt_hash_mask;
+}
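+
+/*
+ * Illustrative lookup pattern (matching the call sites below): the
+ * current generation id is folded into the hash, so entries inserted
+ * under an older genid are unlikely to share a bucket with new
+ * traffic and, if they do, are skipped via rt_is_expired():
+ *
+ *	hash = rt_hash(daddr, saddr, dev->ifindex, rt_genid(net));
+ *	for (rth = rt_hash_table[hash].chain; rth; rth = rth->u.dst.rt_next)
+ *		...
+ */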
-static unsigned int rt_hash_code(u32 daddr, u32 saddr, u8 tos)
+static inline int rt_genid(struct net *net)
{
- return (jhash_3words(daddr, saddr, (u32) tos, rt_hash_rnd)
- & rt_hash_mask);
+ return atomic_read(&net->ipv4.rt_genid);
}
#ifdef CONFIG_PROC_FS
struct rt_cache_iter_state {
+ struct seq_net_private p;
int bucket;
+ int genid;
};
static struct rtable *rt_cache_get_first(struct seq_file *seq)
{
- struct rtable *r = NULL;
struct rt_cache_iter_state *st = seq->private;
+ struct rtable *r = NULL;
for (st->bucket = rt_hash_mask; st->bucket >= 0; --st->bucket) {
+ if (!rt_hash_table[st->bucket].chain)
+ continue;
rcu_read_lock_bh();
- r = rt_hash_table[st->bucket].chain;
- if (r)
- break;
+ r = rcu_dereference(rt_hash_table[st->bucket].chain);
+ while (r) {
+ if (dev_net(r->u.dst.dev) == seq_file_net(seq) &&
+ r->rt_genid == st->genid)
+ return r;
+ r = rcu_dereference(r->u.dst.rt_next);
+ }
rcu_read_unlock_bh();
}
return r;
}
-static struct rtable *rt_cache_get_next(struct seq_file *seq, struct rtable *r)
+static struct rtable *__rt_cache_get_next(struct seq_file *seq,
+ struct rtable *r)
{
- struct rt_cache_iter_state *st = rcu_dereference(seq->private);
+ struct rt_cache_iter_state *st = seq->private;
- r = r->u.rt_next;
+ r = r->u.dst.rt_next;
while (!r) {
rcu_read_unlock_bh();
- if (--st->bucket < 0)
- break;
+ do {
+ if (--st->bucket < 0)
+ return NULL;
+ } while (!rt_hash_table[st->bucket].chain);
rcu_read_lock_bh();
r = rt_hash_table[st->bucket].chain;
}
+ return rcu_dereference(r);
+}
+
+static struct rtable *rt_cache_get_next(struct seq_file *seq,
+ struct rtable *r)
+{
+ struct rt_cache_iter_state *st = seq->private;
+ while ((r = __rt_cache_get_next(seq, r)) != NULL) {
+ if (dev_net(r->u.dst.dev) != seq_file_net(seq))
+ continue;
+ if (r->rt_genid == st->genid)
+ break;
+ }
return r;
}
static void *rt_cache_seq_start(struct seq_file *seq, loff_t *pos)
{
- return *pos ? rt_cache_get_idx(seq, *pos - 1) : SEQ_START_TOKEN;
+ struct rt_cache_iter_state *st = seq->private;
+ if (*pos)
+ return rt_cache_get_idx(seq, *pos - 1);
+ st->genid = rt_genid(seq_file_net(seq));
+ return SEQ_START_TOKEN;
}
static void *rt_cache_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
- struct rtable *r = NULL;
+ struct rtable *r;
if (v == SEQ_START_TOKEN)
r = rt_cache_get_first(seq);
"HHUptod\tSpecDst");
else {
struct rtable *r = v;
- char temp[256];
+ int len;
- sprintf(temp, "%s\t%08lX\t%08lX\t%8X\t%d\t%u\t%d\t"
- "%08lX\t%d\t%u\t%u\t%02X\t%d\t%1d\t%08X",
+ seq_printf(seq, "%s\t%08lX\t%08lX\t%8X\t%d\t%u\t%d\t"
+ "%08lX\t%d\t%u\t%u\t%02X\t%d\t%1d\t%08X%n",
r->u.dst.dev ? r->u.dst.dev->name : "*",
(unsigned long)r->rt_dst, (unsigned long)r->rt_gateway,
r->rt_flags, atomic_read(&r->u.dst.__refcnt),
r->u.dst.hh ? atomic_read(&r->u.dst.hh->hh_refcnt) : -1,
r->u.dst.hh ? (r->u.dst.hh->hh_output ==
dev_queue_xmit) : 0,
- r->rt_spec_dst);
- seq_printf(seq, "%-127s\n", temp);
- }
- return 0;
+ r->rt_spec_dst, &len);
+
+ seq_printf(seq, "%*s\n", 127 - len, "");
+ }
+ return 0;
}
-static struct seq_operations rt_cache_seq_ops = {
+static const struct seq_operations rt_cache_seq_ops = {
.start = rt_cache_seq_start,
.next = rt_cache_seq_next,
.stop = rt_cache_seq_stop,
static int rt_cache_seq_open(struct inode *inode, struct file *file)
{
- struct seq_file *seq;
- int rc = -ENOMEM;
- struct rt_cache_iter_state *s = kmalloc(sizeof(*s), GFP_KERNEL);
-
- if (!s)
- goto out;
- rc = seq_open(file, &rt_cache_seq_ops);
- if (rc)
- goto out_kfree;
- seq = file->private_data;
- seq->private = s;
- memset(s, 0, sizeof(*s));
-out:
- return rc;
-out_kfree:
- kfree(s);
- goto out;
+ return seq_open_net(inode, file, &rt_cache_seq_ops,
+ sizeof(struct rt_cache_iter_state));
}
-static struct file_operations rt_cache_seq_fops = {
+static const struct file_operations rt_cache_seq_fops = {
.owner = THIS_MODULE,
.open = rt_cache_seq_open,
.read = seq_read,
.llseek = seq_lseek,
- .release = seq_release_private,
+ .release = seq_release_net,
};
if (*pos == 0)
return SEQ_START_TOKEN;
- for (cpu = *pos-1; cpu < NR_CPUS; ++cpu) {
+ for (cpu = *pos-1; cpu < nr_cpu_ids; ++cpu) {
if (!cpu_possible(cpu))
continue;
*pos = cpu+1;
- return per_cpu_ptr(rt_cache_stat, cpu);
+ return &per_cpu(rt_cache_stat, cpu);
}
return NULL;
}
{
int cpu;
- for (cpu = *pos; cpu < NR_CPUS; ++cpu) {
+ for (cpu = *pos; cpu < nr_cpu_ids; ++cpu) {
if (!cpu_possible(cpu))
continue;
*pos = cpu+1;
- return per_cpu_ptr(rt_cache_stat, cpu);
+ return &per_cpu(rt_cache_stat, cpu);
}
return NULL;
-
+
}
static void rt_cpu_seq_stop(struct seq_file *seq, void *v)
seq_printf(seq, "entries in_hit in_slow_tot in_slow_mc in_no_route in_brd in_martian_dst in_martian_src out_hit out_slow_tot out_slow_mc gc_total gc_ignored gc_goal_miss gc_dst_overflow in_hlist_search out_hlist_search\n");
return 0;
}
-
+
seq_printf(seq,"%08x %08x %08x %08x %08x %08x %08x %08x "
" %08x %08x %08x %08x %08x %08x %08x %08x %08x \n",
atomic_read(&ipv4_dst_ops.entries),
st->out_hit,
st->out_slow_tot,
- st->out_slow_mc,
+ st->out_slow_mc,
st->gc_total,
st->gc_ignored,
return 0;
}
-static struct seq_operations rt_cpu_seq_ops = {
+static const struct seq_operations rt_cpu_seq_ops = {
.start = rt_cpu_seq_start,
.next = rt_cpu_seq_next,
.stop = rt_cpu_seq_stop,
return seq_open(file, &rt_cpu_seq_ops);
}
-static struct file_operations rt_cpu_seq_fops = {
+static const struct file_operations rt_cpu_seq_fops = {
.owner = THIS_MODULE,
.open = rt_cpu_seq_open,
.read = seq_read,
.release = seq_release,
};
+#ifdef CONFIG_NET_CLS_ROUTE
+static int ip_rt_acct_read(char *buffer, char **start, off_t offset,
+ int length, int *eof, void *data)
+{
+ unsigned int i;
+
+ if ((offset & 3) || (length & 3))
+ return -EIO;
+
+ if (offset >= sizeof(struct ip_rt_acct) * 256) {
+ *eof = 1;
+ return 0;
+ }
+
+ if (offset + length >= sizeof(struct ip_rt_acct) * 256) {
+ length = sizeof(struct ip_rt_acct) * 256 - offset;
+ *eof = 1;
+ }
+
+ offset /= sizeof(u32);
+
+ if (length > 0) {
+ u32 *dst = (u32 *) buffer;
+
+ *start = buffer;
+ memset(dst, 0, length);
+
+ for_each_possible_cpu(i) {
+ unsigned int j;
+ u32 *src;
+
+ src = ((u32 *) per_cpu_ptr(ip_rt_acct, i)) + offset;
+ for (j = 0; j < length/4; j++)
+ dst[j] += src[j];
+ }
+ }
+ return length;
+}
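+
+/*
+ * Illustrative usage (an assumption, based on the handler above):
+ * userspace reads the whole table from /proc/net/rt_acct, i.e.
+ * 256 * sizeof(struct ip_rt_acct) bytes, and every u32 slot it gets
+ * back is the sum of that per-CPU counter over all possible CPUs.
+ * Reads must be 4-byte aligned in both offset and length, or the
+ * handler returns -EIO.
+ */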
+#endif
+
+static int __net_init ip_rt_do_proc_init(struct net *net)
+{
+ struct proc_dir_entry *pde;
+
+ pde = proc_net_fops_create(net, "rt_cache", S_IRUGO,
+ &rt_cache_seq_fops);
+ if (!pde)
+ goto err1;
+
+ pde = proc_create("rt_cache", S_IRUGO,
+ net->proc_net_stat, &rt_cpu_seq_fops);
+ if (!pde)
+ goto err2;
+
+#ifdef CONFIG_NET_CLS_ROUTE
+ pde = create_proc_read_entry("rt_acct", 0, net->proc_net,
+ ip_rt_acct_read, NULL);
+ if (!pde)
+ goto err3;
+#endif
+ return 0;
+
+#ifdef CONFIG_NET_CLS_ROUTE
+err3:
+ remove_proc_entry("rt_cache", net->proc_net_stat);
+#endif
+err2:
+ remove_proc_entry("rt_cache", net->proc_net);
+err1:
+ return -ENOMEM;
+}
+
+static void __net_exit ip_rt_do_proc_exit(struct net *net)
+{
+ remove_proc_entry("rt_cache", net->proc_net_stat);
+ remove_proc_entry("rt_cache", net->proc_net);
+ remove_proc_entry("rt_acct", net->proc_net);
+}
+
+static struct pernet_operations ip_rt_proc_ops __net_initdata = {
+ .init = ip_rt_do_proc_init,
+ .exit = ip_rt_do_proc_exit,
+};
+
+static int __init ip_rt_proc_init(void)
+{
+ return register_pernet_subsys(&ip_rt_proc_ops);
+}
+
+#else
+static inline int ip_rt_proc_init(void)
+{
+ return 0;
+}
#endif /* CONFIG_PROC_FS */
-
-static __inline__ void rt_free(struct rtable *rt)
+
+static inline void rt_free(struct rtable *rt)
{
- multipath_remove(rt);
call_rcu_bh(&rt->u.dst.rcu_head, dst_rcu_free);
}
-static __inline__ void rt_drop(struct rtable *rt)
+static inline void rt_drop(struct rtable *rt)
{
- multipath_remove(rt);
ip_rt_put(rt);
call_rcu_bh(&rt->u.dst.rcu_head, dst_rcu_free);
}
-static __inline__ int rt_fast_clean(struct rtable *rth)
+static inline int rt_fast_clean(struct rtable *rth)
{
/* Kill broadcast/multicast entries very aggressively, if they
collide in the hash table with more useful entries */
return (rth->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST)) &&
- rth->fl.iif && rth->u.rt_next;
+ rth->fl.iif && rth->u.dst.rt_next;
}
-static __inline__ int rt_valuable(struct rtable *rth)
+static inline int rt_valuable(struct rtable *rth)
{
return (rth->rt_flags & (RTCF_REDIRECTED | RTCF_NOTIFY)) ||
rth->u.dst.expires;
return score;
}
+static inline bool rt_caching(const struct net *net)
+{
+ return net->ipv4.current_rt_cache_rebuild_count <=
+ net->ipv4.sysctl_rt_cache_rebuild_count;
+}
+
+static inline bool compare_hash_inputs(const struct flowi *fl1,
+ const struct flowi *fl2)
+{
+ return (__force u32)(((fl1->nl_u.ip4_u.daddr ^ fl2->nl_u.ip4_u.daddr) |
+ (fl1->nl_u.ip4_u.saddr ^ fl2->nl_u.ip4_u.saddr) |
+ (fl1->iif ^ fl2->iif)) == 0);
+}
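+
+/*
+ * The OR-of-XORs idiom above is a branchless "all fields equal" test:
+ * each XOR term is zero iff its two fields match, so the OR of all
+ * terms is zero iff every field matches, e.g.
+ *
+ *	((a ^ b) | (c ^ d)) == 0  <=>  a == b && c == d
+ *
+ * evaluated without any conditional jumps.
+ */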
+
static inline int compare_keys(struct flowi *fl1, struct flowi *fl2)
{
- return memcmp(&fl1->nl_u.ip4_u, &fl2->nl_u.ip4_u, sizeof(fl1->nl_u.ip4_u)) == 0 &&
- fl1->oif == fl2->oif &&
- fl1->iif == fl2->iif;
+ return ((__force u32)((fl1->nl_u.ip4_u.daddr ^ fl2->nl_u.ip4_u.daddr) |
+ (fl1->nl_u.ip4_u.saddr ^ fl2->nl_u.ip4_u.saddr)) |
+ (fl1->mark ^ fl2->mark) |
+ (*(u16 *)&fl1->nl_u.ip4_u.tos ^
+ *(u16 *)&fl2->nl_u.ip4_u.tos) |
+ (fl1->oif ^ fl2->oif) |
+ (fl1->iif ^ fl2->iif)) == 0;
}
-#ifdef CONFIG_IP_ROUTE_MULTIPATH_CACHED
-static struct rtable **rt_remove_balanced_route(struct rtable **chain_head,
- struct rtable *expentry,
- int *removed_count)
+static inline int compare_netns(struct rtable *rt1, struct rtable *rt2)
{
- int passedexpired = 0;
- struct rtable **nextstep = NULL;
- struct rtable **rthp = chain_head;
- struct rtable *rth;
+ return dev_net(rt1->u.dst.dev) == dev_net(rt2->u.dst.dev);
+}
- if (removed_count)
- *removed_count = 0;
+static inline int rt_is_expired(struct rtable *rth)
+{
+ return rth->rt_genid != rt_genid(dev_net(rth->u.dst.dev));
+}
- while ((rth = *rthp) != NULL) {
- if (rth == expentry)
- passedexpired = 1;
+/*
+ * Perform a full scan of the hash table and free all entries.
+ * Can be called by a softirq or a process.
+ * In the latter case, we want to be rescheduled if necessary.
+ */
+static void rt_do_flush(int process_context)
+{
+ unsigned int i;
+ struct rtable *rth, *next;
struct rtable *tail;
- if (((*rthp)->u.dst.flags & DST_BALANCED) != 0 &&
- compare_keys(&(*rthp)->fl, &expentry->fl)) {
- if (*rthp == expentry) {
- *rthp = rth->u.rt_next;
- continue;
+ for (i = 0; i <= rt_hash_mask; i++) {
+ if (process_context && need_resched())
+ cond_resched();
+ rth = rt_hash_table[i].chain;
+ if (!rth)
+ continue;
+
+ spin_lock_bh(rt_hash_lock_addr(i));
+#ifdef CONFIG_NET_NS
+ {
+ struct rtable **prev, *p;
+
+ rth = rt_hash_table[i].chain;
+
+ /* defer releasing the head of the list until after spin_unlock */
+ for (tail = rth; tail; tail = tail->u.dst.rt_next)
+ if (!rt_is_expired(tail))
+ break;
+ if (rth != tail)
+ rt_hash_table[i].chain = tail;
+
+ /* call rt_free on entries after the tail requiring flush */
+ prev = &rt_hash_table[i].chain;
+ for (p = *prev; p; p = next) {
+ next = p->u.dst.rt_next;
+ if (!rt_is_expired(p)) {
+ prev = &p->u.dst.rt_next;
} else {
- *rthp = rth->u.rt_next;
- rt_free(rth);
- if (removed_count)
- ++(*removed_count);
+ *prev = next;
+ rt_free(p);
}
- } else {
- if (!((*rthp)->u.dst.flags & DST_BALANCED) &&
- passedexpired && !nextstep)
- nextstep = &rth->u.rt_next;
+ }
+ }
+#else
+ rth = rt_hash_table[i].chain;
+ rt_hash_table[i].chain = NULL;
+ tail = NULL;
+#endif
+ spin_unlock_bh(rt_hash_lock_addr(i));
- rthp = &rth->u.rt_next;
+ for (; rth != tail; rth = next) {
+ next = rth->u.dst.rt_next;
+ rt_free(rth);
}
}
-
- rt_free(expentry);
- if (removed_count)
- ++(*removed_count);
-
- return nextstep;
}
-#endif /* CONFIG_IP_ROUTE_MULTIPATH_CACHED */
+/*
+ * While freeing expired entries, we compute average chain length
+ * and standard deviation, using fixed-point arithmetic.
+ * This is to get an estimate of rt_chain_length_max:
+ * rt_chain_length_max = max(elasticity, AVG + 4*SD)
+ * We use 3 bits for the fractional part, and 29 (or 61) for magnitude.
+ */
+
+#define FRACT_BITS 3
+#define ONE (1UL << FRACT_BITS)
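+
+/*
+ * Worked example (illustrative): with FRACT_BITS = 3, ONE = 8, a
+ * bucket holding 5 distinct entries accumulates length = 5 * ONE = 40,
+ * i.e. 5.0 in fixed point. If avg = 40 (5.0) and sd = 16 (2.0), then
+ * (avg + 4*sd) >> FRACT_BITS = (40 + 64) >> 3 = 13, so with the
+ * default elasticity of 8, rt_chain_length_max = max(8, 13) = 13.
+ */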
-/* This runs via a timer and thus is always in BH context. */
-static void rt_check_expire(unsigned long dummy)
+static void rt_check_expire(void)
{
- static int rover;
- int i = rover, t;
+ static unsigned int rover;
+ unsigned int i = rover, goal;
struct rtable *rth, **rthp;
- unsigned long now = jiffies;
-
- for (t = ip_rt_gc_interval << rt_hash_log; t >= 0;
- t -= ip_rt_gc_timeout) {
+ unsigned long length = 0, samples = 0;
+ unsigned long sum = 0, sum2 = 0;
+ u64 mult;
+
+ mult = ((u64)ip_rt_gc_interval) << rt_hash_log;
+ if (ip_rt_gc_timeout > 1)
+ do_div(mult, ip_rt_gc_timeout);
+ goal = (unsigned int)mult;
+ if (goal > rt_hash_mask)
+ goal = rt_hash_mask + 1;
+ length = 0;
+ for (; goal > 0; goal--) {
unsigned long tmo = ip_rt_gc_timeout;
i = (i + 1) & rt_hash_mask;
rthp = &rt_hash_table[i].chain;
- spin_lock(rt_hash_lock_addr(i));
+ if (need_resched())
+ cond_resched();
+
+ samples++;
+
+ if (*rthp == NULL)
+ continue;
+ spin_lock_bh(rt_hash_lock_addr(i));
while ((rth = *rthp) != NULL) {
+ if (rt_is_expired(rth)) {
+ *rthp = rth->u.dst.rt_next;
+ rt_free(rth);
+ continue;
+ }
if (rth->u.dst.expires) {
/* Entry is expired even if it is in use */
- if (time_before_eq(now, rth->u.dst.expires)) {
+ if (time_before_eq(jiffies, rth->u.dst.expires)) {
tmo >>= 1;
- rthp = &rth->u.rt_next;
+ rthp = &rth->u.dst.rt_next;
+ /*
+ * Only bump our length if the hash
+ * inputs on entries n and n+1 are not
+ * the same; we only count entries on
+ * a chain with equal hash inputs once,
+ * so that entries for different QoS
+ * levels and other non-hash-input
+ * attributes don't unfairly skew
+ * the length computation.
+ */
+ if ((*rthp == NULL) ||
+ !compare_hash_inputs(&(*rthp)->fl,
+ &rth->fl))
+ length += ONE;
continue;
}
} else if (!rt_may_expire(rth, tmo, ip_rt_gc_timeout)) {
tmo >>= 1;
- rthp = &rth->u.rt_next;
+ rthp = &rth->u.dst.rt_next;
+ if ((*rthp == NULL) ||
+ !compare_hash_inputs(&(*rthp)->fl,
+ &rth->fl))
+ length += ONE;
continue;
}
/* Cleanup aged off entries. */
-#ifdef CONFIG_IP_ROUTE_MULTIPATH_CACHED
- /* remove all related balanced entries if necessary */
- if (rth->u.dst.flags & DST_BALANCED) {
- rthp = rt_remove_balanced_route(
- &rt_hash_table[i].chain,
- rth, NULL);
- if (!rthp)
- break;
- } else {
- *rthp = rth->u.rt_next;
- rt_free(rth);
- }
-#else /* CONFIG_IP_ROUTE_MULTIPATH_CACHED */
- *rthp = rth->u.rt_next;
- rt_free(rth);
-#endif /* CONFIG_IP_ROUTE_MULTIPATH_CACHED */
+ *rthp = rth->u.dst.rt_next;
+ rt_free(rth);
}
- spin_unlock(rt_hash_lock_addr(i));
-
- /* Fallback loop breaker. */
- if (time_after(jiffies, now))
- break;
+ spin_unlock_bh(rt_hash_lock_addr(i));
+ sum += length;
+ sum2 += length*length;
+ }
+ if (samples) {
+ unsigned long avg = sum / samples;
+ unsigned long sd = int_sqrt(sum2 / samples - avg*avg);
+ rt_chain_length_max = max_t(unsigned long,
+ ip_rt_gc_elasticity,
+ (avg + 4*sd) >> FRACT_BITS);
}
rover = i;
- mod_timer(&rt_periodic_timer, now + ip_rt_gc_interval);
}
-/* This can run from both BH and non-BH contexts, the latter
- * in the case of a forced flush event.
+/*
+ * rt_worker_func() is run in process context.
+ * We call rt_check_expire() to scan part of the hash table.
*/
-static void rt_run_flush(unsigned long dummy)
+static void rt_worker_func(struct work_struct *work)
{
- int i;
- struct rtable *rth, *next;
-
- rt_deadline = 0;
-
- get_random_bytes(&rt_hash_rnd, 4);
-
- for (i = rt_hash_mask; i >= 0; i--) {
- spin_lock_bh(rt_hash_lock_addr(i));
- rth = rt_hash_table[i].chain;
- if (rth)
- rt_hash_table[i].chain = NULL;
- spin_unlock_bh(rt_hash_lock_addr(i));
-
- for (; rth; rth = next) {
- next = rth->u.rt_next;
- rt_free(rth);
- }
- }
+ rt_check_expire();
+ schedule_delayed_work(&expires_work, ip_rt_gc_interval);
}
-static DEFINE_SPINLOCK(rt_flush_lock);
-
-void rt_cache_flush(int delay)
+/*
+ * Perturbation of rt_genid by a small quantity [1..256].
+ * Using 8 bits of shuffling ensures we can call rt_cache_invalidate()
+ * many times (2^24) without reusing a recent rt_genid.
+ * Jenkins hash is strong enough that little changes of rt_genid are OK.
+ */
+static void rt_cache_invalidate(struct net *net)
{
- unsigned long now = jiffies;
- int user_mode = !in_softirq();
-
- if (delay < 0)
- delay = ip_rt_min_delay;
-
- /* flush existing multipath state*/
- multipath_flush();
+ unsigned char shuffle;
- spin_lock_bh(&rt_flush_lock);
-
- if (del_timer(&rt_flush_timer) && delay > 0 && rt_deadline) {
- long tmo = (long)(rt_deadline - now);
-
- /* If flush timer is already running
- and flush request is not immediate (delay > 0):
+ get_random_bytes(&shuffle, sizeof(shuffle));
+ atomic_add(shuffle + 1U, &net->ipv4.rt_genid);
+}
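+
+/*
+ * Example (illustrative): shuffle is uniform in [0, 255], so rt_genid
+ * advances by some value in [1, 256] on each invalidation; per the
+ * comment above, roughly 2^24 invalidations can occur before a
+ * recently used genid value can come around again.
+ */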
- if deadline is not achieved, prolongate timer to "delay",
- otherwise fire it at deadline time.
- */
+/*
+ * delay < 0  : invalidate cache (fast: entries will be deleted later)
+ * delay >= 0 : invalidate & flush cache (can be long)
+ */
+void rt_cache_flush(struct net *net, int delay)
+{
+ rt_cache_invalidate(net);
+ if (delay >= 0)
+ rt_do_flush(!in_softirq());
+}
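+
+/*
+ * Usage sketch (assumed, following the semantics above):
+ *
+ *	rt_cache_flush(net, -1);   invalidate only; gc reaps entries later
+ *	rt_cache_flush(net, 0);    invalidate and walk the whole table now
+ */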
- if (user_mode && tmo < ip_rt_max_delay-ip_rt_min_delay)
- tmo = 0;
-
- if (delay > tmo)
- delay = tmo;
- }
+/*
+ * We change rt_genid and let gc do the cleanup
+ */
+static void rt_secret_rebuild(unsigned long __net)
+{
+ struct net *net = (struct net *)__net;
+ rt_cache_invalidate(net);
+ mod_timer(&net->ipv4.rt_secret_timer, jiffies + ip_rt_secret_interval);
+}
- if (delay <= 0) {
- spin_unlock_bh(&rt_flush_lock);
- rt_run_flush(0);
- return;
+static void rt_secret_rebuild_oneshot(struct net *net)
+{
+ del_timer_sync(&net->ipv4.rt_secret_timer);
+ rt_cache_invalidate(net);
+ if (ip_rt_secret_interval) {
+ net->ipv4.rt_secret_timer.expires += ip_rt_secret_interval;
+ add_timer(&net->ipv4.rt_secret_timer);
}
-
- if (rt_deadline == 0)
- rt_deadline = now + ip_rt_max_delay;
-
- mod_timer(&rt_flush_timer, now+delay);
- spin_unlock_bh(&rt_flush_lock);
}
-static void rt_secret_rebuild(unsigned long dummy)
+static void rt_emergency_hash_rebuild(struct net *net)
{
- unsigned long now = jiffies;
+ if (net_ratelimit()) {
+ printk(KERN_WARNING "Route hash chain too long!\n");
+ printk(KERN_WARNING "Adjust your secret_interval!\n");
+ }
- rt_cache_flush(0);
- mod_timer(&rt_secret_timer, now + ip_rt_secret_interval);
+ rt_secret_rebuild_oneshot(net);
}
/*
and when load increases it reduces to limit cache size.
*/
-static int rt_garbage_collect(void)
+static int rt_garbage_collect(struct dst_ops *ops)
{
static unsigned long expire = RT_GC_TIMEOUT;
static unsigned long last_gc;
equilibrium = ipv4_dst_ops.gc_thresh;
goal = atomic_read(&ipv4_dst_ops.entries) - equilibrium;
if (goal > 0) {
- equilibrium += min_t(unsigned int, goal / 2, rt_hash_mask + 1);
+ equilibrium += min_t(unsigned int, goal >> 1, rt_hash_mask + 1);
goal = atomic_read(&ipv4_dst_ops.entries) - equilibrium;
}
} else {
/* We are in dangerous area. Try to reduce cache really
* aggressively.
*/
- goal = max_t(unsigned int, goal / 2, rt_hash_mask + 1);
+ goal = max_t(unsigned int, goal >> 1, rt_hash_mask + 1);
equilibrium = atomic_read(&ipv4_dst_ops.entries) - goal;
}
rthp = &rt_hash_table[k].chain;
spin_lock_bh(rt_hash_lock_addr(k));
while ((rth = *rthp) != NULL) {
- if (!rt_may_expire(rth, tmo, expire)) {
+ if (!rt_is_expired(rth) &&
+ !rt_may_expire(rth, tmo, expire)) {
tmo >>= 1;
- rthp = &rth->u.rt_next;
+ rthp = &rth->u.dst.rt_next;
continue;
}
-#ifdef CONFIG_IP_ROUTE_MULTIPATH_CACHED
- /* remove all related balanced entries
- * if necessary
- */
- if (rth->u.dst.flags & DST_BALANCED) {
- int r;
-
- rthp = rt_remove_balanced_route(
- &rt_hash_table[i].chain,
- rth,
- &r);
- goal -= r;
- if (!rthp)
- break;
- } else {
- *rthp = rth->u.rt_next;
- rt_free(rth);
- goal--;
- }
-#else /* CONFIG_IP_ROUTE_MULTIPATH_CACHED */
- *rthp = rth->u.rt_next;
+ *rthp = rth->u.dst.rt_next;
rt_free(rth);
goal--;
-#endif /* CONFIG_IP_ROUTE_MULTIPATH_CACHED */
}
spin_unlock_bh(rt_hash_lock_addr(k));
if (goal <= 0)
static int rt_intern_hash(unsigned hash, struct rtable *rt, struct rtable **rp)
{
struct rtable *rth, **rthp;
+ struct rtable *rthi;
unsigned long now;
struct rtable *cand, **candp;
u32 min_score;
candp = NULL;
now = jiffies;
+ if (!rt_caching(dev_net(rt->u.dst.dev))) {
+ rt_drop(rt);
+ return 0;
+ }
+
rthp = &rt_hash_table[hash].chain;
+ rthi = NULL;
spin_lock_bh(rt_hash_lock_addr(hash));
while ((rth = *rthp) != NULL) {
-#ifdef CONFIG_IP_ROUTE_MULTIPATH_CACHED
- if (!(rth->u.dst.flags & DST_BALANCED) &&
- compare_keys(&rth->fl, &rt->fl)) {
-#else
- if (compare_keys(&rth->fl, &rt->fl)) {
-#endif
+ if (rt_is_expired(rth)) {
+ *rthp = rth->u.dst.rt_next;
+ rt_free(rth);
+ continue;
+ }
+ if (compare_keys(&rth->fl, &rt->fl) && compare_netns(rth, rt)) {
/* Put it first */
- *rthp = rth->u.rt_next;
+ *rthp = rth->u.dst.rt_next;
/*
* Since lookup is lockfree, the deletion
* must be visible to another weakly ordered CPU before
* the insertion at the start of the hash chain.
*/
- rcu_assign_pointer(rth->u.rt_next,
+ rcu_assign_pointer(rth->u.dst.rt_next,
rt_hash_table[hash].chain);
/*
* Since lookup is lockfree, the update writes
*/
rcu_assign_pointer(rt_hash_table[hash].chain, rth);
- rth->u.dst.__use++;
- dst_hold(&rth->u.dst);
- rth->u.dst.lastuse = now;
+ dst_use(&rth->u.dst, now);
spin_unlock_bh(rt_hash_lock_addr(hash));
rt_drop(rt);
chain_length++;
- rthp = &rth->u.rt_next;
+ rthp = &rth->u.dst.rt_next;
+
+ /*
+ * Check to see if the next entry in the chain
+ * contains the same hash input values as rt. If it does,
+ * this is where we will insert into the list, instead of
+ * at the head. This groups together entries that differ
+ * by aspects not relevant to the hash function, which we
+ * use to adjust our chain length.
+ */
+ if (*rthp && compare_hash_inputs(&(*rthp)->fl, &rt->fl))
+ rthi = rth;
}
if (cand) {
* only 2 entries per bucket. We will see.
*/
if (chain_length > ip_rt_gc_elasticity) {
- *candp = cand->u.rt_next;
+ *candp = cand->u.dst.rt_next;
rt_free(cand);
}
+ } else {
+ if (chain_length > rt_chain_length_max) {
+ struct net *net = dev_net(rt->u.dst.dev);
+ int num = ++net->ipv4.current_rt_cache_rebuild_count;
+ if (!rt_caching(dev_net(rt->u.dst.dev))) {
+ printk(KERN_WARNING "%s: %d rebuilds is over limit, route caching disabled\n",
+ rt->u.dst.dev->name, num);
+ }
+ rt_emergency_hash_rebuild(dev_net(rt->u.dst.dev));
+ }
}
/* Try to bind route to arp only if it is output
int saved_int = ip_rt_gc_min_interval;
ip_rt_gc_elasticity = 1;
ip_rt_gc_min_interval = 0;
- rt_garbage_collect();
+ rt_garbage_collect(&ipv4_dst_ops);
ip_rt_gc_min_interval = saved_int;
ip_rt_gc_elasticity = saved_elasticity;
goto restart;
}
}
- rt->u.rt_next = rt_hash_table[hash].chain;
+ if (rthi)
+ rt->u.dst.rt_next = rthi->u.dst.rt_next;
+ else
+ rt->u.dst.rt_next = rt_hash_table[hash].chain;
+
#if RT_CACHE_DEBUG >= 2
- if (rt->u.rt_next) {
+ if (rt->u.dst.rt_next) {
struct rtable *trt;
- printk(KERN_DEBUG "rt_cache @%02x: %u.%u.%u.%u", hash,
- NIPQUAD(rt->rt_dst));
- for (trt = rt->u.rt_next; trt; trt = trt->u.rt_next)
- printk(" . %u.%u.%u.%u", NIPQUAD(trt->rt_dst));
+ printk(KERN_DEBUG "rt_cache @%02x: %pI4", hash, &rt->rt_dst);
+ for (trt = rt->u.dst.rt_next; trt; trt = trt->u.dst.rt_next)
+ printk(" . %pI4", &trt->rt_dst);
printk("\n");
}
#endif
- rt_hash_table[hash].chain = rt;
+ /*
+ * Since lookup is lockfree, we must make sure
+ * previous writes to rt are committed to memory
+ * before making rt visible to other CPUs.
+ */
+ if (rthi)
+ rcu_assign_pointer(rthi->u.dst.rt_next, rt);
+ else
+ rcu_assign_pointer(rt_hash_table[hash].chain, rt);
+
spin_unlock_bh(rt_hash_lock_addr(hash));
*rp = rt;
return 0;
u32 salt;
spin_lock_bh(&ip_fb_id_lock);
- salt = secure_ip_id(ip_fallback_id ^ iph->daddr);
+ salt = secure_ip_id((__force __be32)ip_fallback_id ^ iph->daddr);
iph->id = htons(salt & 0xFFFF);
ip_fallback_id = salt;
spin_unlock_bh(&ip_fb_id_lock);
return;
}
} else
- printk(KERN_DEBUG "rt_bind_peer(0) @%p\n",
+ printk(KERN_DEBUG "rt_bind_peer(0) @%p\n",
__builtin_return_address(0));
ip_select_fb_ident(iph);
static void rt_del(unsigned hash, struct rtable *rt)
{
- struct rtable **rthp;
+ struct rtable **rthp, *aux;
+ rthp = &rt_hash_table[hash].chain;
spin_lock_bh(rt_hash_lock_addr(hash));
ip_rt_put(rt);
- for (rthp = &rt_hash_table[hash].chain; *rthp;
- rthp = &(*rthp)->u.rt_next)
- if (*rthp == rt) {
- *rthp = rt->u.rt_next;
- rt_free(rt);
- break;
+ while ((aux = *rthp) != NULL) {
+ if (aux == rt || rt_is_expired(aux)) {
+ *rthp = aux->u.dst.rt_next;
+ rt_free(aux);
+ continue;
}
+ rthp = &aux->u.dst.rt_next;
+ }
spin_unlock_bh(rt_hash_lock_addr(hash));
}
-void ip_rt_redirect(u32 old_gw, u32 daddr, u32 new_gw,
- u32 saddr, u8 tos, struct net_device *dev)
+void ip_rt_redirect(__be32 old_gw, __be32 daddr, __be32 new_gw,
+ __be32 saddr, struct net_device *dev)
{
int i, k;
struct in_device *in_dev = in_dev_get(dev);
struct rtable *rth, **rthp;
- u32 skeys[2] = { saddr, 0 };
+ __be32 skeys[2] = { saddr, 0 };
int ikeys[2] = { dev->ifindex, 0 };
-
- tos &= IPTOS_RT_MASK;
+ struct netevent_redirect netevent;
+ struct net *net;
if (!in_dev)
return;
+ net = dev_net(dev);
if (new_gw == old_gw || !IN_DEV_RX_REDIRECTS(in_dev)
- || MULTICAST(new_gw) || BADCLASS(new_gw) || ZERONET(new_gw))
+ || ipv4_is_multicast(new_gw) || ipv4_is_lbcast(new_gw)
+ || ipv4_is_zeronet(new_gw))
+ goto reject_redirect;
+
+ if (!rt_caching(net))
goto reject_redirect;
if (!IN_DEV_SHARED_MEDIA(in_dev)) {
if (IN_DEV_SEC_REDIRECTS(in_dev) && ip_fib_check_default(new_gw, dev))
goto reject_redirect;
} else {
- if (inet_addr_type(new_gw) != RTN_UNICAST)
+ if (inet_addr_type(net, new_gw) != RTN_UNICAST)
goto reject_redirect;
}
for (i = 0; i < 2; i++) {
for (k = 0; k < 2; k++) {
- unsigned hash = rt_hash_code(daddr,
- skeys[i] ^ (ikeys[k] << 5),
- tos);
+ unsigned hash = rt_hash(daddr, skeys[i], ikeys[k],
+ rt_genid(net));
rthp = &rt_hash_table[hash].chain;
if (rth->fl.fl4_dst != daddr ||
rth->fl.fl4_src != skeys[i] ||
- rth->fl.fl4_tos != tos ||
rth->fl.oif != ikeys[k] ||
- rth->fl.iif != 0) {
- rthp = &rth->u.rt_next;
+ rth->fl.iif != 0 ||
+ rt_is_expired(rth) ||
+ !net_eq(dev_net(rth->u.dst.dev), net)) {
+ rthp = &rth->u.dst.rt_next;
continue;
}
/* Copy all the information. */
*rt = *rth;
- INIT_RCU_HEAD(&rt->u.dst.rcu_head);
rt->u.dst.__use = 1;
atomic_set(&rt->u.dst.__refcnt, 1);
rt->u.dst.child = NULL;
rt->u.dst.path = &rt->u.dst;
rt->u.dst.neighbour = NULL;
rt->u.dst.hh = NULL;
+#ifdef CONFIG_XFRM
rt->u.dst.xfrm = NULL;
-
+#endif
+ rt->rt_genid = rt_genid(net);
rt->rt_flags |= RTCF_REDIRECTED;
/* Gateway is different ... */
goto do_next;
}
+ netevent.old = &rth->u.dst;
+ netevent.new = &rt->u.dst;
+ call_netevent_notifiers(NETEVENT_REDIRECT,
+ &netevent);
+
rt_del(hash, rth);
if (!rt_intern_hash(hash, rt, &rt))
ip_rt_put(rt);
reject_redirect:
#ifdef CONFIG_IP_ROUTE_VERBOSE
if (IN_DEV_LOG_MARTIANS(in_dev) && net_ratelimit())
- printk(KERN_INFO "Redirect from %u.%u.%u.%u on %s about "
- "%u.%u.%u.%u ignored.\n"
- " Advised path = %u.%u.%u.%u -> %u.%u.%u.%u, "
- "tos %02x\n",
- NIPQUAD(old_gw), dev->name, NIPQUAD(new_gw),
- NIPQUAD(saddr), NIPQUAD(daddr), tos);
+ printk(KERN_INFO "Redirect from %pI4 on %s about %pI4 ignored.\n"
+ " Advised path = %pI4 -> %pI4\n",
+ &old_gw, dev->name, &new_gw,
+ &saddr, &daddr);
#endif
in_dev_put(in_dev);
}
static struct dst_entry *ipv4_negative_advice(struct dst_entry *dst)
{
- struct rtable *rt = (struct rtable*)dst;
+ struct rtable *rt = (struct rtable *)dst;
struct dst_entry *ret = dst;
if (rt) {
ret = NULL;
} else if ((rt->rt_flags & RTCF_REDIRECTED) ||
rt->u.dst.expires) {
- unsigned hash = rt_hash_code(rt->fl.fl4_dst,
- rt->fl.fl4_src ^
- (rt->fl.oif << 5),
- rt->fl.fl4_tos);
+ unsigned hash = rt_hash(rt->fl.fl4_dst, rt->fl.fl4_src,
+ rt->fl.oif,
+ rt_genid(dev_net(dst->dev)));
#if RT_CACHE_DEBUG >= 1
- printk(KERN_DEBUG "ip_rt_advice: redirect to "
- "%u.%u.%u.%u/%02x dropped\n",
- NIPQUAD(rt->rt_dst), rt->fl.fl4_tos);
+ printk(KERN_DEBUG "ipv4_negative_advice: redirect to %pI4/%02x dropped\n",
+ &rt->rt_dst, rt->fl.fl4_tos);
#endif
rt_del(hash, rt);
ret = NULL;
void ip_rt_send_redirect(struct sk_buff *skb)
{
- struct rtable *rt = (struct rtable*)skb->dst;
+ struct rtable *rt = skb->rtable;
struct in_device *in_dev = in_dev_get(rt->u.dst.dev);
if (!in_dev)
/* Check for load limit; set rate_last to the latest sent
* redirect.
*/
- if (time_after(jiffies,
+ if (rt->u.dst.rate_tokens == 0 ||
+ time_after(jiffies,
(rt->u.dst.rate_last +
(ip_rt_redirect_load << rt->u.dst.rate_tokens)))) {
icmp_send(skb, ICMP_REDIRECT, ICMP_REDIR_HOST, rt->rt_gateway);
if (IN_DEV_LOG_MARTIANS(in_dev) &&
rt->u.dst.rate_tokens == ip_rt_redirect_number &&
net_ratelimit())
- printk(KERN_WARNING "host %u.%u.%u.%u/if%d ignores "
- "redirects for %u.%u.%u.%u to %u.%u.%u.%u.\n",
- NIPQUAD(rt->rt_src), rt->rt_iif,
- NIPQUAD(rt->rt_dst), NIPQUAD(rt->rt_gateway));
+ printk(KERN_WARNING "host %pI4/if%d ignores redirects for %pI4 to %pI4.\n",
+ &rt->rt_src, rt->rt_iif,
+ &rt->rt_dst, &rt->rt_gateway);
#endif
}
out:
- in_dev_put(in_dev);
+ in_dev_put(in_dev);
}
static int ip_error(struct sk_buff *skb)
{
- struct rtable *rt = (struct rtable*)skb->dst;
+ struct rtable *rt = skb->rtable;
unsigned long now;
int code;
break;
case ENETUNREACH:
code = ICMP_NET_UNREACH;
+ IP_INC_STATS_BH(dev_net(rt->u.dst.dev),
+ IPSTATS_MIB_INNOROUTES);
break;
case EACCES:
code = ICMP_PKT_FILTERED;
out: kfree_skb(skb);
return 0;
-}
+}
/*
* The last two values are not from the RFC but
* are needed for AMPRnet AX.25 paths.
*/
-static unsigned short mtu_plateau[] =
+static const unsigned short mtu_plateau[] =
{32000, 17914, 8166, 4352, 2002, 1492, 576, 296, 216, 128 };
-static __inline__ unsigned short guess_mtu(unsigned short old_mtu)
+static inline unsigned short guess_mtu(unsigned short old_mtu)
{
int i;
-
+
for (i = 0; i < ARRAY_SIZE(mtu_plateau); i++)
if (old_mtu > mtu_plateau[i])
return mtu_plateau[i];
return 68;
}
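+
+/*
+ * Example (illustrative): if a router reports "fragmentation needed"
+ * without a next-hop MTU and the offending packet carried
+ * tot_len == 1500, guess_mtu(1500) returns 1492, the largest plateau
+ * value below the old MTU.
+ */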
-unsigned short ip_rt_frag_needed(struct iphdr *iph, unsigned short new_mtu)
+unsigned short ip_rt_frag_needed(struct net *net, struct iphdr *iph,
+ unsigned short new_mtu,
+ struct net_device *dev)
{
- int i;
+ int i, k;
unsigned short old_mtu = ntohs(iph->tot_len);
struct rtable *rth;
- u32 skeys[2] = { iph->saddr, 0, };
- u32 daddr = iph->daddr;
- u8 tos = iph->tos & IPTOS_RT_MASK;
+ int ikeys[2] = { dev->ifindex, 0 };
+ __be32 skeys[2] = { iph->saddr, 0, };
+ __be32 daddr = iph->daddr;
unsigned short est_mtu = 0;
if (ipv4_config.no_pmtu_disc)
return 0;
- for (i = 0; i < 2; i++) {
- unsigned hash = rt_hash_code(daddr, skeys[i], tos);
+ for (k = 0; k < 2; k++) {
+ for (i = 0; i < 2; i++) {
+ unsigned hash = rt_hash(daddr, skeys[i], ikeys[k],
+ rt_genid(net));
- rcu_read_lock();
- for (rth = rcu_dereference(rt_hash_table[hash].chain); rth;
- rth = rcu_dereference(rth->u.rt_next)) {
- if (rth->fl.fl4_dst == daddr &&
- rth->fl.fl4_src == skeys[i] &&
- rth->rt_dst == daddr &&
- rth->rt_src == iph->saddr &&
- rth->fl.fl4_tos == tos &&
- rth->fl.iif == 0 &&
- !(dst_metric_locked(&rth->u.dst, RTAX_MTU))) {
+ rcu_read_lock();
+ for (rth = rcu_dereference(rt_hash_table[hash].chain); rth;
+ rth = rcu_dereference(rth->u.dst.rt_next)) {
unsigned short mtu = new_mtu;
+ if (rth->fl.fl4_dst != daddr ||
+ rth->fl.fl4_src != skeys[i] ||
+ rth->rt_dst != daddr ||
+ rth->rt_src != iph->saddr ||
+ rth->fl.oif != ikeys[k] ||
+ rth->fl.iif != 0 ||
+ dst_metric_locked(&rth->u.dst, RTAX_MTU) ||
+ !net_eq(dev_net(rth->u.dst.dev), net) ||
+ rt_is_expired(rth))
+ continue;
+
if (new_mtu < 68 || new_mtu >= old_mtu) {
/* BSD 4.2 compatibility hack :-( */
if (mtu == 0 &&
- old_mtu >= rth->u.dst.metrics[RTAX_MTU-1] &&
+ old_mtu >= dst_mtu(&rth->u.dst) &&
old_mtu >= 68 + (iph->ihl << 2))
old_mtu -= iph->ihl << 2;
mtu = guess_mtu(old_mtu);
}
- if (mtu <= rth->u.dst.metrics[RTAX_MTU-1]) {
- if (mtu < rth->u.dst.metrics[RTAX_MTU-1]) {
+ if (mtu <= dst_mtu(&rth->u.dst)) {
+ if (mtu < dst_mtu(&rth->u.dst)) {
dst_confirm(&rth->u.dst);
if (mtu < ip_rt_min_pmtu) {
mtu = ip_rt_min_pmtu;
est_mtu = mtu;
}
}
+ rcu_read_unlock();
}
- rcu_read_unlock();
}
return est_mtu ? : new_mtu;
}
static void ip_rt_update_pmtu(struct dst_entry *dst, u32 mtu)
{
- if (dst->metrics[RTAX_MTU-1] > mtu && mtu >= 68 &&
+ if (dst_mtu(dst) > mtu && mtu >= 68 &&
!(dst_metric_locked(dst, RTAX_MTU))) {
if (mtu < ip_rt_min_pmtu) {
mtu = ip_rt_min_pmtu;
}
dst->metrics[RTAX_MTU-1] = mtu;
dst_set_expires(dst, ip_rt_mtu_expires);
+ call_netevent_notifiers(NETEVENT_PMTU_UPDATE, dst);
}
}
{
struct rtable *rt = (struct rtable *) dst;
struct in_device *idev = rt->idev;
- if (dev != &loopback_dev && idev && idev->dev == dev) {
- struct in_device *loopback_idev = in_dev_get(&loopback_dev);
+ if (dev != dev_net(dev)->loopback_dev && idev && idev->dev == dev) {
+ struct in_device *loopback_idev =
+ in_dev_get(dev_net(dev)->loopback_dev);
if (loopback_idev) {
rt->idev = loopback_idev;
in_dev_put(idev);
icmp_send(skb, ICMP_DEST_UNREACH, ICMP_HOST_UNREACH, 0);
- rt = (struct rtable *) skb->dst;
+ rt = skb->rtable;
if (rt)
dst_set_expires(&rt->u.dst, 0);
}
static int ip_rt_bug(struct sk_buff *skb)
{
- printk(KERN_DEBUG "ip_rt_bug: %u.%u.%u.%u -> %u.%u.%u.%u, %s\n",
- NIPQUAD(skb->nh.iph->saddr), NIPQUAD(skb->nh.iph->daddr),
+ printk(KERN_DEBUG "ip_rt_bug: %pI4 -> %pI4, %s\n",
+ &ip_hdr(skb)->saddr, &ip_hdr(skb)->daddr,
skb->dev ? skb->dev->name : "?");
kfree_skb(skb);
return 0;
void ip_rt_get_source(u8 *addr, struct rtable *rt)
{
- u32 src;
+ __be32 src;
struct fib_result res;
if (rt->fl.iif == 0)
src = rt->rt_src;
- else if (fib_lookup(&rt->fl, &res) == 0) {
+ else if (fib_lookup(dev_net(rt->u.dst.dev), &rt->fl, &res) == 0) {
src = FIB_RES_PREFSRC(res);
fib_res_put(&res);
} else
sizeof(rt->u.dst.metrics));
if (fi->fib_mtu == 0) {
rt->u.dst.metrics[RTAX_MTU-1] = rt->u.dst.dev->mtu;
- if (rt->u.dst.metrics[RTAX_LOCK-1] & (1 << RTAX_MTU) &&
+ if (dst_metric_locked(&rt->u.dst, RTAX_MTU) &&
rt->rt_gateway != rt->rt_dst &&
rt->u.dst.dev->mtu > 576)
rt->u.dst.metrics[RTAX_MTU-1] = 576;
} else
rt->u.dst.metrics[RTAX_MTU-1]= rt->u.dst.dev->mtu;
- if (rt->u.dst.metrics[RTAX_HOPLIMIT-1] == 0)
+ if (dst_metric(&rt->u.dst, RTAX_HOPLIMIT) == 0)
rt->u.dst.metrics[RTAX_HOPLIMIT-1] = sysctl_ip_default_ttl;
- if (rt->u.dst.metrics[RTAX_MTU-1] > IP_MAX_MTU)
+ if (dst_mtu(&rt->u.dst) > IP_MAX_MTU)
rt->u.dst.metrics[RTAX_MTU-1] = IP_MAX_MTU;
- if (rt->u.dst.metrics[RTAX_ADVMSS-1] == 0)
+ if (dst_metric(&rt->u.dst, RTAX_ADVMSS) == 0)
rt->u.dst.metrics[RTAX_ADVMSS-1] = max_t(unsigned int, rt->u.dst.dev->mtu - 40,
ip_rt_min_advmss);
- if (rt->u.dst.metrics[RTAX_ADVMSS-1] > 65535 - 40)
+ if (dst_metric(&rt->u.dst, RTAX_ADVMSS) > 65535 - 40)
rt->u.dst.metrics[RTAX_ADVMSS-1] = 65535 - 40;
#ifdef CONFIG_NET_CLS_ROUTE
#endif
set_class_tag(rt, itag);
#endif
- rt->rt_type = res->type;
+ rt->rt_type = res->type;
}
-static int ip_route_input_mc(struct sk_buff *skb, u32 daddr, u32 saddr,
+static int ip_route_input_mc(struct sk_buff *skb, __be32 daddr, __be32 saddr,
u8 tos, struct net_device *dev, int our)
{
unsigned hash;
struct rtable *rth;
- u32 spec_dst;
+ __be32 spec_dst;
struct in_device *in_dev = in_dev_get(dev);
u32 itag = 0;
if (in_dev == NULL)
return -EINVAL;
- if (MULTICAST(saddr) || BADCLASS(saddr) || LOOPBACK(saddr) ||
- skb->protocol != htons(ETH_P_IP))
+ if (ipv4_is_multicast(saddr) || ipv4_is_lbcast(saddr) ||
+ ipv4_is_loopback(saddr) || skb->protocol != htons(ETH_P_IP))
goto e_inval;
- if (ZERONET(saddr)) {
- if (!LOCAL_MCAST(daddr))
+ if (ipv4_is_zeronet(saddr)) {
+ if (!ipv4_is_local_multicast(daddr))
goto e_inval;
spec_dst = inet_select_addr(dev, 0, RT_SCOPE_LINK);
} else if (fib_validate_source(saddr, 0, tos, 0,
atomic_set(&rth->u.dst.__refcnt, 1);
rth->u.dst.flags= DST_HOST;
- if (in_dev->cnf.no_policy)
+ if (IN_DEV_CONF_GET(in_dev, NOPOLICY))
rth->u.dst.flags |= DST_NOPOLICY;
rth->fl.fl4_dst = daddr;
rth->rt_dst = daddr;
rth->fl.fl4_tos = tos;
-#ifdef CONFIG_IP_ROUTE_FWMARK
- rth->fl.fl4_fwmark= skb->nfmark;
-#endif
+ rth->fl.mark = skb->mark;
rth->fl.fl4_src = saddr;
rth->rt_src = saddr;
#ifdef CONFIG_NET_CLS_ROUTE
#endif
rth->rt_iif =
rth->fl.iif = dev->ifindex;
- rth->u.dst.dev = &loopback_dev;
+ rth->u.dst.dev = init_net.loopback_dev;
dev_hold(rth->u.dst.dev);
rth->idev = in_dev_get(rth->u.dst.dev);
rth->fl.oif = 0;
rth->rt_gateway = daddr;
rth->rt_spec_dst= spec_dst;
- rth->rt_type = RTN_MULTICAST;
+ rth->rt_genid = rt_genid(dev_net(dev));
rth->rt_flags = RTCF_MULTICAST;
+ rth->rt_type = RTN_MULTICAST;
if (our) {
rth->u.dst.input= ip_local_deliver;
rth->rt_flags |= RTCF_LOCAL;
}
#ifdef CONFIG_IP_MROUTE
- if (!LOCAL_MCAST(daddr) && IN_DEV_MFORWARD(in_dev))
+ if (!ipv4_is_local_multicast(daddr) && IN_DEV_MFORWARD(in_dev))
rth->u.dst.input = ip_mr_input;
#endif
RT_CACHE_STAT_INC(in_slow_mc);
in_dev_put(in_dev);
- hash = rt_hash_code(daddr, saddr ^ (dev->ifindex << 5), tos);
- return rt_intern_hash(hash, rth, (struct rtable**) &skb->dst);
+ hash = rt_hash(daddr, saddr, dev->ifindex, rt_genid(dev_net(dev)));
+ return rt_intern_hash(hash, rth, &skb->rtable);
e_nobufs:
in_dev_put(in_dev);
static void ip_handle_martian_source(struct net_device *dev,
struct in_device *in_dev,
struct sk_buff *skb,
- u32 daddr,
- u32 saddr)
+ __be32 daddr,
+ __be32 saddr)
{
RT_CACHE_STAT_INC(in_martian_src);
#ifdef CONFIG_IP_ROUTE_VERBOSE
* RFC1812 recommendation, if source is martian,
* the only hint is MAC header.
*/
- printk(KERN_WARNING "martian source %u.%u.%u.%u from "
- "%u.%u.%u.%u, on dev %s\n",
- NIPQUAD(daddr), NIPQUAD(saddr), dev->name);
- if (dev->hard_header_len) {
+ printk(KERN_WARNING "martian source %pI4 from %pI4, on dev %s\n",
+ &daddr, &saddr, dev->name);
+ if (dev->hard_header_len && skb_mac_header_was_set(skb)) {
int i;
- unsigned char *p = skb->mac.raw;
+ const unsigned char *p = skb_mac_header(skb);
printk(KERN_WARNING "ll header: ");
for (i = 0; i < dev->hard_header_len; i++, p++) {
printk("%02x", *p);
#endif
}
-static inline int __mkroute_input(struct sk_buff *skb,
- struct fib_result* res,
- struct in_device *in_dev,
- u32 daddr, u32 saddr, u32 tos,
- struct rtable **result)
+static int __mkroute_input(struct sk_buff *skb,
+ struct fib_result *res,
+ struct in_device *in_dev,
+ __be32 daddr, __be32 saddr, u32 tos,
+ struct rtable **result)
{
struct rtable *rth;
int err;
struct in_device *out_dev;
unsigned flags = 0;
- u32 spec_dst, itag;
+ __be32 spec_dst;
+ u32 itag;
/* get a working reference to the output device */
out_dev = in_dev_get(FIB_RES_DEV(*res));
}
- err = fib_validate_source(saddr, daddr, tos, FIB_RES_OIF(*res),
+ err = fib_validate_source(saddr, daddr, tos, FIB_RES_OIF(*res),
in_dev->dev, &spec_dst, &itag);
if (err < 0) {
- ip_handle_martian_source(in_dev->dev, in_dev, skb, daddr,
+ ip_handle_martian_source(in_dev->dev, in_dev, skb, daddr,
saddr);
-
+
err = -EINVAL;
goto cleanup;
}
if (err)
flags |= RTCF_DIRECTSRC;
- if (out_dev == in_dev && err && !(flags & (RTCF_NAT | RTCF_MASQ)) &&
+ if (out_dev == in_dev && err &&
(IN_DEV_SHARED_MEDIA(out_dev) ||
inet_addr_onlink(out_dev, saddr, FIB_RES_GW(*res))))
flags |= RTCF_DOREDIRECT;
/* Not IP (i.e. ARP). Do not create route, if it is
* invalid for proxy arp. DNAT routes are always valid.
*/
- if (out_dev == in_dev && !(flags & RTCF_DNAT)) {
+ if (out_dev == in_dev) {
err = -EINVAL;
goto cleanup;
}
goto cleanup;
}
+ atomic_set(&rth->u.dst.__refcnt, 1);
rth->u.dst.flags= DST_HOST;
-#ifdef CONFIG_IP_ROUTE_MULTIPATH_CACHED
- if (res->fi->fib_nhs > 1)
- rth->u.dst.flags |= DST_BALANCED;
-#endif
- if (in_dev->cnf.no_policy)
+ if (IN_DEV_CONF_GET(in_dev, NOPOLICY))
rth->u.dst.flags |= DST_NOPOLICY;
- if (in_dev->cnf.no_xfrm)
+ if (IN_DEV_CONF_GET(out_dev, NOXFRM))
rth->u.dst.flags |= DST_NOXFRM;
rth->fl.fl4_dst = daddr;
rth->rt_dst = daddr;
rth->fl.fl4_tos = tos;
-#ifdef CONFIG_IP_ROUTE_FWMARK
- rth->fl.fl4_fwmark= skb->nfmark;
-#endif
+ rth->fl.mark = skb->mark;
rth->fl.fl4_src = saddr;
rth->rt_src = saddr;
rth->rt_gateway = daddr;
rth->u.dst.input = ip_forward;
rth->u.dst.output = ip_output;
+ rth->rt_genid = rt_genid(dev_net(rth->u.dst.dev));
rt_set_nexthop(rth, res, itag);
/* release the working reference to the output device */
in_dev_put(out_dev);
return err;
-}
+}
-static inline int ip_mkroute_input_def(struct sk_buff *skb,
- struct fib_result* res,
- const struct flowi *fl,
- struct in_device *in_dev,
- u32 daddr, u32 saddr, u32 tos)
+static int ip_mkroute_input(struct sk_buff *skb,
+ struct fib_result *res,
+ const struct flowi *fl,
+ struct in_device *in_dev,
+ __be32 daddr, __be32 saddr, u32 tos)
{
struct rtable *rth = NULL;
int err;
err = __mkroute_input(skb, res, in_dev, daddr, saddr, tos, &rth);
if (err)
return err;
- atomic_set(&rth->u.dst.__refcnt, 1);
/* put it into the cache */
- hash = rt_hash_code(daddr, saddr ^ (fl->iif << 5), tos);
- return rt_intern_hash(hash, rth, (struct rtable**)&skb->dst);
-}
-
-static inline int ip_mkroute_input(struct sk_buff *skb,
- struct fib_result* res,
- const struct flowi *fl,
- struct in_device *in_dev,
- u32 daddr, u32 saddr, u32 tos)
-{
-#ifdef CONFIG_IP_ROUTE_MULTIPATH_CACHED
- struct rtable* rth = NULL;
- unsigned char hop, hopcount, lasthop;
- int err = -EINVAL;
- unsigned int hash;
-
- if (res->fi)
- hopcount = res->fi->fib_nhs;
- else
- hopcount = 1;
-
- lasthop = hopcount - 1;
-
- /* distinguish between multipath and singlepath */
- if (hopcount < 2)
- return ip_mkroute_input_def(skb, res, fl, in_dev, daddr,
- saddr, tos);
-
- /* add all alternatives to the routing cache */
- for (hop = 0; hop < hopcount; hop++) {
- res->nh_sel = hop;
-
- /* create a routing cache entry */
- err = __mkroute_input(skb, res, in_dev, daddr, saddr, tos,
- &rth);
- if (err)
- return err;
-
- /* put it into the cache */
- hash = rt_hash_code(daddr, saddr ^ (fl->iif << 5), tos);
- err = rt_intern_hash(hash, rth, (struct rtable**)&skb->dst);
- if (err)
- return err;
-
- /* forward hop information to multipath impl. */
- multipath_set_nhinfo(rth,
- FIB_RES_NETWORK(*res),
- FIB_RES_NETMASK(*res),
- res->prefixlen,
- &FIB_RES_NH(*res));
-
- /* only for the last hop the reference count is handled
- * outside
- */
- if (hop == lasthop)
- atomic_set(&(skb->dst->__refcnt), 1);
- }
- return err;
-#else /* CONFIG_IP_ROUTE_MULTIPATH_CACHED */
- return ip_mkroute_input_def(skb, res, fl, in_dev, daddr, saddr, tos);
-#endif /* CONFIG_IP_ROUTE_MULTIPATH_CACHED */
+ hash = rt_hash(daddr, saddr, fl->iif,
+ rt_genid(dev_net(rth->u.dst.dev)));
+ return rt_intern_hash(hash, rth, &skb->rtable);
}
-
/*
 * NOTE. We drop all the packets that have local source
* addresses, because every properly looped back packet
* 2. IP spoofing attempts are filtered with 100% of guarantee.
*/
-static int ip_route_input_slow(struct sk_buff *skb, u32 daddr, u32 saddr,
+static int ip_route_input_slow(struct sk_buff *skb, __be32 daddr, __be32 saddr,
u8 tos, struct net_device *dev)
{
struct fib_result res;
.saddr = saddr,
.tos = tos,
.scope = RT_SCOPE_UNIVERSE,
-#ifdef CONFIG_IP_ROUTE_FWMARK
- .fwmark = skb->nfmark
-#endif
} },
+ .mark = skb->mark,
.iif = dev->ifindex };
unsigned flags = 0;
u32 itag = 0;
struct rtable * rth;
unsigned hash;
- u32 spec_dst;
+ __be32 spec_dst;
int err = -EINVAL;
int free_res = 0;
+ struct net * net = dev_net(dev);
/* IP on this device is disabled. */
by fib_lookup.
*/
- if (MULTICAST(saddr) || BADCLASS(saddr) || LOOPBACK(saddr))
+ if (ipv4_is_multicast(saddr) || ipv4_is_lbcast(saddr) ||
+ ipv4_is_loopback(saddr))
goto martian_source;
- if (daddr == 0xFFFFFFFF || (saddr == 0 && daddr == 0))
+ if (daddr == htonl(0xFFFFFFFF) || (saddr == 0 && daddr == 0))
goto brd_input;
/* Accept zero addresses only to limited broadcast;
 * I do not even know whether to fix it or not. Waiting for complaints :-)
*/
- if (ZERONET(saddr))
+ if (ipv4_is_zeronet(saddr))
goto martian_source;
- if (BADCLASS(daddr) || ZERONET(daddr) || LOOPBACK(daddr))
+ if (ipv4_is_lbcast(daddr) || ipv4_is_zeronet(daddr) ||
+ ipv4_is_loopback(daddr))
goto martian_destination;
/*
 * Now we are ready to route the packet.
*/
- if ((err = fib_lookup(&fl, &res)) != 0) {
+ if ((err = fib_lookup(net, &fl, &res)) != 0) {
if (!IN_DEV_FORWARD(in_dev))
goto e_hostunreach;
goto no_route;
if (res.type == RTN_LOCAL) {
int result;
result = fib_validate_source(saddr, daddr, tos,
- loopback_dev.ifindex,
+ net->loopback_dev->ifindex,
dev, &spec_dst, &itag);
if (result < 0)
goto martian_source;
goto martian_destination;
err = ip_mkroute_input(skb, &res, &fl, in_dev, daddr, saddr, tos);
- if (err == -ENOBUFS)
- goto e_nobufs;
- if (err == -EINVAL)
- goto e_inval;
-
done:
in_dev_put(in_dev);
if (free_res)
if (skb->protocol != htons(ETH_P_IP))
goto e_inval;
- if (ZERONET(saddr))
+ if (ipv4_is_zeronet(saddr))
spec_dst = inet_select_addr(dev, 0, RT_SCOPE_LINK);
else {
err = fib_validate_source(saddr, 0, tos, 0, dev, &spec_dst,
goto e_nobufs;
rth->u.dst.output= ip_rt_bug;
+ rth->rt_genid = rt_genid(net);
atomic_set(&rth->u.dst.__refcnt, 1);
rth->u.dst.flags= DST_HOST;
- if (in_dev->cnf.no_policy)
+ if (IN_DEV_CONF_GET(in_dev, NOPOLICY))
rth->u.dst.flags |= DST_NOPOLICY;
rth->fl.fl4_dst = daddr;
rth->rt_dst = daddr;
rth->fl.fl4_tos = tos;
-#ifdef CONFIG_IP_ROUTE_FWMARK
- rth->fl.fl4_fwmark= skb->nfmark;
-#endif
+ rth->fl.mark = skb->mark;
rth->fl.fl4_src = saddr;
rth->rt_src = saddr;
#ifdef CONFIG_NET_CLS_ROUTE
#endif
rth->rt_iif =
rth->fl.iif = dev->ifindex;
- rth->u.dst.dev = &loopback_dev;
+ rth->u.dst.dev = net->loopback_dev;
dev_hold(rth->u.dst.dev);
rth->idev = in_dev_get(rth->u.dst.dev);
rth->rt_gateway = daddr;
rth->rt_flags &= ~RTCF_LOCAL;
}
rth->rt_type = res.type;
- hash = rt_hash_code(daddr, saddr ^ (fl.iif << 5), tos);
- err = rt_intern_hash(hash, rth, (struct rtable**)&skb->dst);
+ hash = rt_hash(daddr, saddr, fl.iif, rt_genid(net));
+ err = rt_intern_hash(hash, rth, &skb->rtable);
goto done;
no_route:
RT_CACHE_STAT_INC(in_no_route);
spec_dst = inet_select_addr(dev, 0, RT_SCOPE_UNIVERSE);
res.type = RTN_UNREACHABLE;
+ if (err == -ESRCH)
+ err = -ENETUNREACH;
goto local_input;
/*
RT_CACHE_STAT_INC(in_martian_dst);
#ifdef CONFIG_IP_ROUTE_VERBOSE
if (IN_DEV_LOG_MARTIANS(in_dev) && net_ratelimit())
- printk(KERN_WARNING "martian destination %u.%u.%u.%u from "
- "%u.%u.%u.%u, dev %s\n",
- NIPQUAD(daddr), NIPQUAD(saddr), dev->name);
+ printk(KERN_WARNING "martian destination %pI4 from %pI4, dev %s\n",
+ &daddr, &saddr, dev->name);
#endif
e_hostunreach:
- err = -EHOSTUNREACH;
- goto done;
+ err = -EHOSTUNREACH;
+ goto done;
e_inval:
err = -EINVAL;
goto e_inval;
}
-int ip_route_input(struct sk_buff *skb, u32 daddr, u32 saddr,
+int ip_route_input(struct sk_buff *skb, __be32 daddr, __be32 saddr,
u8 tos, struct net_device *dev)
{
struct rtable * rth;
unsigned hash;
int iif = dev->ifindex;
+ struct net *net;
+
+ net = dev_net(dev);
+
+ if (!rt_caching(net))
+ goto skip_cache;
tos &= IPTOS_RT_MASK;
- hash = rt_hash_code(daddr, saddr ^ (iif << 5), tos);
+ hash = rt_hash(daddr, saddr, iif, rt_genid(net));
rcu_read_lock();
for (rth = rcu_dereference(rt_hash_table[hash].chain); rth;
- rth = rcu_dereference(rth->u.rt_next)) {
- if (rth->fl.fl4_dst == daddr &&
- rth->fl.fl4_src == saddr &&
- rth->fl.iif == iif &&
- rth->fl.oif == 0 &&
-#ifdef CONFIG_IP_ROUTE_FWMARK
- rth->fl.fl4_fwmark == skb->nfmark &&
-#endif
- rth->fl.fl4_tos == tos) {
- rth->u.dst.lastuse = jiffies;
- dst_hold(&rth->u.dst);
- rth->u.dst.__use++;
+ rth = rcu_dereference(rth->u.dst.rt_next)) {
+ if (((rth->fl.fl4_dst ^ daddr) |
+ (rth->fl.fl4_src ^ saddr) |
+ (rth->fl.iif ^ iif) |
+ rth->fl.oif |
+ (rth->fl.fl4_tos ^ tos)) == 0 &&
+ rth->fl.mark == skb->mark &&
+ net_eq(dev_net(rth->u.dst.dev), net) &&
+ !rt_is_expired(rth)) {
+ dst_use(&rth->u.dst, jiffies);
RT_CACHE_STAT_INC(in_hit);
rcu_read_unlock();
- skb->dst = (struct dst_entry*)rth;
+ skb->rtable = rth;
return 0;
}
RT_CACHE_STAT_INC(in_hlist_search);
}
rcu_read_unlock();
+skip_cache:
/* Multicast recognition logic is moved from route cache to here.
The problem was that too many Ethernet cards have broken/missing
hardware multicast filters :-( As a result the host on a multicasting
Note that multicast routers are not affected, because
route cache entry is created eventually.
*/
- if (MULTICAST(daddr)) {
+ if (ipv4_is_multicast(daddr)) {
struct in_device *in_dev;
rcu_read_lock();
- if ((in_dev = __in_dev_get(dev)) != NULL) {
+ if ((in_dev = __in_dev_get_rcu(dev)) != NULL) {
int our = ip_check_mc(in_dev, daddr, saddr,
- skb->nh.iph->protocol);
+ ip_hdr(skb)->protocol);
if (our
#ifdef CONFIG_IP_MROUTE
- || (!LOCAL_MCAST(daddr) && IN_DEV_MFORWARD(in_dev))
+ || (!ipv4_is_local_multicast(daddr) &&
+ IN_DEV_MFORWARD(in_dev))
#endif
) {
rcu_read_unlock();
return ip_route_input_slow(skb, daddr, saddr, tos, dev);
}
-static inline int __mkroute_output(struct rtable **result,
- struct fib_result* res,
- const struct flowi *fl,
- const struct flowi *oldflp,
- struct net_device *dev_out,
- unsigned flags)
+static int __mkroute_output(struct rtable **result,
+ struct fib_result *res,
+ const struct flowi *fl,
+ const struct flowi *oldflp,
+ struct net_device *dev_out,
+ unsigned flags)
{
struct rtable *rth;
struct in_device *in_dev;
u32 tos = RT_FL_TOS(oldflp);
int err = 0;
- if (LOOPBACK(fl->fl4_src) && !(dev_out->flags&IFF_LOOPBACK))
+ if (ipv4_is_loopback(fl->fl4_src) && !(dev_out->flags&IFF_LOOPBACK))
return -EINVAL;
- if (fl->fl4_dst == 0xFFFFFFFF)
+ if (fl->fl4_dst == htonl(0xFFFFFFFF))
res->type = RTN_BROADCAST;
- else if (MULTICAST(fl->fl4_dst))
+ else if (ipv4_is_multicast(fl->fl4_dst))
res->type = RTN_MULTICAST;
- else if (BADCLASS(fl->fl4_dst) || ZERONET(fl->fl4_dst))
+ else if (ipv4_is_lbcast(fl->fl4_dst) || ipv4_is_zeronet(fl->fl4_dst))
return -EINVAL;
if (dev_out->flags & IFF_LOOPBACK)
}
} else if (res->type == RTN_MULTICAST) {
flags |= RTCF_MULTICAST|RTCF_LOCAL;
- if (!ip_check_mc(in_dev, oldflp->fl4_dst, oldflp->fl4_src,
+ if (!ip_check_mc(in_dev, oldflp->fl4_dst, oldflp->fl4_src,
oldflp->proto))
flags &= ~RTCF_LOCAL;
/* If the multicast route does not exist, use the
   default one, but do not gateway in this case.
   Yes, it is a hack. */
if (!rth) {
err = -ENOBUFS;
goto cleanup;
- }
+ }
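+ /* Take the initial reference at allocation time, not in the caller. */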
+ atomic_set(&rth->u.dst.__refcnt, 1);
rth->u.dst.flags= DST_HOST;
-#ifdef CONFIG_IP_ROUTE_MULTIPATH_CACHED
- if (res->fi) {
- rth->rt_multipath_alg = res->fi->fib_mp_alg;
- if (res->fi->fib_nhs > 1)
- rth->u.dst.flags |= DST_BALANCED;
- }
-#endif
- if (in_dev->cnf.no_xfrm)
+ if (IN_DEV_CONF_GET(in_dev, NOXFRM))
rth->u.dst.flags |= DST_NOXFRM;
- if (in_dev->cnf.no_policy)
+ if (IN_DEV_CONF_GET(in_dev, NOPOLICY))
rth->u.dst.flags |= DST_NOPOLICY;
rth->fl.fl4_dst = oldflp->fl4_dst;
rth->fl.fl4_tos = tos;
rth->fl.fl4_src = oldflp->fl4_src;
rth->fl.oif = oldflp->oif;
-#ifdef CONFIG_IP_ROUTE_FWMARK
- rth->fl.fl4_fwmark= oldflp->fl4_fwmark;
-#endif
+ rth->fl.mark = oldflp->mark;
rth->rt_dst = fl->fl4_dst;
rth->rt_src = fl->fl4_src;
rth->rt_iif = oldflp->oif ? : dev_out->ifindex;
- /* get references to the devices that are to be hold by the routing
+ /* get references to the devices that are to be held by the routing
cache entry */
rth->u.dst.dev = dev_out;
dev_hold(dev_out);
rth->rt_spec_dst= fl->fl4_src;
rth->u.dst.output=ip_output;
+ rth->rt_genid = rt_genid(dev_net(dev_out));
RT_CACHE_STAT_INC(out_slow_tot);
}
if (flags & (RTCF_BROADCAST | RTCF_MULTICAST)) {
rth->rt_spec_dst = fl->fl4_src;
- if (flags & RTCF_LOCAL &&
+ if (flags & RTCF_LOCAL &&
!(dev_out->flags & IFF_LOOPBACK)) {
rth->u.dst.output = ip_mc_output;
RT_CACHE_STAT_INC(out_slow_mc);
#ifdef CONFIG_IP_MROUTE
if (res->type == RTN_MULTICAST) {
if (IN_DEV_MFORWARD(in_dev) &&
- !LOCAL_MCAST(oldflp->fl4_dst)) {
+ !ipv4_is_local_multicast(oldflp->fl4_dst)) {
rth->u.dst.input = ip_mr_input;
rth->u.dst.output = ip_mc_output;
}
return err;
}
-static inline int ip_mkroute_output_def(struct rtable **rp,
- struct fib_result* res,
- const struct flowi *fl,
- const struct flowi *oldflp,
- struct net_device *dev_out,
- unsigned flags)
+static int ip_mkroute_output(struct rtable **rp,
+ struct fib_result *res,
+ const struct flowi *fl,
+ const struct flowi *oldflp,
+ struct net_device *dev_out,
+ unsigned flags)
{
struct rtable *rth = NULL;
int err = __mkroute_output(&rth, res, fl, oldflp, dev_out, flags);
unsigned hash;
if (err == 0) {
- u32 tos = RT_FL_TOS(oldflp);
-
- atomic_set(&rth->u.dst.__refcnt, 1);
-
- hash = rt_hash_code(oldflp->fl4_dst,
- oldflp->fl4_src ^ (oldflp->oif << 5), tos);
+ hash = rt_hash(oldflp->fl4_dst, oldflp->fl4_src, oldflp->oif,
+ rt_genid(dev_net(dev_out)));
err = rt_intern_hash(hash, rth, rp);
}
-
- return err;
-}
-
-static inline int ip_mkroute_output(struct rtable** rp,
- struct fib_result* res,
- const struct flowi *fl,
- const struct flowi *oldflp,
- struct net_device *dev_out,
- unsigned flags)
-{
-#ifdef CONFIG_IP_ROUTE_MULTIPATH_CACHED
- u32 tos = RT_FL_TOS(oldflp);
- unsigned char hop;
- unsigned hash;
- int err = -EINVAL;
- struct rtable *rth = NULL;
-
- if (res->fi && res->fi->fib_nhs > 1) {
- unsigned char hopcount = res->fi->fib_nhs;
-
- for (hop = 0; hop < hopcount; hop++) {
- struct net_device *dev2nexthop;
-
- res->nh_sel = hop;
-
- /* hold a work reference to the output device */
- dev2nexthop = FIB_RES_DEV(*res);
- dev_hold(dev2nexthop);
-
- err = __mkroute_output(&rth, res, fl, oldflp,
- dev2nexthop, flags);
-
- if (err != 0)
- goto cleanup;
- hash = rt_hash_code(oldflp->fl4_dst,
- oldflp->fl4_src ^
- (oldflp->oif << 5), tos);
- err = rt_intern_hash(hash, rth, rp);
-
- /* forward hop information to multipath impl. */
- multipath_set_nhinfo(rth,
- FIB_RES_NETWORK(*res),
- FIB_RES_NETMASK(*res),
- res->prefixlen,
- &FIB_RES_NH(*res));
- cleanup:
- /* release work reference to output device */
- dev_put(dev2nexthop);
-
- if (err != 0)
- return err;
- }
- atomic_set(&(*rp)->u.dst.__refcnt, 1);
- return err;
- } else {
- return ip_mkroute_output_def(rp, res, fl, oldflp, dev_out,
- flags);
- }
-#else /* CONFIG_IP_ROUTE_MULTIPATH_CACHED */
- return ip_mkroute_output_def(rp, res, fl, oldflp, dev_out, flags);
-#endif
+ return err;
}
/*
* Major route resolver routine.
*/
-static int ip_route_output_slow(struct rtable **rp, const struct flowi *oldflp)
+static int ip_route_output_slow(struct net *net, struct rtable **rp,
+ const struct flowi *oldflp)
{
u32 tos = RT_FL_TOS(oldflp);
struct flowi fl = { .nl_u = { .ip4_u =
.scope = ((tos & RTO_ONLINK) ?
RT_SCOPE_LINK :
RT_SCOPE_UNIVERSE),
-#ifdef CONFIG_IP_ROUTE_FWMARK
- .fwmark = oldflp->fl4_fwmark
-#endif
} },
- .iif = loopback_dev.ifindex,
+ .mark = oldflp->mark,
+ .iif = net->loopback_dev->ifindex,
.oif = oldflp->oif };
struct fib_result res;
unsigned flags = 0;
if (oldflp->fl4_src) {
err = -EINVAL;
- if (MULTICAST(oldflp->fl4_src) ||
- BADCLASS(oldflp->fl4_src) ||
- ZERONET(oldflp->fl4_src))
- goto out;
-
- /* It is equivalent to inet_addr_type(saddr) == RTN_LOCAL */
- dev_out = ip_dev_find(oldflp->fl4_src);
- if (dev_out == NULL)
+ if (ipv4_is_multicast(oldflp->fl4_src) ||
+ ipv4_is_lbcast(oldflp->fl4_src) ||
+ ipv4_is_zeronet(oldflp->fl4_src))
goto out;
/* I removed check for oif == dev_out->oif here.
It was wrong for two reasons:
- 1. ip_dev_find(saddr) can return wrong iface, if saddr is
- assigned to multiple interfaces.
+ 1. ip_dev_find(net, saddr) can return wrong iface, if saddr
+ is assigned to multiple interfaces.
2. Moreover, we are allowed to send packets with saddr
of another iface. --ANK
*/
if (oldflp->oif == 0
- && (MULTICAST(oldflp->fl4_dst) || oldflp->fl4_dst == 0xFFFFFFFF)) {
+ && (ipv4_is_multicast(oldflp->fl4_dst) ||
+ oldflp->fl4_dst == htonl(0xFFFFFFFF))) {
+ /* It is equivalent to inet_addr_type(saddr) == RTN_LOCAL */
+ dev_out = ip_dev_find(net, oldflp->fl4_src);
+ if (dev_out == NULL)
+ goto out;
+
/* Special hack: user can direct multicasts
and limited broadcast via necessary interface
without fiddling with IP_MULTICAST_IF or IP_PKTINFO. */
fl.oif = dev_out->ifindex;
goto make_route;
}
- if (dev_out)
+
+ if (!(oldflp->flags & FLOWI_FLAG_ANYSRC)) {
+ /* It is equivalent to inet_addr_type(saddr) == RTN_LOCAL */
+ dev_out = ip_dev_find(net, oldflp->fl4_src);
+ if (dev_out == NULL)
+ goto out;
dev_put(dev_out);
- dev_out = NULL;
+ dev_out = NULL;
+ }
}
if (oldflp->oif) {
- dev_out = dev_get_by_index(oldflp->oif);
+ dev_out = dev_get_by_index(net, oldflp->oif);
err = -ENODEV;
if (dev_out == NULL)
goto out;
- if (__in_dev_get(dev_out) == NULL) {
+
+ /* RACE: Check return value of inet_select_addr instead. */
+ if (__in_dev_get_rtnl(dev_out) == NULL) {
dev_put(dev_out);
goto out; /* Wrong error code */
}
- if (LOCAL_MCAST(oldflp->fl4_dst) || oldflp->fl4_dst == 0xFFFFFFFF) {
+ if (ipv4_is_local_multicast(oldflp->fl4_dst) ||
+ oldflp->fl4_dst == htonl(0xFFFFFFFF)) {
if (!fl.fl4_src)
fl.fl4_src = inet_select_addr(dev_out, 0,
RT_SCOPE_LINK);
goto make_route;
}
if (!fl.fl4_src) {
- if (MULTICAST(oldflp->fl4_dst))
+ if (ipv4_is_multicast(oldflp->fl4_dst))
fl.fl4_src = inet_select_addr(dev_out, 0,
fl.fl4_scope);
else if (!oldflp->fl4_dst)
fl.fl4_dst = fl.fl4_src = htonl(INADDR_LOOPBACK);
if (dev_out)
dev_put(dev_out);
- dev_out = &loopback_dev;
+ dev_out = net->loopback_dev;
dev_hold(dev_out);
- fl.oif = loopback_dev.ifindex;
+ fl.oif = net->loopback_dev->ifindex;
res.type = RTN_LOCAL;
flags |= RTCF_LOCAL;
goto make_route;
}
- if (fib_lookup(&fl, &res)) {
+ if (fib_lookup(net, &fl, &res)) {
res.fi = NULL;
if (oldflp->oif) {
/* Apparently, routing tables are wrong. Assume
   that the destination is on link. */
fl.fl4_src = fl.fl4_dst;
if (dev_out)
dev_put(dev_out);
- dev_out = &loopback_dev;
+ dev_out = net->loopback_dev;
dev_hold(dev_out);
fl.oif = dev_out->ifindex;
if (res.fi)
else
#endif
if (!res.prefixlen && res.type == RTN_UNICAST && !fl.oif)
- fib_select_default(&fl, &res);
+ fib_select_default(net, &fl, &res);
if (!fl.fl4_src)
fl.fl4_src = FIB_RES_PREFSRC(res);
out: return err;
}
-int __ip_route_output_key(struct rtable **rp, const struct flowi *flp)
+int __ip_route_output_key(struct net *net, struct rtable **rp,
+ const struct flowi *flp)
{
unsigned hash;
struct rtable *rth;
- hash = rt_hash_code(flp->fl4_dst, flp->fl4_src ^ (flp->oif << 5), flp->fl4_tos);
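+ /* As on input, a namespace with caching disabled goes straight
+ * to the resolver. */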
+ if (!rt_caching(net))
+ goto slow_output;
+
+ hash = rt_hash(flp->fl4_dst, flp->fl4_src, flp->oif, rt_genid(net));
rcu_read_lock_bh();
for (rth = rcu_dereference(rt_hash_table[hash].chain); rth;
- rth = rcu_dereference(rth->u.rt_next)) {
+ rth = rcu_dereference(rth->u.dst.rt_next)) {
if (rth->fl.fl4_dst == flp->fl4_dst &&
rth->fl.fl4_src == flp->fl4_src &&
rth->fl.iif == 0 &&
rth->fl.oif == flp->oif &&
-#ifdef CONFIG_IP_ROUTE_FWMARK
- rth->fl.fl4_fwmark == flp->fl4_fwmark &&
-#endif
+ rth->fl.mark == flp->mark &&
!((rth->fl.fl4_tos ^ flp->fl4_tos) &
- (IPTOS_RT_MASK | RTO_ONLINK))) {
-
- /* check for multipath routes and choose one if
- * necessary
- */
- if (multipath_select_route(flp, rth, rp)) {
- dst_hold(&(*rp)->u.dst);
- RT_CACHE_STAT_INC(out_hit);
- rcu_read_unlock_bh();
- return 0;
- }
-
- rth->u.dst.lastuse = jiffies;
- dst_hold(&rth->u.dst);
- rth->u.dst.__use++;
+ (IPTOS_RT_MASK | RTO_ONLINK)) &&
+ net_eq(dev_net(rth->u.dst.dev), net) &&
+ !rt_is_expired(rth)) {
+ dst_use(&rth->u.dst, jiffies);
RT_CACHE_STAT_INC(out_hit);
rcu_read_unlock_bh();
*rp = rth;
return 0;
}
rcu_read_unlock_bh();
- return ip_route_output_slow(rp, flp);
+slow_output:
+ return ip_route_output_slow(net, rp, flp);
+}
+
+EXPORT_SYMBOL_GPL(__ip_route_output_key);
+
+static void ipv4_rt_blackhole_update_pmtu(struct dst_entry *dst, u32 mtu)
+{
+}
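+
+/*
+ * dst_ops for "blackhole" routes: PMTU updates are deliberately ignored,
+ * and ipv4_dst_blackhole() below wires input/output to dst_discard, so
+ * anything routed through such an entry is silently dropped.
+ */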
+
+static struct dst_ops ipv4_dst_blackhole_ops = {
+ .family = AF_INET,
+ .protocol = __constant_htons(ETH_P_IP),
+ .destroy = ipv4_dst_destroy,
+ .check = ipv4_dst_check,
+ .update_pmtu = ipv4_rt_blackhole_update_pmtu,
+ .entries = ATOMIC_INIT(0),
+};
+
+
+static int ipv4_dst_blackhole(struct net *net, struct rtable **rp, struct flowi *flp)
+{
+ struct rtable *ort = *rp;
+ struct rtable *rt = (struct rtable *)
+ dst_alloc(&ipv4_dst_blackhole_ops);
+
+ if (rt) {
+ struct dst_entry *new = &rt->u.dst;
+
+ atomic_set(&new->__refcnt, 1);
+ new->__use = 1;
+ new->input = dst_discard;
+ new->output = dst_discard;
+ memcpy(new->metrics, ort->u.dst.metrics, RTAX_MAX*sizeof(u32));
+
+ new->dev = ort->u.dst.dev;
+ if (new->dev)
+ dev_hold(new->dev);
+
+ rt->fl = ort->fl;
+
+ rt->idev = ort->idev;
+ if (rt->idev)
+ in_dev_hold(rt->idev);
+ rt->rt_genid = rt_genid(net);
+ rt->rt_flags = ort->rt_flags;
+ rt->rt_type = ort->rt_type;
+ rt->rt_dst = ort->rt_dst;
+ rt->rt_src = ort->rt_src;
+ rt->rt_iif = ort->rt_iif;
+ rt->rt_gateway = ort->rt_gateway;
+ rt->rt_spec_dst = ort->rt_spec_dst;
+ rt->peer = ort->peer;
+ if (rt->peer)
+ atomic_inc(&rt->peer->refcnt);
+
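+ /*
+ * dst_free() marks the entry obsolete straight away; the single
+ * reference taken above keeps it alive for the caller, and it
+ * dies for good on the final dst_release().
+ */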
+ dst_free(new);
+ }
+
+ dst_release(&(*rp)->u.dst);
+ *rp = rt;
+ return (rt ? 0 : -ENOMEM);
}
-int ip_route_output_flow(struct rtable **rp, struct flowi *flp, struct sock *sk, int flags)
+int ip_route_output_flow(struct net *net, struct rtable **rp, struct flowi *flp,
+ struct sock *sk, int flags)
{
int err;
- if ((err = __ip_route_output_key(rp, flp)) != 0)
+ if ((err = __ip_route_output_key(net, rp, flp)) != 0)
return err;
if (flp->proto) {
flp->fl4_src = (*rp)->rt_src;
if (!flp->fl4_dst)
flp->fl4_dst = (*rp)->rt_dst;
- return xfrm_lookup((struct dst_entry **)rp, flp, sk, flags);
+ err = __xfrm_lookup(net, (struct dst_entry **)rp, flp, sk,
+ flags ? XFRM_LOOKUP_WAIT : 0);
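+ /*
+ * -EREMOTE means xfrm state resolution is still pending, so
+ * install a blackhole route that quietly drops packets until
+ * the states are in place.
+ */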
+ if (err == -EREMOTE)
+ err = ipv4_dst_blackhole(net, rp, flp);
+
+ return err;
}
return 0;
}
-int ip_route_output_key(struct rtable **rp, struct flowi *flp)
+EXPORT_SYMBOL_GPL(ip_route_output_flow);
+
+int ip_route_output_key(struct net *net, struct rtable **rp, struct flowi *flp)
{
- return ip_route_output_flow(rp, flp, NULL, 0);
+ return ip_route_output_flow(net, rp, flp, NULL, 0);
}
static int rt_fill_info(struct sk_buff *skb, u32 pid, u32 seq, int event,
int nowait, unsigned int flags)
{
- struct rtable *rt = (struct rtable*)skb->dst;
+ struct rtable *rt = skb->rtable;
struct rtmsg *r;
- struct nlmsghdr *nlh;
- unsigned char *b = skb->tail;
- struct rta_cacheinfo ci;
-#ifdef CONFIG_IP_MROUTE
- struct rtattr *eptr;
-#endif
- nlh = NLMSG_NEW(skb, pid, seq, event, sizeof(*r), flags);
- r = NLMSG_DATA(nlh);
+ struct nlmsghdr *nlh;
+ long expires;
+ u32 id = 0, ts = 0, tsage = 0, error;
+
+ nlh = nlmsg_put(skb, pid, seq, event, sizeof(*r), flags);
+ if (nlh == NULL)
+ return -EMSGSIZE;
+
+ r = nlmsg_data(nlh);
r->rtm_family = AF_INET;
r->rtm_dst_len = 32;
r->rtm_src_len = 0;
r->rtm_tos = rt->fl.fl4_tos;
r->rtm_table = RT_TABLE_MAIN;
+ NLA_PUT_U32(skb, RTA_TABLE, RT_TABLE_MAIN);
r->rtm_type = rt->rt_type;
r->rtm_scope = RT_SCOPE_UNIVERSE;
r->rtm_protocol = RTPROT_UNSPEC;
r->rtm_flags = (rt->rt_flags & ~0xFFFF) | RTM_F_CLONED;
if (rt->rt_flags & RTCF_NOTIFY)
r->rtm_flags |= RTM_F_NOTIFY;
- RTA_PUT(skb, RTA_DST, 4, &rt->rt_dst);
+
+ NLA_PUT_BE32(skb, RTA_DST, rt->rt_dst);
+
if (rt->fl.fl4_src) {
r->rtm_src_len = 32;
- RTA_PUT(skb, RTA_SRC, 4, &rt->fl.fl4_src);
+ NLA_PUT_BE32(skb, RTA_SRC, rt->fl.fl4_src);
}
if (rt->u.dst.dev)
- RTA_PUT(skb, RTA_OIF, sizeof(int), &rt->u.dst.dev->ifindex);
+ NLA_PUT_U32(skb, RTA_OIF, rt->u.dst.dev->ifindex);
#ifdef CONFIG_NET_CLS_ROUTE
if (rt->u.dst.tclassid)
- RTA_PUT(skb, RTA_FLOW, 4, &rt->u.dst.tclassid);
-#endif
-#ifdef CONFIG_IP_ROUTE_MULTIPATH_CACHED
- if (rt->rt_multipath_alg != IP_MP_ALG_NONE) {
- __u32 alg = rt->rt_multipath_alg;
-
- RTA_PUT(skb, RTA_MP_ALGO, 4, &alg);
- }
+ NLA_PUT_U32(skb, RTA_FLOW, rt->u.dst.tclassid);
#endif
if (rt->fl.iif)
- RTA_PUT(skb, RTA_PREFSRC, 4, &rt->rt_spec_dst);
+ NLA_PUT_BE32(skb, RTA_PREFSRC, rt->rt_spec_dst);
else if (rt->rt_src != rt->fl.fl4_src)
- RTA_PUT(skb, RTA_PREFSRC, 4, &rt->rt_src);
+ NLA_PUT_BE32(skb, RTA_PREFSRC, rt->rt_src);
+
if (rt->rt_dst != rt->rt_gateway)
- RTA_PUT(skb, RTA_GATEWAY, 4, &rt->rt_gateway);
+ NLA_PUT_BE32(skb, RTA_GATEWAY, rt->rt_gateway);
+
if (rtnetlink_put_metrics(skb, rt->u.dst.metrics) < 0)
- goto rtattr_failure;
- ci.rta_lastuse = jiffies_to_clock_t(jiffies - rt->u.dst.lastuse);
- ci.rta_used = rt->u.dst.__use;
- ci.rta_clntref = atomic_read(&rt->u.dst.__refcnt);
- if (rt->u.dst.expires)
- ci.rta_expires = jiffies_to_clock_t(rt->u.dst.expires - jiffies);
- else
- ci.rta_expires = 0;
- ci.rta_error = rt->u.dst.error;
- ci.rta_id = ci.rta_ts = ci.rta_tsage = 0;
+ goto nla_put_failure;
+
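+ /* Gather the cache info here; it is emitted in one go via
+ * rtnl_put_cacheinfo() below. */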
+ error = rt->u.dst.error;
+ expires = rt->u.dst.expires ? rt->u.dst.expires - jiffies : 0;
if (rt->peer) {
- ci.rta_id = rt->peer->ip_id_count;
+ id = rt->peer->ip_id_count;
if (rt->peer->tcp_ts_stamp) {
- ci.rta_ts = rt->peer->tcp_ts;
- ci.rta_tsage = xtime.tv_sec - rt->peer->tcp_ts_stamp;
+ ts = rt->peer->tcp_ts;
+ tsage = get_seconds() - rt->peer->tcp_ts_stamp;
}
}
-#ifdef CONFIG_IP_MROUTE
- eptr = (struct rtattr*)skb->tail;
-#endif
- RTA_PUT(skb, RTA_CACHEINFO, sizeof(ci), &ci);
+
if (rt->fl.iif) {
#ifdef CONFIG_IP_MROUTE
- u32 dst = rt->rt_dst;
+ __be32 dst = rt->rt_dst;
- if (MULTICAST(dst) && !LOCAL_MCAST(dst) &&
- ipv4_devconf.mc_forwarding) {
+ if (ipv4_is_multicast(dst) && !ipv4_is_local_multicast(dst) &&
+ IPV4_DEVCONF_ALL(&init_net, MC_FORWARDING)) {
int err = ipmr_get_route(skb, r, nowait);
if (err <= 0) {
if (!nowait) {
if (err == 0)
return 0;
- goto nlmsg_failure;
+ goto nla_put_failure;
} else {
if (err == -EMSGSIZE)
- goto nlmsg_failure;
- ((struct rta_cacheinfo*)RTA_DATA(eptr))->rta_error = err;
+ goto nla_put_failure;
+ error = err;
}
}
} else
#endif
- RTA_PUT(skb, RTA_IIF, sizeof(int), &rt->fl.iif);
+ NLA_PUT_U32(skb, RTA_IIF, rt->fl.iif);
}
- nlh->nlmsg_len = skb->tail - b;
- return skb->len;
+ if (rtnl_put_cacheinfo(skb, &rt->u.dst, id, ts, tsage,
+ expires, error) < 0)
+ goto nla_put_failure;
+
+ return nlmsg_end(skb, nlh);
-nlmsg_failure:
-rtattr_failure:
- skb_trim(skb, b - skb->data);
- return -1;
+nla_put_failure:
+ nlmsg_cancel(skb, nlh);
+ return -EMSGSIZE;
}
-int inet_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr* nlh, void *arg)
+static int inet_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr* nlh, void *arg)
{
- struct rtattr **rta = arg;
- struct rtmsg *rtm = NLMSG_DATA(nlh);
+ struct net *net = sock_net(in_skb->sk);
+ struct rtmsg *rtm;
+ struct nlattr *tb[RTA_MAX+1];
struct rtable *rt = NULL;
- u32 dst = 0;
- u32 src = 0;
- int iif = 0;
- int err = -ENOBUFS;
+ __be32 dst = 0;
+ __be32 src = 0;
+ u32 iif;
+ int err;
struct sk_buff *skb;
+ err = nlmsg_parse(nlh, sizeof(*rtm), tb, RTA_MAX, rtm_ipv4_policy);
+ if (err < 0)
+ goto errout;
+
+ rtm = nlmsg_data(nlh);
+
skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
- if (!skb)
- goto out;
+ if (skb == NULL) {
+ err = -ENOBUFS;
+ goto errout;
+ }
/* Reserve room for dummy headers; this skb can pass
   through a good chunk of the routing engine.
*/
- skb->mac.raw = skb->data;
+ skb_reset_mac_header(skb);
+ skb_reset_network_header(skb);
+
+ /* Bugfix: need to give ip_route_input enough of an IP header to not gag. */
+ ip_hdr(skb)->protocol = IPPROTO_ICMP;
skb_reserve(skb, MAX_HEADER + sizeof(struct iphdr));
- if (rta[RTA_SRC - 1])
- memcpy(&src, RTA_DATA(rta[RTA_SRC - 1]), 4);
- if (rta[RTA_DST - 1])
- memcpy(&dst, RTA_DATA(rta[RTA_DST - 1]), 4);
- if (rta[RTA_IIF - 1])
- memcpy(&iif, RTA_DATA(rta[RTA_IIF - 1]), sizeof(int));
+ src = tb[RTA_SRC] ? nla_get_be32(tb[RTA_SRC]) : 0;
+ dst = tb[RTA_DST] ? nla_get_be32(tb[RTA_DST]) : 0;
+ iif = tb[RTA_IIF] ? nla_get_u32(tb[RTA_IIF]) : 0;
if (iif) {
- struct net_device *dev = __dev_get_by_index(iif);
- err = -ENODEV;
- if (!dev)
- goto out_free;
+ struct net_device *dev;
+
+ dev = __dev_get_by_index(net, iif);
+ if (dev == NULL) {
+ err = -ENODEV;
+ goto errout_free;
+ }
+
skb->protocol = htons(ETH_P_IP);
skb->dev = dev;
local_bh_disable();
err = ip_route_input(skb, dst, src, rtm->rtm_tos, dev);
local_bh_enable();
- rt = (struct rtable*)skb->dst;
- if (!err && rt->u.dst.error)
+
+ rt = skb->rtable;
+ if (err == 0 && rt->u.dst.error)
err = -rt->u.dst.error;
} else {
- struct flowi fl = { .nl_u = { .ip4_u = { .daddr = dst,
- .saddr = src,
- .tos = rtm->rtm_tos } } };
- int oif = 0;
- if (rta[RTA_OIF - 1])
- memcpy(&oif, RTA_DATA(rta[RTA_OIF - 1]), sizeof(int));
- fl.oif = oif;
- err = ip_route_output_key(&rt, &fl);
+ struct flowi fl = {
+ .nl_u = {
+ .ip4_u = {
+ .daddr = dst,
+ .saddr = src,
+ .tos = rtm->rtm_tos,
+ },
+ },
+ .oif = tb[RTA_OIF] ? nla_get_u32(tb[RTA_OIF]) : 0,
+ };
+ err = ip_route_output_key(net, &rt, &fl);
}
+
if (err)
- goto out_free;
+ goto errout_free;
- skb->dst = &rt->u.dst;
+ skb->rtable = rt;
if (rtm->rtm_flags & RTM_F_NOTIFY)
rt->rt_flags |= RTCF_NOTIFY;
- NETLINK_CB(skb).dst_pid = NETLINK_CB(in_skb).pid;
-
err = rt_fill_info(skb, NETLINK_CB(in_skb).pid, nlh->nlmsg_seq,
- RTM_NEWROUTE, 0, 0);
- if (!err)
- goto out_free;
- if (err < 0) {
- err = -EMSGSIZE;
- goto out_free;
- }
+ RTM_NEWROUTE, 0, 0);
+ if (err <= 0)
+ goto errout_free;
- err = netlink_unicast(rtnl, skb, NETLINK_CB(in_skb).pid, MSG_DONTWAIT);
- if (err > 0)
- err = 0;
-out: return err;
+ err = rtnl_unicast(skb, net, NETLINK_CB(in_skb).pid);
+errout:
+ return err;
-out_free:
+errout_free:
kfree_skb(skb);
- goto out;
+ goto errout;
}
int ip_rt_dump(struct sk_buff *skb, struct netlink_callback *cb)
struct rtable *rt;
int h, s_h;
int idx, s_idx;
+ struct net *net;
+
+ net = sock_net(skb->sk);
s_h = cb->args[0];
+ if (s_h < 0)
+ s_h = 0;
s_idx = idx = cb->args[1];
- for (h = 0; h <= rt_hash_mask; h++) {
- if (h < s_h) continue;
- if (h > s_h)
- s_idx = 0;
+ for (h = s_h; h <= rt_hash_mask; h++, s_idx = 0) {
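+ /* Skip empty buckets without taking the RCU BH lock. */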
+ if (!rt_hash_table[h].chain)
+ continue;
rcu_read_lock_bh();
for (rt = rcu_dereference(rt_hash_table[h].chain), idx = 0; rt;
- rt = rcu_dereference(rt->u.rt_next), idx++) {
- if (idx < s_idx)
+ rt = rcu_dereference(rt->u.dst.rt_next), idx++) {
+ if (!net_eq(dev_net(rt->u.dst.dev), net) || idx < s_idx)
+ continue;
+ if (rt_is_expired(rt))
continue;
skb->dst = dst_clone(&rt->u.dst);
if (rt_fill_info(skb, NETLINK_CB(cb->skb).pid,
- cb->nlh->nlmsg_seq, RTM_NEWROUTE,
+ cb->nlh->nlmsg_seq, RTM_NEWROUTE,
1, NLM_F_MULTI) <= 0) {
dst_release(xchg(&skb->dst, NULL));
rcu_read_unlock_bh();
void ip_rt_multicast_event(struct in_device *in_dev)
{
- rt_cache_flush(0);
+ rt_cache_flush(dev_net(in_dev->dev), 0);
}
#ifdef CONFIG_SYSCTL
-static int flush_delay;
-
-static int ipv4_sysctl_rtcache_flush(ctl_table *ctl, int write,
+static int ipv4_sysctl_rtcache_flush(ctl_table *__ctl, int write,
struct file *filp, void __user *buffer,
size_t *lenp, loff_t *ppos)
{
if (write) {
- proc_dointvec(ctl, write, filp, buffer, lenp, ppos);
- rt_cache_flush(flush_delay);
+ int flush_delay;
+ ctl_table ctl;
+ struct net *net;
+
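+ /*
+ * Parse into a private copy of the table pointed at a stack
+ * variable, so the value never lands in shared storage; the
+ * net to flush rides in ->extra1.
+ */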
+ memcpy(&ctl, __ctl, sizeof(ctl));
+ ctl.data = &flush_delay;
+ proc_dointvec(&ctl, write, filp, buffer, lenp, ppos);
+
+ net = (struct net *)__ctl->extra1;
+ rt_cache_flush(net, flush_delay);
return 0;
- }
+ }
return -EINVAL;
}
static int ipv4_sysctl_rtcache_flush_strategy(ctl_table *table,
- int __user *name,
- int nlen,
void __user *oldval,
size_t __user *oldlenp,
void __user *newval,
- size_t newlen,
- void **context)
+ size_t newlen)
{
int delay;
+ struct net *net;
if (newlen != sizeof(int))
return -EINVAL;
if (get_user(delay, (int __user *)newval))
- return -EFAULT;
- rt_cache_flush(delay);
+ return -EFAULT;
+ net = (struct net *)table->extra1;
+ rt_cache_flush(net, delay);
return 0;
}
-ctl_table ipv4_route_table[] = {
- {
- .ctl_name = NET_IPV4_ROUTE_FLUSH,
- .procname = "flush",
- .data = &flush_delay,
- .maxlen = sizeof(int),
- .mode = 0200,
- .proc_handler = &ipv4_sysctl_rtcache_flush,
- .strategy = &ipv4_sysctl_rtcache_flush_strategy,
- },
- {
- .ctl_name = NET_IPV4_ROUTE_MIN_DELAY,
- .procname = "min_delay",
- .data = &ip_rt_min_delay,
- .maxlen = sizeof(int),
- .mode = 0644,
- .proc_handler = &proc_dointvec_jiffies,
- .strategy = &sysctl_jiffies,
- },
- {
- .ctl_name = NET_IPV4_ROUTE_MAX_DELAY,
- .procname = "max_delay",
- .data = &ip_rt_max_delay,
- .maxlen = sizeof(int),
- .mode = 0644,
- .proc_handler = &proc_dointvec_jiffies,
- .strategy = &sysctl_jiffies,
- },
+static void rt_secret_reschedule(int old)
+{
+ struct net *net;
+ int new = ip_rt_secret_interval;
+ int diff = new - old;
+
+ if (!diff)
+ return;
+
+ rtnl_lock();
+ for_each_net(net) {
+ int deleted = del_timer_sync(&net->ipv4.rt_secret_timer);
+
+ if (!new)
+ continue;
+
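+ /*
+ * If the timer was pending, shift its remaining time by the
+ * change in interval; a result <= 0 fires the rebuild
+ * immediately. Otherwise start a fresh full period.
+ */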
+ if (deleted) {
+ long time = net->ipv4.rt_secret_timer.expires - jiffies;
+
+ if (time <= 0 || (time += diff) <= 0)
+ time = 0;
+
+ net->ipv4.rt_secret_timer.expires = time;
+ } else
+ net->ipv4.rt_secret_timer.expires = new;
+
+ net->ipv4.rt_secret_timer.expires += jiffies;
+ add_timer(&net->ipv4.rt_secret_timer);
+ }
+ rtnl_unlock();
+}
+
+static int ipv4_sysctl_rt_secret_interval(ctl_table *ctl, int write,
+ struct file *filp,
+ void __user *buffer, size_t *lenp,
+ loff_t *ppos)
+{
+ int old = ip_rt_secret_interval;
+ int ret = proc_dointvec_jiffies(ctl, write, filp, buffer, lenp, ppos);
+
+ rt_secret_reschedule(old);
+
+ return ret;
+}
+
+static int ipv4_sysctl_rt_secret_interval_strategy(ctl_table *table,
+ void __user *oldval,
+ size_t __user *oldlenp,
+ void __user *newval,
+ size_t newlen)
+{
+ int old = ip_rt_secret_interval;
+ int ret = sysctl_jiffies(table, oldval, oldlenp, newval, newlen);
+
+ rt_secret_reschedule(old);
+
+ return ret;
+}
+
+static ctl_table ipv4_route_table[] = {
{
.ctl_name = NET_IPV4_ROUTE_GC_THRESH,
.procname = "gc_thresh",
.data = &ipv4_dst_ops.gc_thresh,
.maxlen = sizeof(int),
.mode = 0644,
- .proc_handler = &proc_dointvec,
+ .proc_handler = proc_dointvec,
},
{
.ctl_name = NET_IPV4_ROUTE_MAX_SIZE,
.data = &ip_rt_max_size,
.maxlen = sizeof(int),
.mode = 0644,
- .proc_handler = &proc_dointvec,
+ .proc_handler = proc_dointvec,
},
{
/* Deprecated. Use gc_min_interval_ms */
-
+
.ctl_name = NET_IPV4_ROUTE_GC_MIN_INTERVAL,
.procname = "gc_min_interval",
.data = &ip_rt_gc_min_interval,
.maxlen = sizeof(int),
.mode = 0644,
- .proc_handler = &proc_dointvec_jiffies,
- .strategy = &sysctl_jiffies,
+ .proc_handler = proc_dointvec_jiffies,
+ .strategy = sysctl_jiffies,
},
{
.ctl_name = NET_IPV4_ROUTE_GC_MIN_INTERVAL_MS,
.data = &ip_rt_gc_min_interval,
.maxlen = sizeof(int),
.mode = 0644,
- .proc_handler = &proc_dointvec_ms_jiffies,
- .strategy = &sysctl_ms_jiffies,
+ .proc_handler = proc_dointvec_ms_jiffies,
+ .strategy = sysctl_ms_jiffies,
},
{
.ctl_name = NET_IPV4_ROUTE_GC_TIMEOUT,
.data = &ip_rt_gc_timeout,
.maxlen = sizeof(int),
.mode = 0644,
- .proc_handler = &proc_dointvec_jiffies,
- .strategy = &sysctl_jiffies,
+ .proc_handler = proc_dointvec_jiffies,
+ .strategy = sysctl_jiffies,
},
{
.ctl_name = NET_IPV4_ROUTE_GC_INTERVAL,
.data = &ip_rt_gc_interval,
.maxlen = sizeof(int),
.mode = 0644,
- .proc_handler = &proc_dointvec_jiffies,
- .strategy = &sysctl_jiffies,
+ .proc_handler = proc_dointvec_jiffies,
+ .strategy = sysctl_jiffies,
},
{
.ctl_name = NET_IPV4_ROUTE_REDIRECT_LOAD,
.data = &ip_rt_redirect_load,
.maxlen = sizeof(int),
.mode = 0644,
- .proc_handler = &proc_dointvec,
+ .proc_handler = proc_dointvec,
},
{
.ctl_name = NET_IPV4_ROUTE_REDIRECT_NUMBER,
.data = &ip_rt_redirect_number,
.maxlen = sizeof(int),
.mode = 0644,
- .proc_handler = &proc_dointvec,
+ .proc_handler = proc_dointvec,
},
{
.ctl_name = NET_IPV4_ROUTE_REDIRECT_SILENCE,
.data = &ip_rt_redirect_silence,
.maxlen = sizeof(int),
.mode = 0644,
- .proc_handler = &proc_dointvec,
+ .proc_handler = proc_dointvec,
},
{
.ctl_name = NET_IPV4_ROUTE_ERROR_COST,
.data = &ip_rt_error_cost,
.maxlen = sizeof(int),
.mode = 0644,
- .proc_handler = &proc_dointvec,
+ .proc_handler = proc_dointvec,
},
{
.ctl_name = NET_IPV4_ROUTE_ERROR_BURST,
.data = &ip_rt_error_burst,
.maxlen = sizeof(int),
.mode = 0644,
- .proc_handler = &proc_dointvec,
+ .proc_handler = proc_dointvec,
},
{
.ctl_name = NET_IPV4_ROUTE_GC_ELASTICITY,
.data = &ip_rt_gc_elasticity,
.maxlen = sizeof(int),
.mode = 0644,
- .proc_handler = &proc_dointvec,
+ .proc_handler = proc_dointvec,
},
{
.ctl_name = NET_IPV4_ROUTE_MTU_EXPIRES,
.data = &ip_rt_mtu_expires,
.maxlen = sizeof(int),
.mode = 0644,
- .proc_handler = &proc_dointvec_jiffies,
- .strategy = &sysctl_jiffies,
+ .proc_handler = proc_dointvec_jiffies,
+ .strategy = sysctl_jiffies,
},
{
.ctl_name = NET_IPV4_ROUTE_MIN_PMTU,
.data = &ip_rt_min_pmtu,
.maxlen = sizeof(int),
.mode = 0644,
- .proc_handler = &proc_dointvec,
+ .proc_handler = proc_dointvec,
},
{
.ctl_name = NET_IPV4_ROUTE_MIN_ADVMSS,
.data = &ip_rt_min_advmss,
.maxlen = sizeof(int),
.mode = 0644,
- .proc_handler = &proc_dointvec,
+ .proc_handler = proc_dointvec,
},
{
.ctl_name = NET_IPV4_ROUTE_SECRET_INTERVAL,
.data = &ip_rt_secret_interval,
.maxlen = sizeof(int),
.mode = 0644,
- .proc_handler = &proc_dointvec_jiffies,
- .strategy = &sysctl_jiffies,
+ .proc_handler = ipv4_sysctl_rt_secret_interval,
+ .strategy = ipv4_sysctl_rt_secret_interval_strategy,
},
{ .ctl_name = 0 }
};
-#endif
-#ifdef CONFIG_NET_CLS_ROUTE
-struct ip_rt_acct *ip_rt_acct;
+static struct ctl_table empty[1];
-/* This code sucks. But you should have seen it before! --RR */
+static struct ctl_table ipv4_skeleton[] =
+{
+ { .procname = "route", .ctl_name = NET_IPV4_ROUTE,
+ .mode = 0555, .child = ipv4_route_table},
+ { .procname = "neigh", .ctl_name = NET_IPV4_NEIGH,
+ .mode = 0555, .child = empty},
+ { }
+};
-/* IP route accounting ptr for this logical cpu number. */
-#define IP_RT_ACCT_CPU(i) (ip_rt_acct + i * 256)
+static __net_initdata struct ctl_path ipv4_path[] = {
+ { .procname = "net", .ctl_name = CTL_NET, },
+ { .procname = "ipv4", .ctl_name = NET_IPV4, },
+ { },
+};
-#ifdef CONFIG_PROC_FS
-static int ip_rt_acct_read(char *buffer, char **start, off_t offset,
- int length, int *eof, void *data)
-{
- unsigned int i;
+static struct ctl_table ipv4_route_flush_table[] = {
+ {
+ .ctl_name = NET_IPV4_ROUTE_FLUSH,
+ .procname = "flush",
+ .maxlen = sizeof(int),
+ .mode = 0200,
+ .proc_handler = ipv4_sysctl_rtcache_flush,
+ .strategy = ipv4_sysctl_rtcache_flush_strategy,
+ },
+ { .ctl_name = 0 },
+};
- if ((offset & 3) || (length & 3))
- return -EIO;
+static __net_initdata struct ctl_path ipv4_route_path[] = {
+ { .procname = "net", .ctl_name = CTL_NET, },
+ { .procname = "ipv4", .ctl_name = NET_IPV4, },
+ { .procname = "route", .ctl_name = NET_IPV4_ROUTE, },
+ { },
+};
- if (offset >= sizeof(struct ip_rt_acct) * 256) {
- *eof = 1;
- return 0;
- }
+static __net_init int sysctl_route_net_init(struct net *net)
+{
+ struct ctl_table *tbl;
- if (offset + length >= sizeof(struct ip_rt_acct) * 256) {
- length = sizeof(struct ip_rt_acct) * 256 - offset;
- *eof = 1;
+ tbl = ipv4_route_flush_table;
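+ /*
+ * init_net can use the static table as-is; every other namespace
+ * gets its own copy so that ->extra1 can point back at that net.
+ */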
+ if (net != &init_net) {
+ tbl = kmemdup(tbl, sizeof(ipv4_route_flush_table), GFP_KERNEL);
+ if (tbl == NULL)
+ goto err_dup;
}
+ tbl[0].extra1 = net;
- offset /= sizeof(u32);
+ net->ipv4.route_hdr =
+ register_net_sysctl_table(net, ipv4_route_path, tbl);
+ if (net->ipv4.route_hdr == NULL)
+ goto err_reg;
+ return 0;
- if (length > 0) {
- u32 *src = ((u32 *) IP_RT_ACCT_CPU(0)) + offset;
- u32 *dst = (u32 *) buffer;
+err_reg:
+ if (tbl != ipv4_route_flush_table)
+ kfree(tbl);
+err_dup:
+ return -ENOMEM;
+}
- /* Copy first cpu. */
- *start = buffer;
- memcpy(dst, src, length);
+static __net_exit void sysctl_route_net_exit(struct net *net)
+{
+ struct ctl_table *tbl;
- /* Add the other cpus in, one int at a time */
- for_each_cpu(i) {
- unsigned int j;
+ tbl = net->ipv4.route_hdr->ctl_table_arg;
+ unregister_net_sysctl_table(net->ipv4.route_hdr);
+ BUG_ON(tbl == ipv4_route_flush_table);
+ kfree(tbl);
+}
- src = ((u32 *) IP_RT_ACCT_CPU(i)) + offset;
+static __net_initdata struct pernet_operations sysctl_route_ops = {
+ .init = sysctl_route_net_init,
+ .exit = sysctl_route_net_exit,
+};
+#endif
- for (j = 0; j < length/4; j++)
- dst[j] += src[j];
- }
+
+static __net_init int rt_secret_timer_init(struct net *net)
+{
+ atomic_set(&net->ipv4.rt_genid,
+ (int) ((num_physpages ^ (num_physpages>>8)) ^
+ (jiffies ^ (jiffies >> 7))));
+
+ net->ipv4.rt_secret_timer.function = rt_secret_rebuild;
+ net->ipv4.rt_secret_timer.data = (unsigned long)net;
+ init_timer_deferrable(&net->ipv4.rt_secret_timer);
+
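+ /* Stagger the first rebuild by a random fraction of the interval
+ * so the per-net timers do not all fire together. */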
+ if (ip_rt_secret_interval) {
+ net->ipv4.rt_secret_timer.expires =
+ jiffies + net_random() % ip_rt_secret_interval +
+ ip_rt_secret_interval;
+ add_timer(&net->ipv4.rt_secret_timer);
}
- return length;
+ return 0;
}
-#endif /* CONFIG_PROC_FS */
+
+static __net_exit void rt_secret_timer_exit(struct net *net)
+{
+ del_timer_sync(&net->ipv4.rt_secret_timer);
+}
+
+static __net_initdata struct pernet_operations rt_secret_timer_ops = {
+ .init = rt_secret_timer_init,
+ .exit = rt_secret_timer_exit,
+};
+
+
+#ifdef CONFIG_NET_CLS_ROUTE
+struct ip_rt_acct *ip_rt_acct __read_mostly;
#endif /* CONFIG_NET_CLS_ROUTE */
static __initdata unsigned long rhash_entries;
{
int rc = 0;
- rt_hash_rnd = (int) ((num_physpages ^ (num_physpages>>8)) ^
- (jiffies ^ (jiffies >> 7)));
-
#ifdef CONFIG_NET_CLS_ROUTE
- {
- int order;
- for (order = 0;
- (PAGE_SIZE << order) < 256 * sizeof(struct ip_rt_acct) * NR_CPUS; order++)
- /* NOTHING */;
- ip_rt_acct = (struct ip_rt_acct *)__get_free_pages(GFP_KERNEL, order);
+ ip_rt_acct = __alloc_percpu(256 * sizeof(struct ip_rt_acct));
if (!ip_rt_acct)
panic("IP: failed to allocate ip_rt_acct\n");
- memset(ip_rt_acct, 0, PAGE_SIZE << order);
- }
#endif
- ipv4_dst_ops.kmem_cachep = kmem_cache_create("ip_dst_cache",
- sizeof(struct rtable),
- 0, SLAB_HWCACHE_ALIGN,
- NULL, NULL);
+ ipv4_dst_ops.kmem_cachep =
+ kmem_cache_create("ip_dst_cache", sizeof(struct rtable), 0,
+ SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
- if (!ipv4_dst_ops.kmem_cachep)
- panic("IP: failed to allocate ip_dst_cache\n");
+ ipv4_dst_blackhole_ops.kmem_cachep = ipv4_dst_ops.kmem_cachep;
rt_hash_table = (struct rt_hash_bucket *)
alloc_large_system_hash("IP route cache",
sizeof(struct rt_hash_bucket),
rhash_entries,
(num_physpages >= 128 * 1024) ?
- (27 - PAGE_SHIFT) :
- (29 - PAGE_SHIFT),
- HASH_HIGHMEM,
+ 15 : 17,
+ 0,
&rt_hash_log,
&rt_hash_mask,
0);
ipv4_dst_ops.gc_thresh = (rt_hash_mask + 1);
ip_rt_max_size = (rt_hash_mask + 1) * 16;
- rt_cache_stat = alloc_percpu(struct rt_cache_stat);
- if (!rt_cache_stat)
- return -ENOMEM;
-
devinet_init();
ip_fib_init();
- init_timer(&rt_flush_timer);
- rt_flush_timer.function = rt_run_flush;
- init_timer(&rt_periodic_timer);
- rt_periodic_timer.function = rt_check_expire;
- init_timer(&rt_secret_timer);
- rt_secret_timer.function = rt_secret_rebuild;
-
/* All the timers started at system startup tend
to synchronize. Perturb it a bit.
*/
- rt_periodic_timer.expires = jiffies + net_random() % ip_rt_gc_interval +
- ip_rt_gc_interval;
- add_timer(&rt_periodic_timer);
+ schedule_delayed_work(&expires_work,
+ net_random() % ip_rt_gc_interval + ip_rt_gc_interval);
- rt_secret_timer.expires = jiffies + net_random() % ip_rt_secret_interval +
- ip_rt_secret_interval;
- add_timer(&rt_secret_timer);
+ if (register_pernet_subsys(&rt_secret_timer_ops))
+ printk(KERN_ERR "Unable to setup rt_secret_timer\n");
-#ifdef CONFIG_PROC_FS
- {
- struct proc_dir_entry *rtstat_pde = NULL; /* keep gcc happy */
- if (!proc_net_fops_create("rt_cache", S_IRUGO, &rt_cache_seq_fops) ||
- !(rtstat_pde = create_proc_entry("rt_cache", S_IRUGO,
- proc_net_stat))) {
- free_percpu(rt_cache_stat);
- return -ENOMEM;
- }
- rtstat_pde->proc_fops = &rt_cpu_seq_fops;
- }
-#ifdef CONFIG_NET_CLS_ROUTE
- create_proc_read_entry("rt_acct", 0, proc_net, ip_rt_acct_read, NULL);
-#endif
-#endif
+ if (ip_rt_proc_init())
+ printk(KERN_ERR "Unable to create route proc files\n");
#ifdef CONFIG_XFRM
xfrm_init();
xfrm4_init();
#endif
+ rtnl_register(PF_INET, RTM_GETROUTE, inet_rtm_getroute, NULL);
+
+#ifdef CONFIG_SYSCTL
+ register_pernet_subsys(&sysctl_route_ops);
+#endif
return rc;
}
+#ifdef CONFIG_SYSCTL
+/*
+ * We really need to sanitize the damn ipv4 init order, then all
+ * this nonsense will go away.
+ */
+void __init ip_static_sysctl_init(void)
+{
+ register_sysctl_paths(ipv4_path, ipv4_skeleton);
+}
+#endif
+
EXPORT_SYMBOL(__ip_select_ident);
EXPORT_SYMBOL(ip_route_input);
EXPORT_SYMBOL(ip_route_output_key);