* make per-netns conntrack hash
An alternative solution is to add a ->ct_net pointer to tuplehashes and
keep a single hash; I tried that, but it's ugly and requires more code
deep down in protocol modules et al.
* propagate netns pointer to where needed, e.g. to conntrack iterators.
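For illustration, the lookup API after this patch takes an explicit
struct net; a condensed sketch (not code from the patch -- lookup_example()
is a made-up caller, error handling trimmed):

	#include <net/sock.h>
	#include <net/netfilter/nf_conntrack.h>

	static struct nf_conn *lookup_example(struct sock *sk,
				const struct nf_conntrack_tuple *tuple)
	{
		struct nf_conntrack_tuple_hash *h;

		/* sock_net(sk) names the socket's namespace; paths that
		 * are not converted yet keep passing &init_net. */
		h = nf_conntrack_find_get(sock_net(sk), tuple);
		if (h == NULL)
			return NULL;
		return nf_ct_tuplehash_to_ctrack(h);
	}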
Signed-off-by: Alexey Dobriyan <adobriyan@gmail.com>
Signed-off-by: Patrick McHardy <kaber@trash.net>
16 files changed:
unsigned int size);
extern struct nf_conntrack_tuple_hash *
-__nf_conntrack_find(const struct nf_conntrack_tuple *tuple);
+__nf_conntrack_find(struct net *net, const struct nf_conntrack_tuple *tuple);
extern void nf_conntrack_hash_insert(struct nf_conn *ct);
-extern void nf_conntrack_flush(void);
+extern void nf_conntrack_flush(struct net *net);
extern bool nf_ct_get_tuplepr(const struct sk_buff *skb,
unsigned int nhoff, u_int16_t l3num,
/* Iterate over all conntracks: if iter returns true, it's deleted. */
extern void
-nf_ct_iterate_cleanup(int (*iter)(struct nf_conn *i, void *data), void *data);
+nf_ct_iterate_cleanup(struct net *net, int (*iter)(struct nf_conn *i, void *data), void *data);
extern void nf_conntrack_free(struct nf_conn *ct);
extern struct nf_conn *
nf_conntrack_alloc(struct net *net,
/* Find a connection corresponding to a tuple. */
extern struct nf_conntrack_tuple_hash *
-nf_conntrack_find_get(const struct nf_conntrack_tuple *tuple);
+nf_conntrack_find_get(struct net *net, const struct nf_conntrack_tuple *tuple);
extern int __nf_conntrack_confirm(struct sk_buff *skb);
const struct nf_conntrack_l3proto *l3proto,
const struct nf_conntrack_l4proto *proto);
-extern struct hlist_head *nf_conntrack_hash;
extern spinlock_t nf_conntrack_lock ;
extern struct hlist_head unconfirmed;
struct netns_ct {
atomic_t count;
+ struct hlist_head *hash;
+ int hash_vmalloc;
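(For reference, a sketch of the resulting structure; members unrelated
to this patch are elided:)

	struct netns_ct {
		atomic_t		count;		/* conntracks in this netns */
		struct hlist_head	*hash;		/* per-netns conntrack hash */
		int			hash_vmalloc;	/* nonzero if hash was vmalloc'ed */
		/* ... */
	};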
and forget them. */
NF_CT_ASSERT(dev->ifindex != 0);
- nf_ct_iterate_cleanup(device_cmp, (void *)(long)dev->ifindex);
+ nf_ct_iterate_cleanup(&init_net, device_cmp,
+ (void *)(long)dev->ifindex);
- h = nf_conntrack_find_get(&tuple);
+ h = nf_conntrack_find_get(sock_net(sk), &tuple);
if (h) {
struct sockaddr_in sin;
struct nf_conn *ct = nf_ct_tuplehash_to_ctrack(h);
for (st->bucket = 0;
st->bucket < nf_conntrack_htable_size;
st->bucket++) {
- n = rcu_dereference(nf_conntrack_hash[st->bucket].first);
+ n = rcu_dereference(init_net.ct.hash[st->bucket].first);
while (head == NULL) {
if (++st->bucket >= nf_conntrack_htable_size)
return NULL;
- head = rcu_dereference(nf_conntrack_hash[st->bucket].first);
+ head = rcu_dereference(init_net.ct.hash[st->bucket].first);
- h = nf_conntrack_find_get(&innertuple);
+ h = nf_conntrack_find_get(&init_net, &innertuple);
if (!h) {
pr_debug("icmp_error_message: no match\n");
return -NF_ACCEPT;
static void __exit nf_nat_cleanup(void)
{
- nf_ct_iterate_cleanup(&clean_nat, NULL);
+ nf_ct_iterate_cleanup(&init_net, &clean_nat, NULL);
synchronize_rcu();
nf_ct_free_hashtable(bysource, nf_nat_vmalloced, nf_nat_htable_size);
nf_ct_l3proto_put(l3proto);
- h = nf_conntrack_find_get(&intuple);
+ h = nf_conntrack_find_get(&init_net, &intuple);
if (!h) {
pr_debug("icmpv6_error: no match\n");
return -NF_ACCEPT;
int nf_conntrack_max __read_mostly;
EXPORT_SYMBOL_GPL(nf_conntrack_max);
-struct hlist_head *nf_conntrack_hash __read_mostly;
-EXPORT_SYMBOL_GPL(nf_conntrack_hash);
-
struct nf_conn nf_conntrack_untracked __read_mostly;
EXPORT_SYMBOL_GPL(nf_conntrack_untracked);
unsigned int nf_ct_log_invalid __read_mostly;
HLIST_HEAD(unconfirmed);
-static int nf_conntrack_vmalloc __read_mostly;
static struct kmem_cache *nf_conntrack_cachep __read_mostly;
DEFINE_PER_CPU(struct ip_conntrack_stat, nf_conntrack_stat);
}
struct nf_conntrack_tuple_hash *
-__nf_conntrack_find(const struct nf_conntrack_tuple *tuple)
+__nf_conntrack_find(struct net *net, const struct nf_conntrack_tuple *tuple)
{
struct nf_conntrack_tuple_hash *h;
struct hlist_node *n;
* at least once for the stats anyway.
*/
local_bh_disable();
- hlist_for_each_entry_rcu(h, n, &nf_conntrack_hash[hash], hnode) {
+ hlist_for_each_entry_rcu(h, n, &net->ct.hash[hash], hnode) {
if (nf_ct_tuple_equal(tuple, &h->tuple)) {
NF_CT_STAT_INC(found);
local_bh_enable();
/* Find a connection corresponding to a tuple. */
struct nf_conntrack_tuple_hash *
-nf_conntrack_find_get(const struct nf_conntrack_tuple *tuple)
+nf_conntrack_find_get(struct net *net, const struct nf_conntrack_tuple *tuple)
{
struct nf_conntrack_tuple_hash *h;
struct nf_conn *ct;
rcu_read_lock();
- h = __nf_conntrack_find(tuple);
+ h = __nf_conntrack_find(net, tuple);
if (h) {
ct = nf_ct_tuplehash_to_ctrack(h);
if (unlikely(!atomic_inc_not_zero(&ct->ct_general.use)))
unsigned int hash,
unsigned int repl_hash)
{
+ struct net *net = nf_ct_net(ct);
+
hlist_add_head_rcu(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnode,
-		   &nf_conntrack_hash[hash]);
+		   &net->ct.hash[hash]);
hlist_add_head_rcu(&ct->tuplehash[IP_CT_DIR_REPLY].hnode,
-		   &nf_conntrack_hash[repl_hash]);
+		   &net->ct.hash[repl_hash]);
}
void nf_conntrack_hash_insert(struct nf_conn *ct)
struct nf_conn_help *help;
struct hlist_node *n;
enum ip_conntrack_info ctinfo;
ct = nf_ct_get(skb, &ctinfo);
/* ipt_REJECT uses nf_conntrack_attach to attach related
ICMP/TCP RST packets in other direction. Actual packet
/* See if there's one in the list already, including reverse:
NAT could have grabbed it without realizing, since we're
not in the hash. If there is, we lost race. */
- hlist_for_each_entry(h, n, &nf_conntrack_hash[hash], hnode)
+ hlist_for_each_entry(h, n, &net->ct.hash[hash], hnode)
if (nf_ct_tuple_equal(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple,
&h->tuple))
goto out;
- hlist_for_each_entry(h, n, &nf_conntrack_hash[repl_hash], hnode)
+ hlist_for_each_entry(h, n, &net->ct.hash[repl_hash], hnode)
if (nf_ct_tuple_equal(&ct->tuplehash[IP_CT_DIR_REPLY].tuple,
&h->tuple))
goto out;
nf_conntrack_tuple_taken(const struct nf_conntrack_tuple *tuple,
const struct nf_conn *ignored_conntrack)
{
+ struct net *net = nf_ct_net(ignored_conntrack);
struct nf_conntrack_tuple_hash *h;
struct hlist_node *n;
unsigned int hash = hash_conntrack(tuple);
* least once for the stats anyway.
*/
rcu_read_lock_bh();
- hlist_for_each_entry_rcu(h, n, &nf_conntrack_hash[hash], hnode) {
+ hlist_for_each_entry_rcu(h, n, &net->ct.hash[hash], hnode) {
if (nf_ct_tuplehash_to_ctrack(h) != ignored_conntrack &&
nf_ct_tuple_equal(tuple, &h->tuple)) {
NF_CT_STAT_INC(found);
/* There's a small race here where we may free a just-assured
connection. Too bad: we're in trouble anyway. */
-static noinline int early_drop(unsigned int hash)
+static noinline int early_drop(struct net *net, unsigned int hash)
{
/* Use oldest entry, which is roughly LRU */
struct nf_conntrack_tuple_hash *h;
rcu_read_lock();
for (i = 0; i < nf_conntrack_htable_size; i++) {
- hlist_for_each_entry_rcu(h, n, &nf_conntrack_hash[hash],
+ hlist_for_each_entry_rcu(h, n, &net->ct.hash[hash],
hnode) {
tmp = nf_ct_tuplehash_to_ctrack(h);
if (!test_bit(IPS_ASSURED_BIT, &tmp->status))
if (nf_conntrack_max &&
unlikely(atomic_read(&net->ct.count) > nf_conntrack_max)) {
unsigned int hash = hash_conntrack(orig);
- if (!early_drop(hash)) {
+ if (!early_drop(net, hash)) {
atomic_dec(&net->ct.count);
if (net_ratelimit())
printk(KERN_WARNING
}
/* look for tuple match */
- h = nf_conntrack_find_get(&tuple);
+ h = nf_conntrack_find_get(&init_net, &tuple);
if (!h) {
h = init_conntrack(&init_net, &tuple, l3proto, l4proto, skb,
dataoff);
/* Bring out ya dead! */
static struct nf_conn *
-get_next_corpse(int (*iter)(struct nf_conn *i, void *data),
+get_next_corpse(struct net *net, int (*iter)(struct nf_conn *i, void *data),
void *data, unsigned int *bucket)
{
struct nf_conntrack_tuple_hash *h;
spin_lock_bh(&nf_conntrack_lock);
for (; *bucket < nf_conntrack_htable_size; (*bucket)++) {
- hlist_for_each_entry(h, n, &nf_conntrack_hash[*bucket], hnode) {
+ hlist_for_each_entry(h, n, &net->ct.hash[*bucket], hnode) {
ct = nf_ct_tuplehash_to_ctrack(h);
if (iter(ct, data))
goto found;
-void
-nf_ct_iterate_cleanup(int (*iter)(struct nf_conn *i, void *data), void *data)
+void nf_ct_iterate_cleanup(struct net *net,
+ int (*iter)(struct nf_conn *i, void *data),
+ void *data)
{
struct nf_conn *ct;
unsigned int bucket = 0;
- while ((ct = get_next_corpse(iter, data, &bucket)) != NULL) {
+ while ((ct = get_next_corpse(net, iter, data, &bucket)) != NULL) {
/* Time to push up daises... */
if (del_timer(&ct->timeout))
death_by_timeout((unsigned long)ct);
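A hypothetical user of the widened iterator API (kill_marked() and
flush_marked() are illustrations, not part of the patch; ct->mark
assumes CONFIG_NF_CONNTRACK_MARK):

	/* Returning non-zero makes nf_ct_iterate_cleanup() delete the entry. */
	static int kill_marked(struct nf_conn *i, void *data)
	{
		u_int32_t mark = *(u_int32_t *)data;

		return i->mark == mark;
	}

	static void flush_marked(struct net *net, u_int32_t mark)
	{
		nf_ct_iterate_cleanup(net, kill_marked, &mark);
	}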
}
EXPORT_SYMBOL_GPL(nf_ct_free_hashtable);
-void nf_conntrack_flush(void)
+void nf_conntrack_flush(struct net *net)
{
-	nf_ct_iterate_cleanup(kill_all, NULL);
+	nf_ct_iterate_cleanup(net, kill_all, NULL);
}
EXPORT_SYMBOL_GPL(nf_conntrack_flush);
nf_ct_event_cache_flush();
i_see_dead_people:
+ nf_conntrack_flush(net);
if (atomic_read(&net->ct.count) != 0) {
schedule();
goto i_see_dead_people;
rcu_assign_pointer(nf_ct_destroy, NULL);
kmem_cache_destroy(nf_conntrack_cachep);
- nf_ct_free_hashtable(nf_conntrack_hash, nf_conntrack_vmalloc,
+ nf_ct_free_hashtable(net->ct.hash, net->ct.hash_vmalloc,
nf_conntrack_htable_size);
nf_conntrack_acct_fini();
*/
spin_lock_bh(&nf_conntrack_lock);
for (i = 0; i < nf_conntrack_htable_size; i++) {
- while (!hlist_empty(&nf_conntrack_hash[i])) {
- h = hlist_entry(nf_conntrack_hash[i].first,
+ while (!hlist_empty(&init_net.ct.hash[i])) {
+ h = hlist_entry(init_net.ct.hash[i].first,
struct nf_conntrack_tuple_hash, hnode);
hlist_del_rcu(&h->hnode);
bucket = __hash_conntrack(&h->tuple, hashsize, rnd);
}
}
old_size = nf_conntrack_htable_size;
- old_vmalloced = nf_conntrack_vmalloc;
- old_hash = nf_conntrack_hash;
+ old_vmalloced = init_net.ct.hash_vmalloc;
+ old_hash = init_net.ct.hash;
nf_conntrack_htable_size = hashsize;
- nf_conntrack_vmalloc = vmalloced;
- nf_conntrack_hash = hash;
+ init_net.ct.hash_vmalloc = vmalloced;
+ init_net.ct.hash = hash;
nf_conntrack_hash_rnd = rnd;
spin_unlock_bh(&nf_conntrack_lock);
max_factor = 4;
}
atomic_set(&net->ct.count, 0);
- nf_conntrack_hash = nf_ct_alloc_hashtable(&nf_conntrack_htable_size,
- &nf_conntrack_vmalloc);
- if (!nf_conntrack_hash) {
+ net->ct.hash = nf_ct_alloc_hashtable(&nf_conntrack_htable_size,
+ &net->ct.hash_vmalloc);
+ if (!net->ct.hash) {
printk(KERN_ERR "Unable to create nf_conntrack_hash\n");
goto err_out;
}
err_free_conntrack_slab:
kmem_cache_destroy(nf_conntrack_cachep);
err_free_hash:
- nf_ct_free_hashtable(nf_conntrack_hash, nf_conntrack_vmalloc,
+ nf_ct_free_hashtable(net->ct.hash, net->ct.hash_vmalloc,
nf_conntrack_htable_size);
err_out:
return -ENOMEM;
hlist_for_each_entry(h, n, &unconfirmed, hnode)
unhelp(h, me);
for (i = 0; i < nf_conntrack_htable_size; i++) {
- hlist_for_each_entry(h, n, &nf_conntrack_hash[i], hnode)
+ hlist_for_each_entry(h, n, &init_net.ct.hash[i], hnode)
unhelp(h, me);
}
spin_unlock_bh(&nf_conntrack_lock);
last = (struct nf_conn *)cb->args[1];
for (; cb->args[0] < nf_conntrack_htable_size; cb->args[0]++) {
restart:
- hlist_for_each_entry_rcu(h, n, &nf_conntrack_hash[cb->args[0]],
+ hlist_for_each_entry_rcu(h, n, &init_net.ct.hash[cb->args[0]],
hnode) {
if (NF_CT_DIRECTION(h) != IP_CT_DIR_ORIGINAL)
continue;
err = ctnetlink_parse_tuple(cda, &tuple, CTA_TUPLE_REPLY, u3);
else {
/* Flush the whole table */
+ nf_conntrack_flush(&init_net);
return 0;
}
if (err < 0)
return err;
- h = nf_conntrack_find_get(&tuple);
+ h = nf_conntrack_find_get(&init_net, &tuple);
- h = nf_conntrack_find_get(&tuple);
+ h = nf_conntrack_find_get(&init_net, &tuple);
spin_lock_bh(&nf_conntrack_lock);
if (cda[CTA_TUPLE_ORIG])
- h = __nf_conntrack_find(&otuple);
+ h = __nf_conntrack_find(&init_net, &otuple);
else if (cda[CTA_TUPLE_REPLY])
- h = __nf_conntrack_find(&rtuple);
+ h = __nf_conntrack_find(&init_net, &rtuple);
if (h == NULL) {
struct nf_conntrack_tuple master;
if (err < 0)
goto out_unlock;
- master_h = __nf_conntrack_find(&master);
+ master_h = __nf_conntrack_find(&init_net, &master);
if (master_h == NULL) {
err = -ENOENT;
goto out_unlock;
return err;
/* Look for master conntrack of this expectation */
- h = nf_conntrack_find_get(&master_tuple);
+ h = nf_conntrack_find_get(&init_net, &master_tuple);
if (!h)
return -ENOENT;
ct = nf_ct_tuplehash_to_ctrack(h);
pr_debug("trying to timeout ct or exp for tuple ");
nf_ct_dump_tuple(t);
pr_debug("trying to timeout ct or exp for tuple ");
nf_ct_dump_tuple(t);
- h = nf_conntrack_find_get(t);
+ h = nf_conntrack_find_get(&init_net, t);
if (h) {
sibling = nf_ct_tuplehash_to_ctrack(h);
pr_debug("setting timeout of conntrack %p to 0\n", sibling);
synchronize_rcu();
/* Remove all contrack entries for this protocol */
- nf_ct_iterate_cleanup(kill_l3proto, proto);
+ nf_ct_iterate_cleanup(&init_net, kill_l3proto, proto);
}
EXPORT_SYMBOL_GPL(nf_conntrack_l3proto_unregister);
synchronize_rcu();
/* Remove all contrack entries for this protocol */
- nf_ct_iterate_cleanup(kill_l4proto, l4proto);
+ nf_ct_iterate_cleanup(&init_net, kill_l4proto, l4proto);
}
EXPORT_SYMBOL_GPL(nf_conntrack_l4proto_unregister);
for (st->bucket = 0;
st->bucket < nf_conntrack_htable_size;
st->bucket++) {
- n = rcu_dereference(nf_conntrack_hash[st->bucket].first);
+ n = rcu_dereference(init_net.ct.hash[st->bucket].first);
while (head == NULL) {
if (++st->bucket >= nf_conntrack_htable_size)
return NULL;
- head = rcu_dereference(nf_conntrack_hash[st->bucket].first);
+ head = rcu_dereference(init_net.ct.hash[st->bucket].first);
/* check the saved connections */
list_for_each_entry_safe(conn, tmp, hash, list) {
- found = __nf_conntrack_find(&conn->tuple);
+ found = __nf_conntrack_find(&init_net, &conn->tuple);
found_ct = NULL;
if (found != NULL)