#ifndef _X_TABLES_H
#define _X_TABLES_H
+#include <linux/types.h>
+
#define XT_FUNCTION_MAXNAMELEN 30
#define XT_TABLE_MAXNAMELEN 32
struct xt_entry_match {
union {
struct {
- u_int16_t match_size;
+ __u16 match_size;
/* Used by userspace */
char name[XT_FUNCTION_MAXNAMELEN-1];
- u_int8_t revision;
+ __u8 revision;
} user;
struct {
- u_int16_t match_size;
+ __u16 match_size;
/* Used inside the kernel */
struct xt_match *match;
} kernel;
/* Total length */
- u_int16_t match_size;
+ __u16 match_size;
} u;
unsigned char data[0];
struct xt_entry_target {
union {
struct {
- u_int16_t target_size;
+ __u16 target_size;
/* Used by userspace */
char name[XT_FUNCTION_MAXNAMELEN-1];
- u_int8_t revision;
+ __u8 revision;
} user;
struct {
- u_int16_t target_size;
+ __u16 target_size;
/* Used inside the kernel */
struct xt_target *target;
} kernel;
/* Total length */
- u_int16_t target_size;
+ __u16 target_size;
} u;
unsigned char data[0];
struct xt_get_revision {
char name[XT_FUNCTION_MAXNAMELEN-1];
- u_int8_t revision;
+ __u8 revision;
};
/* CONTINUE verdict for targets */
/* dummy structure to find out the alignment requirement for a struct
 * containing all the fundamental data types used in ipt_entry,
 * ip6t_entry and arpt_entry
 */
struct _xt_align
{
- u_int8_t u8;
- u_int16_t u16;
- u_int32_t u32;
- u_int64_t u64;
+ __u8 u8;
+ __u16 u16;
+ __u32 u32;
+ __u64 u64;
};
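/* XT_ALIGN rounds a size up to the strictest alignment required by
 * the scalar types above */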
#define XT_ALIGN(s) (((s) + (__alignof__(struct _xt_align)-1)) \
		     & ~(__alignof__(struct _xt_align)-1))
struct xt_counters
{
- u_int64_t pcnt, bcnt; /* Packet and byte counters */
+ __u64 pcnt, bcnt; /* Packet and byte counters */
};
/* The argument to IPT_SO_ADD_COUNTERS. */
extern void xt_table_entry_swap_rcu(struct xt_table_info *old,
struct xt_table_info *new);
+ /*
+ * This helper is performance-critical and must be inlined.
+ */
+ static inline unsigned long ifname_compare_aligned(const char *_a,
+ const char *_b,
+ const char *_mask)
+ {
+ const unsigned long *a = (const unsigned long *)_a;
+ const unsigned long *b = (const unsigned long *)_b;
+ const unsigned long *mask = (const unsigned long *)_mask;
+ unsigned long ret;
+
+ ret = (a[0] ^ b[0]) & mask[0];
+ if (IFNAMSIZ > sizeof(unsigned long))
+ ret |= (a[1] ^ b[1]) & mask[1];
+ if (IFNAMSIZ > 2 * sizeof(unsigned long))
+ ret |= (a[2] ^ b[2]) & mask[2];
+ if (IFNAMSIZ > 3 * sizeof(unsigned long))
+ ret |= (a[3] ^ b[3]) & mask[3];
+ BUILD_BUG_ON(IFNAMSIZ > 4 * sizeof(unsigned long));
+ return ret;
+ }
+
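A minimal usage sketch (the demo_rule type and demo_iface_match helper are hypothetical, not part of this patch): callers pass IFNAMSIZ-byte, long-aligned name and mask buffers, and a result of zero means every masked byte matched.

struct demo_rule {
	char iniface[IFNAMSIZ];      /* interface name to match */
	char iniface_mask[IFNAMSIZ]; /* 0xff marks significant bytes */
};

static bool demo_iface_match(const struct demo_rule *r,
			     const struct net_device *in)
{
	/* zero means the masked names are equal */
	return ifname_compare_aligned(in->name, r->iniface,
				      r->iniface_mask) == 0;
}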
#ifdef CONFIG_COMPAT
#include <net/compat.h>
}
/**
+ * nla_policy_len - Determine the maximum length of a policy
+ * @p: policy to use
+ * @n: number of policy entries
+ *
+ * Determines the maximum length of the policy. It is currently used
+ * to allocate Netlink buffers roughly the size of the actual
+ * message.
+ *
+ * Returns the maximum total length of all attributes allowed by the policy.
+ */
+ int
+ nla_policy_len(const struct nla_policy *p, int n)
+ {
+ int i, len = 0;
+
+ for (i = 0; i < n; i++, p++) {
+ if (p->len)
+ len += nla_total_size(p->len);
+ else if (nla_attr_minlen[p->type])
+ len += nla_total_size(nla_attr_minlen[p->type]);
+ }
+
+ return len;
+ }
+
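For illustration, a hypothetical two-attribute policy and the worst-case length it yields (the DEMO_* names are invented for this sketch): entries with an explicit .len contribute nla_total_size(len); the rest contribute the minimum size of their type.

/* hypothetical attribute set, for illustration only */
enum { DEMO_UNSPEC, DEMO_PORT, DEMO_NAME, __DEMO_MAX };
#define DEMO_MAX (__DEMO_MAX - 1)

static const struct nla_policy demo_policy[DEMO_MAX + 1] = {
	[DEMO_PORT] = { .type = NLA_U16 },               /* minlen: 2 bytes */
	[DEMO_NAME] = { .type = NLA_STRING, .len = 16 }, /* explicit maximum */
};

static int demo_msg_len(void)
{
	/* == nla_total_size(2) + nla_total_size(16) */
	return nla_policy_len(demo_policy, DEMO_MAX + 1);
}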
+ /**
* nla_parse - Parse a stream of attributes into a tb buffer
* @tb: destination array with maxtype+1 elements
* @maxtype: maximum attribute type to be expected
return d;
}
+#ifdef CONFIG_NET
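+/* the skb-based attribute helpers below are only built when the
+ * networking core is available */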
/**
* __nla_reserve - reserve room for attribute on the skb
* @skb: socket buffer to reserve room on
return nla;
}
+EXPORT_SYMBOL(__nla_reserve);
/**
* __nla_reserve_nohdr - reserve room for attribute without header
return start;
}
+EXPORT_SYMBOL(__nla_reserve_nohdr);
/**
* nla_reserve - reserve room for attribute on the skb
return __nla_reserve(skb, attrtype, attrlen);
}
+EXPORT_SYMBOL(nla_reserve);
/**
* nla_reserve_nohdr - reserve room for attribute without header
return __nla_reserve_nohdr(skb, attrlen);
}
+EXPORT_SYMBOL(nla_reserve_nohdr);
/**
* __nla_put - Add a netlink attribute to a socket buffer
nla = __nla_reserve(skb, attrtype, attrlen);
memcpy(nla_data(nla), data, attrlen);
}
+EXPORT_SYMBOL(__nla_put);
/**
* __nla_put_nohdr - Add a netlink attribute without header
start = __nla_reserve_nohdr(skb, attrlen);
memcpy(start, data, attrlen);
}
+EXPORT_SYMBOL(__nla_put_nohdr);
/**
* nla_put - Add a netlink attribute to a socket buffer
__nla_put(skb, attrtype, attrlen, data);
return 0;
}
+EXPORT_SYMBOL(nla_put);
/**
* nla_put_nohdr - Add a netlink attribute without header
__nla_put_nohdr(skb, attrlen, data);
return 0;
}
+EXPORT_SYMBOL(nla_put_nohdr);
/**
* nla_append - Add a netlink attribute without header or padding
memcpy(skb_put(skb, attrlen), data, attrlen);
return 0;
}
+EXPORT_SYMBOL(nla_append);
+#endif
EXPORT_SYMBOL(nla_validate);
+ EXPORT_SYMBOL(nla_policy_len);
EXPORT_SYMBOL(nla_parse);
EXPORT_SYMBOL(nla_find);
EXPORT_SYMBOL(nla_strlcpy);
EXPORT_SYMBOL(nla_memcpy);
EXPORT_SYMBOL(nla_memcmp);
EXPORT_SYMBOL(nla_strcmp);
-EXPORT_SYMBOL(nla_append);
/*
 * Unfortunately, _b and _mask are not aligned to an int (or long int).
 * Some arches don't care, and unrolling the loop is a win on them.
+ * For other arches, we only have a 16-bit alignment guarantee.
*/
static unsigned long ifname_compare(const char *_a, const char *_b, const char *_mask)
{
#ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
- const unsigned long *a = (const unsigned long *)_a;
- const unsigned long *b = (const unsigned long *)_b;
- const unsigned long *mask = (const unsigned long *)_mask;
- unsigned long ret;
-
- ret = (a[0] ^ b[0]) & mask[0];
- if (IFNAMSIZ > sizeof(unsigned long))
- ret |= (a[1] ^ b[1]) & mask[1];
- if (IFNAMSIZ > 2 * sizeof(unsigned long))
- ret |= (a[2] ^ b[2]) & mask[2];
- if (IFNAMSIZ > 3 * sizeof(unsigned long))
- ret |= (a[3] ^ b[3]) & mask[3];
- BUILD_BUG_ON(IFNAMSIZ > 4 * sizeof(unsigned long));
+ unsigned long ret = ifname_compare_aligned(_a, _b, _mask);
#else
unsigned long ret = 0;
+ const u16 *a = (const u16 *)_a;
+ const u16 *b = (const u16 *)_b;
+ const u16 *mask = (const u16 *)_mask;
int i;
- for (i = 0; i < IFNAMSIZ; i++)
- ret |= (_a[i] ^ _b[i]) & _mask[i];
+ for (i = 0; i < IFNAMSIZ/sizeof(u16); i++)
+ ret |= (a[i] ^ b[i]) & mask[i];
#endif
return ret;
}
&& unconditional(&e->arp)) || visited) {
unsigned int oldpos, size;
- if (t->verdict < -NF_MAX_VERDICT - 1) {
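+ /* only the builtin standard target carries a verdict here;
+ * for other targets this field holds target-private data */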
+ if ((strcmp(t->target.u.user.name,
+ ARPT_STANDARD_TARGET) == 0) &&
+ t->verdict < -NF_MAX_VERDICT - 1) {
duprintf("mark_source_chains: bad "
"negative verdict (%i)\n",
t->verdict);
if (net->ct.sysctl_checksum && hooknum == NF_INET_PRE_ROUTING &&
nf_ip6_checksum(skb, hooknum, dataoff, IPPROTO_ICMPV6)) {
- nf_log_packet(PF_INET6, 0, skb, NULL, NULL, NULL,
- "nf_ct_icmpv6: ICMPv6 checksum failed\n");
+ if (LOG_INVALID(net, IPPROTO_ICMPV6))
+ nf_log_packet(PF_INET6, 0, skb, NULL, NULL, NULL,
+ "nf_ct_icmpv6: ICMPv6 checksum failed ");
return -NF_ACCEPT;
}
return 0;
}
+
+ static int icmpv6_nlattr_tuple_size(void)
+ {
+ return nla_policy_len(icmpv6_nla_policy, CTA_PROTO_MAX + 1);
+ }
#endif
#ifdef CONFIG_SYSCTL
.error = icmpv6_error,
#if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE)
.tuple_to_nlattr = icmpv6_tuple_to_nlattr,
+ .nlattr_tuple_size = icmpv6_nlattr_tuple_size,
.nlattr_to_tuple = icmpv6_nlattr_to_tuple,
.nla_policy = icmpv6_nla_policy,
#endif
#include <linux/netdevice.h>
#include <linux/socket.h>
#include <linux/mm.h>
+ #include <linux/rculist_nulls.h>
#include <net/netfilter/nf_conntrack.h>
#include <net/netfilter/nf_conntrack_l3proto.h>
clean_from_lists(struct nf_conn *ct)
{
pr_debug("clean_from_lists(%p)\n", ct);
- hlist_del_rcu(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnode);
- hlist_del_rcu(&ct->tuplehash[IP_CT_DIR_REPLY].hnode);
+ hlist_nulls_del_rcu(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode);
+ hlist_nulls_del_rcu(&ct->tuplehash[IP_CT_DIR_REPLY].hnnode);
/* Destroy all pending expectations */
nf_ct_remove_expectations(ct);
/* We overload first tuple to link into unconfirmed list. */
if (!nf_ct_is_confirmed(ct)) {
- BUG_ON(hlist_unhashed(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnode));
- hlist_del(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnode);
+ BUG_ON(hlist_nulls_unhashed(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode));
+ hlist_nulls_del_rcu(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode);
}
NF_CT_STAT_INC(net, delete);
nf_ct_put(ct);
}
+ /*
+ * Warning:
+ * - Caller must take a reference on returned object
+ * and recheck nf_ct_tuple_equal(tuple, &h->tuple)
+ * OR
+ * - Caller must lock nf_conntrack_lock before calling this function
+ */
struct nf_conntrack_tuple_hash *
__nf_conntrack_find(struct net *net, const struct nf_conntrack_tuple *tuple)
{
struct nf_conntrack_tuple_hash *h;
- struct hlist_node *n;
+ struct hlist_nulls_node *n;
unsigned int hash = hash_conntrack(tuple);
/* Disable BHs the entire time since we normally need to disable them
* at least once for the stats anyway.
*/
local_bh_disable();
- hlist_for_each_entry_rcu(h, n, &net->ct.hash[hash], hnode) {
+ begin:
+ hlist_nulls_for_each_entry_rcu(h, n, &net->ct.hash[hash], hnnode) {
if (nf_ct_tuple_equal(tuple, &h->tuple)) {
NF_CT_STAT_INC(net, found);
local_bh_enable();
}
NF_CT_STAT_INC(net, searched);
}
+ /*
+ * If the nulls value we got at the end of this lookup is
+ * not the expected one, we must restart the lookup.
+ * We probably met an item that was moved to another chain.
+ */
+ if (get_nulls_value(n) != hash)
+ goto begin;
local_bh_enable();
return NULL;
struct nf_conn *ct;
rcu_read_lock();
+ begin:
h = __nf_conntrack_find(net, tuple);
if (h) {
ct = nf_ct_tuplehash_to_ctrack(h);
if (unlikely(!atomic_inc_not_zero(&ct->ct_general.use)))
h = NULL;
+ else {
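+ /* the conntrack may have been recycled for a different
+ * tuple (SLAB_DESTROY_BY_RCU); recheck before using it */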
+ if (unlikely(!nf_ct_tuple_equal(tuple, &h->tuple))) {
+ nf_ct_put(ct);
+ goto begin;
+ }
+ }
}
rcu_read_unlock();
{
struct net *net = nf_ct_net(ct);
- hlist_add_head_rcu(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnode,
+ hlist_nulls_add_head_rcu(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode,
&net->ct.hash[hash]);
- hlist_add_head_rcu(&ct->tuplehash[IP_CT_DIR_REPLY].hnode,
+ hlist_nulls_add_head_rcu(&ct->tuplehash[IP_CT_DIR_REPLY].hnnode,
&net->ct.hash[repl_hash]);
}
struct nf_conntrack_tuple_hash *h;
struct nf_conn *ct;
struct nf_conn_help *help;
- struct hlist_node *n;
+ struct hlist_nulls_node *n;
enum ip_conntrack_info ctinfo;
struct net *net;
/* See if there's one in the list already, including reverse:
NAT could have grabbed it without realizing, since we're
not in the hash. If there is, we lost race. */
- hlist_for_each_entry(h, n, &net->ct.hash[hash], hnode)
+ hlist_nulls_for_each_entry(h, n, &net->ct.hash[hash], hnnode)
if (nf_ct_tuple_equal(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple,
&h->tuple))
goto out;
- hlist_for_each_entry(h, n, &net->ct.hash[repl_hash], hnode)
+ hlist_nulls_for_each_entry(h, n, &net->ct.hash[repl_hash], hnnode)
if (nf_ct_tuple_equal(&ct->tuplehash[IP_CT_DIR_REPLY].tuple,
&h->tuple))
goto out;
/* Remove from unconfirmed list */
- hlist_del(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnode);
+ hlist_nulls_del_rcu(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode);
__nf_conntrack_hash_insert(ct, hash, repl_hash);
/* Timer relative to confirmation time, not original
{
struct net *net = nf_ct_net(ignored_conntrack);
struct nf_conntrack_tuple_hash *h;
- struct hlist_node *n;
+ struct hlist_nulls_node *n;
unsigned int hash = hash_conntrack(tuple);
/* Disable BHs the entire time since we need to disable them at
* least once for the stats anyway.
*/
rcu_read_lock_bh();
- hlist_for_each_entry_rcu(h, n, &net->ct.hash[hash], hnode) {
+ hlist_nulls_for_each_entry_rcu(h, n, &net->ct.hash[hash], hnnode) {
if (nf_ct_tuplehash_to_ctrack(h) != ignored_conntrack &&
nf_ct_tuple_equal(tuple, &h->tuple)) {
NF_CT_STAT_INC(net, found);
/* Use oldest entry, which is roughly LRU */
struct nf_conntrack_tuple_hash *h;
struct nf_conn *ct = NULL, *tmp;
- struct hlist_node *n;
+ struct hlist_nulls_node *n;
unsigned int i, cnt = 0;
int dropped = 0;
rcu_read_lock();
for (i = 0; i < nf_conntrack_htable_size; i++) {
- hlist_for_each_entry_rcu(h, n, &net->ct.hash[hash],
- hnode) {
+ hlist_nulls_for_each_entry_rcu(h, n, &net->ct.hash[hash],
+ hnnode) {
tmp = nf_ct_tuplehash_to_ctrack(h);
if (!test_bit(IPS_ASSURED_BIT, &tmp->status))
ct = tmp;
#ifdef CONFIG_NET_NS
ct->ct_net = net;
#endif
- INIT_RCU_HEAD(&ct->rcu);
return ct;
}
EXPORT_SYMBOL_GPL(nf_conntrack_alloc);
- static void nf_conntrack_free_rcu(struct rcu_head *head)
- {
- struct nf_conn *ct = container_of(head, struct nf_conn, rcu);
-
- nf_ct_ext_free(ct);
- kmem_cache_free(nf_conntrack_cachep, ct);
- }
-
void nf_conntrack_free(struct nf_conn *ct)
{
struct net *net = nf_ct_net(ct);
nf_ct_ext_destroy(ct);
atomic_dec(&net->ct.count);
- call_rcu(&ct->rcu, nf_conntrack_free_rcu);
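+ /* no RCU grace period needed before freeing: the slab cache
+ * uses SLAB_DESTROY_BY_RCU and lookups revalidate the tuple
+ * after taking a reference */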
+ nf_ct_ext_free(ct);
+ kmem_cache_free(nf_conntrack_cachep, ct);
}
EXPORT_SYMBOL_GPL(nf_conntrack_free);
}
/* Overload tuple linked list to put us in unconfirmed list. */
- hlist_add_head(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnode,
+ hlist_nulls_add_head_rcu(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode,
&net->ct.unconfirmed);
spin_unlock_bh(&nf_conntrack_lock);
NF_CT_ASSERT(skb->nfct);
ret = l4proto->packet(ct, skb, dataoff, ctinfo, pf, hooknum);
- if (ret < 0) {
+ if (ret <= 0) {
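+ /* NF_DROP is 0, so a handler returning a bare NF_DROP
+ * verdict also lands here */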
/* Invalid: inverse of the return code tells
* the netfilter core what to do */
pr_debug("nf_conntrack_in: Can't track with proto module\n");
return 0;
}
EXPORT_SYMBOL_GPL(nf_ct_port_nlattr_to_tuple);
+
+ int nf_ct_port_nlattr_tuple_size(void)
+ {
+ return nla_policy_len(nf_ct_port_nla_policy, CTA_PROTO_MAX + 1);
+ }
+ EXPORT_SYMBOL_GPL(nf_ct_port_nlattr_tuple_size);
#endif
/* Used by ipt_REJECT and ip6t_REJECT. */
{
struct nf_conntrack_tuple_hash *h;
struct nf_conn *ct;
- struct hlist_node *n;
+ struct hlist_nulls_node *n;
spin_lock_bh(&nf_conntrack_lock);
for (; *bucket < nf_conntrack_htable_size; (*bucket)++) {
- hlist_for_each_entry(h, n, &net->ct.hash[*bucket], hnode) {
+ hlist_nulls_for_each_entry(h, n, &net->ct.hash[*bucket], hnnode) {
ct = nf_ct_tuplehash_to_ctrack(h);
if (iter(ct, data))
goto found;
}
}
- hlist_for_each_entry(h, n, &net->ct.unconfirmed, hnode) {
+ hlist_nulls_for_each_entry(h, n, &net->ct.unconfirmed, hnnode) {
ct = nf_ct_tuplehash_to_ctrack(h);
if (iter(ct, data))
set_bit(IPS_DYING_BIT, &ct->status);
return 1;
}
- void nf_ct_free_hashtable(struct hlist_head *hash, int vmalloced, unsigned int size)
+ void nf_ct_free_hashtable(void *hash, int vmalloced, unsigned int size)
{
if (vmalloced)
vfree(hash);
}
}
- struct hlist_head *nf_ct_alloc_hashtable(unsigned int *sizep, int *vmalloced)
+ void *nf_ct_alloc_hashtable(unsigned int *sizep, int *vmalloced, int nulls)
{
- struct hlist_head *hash;
- unsigned int size, i;
+ struct hlist_nulls_head *hash;
+ unsigned int nr_slots, i;
+ size_t sz;
*vmalloced = 0;
- size = *sizep = roundup(*sizep, PAGE_SIZE / sizeof(struct hlist_head));
- hash = (void*)__get_free_pages(GFP_KERNEL|__GFP_NOWARN,
- get_order(sizeof(struct hlist_head)
- * size));
+ BUILD_BUG_ON(sizeof(struct hlist_nulls_head) != sizeof(struct hlist_head));
+ nr_slots = *sizep = roundup(*sizep, PAGE_SIZE / sizeof(struct hlist_nulls_head));
+ sz = nr_slots * sizeof(struct hlist_nulls_head);
+ hash = (void *)__get_free_pages(GFP_KERNEL | __GFP_NOWARN | __GFP_ZERO,
+ get_order(sz));
if (!hash) {
*vmalloced = 1;
printk(KERN_WARNING "nf_conntrack: falling back to vmalloc.\n");
- hash = vmalloc(sizeof(struct hlist_head) * size);
+ hash = __vmalloc(sz, GFP_KERNEL | __GFP_ZERO, PAGE_KERNEL);
}
- if (hash)
- for (i = 0; i < size; i++)
- INIT_HLIST_HEAD(&hash[i]);
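+ /* encode each bucket index in its nulls marker so a lookup
+ * can detect that it drifted onto another chain */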
+ if (hash && nulls)
+ for (i = 0; i < nr_slots; i++)
+ INIT_HLIST_NULLS_HEAD(&hash[i], i);
return hash;
}
int i, bucket, vmalloced, old_vmalloced;
unsigned int hashsize, old_size;
int rnd;
- struct hlist_head *hash, *old_hash;
+ struct hlist_nulls_head *hash, *old_hash;
struct nf_conntrack_tuple_hash *h;
/* On boot, we can set this without any fancy locking. */
if (!hashsize)
return -EINVAL;
- hash = nf_ct_alloc_hashtable(&hashsize, &vmalloced);
+ hash = nf_ct_alloc_hashtable(&hashsize, &vmalloced, 1);
if (!hash)
return -ENOMEM;
*/
spin_lock_bh(&nf_conntrack_lock);
for (i = 0; i < nf_conntrack_htable_size; i++) {
- while (!hlist_empty(&init_net.ct.hash[i])) {
- h = hlist_entry(init_net.ct.hash[i].first,
- struct nf_conntrack_tuple_hash, hnode);
- hlist_del_rcu(&h->hnode);
+ while (!hlist_nulls_empty(&init_net.ct.hash[i])) {
+ h = hlist_nulls_entry(init_net.ct.hash[i].first,
+ struct nf_conntrack_tuple_hash, hnnode);
+ hlist_nulls_del_rcu(&h->hnnode);
bucket = __hash_conntrack(&h->tuple, hashsize, rnd);
- hlist_add_head(&h->hnode, &hash[bucket]);
+ hlist_nulls_add_head_rcu(&h->hnnode, &hash[bucket]);
}
}
old_size = nf_conntrack_htable_size;
nf_conntrack_cachep = kmem_cache_create("nf_conntrack",
sizeof(struct nf_conn),
- 0, 0, NULL);
+ 0, SLAB_DESTROY_BY_RCU, NULL);
if (!nf_conntrack_cachep) {
printk(KERN_ERR "Unable to create nf_conn slab cache\n");
ret = -ENOMEM;
int ret;
atomic_set(&net->ct.count, 0);
- INIT_HLIST_HEAD(&net->ct.unconfirmed);
+ INIT_HLIST_NULLS_HEAD(&net->ct.unconfirmed, 0);
net->ct.stat = alloc_percpu(struct ip_conntrack_stat);
if (!net->ct.stat) {
ret = -ENOMEM;
if (ret < 0)
goto err_ecache;
net->ct.hash = nf_ct_alloc_hashtable(&nf_conntrack_htable_size,
- &net->ct.hash_vmalloc);
+ &net->ct.hash_vmalloc, 1);
if (!net->ct.hash) {
ret = -ENOMEM;
printk(KERN_ERR "Unable to create nf_conntrack_hash\n");
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/rculist.h>
+ #include <linux/rculist_nulls.h>
#include <linux/types.h>
#include <linux/timer.h>
#include <linux/skbuff.h>
}
#ifdef CONFIG_NF_CONNTRACK_EVENTS
+ /*
+ * The general structure of a ctnetlink event is
+ *
+ * CTA_TUPLE_ORIG
+ * <l3/l4-proto-attributes>
+ * CTA_TUPLE_REPLY
+ * <l3/l4-proto-attributes>
+ * CTA_ID
+ * ...
+ * CTA_PROTOINFO
+ * <l4-proto-attributes>
+ * CTA_TUPLE_MASTER
+ * <l3/l4-proto-attributes>
+ *
+ * Therefore the formula is
+ *
+ * size = sizeof(headers) + sizeof(generic_nlas) + 3 * sizeof(tuple_nlas)
+ * + sizeof(protoinfo_nlas)
+ */
+ static struct sk_buff *
+ ctnetlink_alloc_skb(const struct nf_conntrack_tuple *tuple, gfp_t gfp)
+ {
+ struct nf_conntrack_l3proto *l3proto;
+ struct nf_conntrack_l4proto *l4proto;
+ int len;
+
+ #define NLA_TYPE_SIZE(type) nla_total_size(sizeof(type))
+
+ /* proto-independent part */
+ len = NLMSG_SPACE(sizeof(struct nfgenmsg))
+ + 3 * nla_total_size(0) /* CTA_TUPLE_ORIG|REPL|MASTER */
+ + 3 * nla_total_size(0) /* CTA_TUPLE_IP */
+ + 3 * nla_total_size(0) /* CTA_TUPLE_PROTO */
+ + 3 * NLA_TYPE_SIZE(u_int8_t) /* CTA_PROTO_NUM */
+ + NLA_TYPE_SIZE(u_int32_t) /* CTA_ID */
+ + NLA_TYPE_SIZE(u_int32_t) /* CTA_STATUS */
+ #ifdef CONFIG_NF_CT_ACCT
+ + 2 * nla_total_size(0) /* CTA_COUNTERS_ORIG|REPL */
+ + 2 * NLA_TYPE_SIZE(u_int64_t) /* CTA_COUNTERS_PACKETS */
+ + 2 * NLA_TYPE_SIZE(u_int64_t) /* CTA_COUNTERS_BYTES */
+ #endif
+ + NLA_TYPE_SIZE(u_int32_t) /* CTA_TIMEOUT */
+ + nla_total_size(0) /* CTA_PROTOINFO */
+ + nla_total_size(0) /* CTA_HELP */
+ + nla_total_size(NF_CT_HELPER_NAME_LEN) /* CTA_HELP_NAME */
+ #ifdef CONFIG_NF_CONNTRACK_SECMARK
+ + NLA_TYPE_SIZE(u_int32_t) /* CTA_SECMARK */
+ #endif
+ #ifdef CONFIG_NF_NAT_NEEDED
+ + 2 * nla_total_size(0) /* CTA_NAT_SEQ_ADJ_ORIG|REPL */
+ + 2 * NLA_TYPE_SIZE(u_int32_t) /* CTA_NAT_SEQ_CORRECTION_POS */
+ + 2 * NLA_TYPE_SIZE(u_int32_t) /* CTA_NAT_SEQ_CORRECTION_BEFORE */
+ + 2 * NLA_TYPE_SIZE(u_int32_t) /* CTA_NAT_SEQ_CORRECTION_AFTER */
+ #endif
+ #ifdef CONFIG_NF_CONNTRACK_MARK
+ + NLA_TYPE_SIZE(u_int32_t) /* CTA_MARK */
+ #endif
+ ;
+
+ #undef NLA_TYPE_SIZE
+
+ rcu_read_lock();
+ l3proto = __nf_ct_l3proto_find(tuple->src.l3num);
+ len += l3proto->nla_size;
+
+ l4proto = __nf_ct_l4proto_find(tuple->src.l3num, tuple->dst.protonum);
+ len += l4proto->nla_size;
+ rcu_read_unlock();
+
+ return alloc_skb(len, gfp);
+ }
+
static int ctnetlink_conntrack_event(struct notifier_block *this,
unsigned long events, void *ptr)
{
if (!item->report && !nfnetlink_has_listeners(group))
return NOTIFY_DONE;
- skb = alloc_skb(NLMSG_GOODSIZE, GFP_ATOMIC);
+ skb = ctnetlink_alloc_skb(tuple(ct, IP_CT_DIR_ORIGINAL), GFP_ATOMIC);
if (!skb)
return NOTIFY_DONE;
{
struct nf_conn *ct, *last;
struct nf_conntrack_tuple_hash *h;
- struct hlist_node *n;
+ struct hlist_nulls_node *n;
struct nfgenmsg *nfmsg = NLMSG_DATA(cb->nlh);
u_int8_t l3proto = nfmsg->nfgen_family;
last = (struct nf_conn *)cb->args[1];
for (; cb->args[0] < nf_conntrack_htable_size; cb->args[0]++) {
restart:
- hlist_for_each_entry_rcu(h, n, &init_net.ct.hash[cb->args[0]],
- hnode) {
+ hlist_nulls_for_each_entry_rcu(h, n, &init_net.ct.hash[cb->args[0]],
+ hnnode) {
if (NF_CT_DIRECTION(h) != IP_CT_DIR_ORIGINAL)
continue;
ct = nf_ct_tuplehash_to_ctrack(h);
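+ /* hold a reference while dumping; with SLAB_DESTROY_BY_RCU
+ * the entry could otherwise be recycled under us */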
+ if (!atomic_inc_not_zero(&ct->ct_general.use))
+ continue;
/* Dump entries of a given L3 protocol number.
* If it is not specified, ie. l3proto == 0,
* then dump everything. */
if (l3proto && nf_ct_l3num(ct) != l3proto)
- continue;
+ goto releasect;
if (cb->args[1]) {
if (ct != last)
- continue;
+ goto releasect;
cb->args[1] = 0;
}
if (ctnetlink_fill_info(skb, NETLINK_CB(cb->skb).pid,
cb->nlh->nlmsg_seq,
IPCTNL_MSG_CT_NEW,
1, ct) < 0) {
- if (!atomic_inc_not_zero(&ct->ct_general.use))
- continue;
cb->args[1] = (unsigned long)ct;
goto out;
}
if (acct)
memset(acct, 0, sizeof(struct nf_conn_counter[IP_CT_DIR_MAX]));
}
+ releasect:
+ nf_ct_put(ct);
}
if (cb->args[1]) {
cb->args[1] = 0;
if (err < 0)
goto err2;
- master_h = __nf_conntrack_find(&init_net, &master);
+ master_h = nf_conntrack_find_get(&init_net, &master);
if (master_h == NULL) {
err = -ENOENT;
goto err2;
}
master_ct = nf_ct_tuplehash_to_ctrack(master_h);
- nf_conntrack_get(&master_ct->ct_general);
__set_bit(IPS_EXPECTED_BIT, &ct->status);
ct->master = master_ct;
}
goto out;
}
+ exp->class = 0;
exp->expectfn = NULL;
exp->flags = 0;
exp->master = ct;
#include <linux/skbuff.h>
#include <linux/ipv6.h>
#include <net/ip6_checksum.h>
+#include <asm/unaligned.h>
#include <net/tcp.h>
for (i = 0;
i < (opsize - TCPOLEN_SACK_BASE);
i += TCPOLEN_SACK_PERBLOCK) {
- tmp = ntohl(*((__be32 *)(ptr+i)+1));
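+ /* the SACK option block is not guaranteed to be 32-bit aligned */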
+ tmp = get_unaligned_be32((__be32 *)(ptr+i)+1);
if (after(tmp, *sack))
*sack = tmp;
*/
if (nf_ct_kill(ct))
return -NF_REPEAT;
- return -NF_DROP;
+ return NF_DROP;
}
/* Fall through */
case TCP_CONNTRACK_IGNORE:
nf_log_packet(pf, 0, skb, NULL, NULL, NULL,
"nf_ct_tcp: killing out of sync session ");
nf_ct_kill(ct);
- return -NF_DROP;
+ return NF_DROP;
}
ct->proto.tcp.last_index = index;
ct->proto.tcp.last_dir = dir;
return 0;
}
+
+ static int tcp_nlattr_size(void)
+ {
+ return nla_total_size(0) /* CTA_PROTOINFO_TCP */
+ + nla_policy_len(tcp_nla_policy, CTA_PROTOINFO_TCP_MAX + 1);
+ }
+
+ static int tcp_nlattr_tuple_size(void)
+ {
+ return nla_policy_len(nf_ct_port_nla_policy, CTA_PROTO_MAX + 1);
+ }
#endif
#ifdef CONFIG_SYSCTL
.error = tcp_error,
#if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE)
.to_nlattr = tcp_to_nlattr,
+ .nlattr_size = tcp_nlattr_size,
.from_nlattr = nlattr_to_tcp,
.tuple_to_nlattr = nf_ct_port_tuple_to_nlattr,
.nlattr_to_tuple = nf_ct_port_nlattr_to_tuple,
+ .nlattr_tuple_size = tcp_nlattr_tuple_size,
.nla_policy = nf_ct_port_nla_policy,
#endif
#ifdef CONFIG_SYSCTL
.error = tcp_error,
#if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE)
.to_nlattr = tcp_to_nlattr,
+ .nlattr_size = tcp_nlattr_size,
.from_nlattr = nlattr_to_tcp,
.tuple_to_nlattr = nf_ct_port_tuple_to_nlattr,
.nlattr_to_tuple = nf_ct_port_nlattr_to_tuple,
+ .nlattr_tuple_size = tcp_nlattr_tuple_size,
.nla_policy = nf_ct_port_nla_policy,
#endif
#ifdef CONFIG_SYSCTL