#include <linux/kernel.h>
#include <linux/list.h>
+#include <linux/list_nulls.h>
#include <linux/timer.h>
#include <linux/cache.h>
#include <linux/module.h>
#include <linux/security.h>
#include <linux/filter.h>
+#include <linux/rculist_nulls.h>
#include <asm/atomic.h>
#include <net/dst.h>
* @skc_reuse: %SO_REUSEADDR setting
* @skc_bound_dev_if: bound device index if != 0
* @skc_node: main hash linkage for various protocol lookup tables
+ * @skc_nulls_node: main hash linkage for UDP/UDP-Lite protocol lookup tables
* @skc_bind_node: bind hash linkage for various protocol lookup tables
* @skc_refcnt: reference count
* @skc_hash: hash value used with various protocol lookup tables
volatile unsigned char skc_state;
unsigned char skc_reuse;
int skc_bound_dev_if;
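+	/*
+	 * skc_node and skc_nulls_node overlay each other: a socket is
+	 * chained on either a plain hlist or an RCU "nulls" hlist, never
+	 * both at once, and the two node types share the same layout.
+	 */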
- struct hlist_node skc_node;
+ union {
+ struct hlist_node skc_node;
+ struct hlist_nulls_node skc_nulls_node;
+ };
struct hlist_node skc_bind_node;
atomic_t skc_refcnt;
unsigned int skc_hash;
#define sk_reuse __sk_common.skc_reuse
#define sk_bound_dev_if __sk_common.skc_bound_dev_if
#define sk_node __sk_common.skc_node
+#define sk_nulls_node __sk_common.skc_nulls_node
#define sk_bind_node __sk_common.skc_bind_node
#define sk_refcnt __sk_common.skc_refcnt
#define sk_hash __sk_common.skc_hash
int sk_sndbuf;
struct sk_buff_head sk_receive_queue;
struct sk_buff_head sk_write_queue;
+#ifdef CONFIG_NET_DMA
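+	/* skbs whose payload a NET_DMA engine is still copying to user space */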
struct sk_buff_head sk_async_wait_queue;
+#endif
int sk_wmem_queued;
int sk_forward_alloc;
gfp_t sk_allocation;
return hlist_empty(head) ? NULL : __sk_head(head);
}
+static inline struct sock *__sk_nulls_head(const struct hlist_nulls_head *head)
+{
+ return hlist_nulls_entry(head->first, struct sock, sk_nulls_node);
+}
+
+static inline struct sock *sk_nulls_head(const struct hlist_nulls_head *head)
+{
+ return hlist_nulls_empty(head) ? NULL : __sk_nulls_head(head);
+}
+
static inline struct sock *sk_next(const struct sock *sk)
{
return sk->sk_node.next ?
hlist_entry(sk->sk_node.next, struct sock, sk_node) : NULL;
}
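+/*
+ * On a nulls list the terminating "next" pointer is an encoded end marker
+ * rather than NULL, so the walk ends on is_a_nulls() instead of a NULL test.
+ */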
+static inline struct sock *sk_nulls_next(const struct sock *sk)
+{
+ return (!is_a_nulls(sk->sk_nulls_node.next)) ?
+ hlist_nulls_entry(sk->sk_nulls_node.next,
+ struct sock, sk_nulls_node) :
+ NULL;
+}
+
static inline int sk_unhashed(const struct sock *sk)
{
return hlist_unhashed(&sk->sk_node);
node->pprev = NULL;
}
+static __inline__ void sk_nulls_node_init(struct hlist_nulls_node *node)
+{
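+	/* NULL ->pprev means "not hashed", as for a plain hlist_node */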
+ node->pprev = NULL;
+}
+
static __inline__ void __sk_del_node(struct sock *sk)
{
__hlist_del(&sk->sk_node);
return rc;
}
-static __inline__ int __sk_del_node_init_rcu(struct sock *sk)
+static __inline__ int __sk_nulls_del_node_init_rcu(struct sock *sk)
{
if (sk_hashed(sk)) {
- hlist_del_init_rcu(&sk->sk_node);
+ hlist_nulls_del_init_rcu(&sk->sk_nulls_node);
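+		/*
+		 * hlist_nulls_del_init_rcu() zeroes ->pprev, so sk_unhashed()
+		 * is true again, but must not poison ->next: concurrent RCU
+		 * readers may still be walking through this node.
+		 */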
return 1;
}
return 0;
}
-static __inline__ int sk_del_node_init_rcu(struct sock *sk)
+static __inline__ int sk_nulls_del_node_init_rcu(struct sock *sk)
{
- int rc = __sk_del_node_init_rcu(sk);
+ int rc = __sk_nulls_del_node_init_rcu(sk);
if (rc) {
/* paranoid for a while -acme */
__sk_add_node(sk, list);
}
-static __inline__ void __sk_add_node_rcu(struct sock *sk, struct hlist_head *list)
+static __inline__ void __sk_nulls_add_node_rcu(struct sock *sk, struct hlist_nulls_head *list)
{
- hlist_add_head_rcu(&sk->sk_node, list);
+ hlist_nulls_add_head_rcu(&sk->sk_nulls_node, list);
}
-static __inline__ void sk_add_node_rcu(struct sock *sk, struct hlist_head *list)
+static __inline__ void sk_nulls_add_node_rcu(struct sock *sk, struct hlist_nulls_head *list)
{
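+	/* grab the hash table's reference before the socket is published */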
sock_hold(sk);
- __sk_add_node_rcu(sk, list);
+ __sk_nulls_add_node_rcu(sk, list);
}
static __inline__ void __sk_del_bind_node(struct sock *sk)
#define sk_for_each(__sk, node, list) \
hlist_for_each_entry(__sk, node, list, sk_node)
-#define sk_for_each_rcu_safenext(__sk, node, list, next) \
- hlist_for_each_entry_rcu_safenext(__sk, node, list, sk_node, next)
+#define sk_nulls_for_each(__sk, node, list) \
+ hlist_nulls_for_each_entry(__sk, node, list, sk_nulls_node)
+#define sk_nulls_for_each_rcu(__sk, node, list) \
+ hlist_nulls_for_each_entry_rcu(__sk, node, list, sk_nulls_node)
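+/*
+ * Illustrative sketch, not part of this patch: the read side pairs
+ * sk_nulls_for_each_rcu() with a check of the nulls end marker, because
+ * under SLAB_DESTROY_BY_RCU a socket can be freed, reused and rehashed
+ * onto another chain while we walk it.  Here head, slot and key_matches()
+ * are stand-ins for the caller's hash table and match logic:
+ *
+ *	rcu_read_lock();
+ * begin:
+ *	sk_nulls_for_each_rcu(sk, node, &head)
+ *		if (key_matches(sk) &&
+ *		    atomic_inc_not_zero(&sk->sk_refcnt))
+ *			goto found;
+ *	if (get_nulls_value(node) != slot)
+ *		goto begin;	// ended on another chain's marker: restart
+ *	sk = NULL;
+ * found:
+ *	rcu_read_unlock();
+ *
+ * A complete lookup must also recheck the keys after taking the
+ * reference, since the slab object may have been reused in the meantime.
+ */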
#define sk_for_each_from(__sk, node) \
if (__sk && ({ node = &(__sk)->sk_node; 1; })) \
hlist_for_each_entry_from(__sk, node, sk_node)
+#define sk_nulls_for_each_from(__sk, node) \
+ if (__sk && ({ node = &(__sk)->sk_nulls_node; 1; })) \
+ hlist_nulls_for_each_entry_from(__sk, node, sk_nulls_node)
#define sk_for_each_continue(__sk, node) \
if (__sk && ({ node = &(__sk)->sk_node; 1; })) \
hlist_for_each_entry_continue(__sk, node, sk_node)
/* Memory pressure */
void (*enter_memory_pressure)(struct sock *sk);
atomic_t *memory_allocated; /* Current allocated memory. */
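+	/*
+	 * A percpu_counter keeps socket create/destroy from bouncing one
+	 * shared cache line across CPUs; its approximate reads are good
+	 * enough for the memory pressure heuristics.
+	 */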
- atomic_t *sockets_allocated; /* Current number of sockets. */
+ struct percpu_counter *sockets_allocated; /* Current number of sockets. */
/*
* Pressure flag: try to collapse.
* Technical note: it is used by multiple contexts non atomically.
unsigned int obj_size;
int slab_flags;
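+	/* percpu for the same cache-bouncing reason as sockets_allocated */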
- atomic_t *orphan_count;
+ struct percpu_counter *orphan_count;
struct request_sock_ops *rsk_prot;
struct timewait_sock_ops *twsk_prot;
*/
#define sock_lock_init_class_and_name(sk, sname, skey, name, key) \
do { \
- sk->sk_lock.owned = 0; \
+ sk->sk_lock.owned = 0; \
init_waitqueue_head(&sk->sk_lock.wq); \
spin_lock_init(&(sk)->sk_lock.slock); \
debug_check_no_locks_freed((void *)&(sk)->sk_lock, \
unsigned long size,
int noblock,
int *errcode);
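+/*
+ * Like sock_alloc_send_skb(), but splits the buffer: header_len bytes in
+ * the linear area, up to data_len bytes in paged fragments.
+ */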
+extern struct sk_buff *sock_alloc_send_pskb(struct sock *sk,
+ unsigned long header_len,
+ unsigned long data_len,
+ int noblock,
+ int *errcode);
extern void *sock_kmalloc(struct sock *sk, int size,
gfp_t priority);
extern void sock_kfree_s(struct sock *sk, void *mem, int size);
static inline gfp_t gfp_any(void)
{
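+	/*
+	 * in_atomic() is unreliable here: with CONFIG_PREEMPT=n it returns
+	 * false inside spinlocked regions, turning this into a sleeping
+	 * GFP_KERNEL allocation in atomic context.  The atomic callers that
+	 * matter run in softirq (packet receive), which in_softirq()
+	 * detects on every configuration.
+	 */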
- return in_atomic() ? GFP_ATOMIC : GFP_KERNEL;
+ return in_softirq() ? GFP_ATOMIC : GFP_KERNEL;
}
static inline long sock_rcvtimeo(const struct sock *sk, int noblock)