#ifndef _SOCK_H
#define _SOCK_H
+#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/timer.h>
#include <linux/cache.h>
#include <linux/module.h>
#include <linux/lockdep.h>
#include <linux/netdevice.h>
+#include <linux/pcounter.h>
#include <linux/skbuff.h> /* struct sk_buff */
#include <linux/mm.h>
#include <linux/security.h>
* between user contexts and software interrupt processing, whereas the
* mini-semaphore synchronizes multiple users amongst themselves.
*/
-struct sock_iocb;
typedef struct {
spinlock_t slock;
- struct sock_iocb *owner;
+ int owned;
wait_queue_head_t wq;
/*
* We express the mutex-alike socket_lock semantics
struct sock;
struct proto;
+struct net;
/**
* struct sock_common - minimal network layer representation of sockets
* @skc_refcnt: reference count
* @skc_hash: hash value used with various protocol lookup tables
* @skc_prot: protocol handlers inside a network family
+ * @skc_net: reference to the network namespace of this socket
*
* This is the minimal network layer representation of sockets, the header
* for struct sock and struct inet_timewait_sock.
atomic_t skc_refcnt;
unsigned int skc_hash;
struct proto *skc_prot;
+ struct net *skc_net;
};
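/*
 * Illustrative sketch (not part of this header): struct sock and
 * struct inet_timewait_sock both start with a struct sock_common, so
 * lookup code that only needs the fields above can operate on either
 * type through this shared header:
 *
 *	struct sock {
 *		struct sock_common	__sk_common;
 *		...
 *	};
 */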
/**
* @sk_forward_alloc: space allocated forward
* @sk_allocation: allocation mode
* @sk_sndbuf: size of send buffer in bytes
- * @sk_flags: %SO_LINGER (l_onoff), %SO_BROADCAST, %SO_KEEPALIVE, %SO_OOBINLINE settings
+ * @sk_flags: %SO_LINGER (l_onoff), %SO_BROADCAST, %SO_KEEPALIVE,
+ * %SO_OOBINLINE settings
* @sk_no_check: %SO_NO_CHECK setting, whether or not to checksum packets
* @sk_route_caps: route capabilities (e.g. %NETIF_F_TSO)
* @sk_gso_type: GSO type (e.g. %SKB_GSO_TCPV4)
* @sk_backlog: always used with the per-socket spinlock held
* @sk_callback_lock: used with the callbacks at the end of this struct
* @sk_error_queue: rarely used
- * @sk_prot_creator: sk_prot of original sock creator (see ipv6_setsockopt, IPV6_ADDRFORM for instance)
+ * @sk_prot_creator: sk_prot of original sock creator (see ipv6_setsockopt,
+ * IPV6_ADDRFORM for instance)
* @sk_err: last error
- * @sk_err_soft: errors that don't cause failure but are the cause of a persistent failure not just 'timed out'
+ * @sk_err_soft: errors that don't cause failure but are the cause of a
+ * persistent failure, not just 'timed out'
+ * @sk_drops: raw drops counter
* @sk_ack_backlog: current listen backlog
* @sk_max_ack_backlog: listen backlog set in listen()
* @sk_priority: %SO_PRIORITY setting
#define sk_refcnt __sk_common.skc_refcnt
#define sk_hash __sk_common.skc_hash
#define sk_prot __sk_common.skc_prot
+#define sk_net __sk_common.skc_net
unsigned char sk_shutdown : 2,
sk_no_check : 2,
sk_userlocks : 4;
atomic_t sk_rmem_alloc;
atomic_t sk_wmem_alloc;
atomic_t sk_omem_alloc;
+ int sk_sndbuf;
struct sk_buff_head sk_receive_queue;
struct sk_buff_head sk_write_queue;
struct sk_buff_head sk_async_wait_queue;
int sk_wmem_queued;
int sk_forward_alloc;
gfp_t sk_allocation;
- int sk_sndbuf;
int sk_route_caps;
int sk_gso_type;
int sk_rcvlowat;
rwlock_t sk_callback_lock;
int sk_err,
sk_err_soft;
+ atomic_t sk_drops;
unsigned short sk_ack_backlog;
unsigned short sk_max_ack_backlog;
__u32 sk_priority;
__u32 sk_sndmsg_off;
int sk_write_pending;
void *sk_security;
+ __u32 sk_mark;
+ /* XXX 4-byte hole on 64 bit */
void (*sk_state_change)(struct sock *sk);
void (*sk_data_ready)(struct sock *sk, int bytes);
void (*sk_write_space)(struct sock *sk);
SOCK_USE_WRITE_QUEUE, /* whether to call sk->sk_write_space in sock_wfree */
SOCK_DBG, /* %SO_DEBUG setting */
SOCK_RCVTSTAMP, /* %SO_TIMESTAMP setting */
+ SOCK_RCVTSTAMPNS, /* %SO_TIMESTAMPNS setting */
SOCK_LOCALROUTE, /* route locally only, %SO_DONTROUTE setting */
SOCK_QUEUE_SHRUNK, /* write queue has been shrunk recently */
};
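/*
 * Usage sketch: the sock_set_flag()/sock_reset_flag()/sock_flag()
 * helpers defined later in this header wrap bit operations on
 * sk->sk_flags, e.g.
 *
 *	sock_set_flag(sk, SOCK_DBG);
 *	if (sock_flag(sk, SOCK_RCVTSTAMP))
 *		...
 */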
*/
static inline int sk_stream_min_wspace(struct sock *sk)
{
- return sk->sk_wmem_queued / 2;
+ return sk->sk_wmem_queued >> 1;
}
static inline int sk_stream_wspace(struct sock *sk)
return sk->sk_wmem_queued < sk->sk_sndbuf;
}
-extern void sk_stream_rfree(struct sk_buff *skb);
-
-static inline void sk_stream_set_owner_r(struct sk_buff *skb, struct sock *sk)
-{
- skb->sk = sk;
- skb->destructor = sk_stream_rfree;
- atomic_add(skb->truesize, &sk->sk_rmem_alloc);
- sk->sk_forward_alloc -= skb->truesize;
-}
-
-static inline void sk_stream_free_skb(struct sock *sk, struct sk_buff *skb)
-{
- skb_truesize_check(skb);
- sock_set_flag(sk, SOCK_QUEUE_SHRUNK);
- sk->sk_wmem_queued -= skb->truesize;
- sk->sk_forward_alloc += skb->truesize;
- __kfree_skb(skb);
-}
-
/* The per-socket spinlock must be held here. */
static inline void sk_add_backlog(struct sock *sk, struct sk_buff *skb)
{
skb->next = NULL;
}
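/*
 * Typical caller pattern (sketch, as in the protocol receive paths):
 * softirq context takes the per-socket spinlock and queues to the
 * backlog only while a user context owns the socket; release_sock()
 * later replays the backlog:
 *
 *	bh_lock_sock(sk);
 *	if (!sock_owned_by_user(sk))
 *		ret = tcp_v4_do_rcv(sk, skb);
 *	else
 *		sk_add_backlog(sk, skb);
 *	bh_unlock_sock(sk);
 */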
-#define sk_wait_event(__sk, __timeo, __condition) \
-({ int rc; \
- release_sock(__sk); \
- rc = __condition; \
- if (!rc) { \
- *(__timeo) = schedule_timeout(*(__timeo)); \
- } \
- lock_sock(__sk); \
- rc = __condition; \
- rc; \
-})
+#define sk_wait_event(__sk, __timeo, __condition) \
+ ({ int __rc; \
+ release_sock(__sk); \
+ __rc = __condition; \
+ if (!__rc) { \
+ *(__timeo) = schedule_timeout(*(__timeo)); \
+ } \
+ lock_sock(__sk); \
+ __rc = __condition; \
+ __rc; \
+ })
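/*
 * Usage sketch (assuming a sk_wait_data()-style caller): the condition
 * is evaluated with the socket lock dropped, the caller sleeps on
 * sk->sk_sleep, and the condition is re-checked once the lock has been
 * re-acquired:
 *
 *	prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE);
 *	rc = sk_wait_event(sk, &timeo,
 *			   !skb_queue_empty(&sk->sk_receive_queue));
 *	finish_wait(sk->sk_sleep, &wait);
 */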
extern int sk_stream_wait_connect(struct sock *sk, long *timeo_p);
extern int sk_stream_wait_memory(struct sock *sk, long *timeo_p);
struct request_sock_ops;
struct timewait_sock_ops;
+struct inet_hashinfo;
/* Networking protocol blocks we attach to sockets.
* socket layer -> transport layer interface
void (*unhash)(struct sock *sk);
int (*get_port)(struct sock *sk, unsigned short snum);
+ /* Keeping track of sockets in use */
+#ifdef CONFIG_PROC_FS
+ struct pcounter inuse;
+#endif
+
/* Memory pressure */
void (*enter_memory_pressure)(void);
atomic_t *memory_allocated; /* Current allocated memory. */
/*
* Pressure flag: try to collapse.
* Technical note: it is used by multiple contexts non-atomically.
- * All the sk_stream_mem_schedule() is of this nature: accounting
+ * All of __sk_mem_schedule() is of this nature: accounting
* is strict, actions are advisory and have some latency.
*/
int *memory_pressure;
struct request_sock_ops *rsk_prot;
struct timewait_sock_ops *twsk_prot;
+ struct inet_hashinfo *hashinfo;
+
struct module *owner;
char name[32];
#ifdef SOCK_REFCNT_DEBUG
atomic_t socks;
#endif
- struct {
- int inuse;
- u8 __pad[SMP_CACHE_BYTES - sizeof(int)];
- } stats[NR_CPUS];
};
extern int proto_register(struct proto *prot, int alloc_slab);
#define sk_refcnt_debug_release(sk) do { } while (0)
#endif /* SOCK_REFCNT_DEBUG */
+
+#ifdef CONFIG_PROC_FS
+# define DEFINE_PROTO_INUSE(NAME) DEFINE_PCOUNTER(NAME)
+# define REF_PROTO_INUSE(NAME) PCOUNTER_MEMBER_INITIALIZER(NAME, .inuse)
/* Called with local bh disabled */
-static __inline__ void sock_prot_inc_use(struct proto *prot)
+static inline void sock_prot_inuse_add(struct proto *prot, int inc)
{
- prot->stats[smp_processor_id()].inuse++;
+ pcounter_add(&prot->inuse, inc);
}
-
-static __inline__ void sock_prot_dec_use(struct proto *prot)
+static inline int sock_prot_inuse_init(struct proto *proto)
+{
+ return pcounter_alloc(&proto->inuse);
+}
+static inline int sock_prot_inuse_get(struct proto *proto)
+{
+ return pcounter_getval(&proto->inuse);
+}
+static inline void sock_prot_inuse_free(struct proto *proto)
+{
+ pcounter_free(&proto->inuse);
+}
+#else
+# define DEFINE_PROTO_INUSE(NAME)
+# define REF_PROTO_INUSE(NAME)
+static inline void sock_prot_inuse_add(struct proto *prot, int inc)
{
- prot->stats[smp_processor_id()].inuse--;
}
+static inline int sock_prot_inuse_init(struct proto *proto)
+{
+ return 0;
+}
+static inline void sock_prot_inuse_free(struct proto *proto)
+{
+}
+#endif
+
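/*
 * Usage sketch (assuming a protocol wired up like tcp_prot): the
 * per-CPU counter is declared next to the struct proto definition and
 * referenced from its initializer:
 *
 *	DEFINE_PROTO_INUSE(tcp)
 *
 *	struct proto tcp_prot = {
 *		.name		= "TCP",
 *		...
 *		REF_PROTO_INUSE(tcp)
 *	};
 *
 * The (un)hash paths then call sock_prot_inuse_add(prot, 1) and
 * sock_prot_inuse_add(prot, -1) with local bh disabled, while /proc
 * readers sum the counter via sock_prot_inuse_get().
 */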
/* With per-bucket locks this operation is not atomic, so that
 * this version is no worse.
return &container_of(socket, struct socket_alloc, socket)->vfs_inode;
}
-extern void __sk_stream_mem_reclaim(struct sock *sk);
-extern int sk_stream_mem_schedule(struct sock *sk, int size, int kind);
+/*
+ * Functions for memory accounting
+ */
+extern int __sk_mem_schedule(struct sock *sk, int size, int kind);
+extern void __sk_mem_reclaim(struct sock *sk);
-#define SK_STREAM_MEM_QUANTUM ((int)PAGE_SIZE)
+#define SK_MEM_QUANTUM ((int)PAGE_SIZE)
+#define SK_MEM_QUANTUM_SHIFT ilog2(SK_MEM_QUANTUM)
+#define SK_MEM_SEND 0
+#define SK_MEM_RECV 1
-static inline int sk_stream_pages(int amt)
+static inline int sk_mem_pages(int amt)
{
- return (amt + SK_STREAM_MEM_QUANTUM - 1) / SK_STREAM_MEM_QUANTUM;
+ return (amt + SK_MEM_QUANTUM - 1) >> SK_MEM_QUANTUM_SHIFT;
}
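/*
 * e.g. with 4096-byte pages: sk_mem_pages(1) == 1, sk_mem_pages(4096) == 1
 * and sk_mem_pages(4097) == 2; amounts are rounded up to whole
 * SK_MEM_QUANTUM units.
 */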
-static inline void sk_stream_mem_reclaim(struct sock *sk)
+static inline int sk_has_account(struct sock *sk)
{
- if (sk->sk_forward_alloc >= SK_STREAM_MEM_QUANTUM)
- __sk_stream_mem_reclaim(sk);
+ /* return true if protocol supports memory accounting */
+ return !!sk->sk_prot->memory_allocated;
}
-static inline int sk_stream_rmem_schedule(struct sock *sk, struct sk_buff *skb)
+static inline int sk_wmem_schedule(struct sock *sk, int size)
{
- return (int)skb->truesize <= sk->sk_forward_alloc ||
- sk_stream_mem_schedule(sk, skb->truesize, 1);
+ if (!sk_has_account(sk))
+ return 1;
+ return size <= sk->sk_forward_alloc ||
+ __sk_mem_schedule(sk, size, SK_MEM_SEND);
}
-static inline int sk_stream_wmem_schedule(struct sock *sk, int size)
+static inline int sk_rmem_schedule(struct sock *sk, int size)
{
+ if (!sk_has_account(sk))
+ return 1;
return size <= sk->sk_forward_alloc ||
- sk_stream_mem_schedule(sk, size, 0);
+ __sk_mem_schedule(sk, size, SK_MEM_RECV);
+}
+
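/*
 * Typical receive-path usage (sketch): check the quota before queueing,
 * then charge through skb_set_owner_r() (see below):
 *
 *	if (!sk_rmem_schedule(sk, skb->truesize))
 *		goto drop;
 *	skb_set_owner_r(skb, sk);
 *	skb_queue_tail(&sk->sk_receive_queue, skb);
 */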
+static inline void sk_mem_reclaim(struct sock *sk)
+{
+ if (!sk_has_account(sk))
+ return;
+ if (sk->sk_forward_alloc >= SK_MEM_QUANTUM)
+ __sk_mem_reclaim(sk);
+}
+
+static inline void sk_mem_reclaim_partial(struct sock *sk)
+{
+ if (!sk_has_account(sk))
+ return;
+ if (sk->sk_forward_alloc > SK_MEM_QUANTUM)
+ __sk_mem_reclaim(sk);
+}
+
+static inline void sk_mem_charge(struct sock *sk, int size)
+{
+ if (!sk_has_account(sk))
+ return;
+ sk->sk_forward_alloc -= size;
+}
+
+static inline void sk_mem_uncharge(struct sock *sk, int size)
+{
+ if (!sk_has_account(sk))
+ return;
+ sk->sk_forward_alloc += size;
+}
+
+static inline void sk_wmem_free_skb(struct sock *sk, struct sk_buff *skb)
+{
+ skb_truesize_check(skb);
+ sock_set_flag(sk, SOCK_QUEUE_SHRUNK);
+ sk->sk_wmem_queued -= skb->truesize;
+ sk_mem_uncharge(sk, skb->truesize);
+ __kfree_skb(skb);
}
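/*
 * Send-path counterpart (sketch): after a successful sk_wmem_schedule(),
 * a queued skb is charged against the socket and later released through
 * sk_wmem_free_skb():
 *
 *	if (!sk_wmem_schedule(sk, skb->truesize))
 *		goto fail;
 *	sk->sk_wmem_queued += skb->truesize;
 *	sk_mem_charge(sk, skb->truesize);
 */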
/* Used by processes to "lock" a socket state, so that
* Since ~2.3.5 it is also an exclusive sleep lock serializing
* accesses from user process context.
*/
-#define sock_owned_by_user(sk) ((sk)->sk_lock.owner)
+#define sock_owned_by_user(sk) ((sk)->sk_lock.owned)
/*
* Macro so as to not evaluate some arguments when
*/
#define sock_lock_init_class_and_name(sk, sname, skey, name, key) \
do { \
- sk->sk_lock.owner = NULL; \
+ sk->sk_lock.owned = 0; \
init_waitqueue_head(&sk->sk_lock.wq); \
spin_lock_init(&(sk)->sk_lock.slock); \
debug_check_no_locks_freed((void *)&(sk)->sk_lock, \
lockdep_init_map(&(sk)->sk_lock.dep_map, (name), (key), 0); \
} while (0)
-extern void FASTCALL(lock_sock_nested(struct sock *sk, int subclass));
+extern void lock_sock_nested(struct sock *sk, int subclass);
static inline void lock_sock(struct sock *sk)
{
lock_sock_nested(sk, 0);
}
-extern void FASTCALL(release_sock(struct sock *sk));
+extern void release_sock(struct sock *sk);
/* BH context may only use the following locking interface. */
#define bh_lock_sock(__sk) spin_lock(&((__sk)->sk_lock.slock))
SINGLE_DEPTH_NESTING)
#define bh_unlock_sock(__sk) spin_unlock(&((__sk)->sk_lock.slock))
-extern struct sock *sk_alloc(int family,
+extern struct sock *sk_alloc(struct net *net, int family,
gfp_t priority,
- struct proto *prot, int zero_it);
+ struct proto *prot);
extern void sk_free(struct sock *sk);
extern struct sock *sk_clone(const struct sock *sk,
const gfp_t priority);
return err;
rcu_read_lock_bh();
- filter = sk->sk_filter;
+ filter = rcu_dereference(sk->sk_filter);
if (filter) {
unsigned int pkt_len = sk_run_filter(skb, filter->insns,
filter->len);
}
/**
- * sk_filter_rcu_free: Free a socket filter
- * @rcu: rcu_head that contains the sk_filter to free
- */
-static inline void sk_filter_rcu_free(struct rcu_head *rcu)
-{
- struct sk_filter *fp = container_of(rcu, struct sk_filter, rcu);
- kfree(fp);
-}
-
-/**
* sk_filter_release: Release a socket filter
* @fp: filter to release
*
* Release the filter and free it once the last reference is dropped.
*/
-static inline void sk_filter_release(struct sock *sk, struct sk_filter *fp)
+static inline void sk_filter_release(struct sk_filter *fp)
+{
+ if (atomic_dec_and_test(&fp->refcnt))
+ kfree(fp);
+}
+
+static inline void sk_filter_uncharge(struct sock *sk, struct sk_filter *fp)
{
unsigned int size = sk_filter_len(fp);
atomic_sub(size, &sk->sk_omem_alloc);
-
- if (atomic_dec_and_test(&fp->refcnt))
- call_rcu_bh(&fp->rcu, sk_filter_rcu_free);
+ sk_filter_release(fp);
}
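/*
 * Lifetime sketch (assuming a sk_attach_filter()-style caller): a new
 * filter is published under RCU and the old one has its memory charge
 * returned before being released:
 *
 *	rcu_read_lock_bh();
 *	old_fp = rcu_dereference(sk->sk_filter);
 *	rcu_assign_pointer(sk->sk_filter, fp);
 *	rcu_read_unlock_bh();
 *	if (old_fp)
 *		sk_filter_uncharge(sk, old_fp);
 */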
static inline void sk_filter_charge(struct sock *sk, struct sk_filter *fp)
write_unlock_bh(&sk->sk_callback_lock);
}
-static inline void sock_copy(struct sock *nsk, const struct sock *osk)
-{
-#ifdef CONFIG_SECURITY_NETWORK
- void *sptr = nsk->sk_security;
-#endif
-
- memcpy(nsk, osk, osk->sk_prot->obj_size);
-#ifdef CONFIG_SECURITY_NETWORK
- nsk->sk_security = sptr;
- security_sk_clone(osk, nsk);
-#endif
-}
-
extern int sock_i_uid(struct sock *sk);
extern unsigned long sock_i_ino(struct sock *sk);
return net_gso_ok(sk->sk_route_caps, sk->sk_gso_type);
}
-static inline void sk_setup_caps(struct sock *sk, struct dst_entry *dst)
-{
- __sk_dst_set(sk, dst);
- sk->sk_route_caps = dst->dev->features;
- if (sk->sk_route_caps & NETIF_F_GSO)
- sk->sk_route_caps |= NETIF_F_GSO_MASK;
- if (sk_can_gso(sk)) {
- if (dst->header_len)
- sk->sk_route_caps &= ~NETIF_F_GSO_MASK;
- else
- sk->sk_route_caps |= NETIF_F_SG | NETIF_F_HW_CSUM;
- }
-}
-
-static inline void sk_charge_skb(struct sock *sk, struct sk_buff *skb)
-{
- sk->sk_wmem_queued += skb->truesize;
- sk->sk_forward_alloc -= skb->truesize;
-}
+extern void sk_setup_caps(struct sock *sk, struct dst_entry *dst);
static inline int skb_copy_to_page(struct sock *sk, char __user *from,
struct sk_buff *skb, struct page *page,
skb->data_len += copy;
skb->truesize += copy;
sk->sk_wmem_queued += copy;
- sk->sk_forward_alloc -= copy;
+ sk_mem_charge(sk, copy);
return 0;
}
skb->sk = sk;
skb->destructor = sock_rfree;
atomic_add(skb->truesize, &sk->sk_rmem_alloc);
+ sk_mem_charge(sk, skb->truesize);
}
extern void sk_reset_timer(struct sock *sk, struct timer_list* timer,
static inline void sk_stream_moderate_sndbuf(struct sock *sk)
{
if (!(sk->sk_userlocks & SOCK_SNDBUF_LOCK)) {
- sk->sk_sndbuf = min(sk->sk_sndbuf, sk->sk_wmem_queued / 2);
+ sk->sk_sndbuf = min(sk->sk_sndbuf, sk->sk_wmem_queued >> 1);
sk->sk_sndbuf = max(sk->sk_sndbuf, SOCK_MIN_SNDBUF);
}
}
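/*
 * e.g. a socket with sk_sndbuf = 65536 but only 16384 bytes queued is
 * moderated down to an 8192-byte send buffer (never below
 * SOCK_MIN_SNDBUF), unless the user pinned the size via SO_SNDBUF.
 */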
-static inline struct sk_buff *sk_stream_alloc_pskb(struct sock *sk,
- int size, int mem,
- gfp_t gfp)
-{
- struct sk_buff *skb;
- int hdr_len;
-
- hdr_len = SKB_DATA_ALIGN(sk->sk_prot->max_header);
- skb = alloc_skb_fclone(size + hdr_len, gfp);
- if (skb) {
- skb->truesize += mem;
- if (sk_stream_wmem_schedule(sk, skb->truesize)) {
- skb_reserve(skb, hdr_len);
- return skb;
- }
- __kfree_skb(skb);
- } else {
- sk->sk_prot->enter_memory_pressure();
- sk_stream_moderate_sndbuf(sk);
- }
- return NULL;
-}
-
-static inline struct sk_buff *sk_stream_alloc_skb(struct sock *sk,
- int size,
- gfp_t gfp)
-{
- return sk_stream_alloc_pskb(sk, size, 0, gfp);
-}
+struct sk_buff *sk_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp);
static inline struct page *sk_stream_alloc_page(struct sock *sk)
{
*/
static inline int sock_writeable(const struct sock *sk)
{
- return atomic_read(&sk->sk_wmem_alloc) < (sk->sk_sndbuf / 2);
+ return atomic_read(&sk->sk_wmem_alloc) < (sk->sk_sndbuf >> 1);
}
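/*
 * e.g. with sk_sndbuf = 32768 the socket stays writable while fewer
 * than 16384 bytes of write memory are allocated.
 */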
static inline gfp_t gfp_any(void)
return timeo == MAX_SCHEDULE_TIMEOUT ? -ERESTARTSYS : -EINTR;
}
+extern void __sock_recv_timestamp(struct msghdr *msg, struct sock *sk,
+ struct sk_buff *skb);
+
static inline void
sock_recv_timestamp(struct msghdr *msg, struct sock *sk, struct sk_buff *skb)
{
ktime_t kt = skb->tstamp;
- if (sock_flag(sk, SOCK_RCVTSTAMP)) {
- struct timeval tv;
- /* Race occurred between timestamp enabling and packet
- receiving. Fill in the current time for now. */
- if (kt.tv64 == 0)
- kt = ktime_get_real();
- skb->tstamp = kt;
- tv = ktime_to_timeval(kt);
- put_cmsg(msg, SOL_SOCKET, SO_TIMESTAMP, sizeof(tv), &tv);
- } else
+ if (sock_flag(sk, SOCK_RCVTSTAMP))
+ __sock_recv_timestamp(msg, sk, skb);
+ else
sk->sk_stamp = kt;
}
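/*
 * __sock_recv_timestamp() (in net/core/sock.c) chooses the cmsg format
 * from the socket flags: with SOCK_RCVTSTAMPNS set it delivers a struct
 * timespec via SO_TIMESTAMPNS, otherwise a struct timeval via
 * SO_TIMESTAMP.
 */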
lock_sock(sk); \
}
-static inline void sock_valbool_flag(struct sock *sk, int bit, int valbool)
-{
- if (valbool)
- sock_set_flag(sk, bit);
- else
- sock_reset_flag(sk, bit);
-}
-
extern __u32 sysctl_wmem_max;
extern __u32 sysctl_rmem_max;
-#ifdef CONFIG_NET
-int siocdevprivate_ioctl(unsigned int fd, unsigned int cmd, unsigned long arg);
-#else
-static inline int siocdevprivate_ioctl(unsigned int fd, unsigned int cmd, unsigned long arg)
-{
- return -ENODEV;
-}
-#endif
-
extern void sk_init(void);
-#ifdef CONFIG_SYSCTL
-extern struct ctl_table core_table[];
-#endif
-
extern int sysctl_optmem_max;
extern __u32 sysctl_wmem_default;