X-Git-Url: http://ftp.safe.ca/?a=blobdiff_plain;f=include%2Fnet%2Fsock.h;h=dc42b44c2aa14b409fc9307afb6e7458773c14aa;hb=48e4043d4529523cbc7fa8dd745bd8e2c45ce1d3;hp=324b3ea233d6000f04e4d5894b799535bb397e6e;hpb=a5b5bb9a053a973c23b867738c074acb3e80c0a0;p=safe%2Fjmp%2Flinux-2.6 diff --git a/include/net/sock.h b/include/net/sock.h index 324b3ea..dc42b44 100644 --- a/include/net/sock.h +++ b/include/net/sock.h @@ -40,6 +40,7 @@ #ifndef _SOCK_H #define _SOCK_H +#include #include #include #include @@ -47,6 +48,7 @@ #include #include #include /* struct sk_buff */ +#include #include #include @@ -67,17 +69,20 @@ #define SOCK_DEBUG(sk, msg...) do { if ((sk) && sock_flag((sk), SOCK_DBG)) \ printk(KERN_DEBUG msg); } while (0) #else -#define SOCK_DEBUG(sk, msg...) do { } while (0) +/* Validate arguments and do nothing */ +static void inline int __attribute__ ((format (printf, 2, 3))) +SOCK_DEBUG(struct sock *sk, const char *msg, ...) +{ +} #endif /* This is the per-socket lock. The spinlock provides a synchronization * between user contexts and software interrupt processing, whereas the * mini-semaphore synchronizes multiple users amongst themselves. */ -struct sock_iocb; typedef struct { spinlock_t slock; - struct sock_iocb *owner; + int owned; wait_queue_head_t wq; /* * We express the mutex-alike socket_lock semantics @@ -92,6 +97,7 @@ typedef struct { struct sock; struct proto; +struct net; /** * struct sock_common - minimal network layer representation of sockets @@ -104,6 +110,7 @@ struct proto; * @skc_refcnt: reference count * @skc_hash: hash value used with various protocol lookup tables * @skc_prot: protocol handlers inside a network family + * @skc_net: reference to the network namespace of this socket * * This is the minimal network layer representation of sockets, the header * for struct sock and struct inet_timewait_sock. @@ -118,6 +125,9 @@ struct sock_common { atomic_t skc_refcnt; unsigned int skc_hash; struct proto *skc_prot; +#ifdef CONFIG_NET_NS + struct net *skc_net; +#endif }; /** @@ -141,17 +151,22 @@ struct sock_common { * @sk_forward_alloc: space allocated forward * @sk_allocation: allocation mode * @sk_sndbuf: size of send buffer in bytes - * @sk_flags: %SO_LINGER (l_onoff), %SO_BROADCAST, %SO_KEEPALIVE, %SO_OOBINLINE settings + * @sk_flags: %SO_LINGER (l_onoff), %SO_BROADCAST, %SO_KEEPALIVE, + * %SO_OOBINLINE settings * @sk_no_check: %SO_NO_CHECK setting, wether or not checkup packets * @sk_route_caps: route capabilities (e.g. %NETIF_F_TSO) * @sk_gso_type: GSO type (e.g. 
%SKB_GSO_TCPV4) + * @sk_gso_max_size: Maximum GSO segment size to build * @sk_lingertime: %SO_LINGER l_linger setting * @sk_backlog: always used with the per-socket spinlock held * @sk_callback_lock: used with the callbacks in the end of this struct * @sk_error_queue: rarely used - * @sk_prot_creator: sk_prot of original sock creator (see ipv6_setsockopt, IPV6_ADDRFORM for instance) + * @sk_prot_creator: sk_prot of original sock creator (see ipv6_setsockopt, + * IPV6_ADDRFORM for instance) * @sk_err: last error - * @sk_err_soft: errors that don't cause failure but are the cause of a persistent failure not just 'timed out' + * @sk_err_soft: errors that don't cause failure but are the cause of a + * persistent failure not just 'timed out' + * @sk_drops: raw drops counter * @sk_ack_backlog: current listen backlog * @sk_max_ack_backlog: listen backlog set in listen() * @sk_priority: %SO_PRIORITY setting @@ -171,6 +186,7 @@ struct sock_common { * @sk_sndmsg_off: cached offset for sendmsg * @sk_send_head: front of stuff to transmit * @sk_security: used by security modules + * @sk_mark: generic packet mark * @sk_write_pending: a write to stream socket waits to start * @sk_state_change: callback to indicate change in the state of the sock * @sk_data_ready: callback to indicate there is data to be processed @@ -194,6 +210,7 @@ struct sock { #define sk_refcnt __sk_common.skc_refcnt #define sk_hash __sk_common.skc_hash #define sk_prot __sk_common.skc_prot +#define sk_net __sk_common.skc_net unsigned char sk_shutdown : 2, sk_no_check : 2, sk_userlocks : 4; @@ -201,6 +218,15 @@ struct sock { unsigned short sk_type; int sk_rcvbuf; socket_lock_t sk_lock; + /* + * The backlog queue is special, it is always used with + * the per-socket spinlock held and requires low latency + * access. Therefore we special case it's implementation. + */ + struct { + struct sk_buff *head; + struct sk_buff *tail; + } sk_backlog; wait_queue_head_t *sk_sleep; struct dst_entry *sk_dst_cache; struct xfrm_policy *sk_policy[2]; @@ -208,32 +234,25 @@ struct sock { atomic_t sk_rmem_alloc; atomic_t sk_wmem_alloc; atomic_t sk_omem_alloc; + int sk_sndbuf; struct sk_buff_head sk_receive_queue; struct sk_buff_head sk_write_queue; struct sk_buff_head sk_async_wait_queue; int sk_wmem_queued; int sk_forward_alloc; gfp_t sk_allocation; - int sk_sndbuf; int sk_route_caps; int sk_gso_type; + unsigned int sk_gso_max_size; int sk_rcvlowat; unsigned long sk_flags; unsigned long sk_lingertime; - /* - * The backlog queue is special, it is always used with - * the per-socket spinlock held and requires low latency - * access. Therefore we special case it's implementation. 
- */ - struct { - struct sk_buff *head; - struct sk_buff *tail; - } sk_backlog; struct sk_buff_head sk_error_queue; struct proto *sk_prot_creator; rwlock_t sk_callback_lock; int sk_err, sk_err_soft; + atomic_t sk_drops; unsigned short sk_ack_backlog; unsigned short sk_max_ack_backlog; __u32 sk_priority; @@ -243,7 +262,7 @@ struct sock { struct sk_filter *sk_filter; void *sk_protinfo; struct timer_list sk_timer; - struct timeval sk_stamp; + ktime_t sk_stamp; struct socket *sk_socket; void *sk_user_data; struct page *sk_sndmsg_page; @@ -251,6 +270,8 @@ struct sock { __u32 sk_sndmsg_off; int sk_write_pending; void *sk_security; + __u32 sk_mark; + /* XXX 4 bytes hole on 64 bit */ void (*sk_state_change)(struct sock *sk); void (*sk_data_ready)(struct sock *sk, int bytes); void (*sk_write_space)(struct sock *sk); @@ -389,6 +410,7 @@ enum sock_flags { SOCK_USE_WRITE_QUEUE, /* whether to call sk->sk_write_space in sock_wfree */ SOCK_DBG, /* %SO_DEBUG setting */ SOCK_RCVTSTAMP, /* %SO_TIMESTAMP setting */ + SOCK_RCVTSTAMPNS, /* %SO_TIMESTAMPNS setting */ SOCK_LOCALROUTE, /* route locally only, %SO_DONTROUTE setting */ SOCK_QUEUE_SHRUNK, /* write queue has been shrunk recently */ }; @@ -433,7 +455,7 @@ static inline int sk_acceptq_is_full(struct sock *sk) */ static inline int sk_stream_min_wspace(struct sock *sk) { - return sk->sk_wmem_queued / 2; + return sk->sk_wmem_queued >> 1; } static inline int sk_stream_wspace(struct sock *sk) @@ -448,25 +470,6 @@ static inline int sk_stream_memory_free(struct sock *sk) return sk->sk_wmem_queued < sk->sk_sndbuf; } -extern void sk_stream_rfree(struct sk_buff *skb); - -static inline void sk_stream_set_owner_r(struct sk_buff *skb, struct sock *sk) -{ - skb->sk = sk; - skb->destructor = sk_stream_rfree; - atomic_add(skb->truesize, &sk->sk_rmem_alloc); - sk->sk_forward_alloc -= skb->truesize; -} - -static inline void sk_stream_free_skb(struct sock *sk, struct sk_buff *skb) -{ - skb_truesize_check(skb); - sock_set_flag(sk, SOCK_QUEUE_SHRUNK); - sk->sk_wmem_queued -= skb->truesize; - sk->sk_forward_alloc += skb->truesize; - __kfree_skb(skb); -} - /* The per-socket spinlock must be held here. */ static inline void sk_add_backlog(struct sock *sk, struct sk_buff *skb) { @@ -479,17 +482,17 @@ static inline void sk_add_backlog(struct sock *sk, struct sk_buff *skb) skb->next = NULL; } -#define sk_wait_event(__sk, __timeo, __condition) \ -({ int rc; \ - release_sock(__sk); \ - rc = __condition; \ - if (!rc) { \ - *(__timeo) = schedule_timeout(*(__timeo)); \ - } \ - lock_sock(__sk); \ - rc = __condition; \ - rc; \ -}) +#define sk_wait_event(__sk, __timeo, __condition) \ + ({ int __rc; \ + release_sock(__sk); \ + __rc = __condition; \ + if (!__rc) { \ + *(__timeo) = schedule_timeout(*(__timeo)); \ + } \ + lock_sock(__sk); \ + __rc = __condition; \ + __rc; \ + }) extern int sk_stream_wait_connect(struct sock *sk, long *timeo_p); extern int sk_stream_wait_memory(struct sock *sk, long *timeo_p); @@ -501,6 +504,8 @@ extern int sk_wait_data(struct sock *sk, long *timeo); struct request_sock_ops; struct timewait_sock_ops; +struct inet_hashinfo; +struct raw_hashinfo; /* Networking protocol blocks we attach to sockets. 
* socket layer -> transport layer interface @@ -554,6 +559,11 @@ struct proto { void (*unhash)(struct sock *sk); int (*get_port)(struct sock *sk, unsigned short snum); + /* Keeping track of sockets in use */ +#ifdef CONFIG_PROC_FS + unsigned int inuse_idx; +#endif + /* Memory pressure */ void (*enter_memory_pressure)(void); atomic_t *memory_allocated; /* Current allocated memory. */ @@ -561,7 +571,7 @@ struct proto { /* * Pressure flag: try to collapse. * Technical note: it is used by multiple contexts non atomically. - * All the sk_stream_mem_schedule() is of this nature: accounting + * All the __sk_mem_schedule() is of this nature: accounting * is strict, actions are advisory and have some latency. */ int *memory_pressure; @@ -570,7 +580,7 @@ struct proto { int *sysctl_rmem; int max_header; - kmem_cache_t *slab; + struct kmem_cache *slab; unsigned int obj_size; atomic_t *orphan_count; @@ -578,6 +588,12 @@ struct proto { struct request_sock_ops *rsk_prot; struct timewait_sock_ops *twsk_prot; + union { + struct inet_hashinfo *hashinfo; + struct hlist_head *udp_hash; + struct raw_hashinfo *raw_hash; + } h; + struct module *owner; char name[32]; @@ -586,10 +602,6 @@ struct proto { #ifdef SOCK_REFCNT_DEBUG atomic_t socks; #endif - struct { - int inuse; - u8 __pad[SMP_CACHE_BYTES - sizeof(int)]; - } stats[NR_CPUS]; }; extern int proto_register(struct proto *prot, int alloc_slab); @@ -620,16 +632,18 @@ static inline void sk_refcnt_debug_release(const struct sock *sk) #define sk_refcnt_debug_release(sk) do { } while (0) #endif /* SOCK_REFCNT_DEBUG */ + +#ifdef CONFIG_PROC_FS /* Called with local bh disabled */ -static __inline__ void sock_prot_inc_use(struct proto *prot) +extern void sock_prot_inuse_add(struct net *net, struct proto *prot, int inc); +extern int sock_prot_inuse_get(struct net *net, struct proto *proto); +#else +static void inline sock_prot_inuse_add(struct net *net, struct proto *prot, + int inc) { - prot->stats[smp_processor_id()].inuse++; } +#endif -static __inline__ void sock_prot_dec_use(struct proto *prot) -{ - prot->stats[smp_processor_id()].inuse--; -} /* With per-bucket locks this operation is not-atomic, so that * this version is not worse. 
@@ -665,7 +679,6 @@ struct sock_iocb { struct sock *sk; struct scm_cookie *scm; struct msghdr *msg, async_msg; - struct iovec async_iov; struct kiocb *kiocb; }; @@ -694,41 +707,81 @@ static inline struct inode *SOCK_INODE(struct socket *socket) return &container_of(socket, struct socket_alloc, socket)->vfs_inode; } -extern void __sk_stream_mem_reclaim(struct sock *sk); -extern int sk_stream_mem_schedule(struct sock *sk, int size, int kind); +/* + * Functions for memory accounting + */ +extern int __sk_mem_schedule(struct sock *sk, int size, int kind); +extern void __sk_mem_reclaim(struct sock *sk); -#define SK_STREAM_MEM_QUANTUM ((int)PAGE_SIZE) +#define SK_MEM_QUANTUM ((int)PAGE_SIZE) +#define SK_MEM_QUANTUM_SHIFT ilog2(SK_MEM_QUANTUM) +#define SK_MEM_SEND 0 +#define SK_MEM_RECV 1 -static inline int sk_stream_pages(int amt) +static inline int sk_mem_pages(int amt) { - return (amt + SK_STREAM_MEM_QUANTUM - 1) / SK_STREAM_MEM_QUANTUM; + return (amt + SK_MEM_QUANTUM - 1) >> SK_MEM_QUANTUM_SHIFT; } -static inline void sk_stream_mem_reclaim(struct sock *sk) +static inline int sk_has_account(struct sock *sk) { - if (sk->sk_forward_alloc >= SK_STREAM_MEM_QUANTUM) - __sk_stream_mem_reclaim(sk); + /* return true if protocol supports memory accounting */ + return !!sk->sk_prot->memory_allocated; } -static inline void sk_stream_writequeue_purge(struct sock *sk) +static inline int sk_wmem_schedule(struct sock *sk, int size) { - struct sk_buff *skb; + if (!sk_has_account(sk)) + return 1; + return size <= sk->sk_forward_alloc || + __sk_mem_schedule(sk, size, SK_MEM_SEND); +} - while ((skb = __skb_dequeue(&sk->sk_write_queue)) != NULL) - sk_stream_free_skb(sk, skb); - sk_stream_mem_reclaim(sk); +static inline int sk_rmem_schedule(struct sock *sk, int size) +{ + if (!sk_has_account(sk)) + return 1; + return size <= sk->sk_forward_alloc || + __sk_mem_schedule(sk, size, SK_MEM_RECV); } -static inline int sk_stream_rmem_schedule(struct sock *sk, struct sk_buff *skb) +static inline void sk_mem_reclaim(struct sock *sk) { - return (int)skb->truesize <= sk->sk_forward_alloc || - sk_stream_mem_schedule(sk, skb->truesize, 1); + if (!sk_has_account(sk)) + return; + if (sk->sk_forward_alloc >= SK_MEM_QUANTUM) + __sk_mem_reclaim(sk); } -static inline int sk_stream_wmem_schedule(struct sock *sk, int size) +static inline void sk_mem_reclaim_partial(struct sock *sk) { - return size <= sk->sk_forward_alloc || - sk_stream_mem_schedule(sk, size, 0); + if (!sk_has_account(sk)) + return; + if (sk->sk_forward_alloc > SK_MEM_QUANTUM) + __sk_mem_reclaim(sk); +} + +static inline void sk_mem_charge(struct sock *sk, int size) +{ + if (!sk_has_account(sk)) + return; + sk->sk_forward_alloc -= size; +} + +static inline void sk_mem_uncharge(struct sock *sk, int size) +{ + if (!sk_has_account(sk)) + return; + sk->sk_forward_alloc += size; +} + +static inline void sk_wmem_free_skb(struct sock *sk, struct sk_buff *skb) +{ + skb_truesize_check(skb); + sock_set_flag(sk, SOCK_QUEUE_SHRUNK); + sk->sk_wmem_queued -= skb->truesize; + sk_mem_uncharge(sk, skb->truesize); + __kfree_skb(skb); } /* Used by processes to "lock" a socket state, so that @@ -744,10 +797,35 @@ static inline int sk_stream_wmem_schedule(struct sock *sk, int size) * Since ~2.3.5 it is also exclusive sleep lock serializing * accesses from user process context. */ -#define sock_owned_by_user(sk) ((sk)->sk_lock.owner) +#define sock_owned_by_user(sk) ((sk)->sk_lock.owned) + +/* + * Macro so as to not evaluate some arguments when + * lockdep is not enabled. 
+ * + * Mark both the sk_lock and the sk_lock.slock as a + * per-address-family lock class. + */ +#define sock_lock_init_class_and_name(sk, sname, skey, name, key) \ +do { \ + sk->sk_lock.owned = 0; \ + init_waitqueue_head(&sk->sk_lock.wq); \ + spin_lock_init(&(sk)->sk_lock.slock); \ + debug_check_no_locks_freed((void *)&(sk)->sk_lock, \ + sizeof((sk)->sk_lock)); \ + lockdep_set_class_and_name(&(sk)->sk_lock.slock, \ + (skey), (sname)); \ + lockdep_init_map(&(sk)->sk_lock.dep_map, (name), (key), 0); \ +} while (0) + +extern void lock_sock_nested(struct sock *sk, int subclass); + +static inline void lock_sock(struct sock *sk) +{ + lock_sock_nested(sk, 0); +} -extern void FASTCALL(lock_sock(struct sock *sk)); -extern void FASTCALL(release_sock(struct sock *sk)); +extern void release_sock(struct sock *sk); /* BH context may only use the following locking interface. */ #define bh_lock_sock(__sk) spin_lock(&((__sk)->sk_lock.slock)) @@ -756,10 +834,11 @@ extern void FASTCALL(release_sock(struct sock *sk)); SINGLE_DEPTH_NESTING) #define bh_unlock_sock(__sk) spin_unlock(&((__sk)->sk_lock.slock)) -extern struct sock *sk_alloc(int family, +extern struct sock *sk_alloc(struct net *net, int family, gfp_t priority, - struct proto *prot, int zero_it); + struct proto *prot); extern void sk_free(struct sock *sk); +extern void sk_release_kernel(struct sock *sk); extern struct sock *sk_clone(const struct sock *sk, const gfp_t priority); @@ -849,62 +928,25 @@ extern void sk_common_release(struct sock *sk); extern void sock_init_data(struct socket *sock, struct sock *sk); /** - * sk_filter - run a packet through a socket filter - * @sk: sock associated with &sk_buff - * @skb: buffer to filter - * @needlock: set to 1 if the sock is not locked by caller. - * - * Run the filter code and then cut skb->data to correct size returned by - * sk_run_filter. If pkt_len is 0 we toss packet. If skb->len is smaller - * than pkt_len we keep whole skb->data. This is the socket level - * wrapper to sk_run_filter. It returns 0 if the packet should - * be accepted or -EPERM if the packet should be tossed. - * - */ - -static inline int sk_filter(struct sock *sk, struct sk_buff *skb, int needlock) -{ - int err; - - err = security_sock_rcv_skb(sk, skb); - if (err) - return err; - - if (sk->sk_filter) { - struct sk_filter *filter; - - if (needlock) - bh_lock_sock(sk); - - filter = sk->sk_filter; - if (filter) { - unsigned int pkt_len = sk_run_filter(skb, filter->insns, - filter->len); - err = pkt_len ? pskb_trim(skb, pkt_len) : -EPERM; - } - - if (needlock) - bh_unlock_sock(sk); - } - return err; -} - -/** * sk_filter_release: Release a socket filter * @sk: socket * @fp: filter to remove * * Remove a filter from a socket and release its resources. */ - -static inline void sk_filter_release(struct sock *sk, struct sk_filter *fp) + +static inline void sk_filter_release(struct sk_filter *fp) +{ + if (atomic_dec_and_test(&fp->refcnt)) + kfree(fp); +} + +static inline void sk_filter_uncharge(struct sock *sk, struct sk_filter *fp) { unsigned int size = sk_filter_len(fp); atomic_sub(size, &sk->sk_omem_alloc); - - if (atomic_dec_and_test(&fp->refcnt)) - kfree(fp); + sk_filter_release(fp); } static inline void sk_filter_charge(struct sock *sk, struct sk_filter *fp) @@ -945,7 +987,8 @@ static inline void sock_put(struct sock *sk) sk_free(sk); } -extern int sk_receive_skb(struct sock *sk, struct sk_buff *skb); +extern int sk_receive_skb(struct sock *sk, struct sk_buff *skb, + const int nested); /* Detach socket from process context. 
* Announce socket dead, detach it from wait queue and inode. @@ -969,6 +1012,7 @@ static inline void sock_graft(struct sock *sk, struct socket *parent) sk->sk_sleep = &parent->wait; parent->sk = sk; sk->sk_socket = parent; + security_sock_graft(sk, parent); write_unlock_bh(&sk->sk_callback_lock); } @@ -1039,25 +1083,7 @@ static inline int sk_can_gso(const struct sock *sk) return net_gso_ok(sk->sk_route_caps, sk->sk_gso_type); } -static inline void sk_setup_caps(struct sock *sk, struct dst_entry *dst) -{ - __sk_dst_set(sk, dst); - sk->sk_route_caps = dst->dev->features; - if (sk->sk_route_caps & NETIF_F_GSO) - sk->sk_route_caps |= NETIF_F_GSO_MASK; - if (sk_can_gso(sk)) { - if (dst->header_len) - sk->sk_route_caps &= ~NETIF_F_GSO_MASK; - else - sk->sk_route_caps |= NETIF_F_SG | NETIF_F_HW_CSUM; - } -} - -static inline void sk_charge_skb(struct sock *sk, struct sk_buff *skb) -{ - sk->sk_wmem_queued += skb->truesize; - sk->sk_forward_alloc -= skb->truesize; -} +extern void sk_setup_caps(struct sock *sk, struct dst_entry *dst); static inline int skb_copy_to_page(struct sock *sk, char __user *from, struct sk_buff *skb, struct page *page, @@ -1065,7 +1091,7 @@ static inline int skb_copy_to_page(struct sock *sk, char __user *from, { if (skb->ip_summed == CHECKSUM_NONE) { int err = 0; - unsigned int csum = csum_and_copy_from_user(from, + __wsum csum = csum_and_copy_from_user(from, page_address(page) + off, copy, 0, &err); if (err) @@ -1078,7 +1104,7 @@ static inline int skb_copy_to_page(struct sock *sk, char __user *from, skb->data_len += copy; skb->truesize += copy; sk->sk_wmem_queued += copy; - sk->sk_forward_alloc -= copy; + sk_mem_charge(sk, copy); return 0; } @@ -1104,6 +1130,7 @@ static inline void skb_set_owner_r(struct sk_buff *skb, struct sock *sk) skb->sk = sk; skb->destructor = sock_rfree; atomic_add(skb->truesize, &sk->sk_rmem_alloc); + sk_mem_charge(sk, skb->truesize); } extern void sk_reset_timer(struct sock *sk, struct timer_list* timer, @@ -1165,40 +1192,12 @@ static inline void sk_wake_async(struct sock *sk, int how, int band) static inline void sk_stream_moderate_sndbuf(struct sock *sk) { if (!(sk->sk_userlocks & SOCK_SNDBUF_LOCK)) { - sk->sk_sndbuf = min(sk->sk_sndbuf, sk->sk_wmem_queued / 2); + sk->sk_sndbuf = min(sk->sk_sndbuf, sk->sk_wmem_queued >> 1); sk->sk_sndbuf = max(sk->sk_sndbuf, SOCK_MIN_SNDBUF); } } -static inline struct sk_buff *sk_stream_alloc_pskb(struct sock *sk, - int size, int mem, - gfp_t gfp) -{ - struct sk_buff *skb; - int hdr_len; - - hdr_len = SKB_DATA_ALIGN(sk->sk_prot->max_header); - skb = alloc_skb_fclone(size + hdr_len, gfp); - if (skb) { - skb->truesize += mem; - if (sk_stream_wmem_schedule(sk, skb->truesize)) { - skb_reserve(skb, hdr_len); - return skb; - } - __kfree_skb(skb); - } else { - sk->sk_prot->enter_memory_pressure(); - sk_stream_moderate_sndbuf(sk); - } - return NULL; -} - -static inline struct sk_buff *sk_stream_alloc_skb(struct sock *sk, - int size, - gfp_t gfp) -{ - return sk_stream_alloc_pskb(sk, size, 0, gfp); -} +struct sk_buff *sk_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp); static inline struct page *sk_stream_alloc_page(struct sock *sk) { @@ -1212,29 +1211,17 @@ static inline struct page *sk_stream_alloc_page(struct sock *sk) return page; } -#define sk_stream_for_retrans_queue(skb, sk) \ - for (skb = (sk)->sk_write_queue.next; \ - (skb != (sk)->sk_send_head) && \ - (skb != (struct sk_buff *)&(sk)->sk_write_queue); \ - skb = skb->next) - -/*from STCP for fast SACK Process*/ -#define sk_stream_for_retrans_queue_from(skb, 
sk) \ - for (; (skb != (sk)->sk_send_head) && \ - (skb != (struct sk_buff *)&(sk)->sk_write_queue); \ - skb = skb->next) - /* * Default write policy as shown to user space via poll/select/SIGIO */ static inline int sock_writeable(const struct sock *sk) { - return atomic_read(&sk->sk_wmem_alloc) < (sk->sk_sndbuf / 2); + return atomic_read(&sk->sk_wmem_alloc) < (sk->sk_sndbuf >> 1); } static inline gfp_t gfp_any(void) { - return in_softirq() ? GFP_ATOMIC : GFP_KERNEL; + return in_atomic() ? GFP_ATOMIC : GFP_KERNEL; } static inline long sock_rcvtimeo(const struct sock *sk, int noblock) @@ -1260,22 +1247,18 @@ static inline int sock_intr_errno(long timeo) return timeo == MAX_SCHEDULE_TIMEOUT ? -ERESTARTSYS : -EINTR; } +extern void __sock_recv_timestamp(struct msghdr *msg, struct sock *sk, + struct sk_buff *skb); + static __inline__ void sock_recv_timestamp(struct msghdr *msg, struct sock *sk, struct sk_buff *skb) { - struct timeval stamp; + ktime_t kt = skb->tstamp; - skb_get_timestamp(skb, &stamp); - if (sock_flag(sk, SOCK_RCVTSTAMP)) { - /* Race occurred between timestamp enabling and packet - receiving. Fill in the current time for now. */ - if (stamp.tv_sec == 0) - do_gettimeofday(&stamp); - skb_set_timestamp(skb, &stamp); - put_cmsg(msg, SOL_SOCKET, SO_TIMESTAMP, sizeof(struct timeval), - &stamp); - } else - sk->sk_stamp = stamp; + if (sock_flag(sk, SOCK_RCVTSTAMP)) + __sock_recv_timestamp(msg, sk, skb); + else + sk->sk_stamp = kt; } /** @@ -1304,20 +1287,49 @@ static inline void sk_eat_skb(struct sock *sk, struct sk_buff *skb, int copied_e } #endif +static inline +struct net *sock_net(const struct sock *sk) +{ +#ifdef CONFIG_NET_NS + return sk->sk_net; +#else + return &init_net; +#endif +} + +static inline +void sock_net_set(struct sock *sk, struct net *net) +{ +#ifdef CONFIG_NET_NS + sk->sk_net = net; +#endif +} + +/* + * Kernel sockets, f.e. rtnl or icmp_socket, are a part of a namespace. + * They should not hold a referrence to a namespace in order to allow + * to stop it. + * Sockets after sk_change_net should be released using sk_release_kernel + */ +static inline void sk_change_net(struct sock *sk, struct net *net) +{ + put_net(sock_net(sk)); + sock_net_set(sk, hold_net(net)); +} + extern void sock_enable_timestamp(struct sock *sk); extern int sock_get_timestamp(struct sock *, struct timeval __user *); +extern int sock_get_timestampns(struct sock *, struct timespec __user *); /* * Enable debug/info messages */ +extern int net_msg_warn; +#define NETDEBUG(fmt, args...) \ + do { if (net_msg_warn) printk(fmt,##args); } while (0) -#ifdef CONFIG_NETDEBUG -#define NETDEBUG(fmt, args...) printk(fmt,##args) -#define LIMIT_NETDEBUG(fmt, args...) do { if (net_ratelimit()) printk(fmt,##args); } while(0) -#else -#define NETDEBUG(fmt, args...) do { } while (0) -#define LIMIT_NETDEBUG(fmt, args...) do { } while(0) -#endif +#define LIMIT_NETDEBUG(fmt, args...) \ + do { if (net_msg_warn && net_ratelimit()) printk(fmt,##args); } while(0) /* * Macros for sleeping on a socket. 
Use them like this: @@ -1343,32 +1355,11 @@ extern int sock_get_timestamp(struct sock *, struct timeval __user *); lock_sock(sk); \ } -static inline void sock_valbool_flag(struct sock *sk, int bit, int valbool) -{ - if (valbool) - sock_set_flag(sk, bit); - else - sock_reset_flag(sk, bit); -} - extern __u32 sysctl_wmem_max; extern __u32 sysctl_rmem_max; -#ifdef CONFIG_NET -int siocdevprivate_ioctl(unsigned int fd, unsigned int cmd, unsigned long arg); -#else -static inline int siocdevprivate_ioctl(unsigned int fd, unsigned int cmd, unsigned long arg) -{ - return -ENODEV; -} -#endif - extern void sk_init(void); -#ifdef CONFIG_SYSCTL -extern struct ctl_table core_table[]; -#endif - extern int sysctl_optmem_max; extern __u32 sysctl_wmem_default;
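
The largest functional change above replaces the old sk_stream_mem_*/sk_stream_*_schedule helpers with the generic sk_mem_* accounting family (__sk_mem_schedule(), sk_rmem_schedule(), sk_wmem_schedule(), sk_mem_charge()/sk_mem_uncharge()). A minimal sketch of how a protocol receive path would be expected to use the new helpers follows; it is not part of the patch, and the function name and the exact rcvbuf check are illustrative.

/*
 * Illustrative sketch only (not from the patch): queue an skb on the
 * receive queue using the memory-accounting helpers declared above.
 * Assumes <net/sock.h> and <linux/skbuff.h> are already included.
 */
static int example_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
{
	/* Respect the receive buffer limit set via SO_RCVBUF. */
	if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >
	    (unsigned int)sk->sk_rcvbuf)
		return -ENOMEM;

	/*
	 * Reserve forward-allocated memory; this is a no-op for protocols
	 * that do not set ->memory_allocated (see sk_has_account()).
	 */
	if (!sk_rmem_schedule(sk, skb->truesize))
		return -ENOBUFS;

	/* skb_set_owner_r() now also calls sk_mem_charge() (see above). */
	skb_set_owner_r(skb, sk);
	skb_queue_tail(&sk->sk_receive_queue, skb);

	sk->sk_data_ready(sk, skb->len);
	return 0;
}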
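
Similarly, the sk_wait_event() rewrite above only renames the local variable to __rc to avoid shadowing identifiers in callers; the calling convention is unchanged. Below is a hedged sketch of the usual wait pattern built on top of it, using only sk_wait_data(), sock_rcvtimeo() and sock_intr_errno() as declared in this header; the helper itself is hypothetical.

/*
 * Illustrative sketch only: block until the receive queue is non-empty,
 * honouring SO_RCVTIMEO and pending signals.  sk_wait_data() drops and
 * re-acquires the socket lock around the sleep via sk_wait_event().
 */
static int example_wait_for_data(struct sock *sk, int noblock)
{
	long timeo = sock_rcvtimeo(sk, noblock);
	int err = 0;

	lock_sock(sk);
	while (skb_queue_empty(&sk->sk_receive_queue)) {
		if (!timeo) {
			err = -EAGAIN;
			break;
		}
		if (signal_pending(current)) {
			err = sock_intr_errno(timeo);
			break;
		}
		sk_wait_data(sk, &timeo);
	}
	release_sock(sk);
	return err;
}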