X-Git-Url: http://ftp.safe.ca/?a=blobdiff_plain;f=net%2Fcore%2Fsock.c;h=ed2afdb9ea2df7317794e31a1f034a605e0d9c7d;hb=f4ffaa452e71495a06376f12f772342bc57051fc;hp=5c2f72fa101323f69b027220bcbf76f2dc2cf0ab;hpb=b453257f057b834fdf9f4a6ad6133598b79bd982;p=safe%2Fjmp%2Flinux-2.6

diff --git a/net/core/sock.c b/net/core/sock.c
index 5c2f72f..ed2afdb 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -9,7 +9,7 @@
  *
  *		Version:	$Id: sock.c,v 1.117 2002/02/01 22:01:03 davem Exp $
  *
- * Authors:	Ross Biro,
+ * Authors:	Ross Biro
  *		Fred N. van Kempen,
  *		Florian La Roche,
  *		Alan Cox,
@@ -91,6 +91,7 @@
  * 2 of the License, or (at your option) any later version.
  */
 
+#include
 #include
 #include
 #include
@@ -118,6 +119,7 @@
 #include
 #include
 #include
+#include
 #include
 #include
 #include
@@ -185,6 +187,99 @@ static void sock_disable_timestamp(struct sock *sk)
 }
 
 
+int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
+{
+	int err = 0;
+	int skb_len;
+
+	/* Cast skb->rcvbuf to unsigned... It's pointless, but reduces
+	   number of warnings when compiling with -W --ANK
+	 */
+	if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >=
+	    (unsigned)sk->sk_rcvbuf) {
+		err = -ENOMEM;
+		goto out;
+	}
+
+	/* It would be deadlock, if sock_queue_rcv_skb is used
+	   with socket lock! We assume that users of this
+	   function are lock free.
+	*/
+	err = sk_filter(sk, skb, 1);
+	if (err)
+		goto out;
+
+	skb->dev = NULL;
+	skb_set_owner_r(skb, sk);
+
+	/* Cache the SKB length before we tack it onto the receive
+	 * queue. Once it is added it no longer belongs to us and
+	 * may be freed by other threads of control pulling packets
+	 * from the queue.
+	 */
+	skb_len = skb->len;
+
+	skb_queue_tail(&sk->sk_receive_queue, skb);
+
+	if (!sock_flag(sk, SOCK_DEAD))
+		sk->sk_data_ready(sk, skb_len);
+out:
+	return err;
+}
+EXPORT_SYMBOL(sock_queue_rcv_skb);
+
+int sk_receive_skb(struct sock *sk, struct sk_buff *skb)
+{
+	int rc = NET_RX_SUCCESS;
+
+	if (sk_filter(sk, skb, 0))
+		goto discard_and_relse;
+
+	skb->dev = NULL;
+
+	bh_lock_sock(sk);
+	if (!sock_owned_by_user(sk))
+		rc = sk->sk_backlog_rcv(sk, skb);
+	else
+		sk_add_backlog(sk, skb);
+	bh_unlock_sock(sk);
+out:
+	sock_put(sk);
+	return rc;
+discard_and_relse:
+	kfree_skb(skb);
+	goto out;
+}
+EXPORT_SYMBOL(sk_receive_skb);
+
+struct dst_entry *__sk_dst_check(struct sock *sk, u32 cookie)
+{
+	struct dst_entry *dst = sk->sk_dst_cache;
+
+	if (dst && dst->obsolete && dst->ops->check(dst, cookie) == NULL) {
+		sk->sk_dst_cache = NULL;
+		dst_release(dst);
+		return NULL;
+	}
+
+	return dst;
+}
+EXPORT_SYMBOL(__sk_dst_check);
+
+struct dst_entry *sk_dst_check(struct sock *sk, u32 cookie)
+{
+	struct dst_entry *dst = sk_dst_get(sk);
+
+	if (dst && dst->obsolete && dst->ops->check(dst, cookie) == NULL) {
+		sk_dst_reset(sk);
+		dst_release(dst);
+		return NULL;
+	}
+
+	return dst;
+}
+EXPORT_SYMBOL(sk_dst_check);
+
 /*
  * This is meant for all protocols to use and covers goings on
  * at the socket level. Everything here is generic.
@@ -205,13 +300,14 @@ int sock_setsockopt(struct socket *sock, int level, int optname,
 	 */
 
 #ifdef SO_DONTLINGER		/* Compatibility item... */
-	switch (optname) {
-		case SO_DONTLINGER:
-			sock_reset_flag(sk, SOCK_LINGER);
-			return 0;
+	if (optname == SO_DONTLINGER) {
+		lock_sock(sk);
+		sock_reset_flag(sk, SOCK_LINGER);
+		release_sock(sk);
+		return 0;
 	}
-#endif	
-	
+#endif
+
 	if(optlen<sizeof(int))
 		return(-EINVAL);
 	
@@ ... @@ int sock_setsockopt(struct socket *sock, int level, int optname,
 
 		if (val > sysctl_wmem_max)
 			val = sysctl_wmem_max;
-
+set_sndbuf:
 		sk->sk_userlocks |= SOCK_SNDBUF_LOCK;
 		if ((val * 2) < SOCK_MIN_SNDBUF)
 			sk->sk_sndbuf = SOCK_MIN_SNDBUF;
@@ -272,6 +368,13 @@ int sock_setsockopt(struct socket *sock, int level, int optname,
 		sk->sk_write_space(sk);
 		break;
 
+	case SO_SNDBUFFORCE:
+		if (!capable(CAP_NET_ADMIN)) {
+			ret = -EPERM;
+			break;
+		}
+		goto set_sndbuf;
+
 	case SO_RCVBUF:
 		/* Don't error on this BSD doesn't and if you think
 		   about it this is right. Otherwise apps have to
@@ -280,15 +383,36 @@ int sock_setsockopt(struct socket *sock, int level, int optname,
 
 		if (val > sysctl_rmem_max)
 			val = sysctl_rmem_max;
-
+set_rcvbuf:
 		sk->sk_userlocks |= SOCK_RCVBUF_LOCK;
-		/* FIXME: is this lower bound the right one? */
+		/*
+		 * We double it on the way in to account for
+		 * "struct sk_buff" etc. overhead. Applications
+		 * assume that the SO_RCVBUF setting they make will
+		 * allow that much actual data to be received on that
+		 * socket.
+		 *
+		 * Applications are unaware that "struct sk_buff" and
+		 * other overheads allocate from the receive buffer
+		 * during socket buffer allocation.
+		 *
+		 * And after considering the possible alternatives,
+		 * returning the value we actually used in getsockopt
+		 * is the most desirable behavior.
+		 */
 		if ((val * 2) < SOCK_MIN_RCVBUF)
 			sk->sk_rcvbuf = SOCK_MIN_RCVBUF;
 		else
 			sk->sk_rcvbuf = val * 2;
 		break;
 
+	case SO_RCVBUFFORCE:
+		if (!capable(CAP_NET_ADMIN)) {
+			ret = -EPERM;
+			break;
+		}
+		goto set_rcvbuf;
+
 	case SO_KEEPALIVE:
 #ifdef CONFIG_INET
 		if (sk->sk_protocol == IPPROTO_TCP)
@@ -325,11 +449,11 @@ int sock_setsockopt(struct socket *sock, int level, int optname,
 			sock_reset_flag(sk, SOCK_LINGER);
 		else {
 #if (BITS_PER_LONG == 32)
-			if (ling.l_linger >= MAX_SCHEDULE_TIMEOUT/HZ)
+			if ((unsigned int)ling.l_linger >= MAX_SCHEDULE_TIMEOUT/HZ)
 				sk->sk_lingertime = MAX_SCHEDULE_TIMEOUT;
 			else
 #endif
-				sk->sk_lingertime = ling.l_linger * HZ;
+				sk->sk_lingertime = (unsigned int)ling.l_linger * HZ;
 			sock_set_flag(sk, SOCK_LINGER);
 		}
 		break;
@@ -387,8 +511,9 @@ int sock_setsockopt(struct socket *sock, int level, int optname,
 		if (!valbool) {
 			sk->sk_bound_dev_if = 0;
 		} else {
-			if (optlen > IFNAMSIZ)
-				optlen = IFNAMSIZ;
+			if (optlen > IFNAMSIZ - 1)
+				optlen = IFNAMSIZ - 1;
+			memset(devname, 0, sizeof(devname));
 			if (copy_from_user(devname, optval, optlen)) {
 				ret = -EFAULT;
 				break;
@@ -599,7 +724,7 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
 		break;
 
 	case SO_PEERSEC:
-		return security_socket_getpeersec(sock, optval, optlen, len);
+		return security_socket_getpeersec_stream(sock, optval, optlen, len);
 
 	default:
 		return(-ENOPROTOOPT);
@@ -616,12 +741,13 @@ lenout:
 
 /**
  *	sk_alloc - All socket objects are allocated here
- *	@family - protocol family
- *	@priority - for allocation (%GFP_KERNEL, %GFP_ATOMIC, etc)
- *	@prot - struct proto associated with this new sock instance
- *	@zero_it - if we should zero the newly allocated sock
+ *	@family: protocol family
+ *	@priority: for allocation (%GFP_KERNEL, %GFP_ATOMIC, etc)
+ *	@prot: struct proto associated with this new sock instance
+ *	@zero_it: if we should zero the newly allocated sock
  */
-struct sock *sk_alloc(int family, int priority, struct proto *prot, int zero_it)
+struct sock *sk_alloc(int family, gfp_t priority,
+		      struct proto *prot, int zero_it)
 {
 	struct sock *sk = NULL;
 	kmem_cache_t *slab = prot->slab;
@@ -635,26 +761,34 @@ struct sock *sk_alloc(int family, int priority, struct proto *prot, int zero_it)
 		if (zero_it) {
 			memset(sk, 0, prot->obj_size);
 			sk->sk_family = family;
-			sk->sk_prot = prot;
+			/*
+			 * See comment in struct sock definition to understand
+			 * why we need sk_prot_creator -acme
+			 */
+			sk->sk_prot = sk->sk_prot_creator = prot;
 			sock_lock_init(sk);
 		}
 
-		if (security_sk_alloc(sk, family, priority)) {
-			if (slab != NULL)
-				kmem_cache_free(slab, sk);
-			else
-				kfree(sk);
-			sk = NULL;
-		} else
-			__module_get(prot->owner);
+		if (security_sk_alloc(sk, family, priority))
+			goto out_free;
+
+		if (!try_module_get(prot->owner))
+			goto out_free;
 	}
 	return sk;
+
+out_free:
+	if (slab != NULL)
+		kmem_cache_free(slab, sk);
+	else
+		kfree(sk);
+	return NULL;
 }
 
 void sk_free(struct sock *sk)
 {
 	struct sk_filter *filter;
-	struct module *owner = sk->sk_prot->owner;
+	struct module *owner = sk->sk_prot_creator->owner;
 
 	if (sk->sk_destruct)
 		sk->sk_destruct(sk);
@@ -672,13 +806,87 @@ void sk_free(struct sock *sk)
 		       __FUNCTION__, atomic_read(&sk->sk_omem_alloc));
 
 	security_sk_free(sk);
-	if (sk->sk_prot->slab != NULL)
-		kmem_cache_free(sk->sk_prot->slab, sk);
+	if (sk->sk_prot_creator->slab != NULL)
+		kmem_cache_free(sk->sk_prot_creator->slab, sk);
 	else
 		kfree(sk);
 	module_put(owner);
 }
 
+struct sock *sk_clone(const struct sock *sk, const gfp_t priority)
+{
+	struct sock *newsk = sk_alloc(sk->sk_family, priority, sk->sk_prot, 0);
+
+	if (newsk != NULL) {
+		struct sk_filter *filter;
+
+		memcpy(newsk, sk, sk->sk_prot->obj_size);
+
+		/* SANITY */
+		sk_node_init(&newsk->sk_node);
+		sock_lock_init(newsk);
+		bh_lock_sock(newsk);
+
+		atomic_set(&newsk->sk_rmem_alloc, 0);
+		atomic_set(&newsk->sk_wmem_alloc, 0);
+		atomic_set(&newsk->sk_omem_alloc, 0);
+		skb_queue_head_init(&newsk->sk_receive_queue);
+		skb_queue_head_init(&newsk->sk_write_queue);
+
+		rwlock_init(&newsk->sk_dst_lock);
+		rwlock_init(&newsk->sk_callback_lock);
+
+		newsk->sk_dst_cache = NULL;
+		newsk->sk_wmem_queued = 0;
+		newsk->sk_forward_alloc = 0;
+		newsk->sk_send_head = NULL;
+		newsk->sk_backlog.head = newsk->sk_backlog.tail = NULL;
+		newsk->sk_userlocks = sk->sk_userlocks & ~SOCK_BINDPORT_LOCK;
+
+		sock_reset_flag(newsk, SOCK_DONE);
+		skb_queue_head_init(&newsk->sk_error_queue);
+
+		filter = newsk->sk_filter;
+		if (filter != NULL)
+			sk_filter_charge(newsk, filter);
+
+		if (unlikely(xfrm_sk_clone_policy(newsk))) {
+			/* It is still raw copy of parent, so invalidate
+			 * destructor and make plain sk_free() */
+			newsk->sk_destruct = NULL;
+			sk_free(newsk);
+			newsk = NULL;
+			goto out;
+		}
+
+		newsk->sk_err = 0;
+		newsk->sk_priority = 0;
+		atomic_set(&newsk->sk_refcnt, 2);
+
+		/*
+		 * Increment the counter in the same struct proto as the master
+		 * sock (sk_refcnt_debug_inc uses newsk->sk_prot->socks, that
+		 * is the same as sk->sk_prot->socks, as this field was copied
+		 * with memcpy).
+		 *
+		 * This _changes_ the previous behaviour, where
		 * tcp_create_openreq_child always was incrementing the
+		 * equivalent to tcp_prot->socks (inet_sock_nr), so this have
+		 * to be taken into account in all callers. -acme
+		 */
+		sk_refcnt_debug_inc(newsk);
+		newsk->sk_socket = NULL;
+		newsk->sk_sleep = NULL;
+
+		if (newsk->sk_prot->sockets_allocated)
+			atomic_inc(newsk->sk_prot->sockets_allocated);
+	}
+out:
+	return newsk;
+}
+
+EXPORT_SYMBOL_GPL(sk_clone);
+
 void __init sk_init(void)
 {
 	if (num_physpages <= 4096) {
@@ -745,7 +953,8 @@ unsigned long sock_i_ino(struct sock *sk)
 /*
  * Allocate a skb from the socket's send buffer.
  */
-struct sk_buff *sock_wmalloc(struct sock *sk, unsigned long size, int force, int priority)
+struct sk_buff *sock_wmalloc(struct sock *sk, unsigned long size, int force,
+			     gfp_t priority)
 {
 	if (force || atomic_read(&sk->sk_wmem_alloc) < sk->sk_sndbuf) {
 		struct sk_buff * skb = alloc_skb(size, priority);
@@ -760,7 +969,8 @@ struct sk_buff *sock_wmalloc(struct sock *sk, unsigned long size, int force, int
 /*
  * Allocate a skb from the socket's receive buffer.
  */
-struct sk_buff *sock_rmalloc(struct sock *sk, unsigned long size, int force, int priority)
+struct sk_buff *sock_rmalloc(struct sock *sk, unsigned long size, int force,
+			     gfp_t priority)
 {
 	if (force || atomic_read(&sk->sk_rmem_alloc) < sk->sk_rcvbuf) {
 		struct sk_buff *skb = alloc_skb(size, priority);
@@ -775,7 +985,7 @@ struct sk_buff *sock_rmalloc(struct sock *sk, unsigned long size, int force, int
 /*
  * Allocate a memory block from the socket's option memory buffer.
  */
-void *sock_kmalloc(struct sock *sk, int size, int priority)
+void *sock_kmalloc(struct sock *sk, int size, gfp_t priority)
 {
 	if ((unsigned)size <= sysctl_optmem_max &&
 	    atomic_read(&sk->sk_omem_alloc) + size < sysctl_optmem_max) {
@@ -839,7 +1049,7 @@ static struct sk_buff *sock_alloc_send_pskb(struct sock *sk,
 					    int noblock, int *errcode)
 {
 	struct sk_buff *skb;
-	unsigned int gfp_mask;
+	gfp_t gfp_mask;
 	long timeo;
 	int err;
@@ -970,8 +1180,8 @@ static void __release_sock(struct sock *sk)
 
 /**
  * sk_wait_data - wait for data to arrive at sk_receive_queue
- * sk - sock to wait on
- * timeo - for how long
+ * @sk: sock to wait on
+ * @timeo: for how long
  *
 * Now socket state including sk->sk_err is changed only under lock,
 * hence we may omit checks after joining wait queue.
@@ -1141,8 +1351,7 @@ static void sock_def_write_space(struct sock *sk)
 
 static void sock_def_destruct(struct sock *sk)
 {
-	if (sk->sk_protinfo)
-		kfree(sk->sk_protinfo);
+	kfree(sk->sk_protinfo);
 }
 
 void sk_send_sigurg(struct sock *sk)
@@ -1284,6 +1493,20 @@ int sock_common_getsockopt(struct socket *sock, int level, int optname,
 
 EXPORT_SYMBOL(sock_common_getsockopt);
 
+#ifdef CONFIG_COMPAT
+int compat_sock_common_getsockopt(struct socket *sock, int level, int optname,
+				  char __user *optval, int __user *optlen)
+{
+	struct sock *sk = sock->sk;
+
+	if (sk->sk_prot->compat_setsockopt != NULL)
+		return sk->sk_prot->compat_getsockopt(sk, level, optname,
+						      optval, optlen);
+	return sk->sk_prot->getsockopt(sk, level, optname, optval, optlen);
+}
+EXPORT_SYMBOL(compat_sock_common_getsockopt);
+#endif
+
 int sock_common_recvmsg(struct kiocb *iocb, struct socket *sock,
 			struct msghdr *msg, size_t size, int flags)
 {
@@ -1313,6 +1536,20 @@ int sock_common_setsockopt(struct socket *sock, int level, int optname,
 
 EXPORT_SYMBOL(sock_common_setsockopt);
 
+#ifdef CONFIG_COMPAT
+int compat_sock_common_setsockopt(struct socket *sock, int level, int optname,
+				  char __user *optval, int optlen)
+{
+	struct sock *sk = sock->sk;
+
+	if (sk->sk_prot->compat_setsockopt != NULL)
+		return sk->sk_prot->compat_setsockopt(sk, level, optname,
+						      optval, optlen);
+	return sk->sk_prot->setsockopt(sk, level, optname, optval, optlen);
+}
+EXPORT_SYMBOL(compat_sock_common_setsockopt);
+#endif
+
 void sk_common_release(struct sock *sk)
 {
 	if (sk->sk_prot->destroy)
@@ -1344,11 +1581,7 @@ void sk_common_release(struct sock *sk)
 
 	xfrm_sk_free_policy(sk);
 
-#ifdef INET_REFCNT_DEBUG
-	if (atomic_read(&sk->sk_refcnt) != 1)
-		printk(KERN_DEBUG "Destruction of the socket %p delayed, c=%d\n",
-		       sk, atomic_read(&sk->sk_refcnt));
-#endif
+	sk_refcnt_debug_release(sk);
 
 	sock_put(sk);
 }
@@ -1359,6 +1592,8 @@ static LIST_HEAD(proto_list);
 
 int proto_register(struct proto *prot, int alloc_slab)
 {
+	char *request_sock_slab_name = NULL;
+	char *timewait_sock_slab_name;
 	int rc = -ENOBUFS;
 
 	if (alloc_slab) {
@@ -1370,6 +1605,43 @@ int proto_register(struct proto *prot, int alloc_slab)
 			       prot->name);
 			goto out;
 		}
+
+		if (prot->rsk_prot != NULL) {
+			static const char mask[] = "request_sock_%s";
+
+			request_sock_slab_name = kmalloc(strlen(prot->name) + sizeof(mask) - 1, GFP_KERNEL);
+			if (request_sock_slab_name == NULL)
+				goto out_free_sock_slab;
+
+			sprintf(request_sock_slab_name, mask, prot->name);
+			prot->rsk_prot->slab = kmem_cache_create(request_sock_slab_name,
								 prot->rsk_prot->obj_size, 0,
								 SLAB_HWCACHE_ALIGN, NULL, NULL);
+
+			if (prot->rsk_prot->slab == NULL) {
+				printk(KERN_CRIT "%s: Can't create request sock SLAB cache!\n",
+				       prot->name);
+				goto out_free_request_sock_slab_name;
+			}
+		}
+
+		if (prot->twsk_prot != NULL) {
+			static const char mask[] = "tw_sock_%s";
+
+			timewait_sock_slab_name = kmalloc(strlen(prot->name) + sizeof(mask) - 1, GFP_KERNEL);
+
+			if (timewait_sock_slab_name == NULL)
+				goto out_free_request_sock_slab;
+
+			sprintf(timewait_sock_slab_name, mask, prot->name);
+			prot->twsk_prot->twsk_slab =
+				kmem_cache_create(timewait_sock_slab_name,
						  prot->twsk_prot->twsk_obj_size,
						  0, SLAB_HWCACHE_ALIGN,
						  NULL, NULL);
+			if (prot->twsk_prot->twsk_slab == NULL)
+				goto out_free_timewait_sock_slab_name;
+		}
 	}
 
 	write_lock(&proto_list_lock);
@@ -1378,6 +1650,19 @@ int proto_register(struct proto *prot, int alloc_slab)
 	rc = 0;
out:
 	return rc;
+out_free_timewait_sock_slab_name:
+	kfree(timewait_sock_slab_name);
+out_free_request_sock_slab:
+	if (prot->rsk_prot && prot->rsk_prot->slab) {
+		kmem_cache_destroy(prot->rsk_prot->slab);
+		prot->rsk_prot->slab = NULL;
+	}
+out_free_request_sock_slab_name:
+	kfree(request_sock_slab_name);
+out_free_sock_slab:
+	kmem_cache_destroy(prot->slab);
+	prot->slab = NULL;
+	goto out;
 }
 
 EXPORT_SYMBOL(proto_register);
@@ -1385,14 +1670,29 @@ EXPORT_SYMBOL(proto_register);
 void proto_unregister(struct proto *prot)
 {
 	write_lock(&proto_list_lock);
+	list_del(&prot->node);
+	write_unlock(&proto_list_lock);
 
 	if (prot->slab != NULL) {
 		kmem_cache_destroy(prot->slab);
 		prot->slab = NULL;
 	}
 
-	list_del(&prot->node);
-	write_unlock(&proto_list_lock);
+	if (prot->rsk_prot != NULL && prot->rsk_prot->slab != NULL) {
+		const char *name = kmem_cache_name(prot->rsk_prot->slab);
+
+		kmem_cache_destroy(prot->rsk_prot->slab);
+		kfree(name);
+		prot->rsk_prot->slab = NULL;
+	}
+
+	if (prot->twsk_prot != NULL && prot->twsk_prot->twsk_slab != NULL) {
+		const char *name = kmem_cache_name(prot->twsk_prot->twsk_slab);
+
+		kmem_cache_destroy(prot->twsk_prot->twsk_slab);
+		kfree(name);
+		prot->twsk_prot->twsk_slab = NULL;
+	}
 }
 
 EXPORT_SYMBOL(proto_unregister);
@@ -1559,8 +1859,8 @@ EXPORT_SYMBOL(sock_wfree);
 EXPORT_SYMBOL(sock_wmalloc);
 EXPORT_SYMBOL(sock_i_uid);
 EXPORT_SYMBOL(sock_i_ino);
-#ifdef CONFIG_SYSCTL
 EXPORT_SYMBOL(sysctl_optmem_max);
+#ifdef CONFIG_SYSCTL
 EXPORT_SYMBOL(sysctl_rmem_max);
 EXPORT_SYMBOL(sysctl_wmem_max);
 #endif
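
The SO_SNDBUFFORCE and SO_RCVBUFFORCE cases added by this diff let a process with CAP_NET_ADMIN request socket buffers above the sysctl_wmem_max / sysctl_rmem_max limits; unprivileged callers get -EPERM and can fall back to the ordinary, clamped SO_SNDBUF / SO_RCVBUF path. Below is a minimal userspace sketch (not part of the patch; it assumes a libc that exposes the SO_SNDBUFFORCE constant, and the buffer size and socket type are arbitrary illustration values) showing how the new option would be exercised:

/* Hypothetical example, error handling abbreviated. */
#include <stdio.h>
#include <sys/socket.h>

int main(void)
{
	int fd = socket(AF_INET, SOCK_DGRAM, 0);
	int val = 4 * 1024 * 1024;	/* requested send buffer, bytes */
	socklen_t len = sizeof(val);

	/* Privileged request: may exceed sysctl_wmem_max (needs CAP_NET_ADMIN). */
	if (setsockopt(fd, SOL_SOCKET, SO_SNDBUFFORCE, &val, sizeof(val)) < 0) {
		/* Expected to fail with EPERM for unprivileged callers;
		 * fall back to the clamped SO_SNDBUF request. */
		setsockopt(fd, SOL_SOCKET, SO_SNDBUF, &val, sizeof(val));
	}

	/* As the comment added to the SO_RCVBUF case explains, the kernel
	 * stores twice the requested value; getsockopt() reports the value
	 * actually in use. */
	getsockopt(fd, SOL_SOCKET, SO_SNDBUF, &val, &len);
	printf("effective SO_SNDBUF: %d\n", val);
	return 0;
}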