/*
 * Generic datagram handling routines. These are generic for all
 * protocols. Possibly a generic IP version on top of these would
 * make sense. Not tonight however 8-).
 * This is used because UDP, RAW, PACKET, DDP, IPX, AX.25 and
 * NetROM layers all have identical poll code and mostly
 * identical recvmsg() code. So we share it here. The poll was
 * shared before but buried in udp.c so I moved it.
 *
 * Authors: Alan Cox <alan@lxorguk.ukuu.org.uk>. (datagram_poll() from old
 *          udp.c code)
 *
 * Fixes:
 *  Alan Cox         : NULL return from skb_peek_copy()
 *                     understood
 *  Alan Cox         : Rewrote skb_read_datagram to avoid the
 *                     skb_peek_copy stuff.
 *  Alan Cox         : Added support for SOCK_SEQPACKET.
 *                     IPX can no longer use the SO_TYPE hack
 *                     but AX.25 now works right, and SPX is
 *                     feasible.
 *  Alan Cox         : Fixed write poll of non IP protocol
 *                     crash.
 *  Florian La Roche : Changed for my new skbuff handling.
 *  Darryl Miles     : Fixed non-blocking SOCK_SEQPACKET.
 *  Linus Torvalds   : BSD semantic fixes.
 *  Alan Cox         : Datagram iovec handling
 *  Darryl Miles     : Fixed non-blocking SOCK_STREAM.
 *  Alan Cox         : POSIXisms
 *  Pete Wyckoff     : Unconnected accept() fix.
 */
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <asm/uaccess.h>
#include <asm/system.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/rtnetlink.h>
#include <linux/poll.h>
#include <linux/highmem.h>
#include <linux/spinlock.h>

#include <net/protocol.h>
#include <linux/skbuff.h>

#include <net/checksum.h>
#include <net/sock.h>
#include <net/tcp_states.h>
#include <trace/events/skb.h>
/*
 * Is a socket 'connection oriented' ?
 */
static inline int connection_based(struct sock *sk)
{
        return sk->sk_type == SOCK_SEQPACKET || sk->sk_type == SOCK_STREAM;
}
static int receiver_wake_function(wait_queue_t *wait, unsigned mode, int sync,
                                  void *key)
{
        unsigned long bits = (unsigned long)key;

        /*
         * Avoid a wakeup if event not interesting for us
         */
        if (bits && !(bits & (POLLIN | POLLERR)))
                return 0;
        return autoremove_wake_function(wait, mode, sync, key);
}
/*
 * Wait for a packet..
 */
static int wait_for_packet(struct sock *sk, int *err, long *timeo_p)
{
        int error;
        DEFINE_WAIT_FUNC(wait, receiver_wake_function);

        prepare_to_wait_exclusive(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE);

        /* Socket errors? */
        error = sock_error(sk);
        if (error)
                goto out_err;

        if (!skb_queue_empty(&sk->sk_receive_queue))
                goto out;

        /* Socket shut down? */
        if (sk->sk_shutdown & RCV_SHUTDOWN)
                goto out_noerr;

        /* Sequenced packets can come disconnected.
         * If so we report the problem
         */
        error = -ENOTCONN;
        if (connection_based(sk) &&
            !(sk->sk_state == TCP_ESTABLISHED || sk->sk_state == TCP_LISTEN))
                goto out_err;

        /* handle signals */
        if (signal_pending(current))
                goto interrupted;

        error = 0;
        *timeo_p = schedule_timeout(*timeo_p);
out:
        finish_wait(sk->sk_sleep, &wait);
        return error;
interrupted:
        error = sock_intr_errno(*timeo_p);
out_err:
        *err = error;
        goto out;
out_noerr:
        *err = 0;
        error = 1;
        goto out;
}
/**
 * __skb_recv_datagram - Receive a datagram skbuff
 * @sk: socket
 * @flags: MSG_ flags
 * @peeked: returns non-zero if this packet has been seen before
 * @err: error code returned
 *
 * Get a datagram skbuff, understands the peeking, nonblocking wakeups
 * and possible races. This replaces identical code in packet, raw and
 * udp, as well as the IPX, AX.25 and Appletalk layers. It also finally
 * fixes the long standing peek and read race for datagram sockets.
 * If you alter this routine remember it must be re-entrant.
 *
 * This function will lock the socket if a skb is returned, so the caller
 * needs to unlock the socket in that case (usually by calling
 * skb_free_datagram).
 *
 * * It does not lock socket since today. This function is
 * * free of race conditions. This measure should/can improve
 * * significantly datagram socket latencies at high loads,
 * * when data copying to user space takes lots of time.
 * * (BTW I've just killed the last cli() in IP/IPv6/core/netlink/packet
 * * 8) Great win.)
 * *                                        --ANK (980729)
 *
 * The order of the tests when we find no data waiting is specified
 * quite explicitly by POSIX 1003.1g; don't change them without having
 * the standard around please.
 */
struct sk_buff *__skb_recv_datagram(struct sock *sk, unsigned flags,
                                    int *peeked, int *err)
{
        struct sk_buff *skb;
        long timeo;
        /*
         * Caller is allowed not to check sk->sk_err before skb_recv_datagram()
         */
        int error = sock_error(sk);

        if (error)
                goto no_packet;

        timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);

        do {
                /* Again only user level code calls this function, so nothing
                 * interrupt level will suddenly eat the receive_queue.
                 *
                 * Look at current nfs client by the way...
                 * However, this function was correct in any case. 8)
                 */
                unsigned long cpu_flags;

                spin_lock_irqsave(&sk->sk_receive_queue.lock, cpu_flags);
                skb = skb_peek(&sk->sk_receive_queue);
                if (skb) {
                        *peeked = skb->peeked;
                        if (flags & MSG_PEEK) {
                                skb->peeked = 1;
                                atomic_inc(&skb->users);
                        } else
                                __skb_unlink(skb, &sk->sk_receive_queue);
                }
                spin_unlock_irqrestore(&sk->sk_receive_queue.lock, cpu_flags);

                if (skb)
                        return skb;

                /* User doesn't want to wait */
                error = -EAGAIN;
                if (!timeo)
                        goto no_packet;

        } while (!wait_for_packet(sk, err, &timeo));

        return NULL;

no_packet:
        *err = error;
        return NULL;
}
EXPORT_SYMBOL(__skb_recv_datagram);
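/*
 * Usage sketch (illustrative, not part of this file): a datagram
 * protocol's recvmsg() typically pulls one skb, copies it out and frees
 * it. The "msg", "len" and "flags" names below are the usual recvmsg()
 * parameters, assumed here for illustration only:
 *
 *      int peeked, err;
 *      struct sk_buff *skb;
 *
 *      skb = __skb_recv_datagram(sk, flags, &peeked, &err);
 *      if (!skb)
 *              return err;
 *      err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, len);
 *      skb_free_datagram(sk, skb);
 *      return err ? err : len;
 */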
struct sk_buff *skb_recv_datagram(struct sock *sk, unsigned flags,
                                  int noblock, int *err)
{
        int peeked;

        return __skb_recv_datagram(sk, flags | (noblock ? MSG_DONTWAIT : 0),
                                   &peeked, err);
}
void skb_free_datagram(struct sock *sk, struct sk_buff *skb)
{
        consume_skb(skb);
        sk_mem_reclaim_partial(sk);
}
/**
 * skb_kill_datagram - Free a datagram skbuff forcibly
 * @sk: socket
 * @skb: datagram skbuff
 * @flags: MSG_ flags
 *
 * This function frees a datagram skbuff that was received by
 * skb_recv_datagram. The flags argument must match the one
 * used for skb_recv_datagram.
 *
 * If the MSG_PEEK flag is set, and the packet is still on the
 * receive queue of the socket, it will be taken off the queue
 * before it is freed.
 *
 * This function currently only disables BH when acquiring the
 * sk_receive_queue lock. Therefore it must not be used in a
 * context where that lock is acquired in an IRQ context.
 *
 * It returns 0 if the packet was removed by us.
 */
int skb_kill_datagram(struct sock *sk, struct sk_buff *skb, unsigned int flags)
{
        int err = 0;

        if (flags & MSG_PEEK) {
                err = -ENOENT;
                spin_lock_bh(&sk->sk_receive_queue.lock);
                if (skb == skb_peek(&sk->sk_receive_queue)) {
                        __skb_unlink(skb, &sk->sk_receive_queue);
                        atomic_dec(&skb->users);
                        err = 0;
                }
                spin_unlock_bh(&sk->sk_receive_queue.lock);
        }

        kfree_skb(skb);
        atomic_inc(&sk->sk_drops);
        sk_mem_reclaim_partial(sk);

        return err;
}
EXPORT_SYMBOL(skb_kill_datagram);
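/*
 * Usage sketch (hedged, not from this file): UDP-style receive paths use
 * skb_kill_datagram() to drop a datagram that failed its checksum,
 * passing the same flags recvmsg() was given so that a MSG_PEEK'ed
 * packet is also unlinked from the receive queue:
 *
 *      if (__skb_checksum_complete(skb)) {
 *              skb_kill_datagram(sk, skb, flags);
 *              return -EAGAIN;
 *      }
 */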
/**
 * skb_copy_datagram_iovec - Copy a datagram to an iovec.
 * @skb: buffer to copy
 * @offset: offset in the buffer to start copying from
 * @to: io vector to copy to
 * @len: amount of data to copy from buffer to iovec
 *
 * Note: the iovec is modified during the copy.
 */
int skb_copy_datagram_iovec(const struct sk_buff *skb, int offset,
                            struct iovec *to, int len)
{
        int start = skb_headlen(skb);
        int i, copy = start - offset;
        struct sk_buff *frag_iter;

        trace_skb_copy_datagram_iovec(skb, len);

        /* Copy header. */
        if (copy > 0) {
                if (copy > len)
                        copy = len;
                if (memcpy_toiovec(to, skb->data + offset, copy))
                        goto fault;
                if ((len -= copy) == 0)
                        return 0;
                offset += copy;
        }

        /* Copy paged appendix. Hmm... why does this look so complicated? */
        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
                int end;

                WARN_ON(start > offset + len);

                end = start + skb_shinfo(skb)->frags[i].size;
                if ((copy = end - offset) > 0) {
                        int err;
                        u8  *vaddr;
                        skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
                        struct page *page = frag->page;

                        if (copy > len)
                                copy = len;
                        vaddr = kmap(page);
                        err = memcpy_toiovec(to, vaddr + frag->page_offset +
                                             offset - start, copy);
                        kunmap(page);
                        if (err)
                                goto fault;
                        if (!(len -= copy))
                                return 0;
                        offset += copy;
                }
                start = end;
        }

        skb_walk_frags(skb, frag_iter) {
                int end;

                WARN_ON(start > offset + len);

                end = start + frag_iter->len;
                if ((copy = end - offset) > 0) {
                        if (copy > len)
                                copy = len;
                        if (skb_copy_datagram_iovec(frag_iter,
                                                    offset - start,
                                                    to, copy))
                                goto fault;
                        if ((len -= copy) == 0)
                                return 0;
                        offset += copy;
                }
                start = end;
        }
        if (!len)
                return 0;

fault:
        return -EFAULT;
}
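/*
 * Example (sketch under assumed names): the offset argument lets callers
 * skip a transport header that userspace should not see, e.g. a UDP-like
 * recvmsg() copying only the payload, where "copied" is the number of
 * payload bytes to transfer:
 *
 *      err = skb_copy_datagram_iovec(skb, sizeof(struct udphdr),
 *                                    msg->msg_iov, copied);
 */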
/**
 * skb_copy_datagram_const_iovec - Copy a datagram to an iovec.
 * @skb: buffer to copy
 * @offset: offset in the buffer to start copying from
 * @to: io vector to copy to
 * @to_offset: offset in the io vector to start copying to
 * @len: amount of data to copy from buffer to iovec
 *
 * Returns 0 or -EFAULT.
 * Note: the iovec is not modified during the copy.
 */
int skb_copy_datagram_const_iovec(const struct sk_buff *skb, int offset,
                                  const struct iovec *to, int to_offset,
                                  int len)
{
        int start = skb_headlen(skb);
        int i, copy = start - offset;
        struct sk_buff *frag_iter;

        /* Copy header. */
        if (copy > 0) {
                if (copy > len)
                        copy = len;
                if (memcpy_toiovecend(to, skb->data + offset, to_offset, copy))
                        goto fault;
                if ((len -= copy) == 0)
                        return 0;
                offset += copy;
                to_offset += copy;
        }

        /* Copy paged appendix. Hmm... why does this look so complicated? */
        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
                int end;

                WARN_ON(start > offset + len);

                end = start + skb_shinfo(skb)->frags[i].size;
                if ((copy = end - offset) > 0) {
                        int err;
                        u8  *vaddr;
                        skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
                        struct page *page = frag->page;

                        if (copy > len)
                                copy = len;
                        vaddr = kmap(page);
                        err = memcpy_toiovecend(to, vaddr + frag->page_offset +
                                                offset - start, to_offset, copy);
                        kunmap(page);
                        if (err)
                                goto fault;
                        if (!(len -= copy))
                                return 0;
                        offset += copy;
                        to_offset += copy;
                }
                start = end;
        }

        skb_walk_frags(skb, frag_iter) {
                int end;

                WARN_ON(start > offset + len);

                end = start + frag_iter->len;
                if ((copy = end - offset) > 0) {
                        if (copy > len)
                                copy = len;
                        if (skb_copy_datagram_const_iovec(frag_iter,
                                                          offset - start,
                                                          to, to_offset,
                                                          copy))
                                goto fault;
                        if ((len -= copy) == 0)
                                return 0;
                        offset += copy;
                        to_offset += copy;
                }
                start = end;
        }
        if (!len)
                return 0;

fault:
        return -EFAULT;
}
EXPORT_SYMBOL(skb_copy_datagram_const_iovec);
/**
 * skb_copy_datagram_from_iovec - Copy a datagram from an iovec.
 * @skb: buffer to copy
 * @offset: offset in the buffer to start copying to
 * @from: io vector to copy from
 * @from_offset: offset in the io vector to start copying from
 * @len: amount of data to copy to buffer from iovec
 *
 * Returns 0 or -EFAULT.
 * Note: the iovec is not modified during the copy.
 */
int skb_copy_datagram_from_iovec(struct sk_buff *skb, int offset,
                                 const struct iovec *from, int from_offset,
                                 int len)
{
        int start = skb_headlen(skb);
        int i, copy = start - offset;
        struct sk_buff *frag_iter;

        /* Copy header. */
        if (copy > 0) {
                if (copy > len)
                        copy = len;
                if (memcpy_fromiovecend(skb->data + offset, from, from_offset,
                                        copy))
                        goto fault;
                if ((len -= copy) == 0)
                        return 0;
                offset += copy;
                from_offset += copy;
        }

        /* Copy paged appendix. Hmm... why does this look so complicated? */
        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
                int end;

                WARN_ON(start > offset + len);

                end = start + skb_shinfo(skb)->frags[i].size;
                if ((copy = end - offset) > 0) {
                        int err;
                        u8  *vaddr;
                        skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
                        struct page *page = frag->page;

                        if (copy > len)
                                copy = len;
                        vaddr = kmap(page);
                        err = memcpy_fromiovecend(vaddr + frag->page_offset +
                                                  offset - start,
                                                  from, from_offset, copy);
                        kunmap(page);
                        if (err)
                                goto fault;

                        if (!(len -= copy))
                                return 0;
                        offset += copy;
                        from_offset += copy;
                }
                start = end;
        }

        skb_walk_frags(skb, frag_iter) {
                int end;

                WARN_ON(start > offset + len);

                end = start + frag_iter->len;
                if ((copy = end - offset) > 0) {
                        if (copy > len)
                                copy = len;
                        if (skb_copy_datagram_from_iovec(frag_iter,
                                                         offset - start,
                                                         from, from_offset,
                                                         copy))
                                goto fault;
                        if ((len -= copy) == 0)
                                return 0;
                        offset += copy;
                        from_offset += copy;
                }
                start = end;
        }
        if (!len)
                return 0;

fault:
        return -EFAULT;
}
EXPORT_SYMBOL(skb_copy_datagram_from_iovec);
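/*
 * Usage sketch (assumption, not from this file): transmit paths that have
 * already allocated an skb of the right size can fill it straight from
 * the sender's iovec, as tun/packet-style code does:
 *
 *      if (skb_copy_datagram_from_iovec(skb, 0, iv, 0, len)) {
 *              kfree_skb(skb);
 *              return -EFAULT;
 *      }
 */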
static int skb_copy_and_csum_datagram(const struct sk_buff *skb, int offset,
                                      u8 __user *to, int len,
                                      __wsum *csump)
{
        int start = skb_headlen(skb);
        int i, copy = start - offset;
        struct sk_buff *frag_iter;
        int pos = 0;

        /* Copy header. */
        if (copy > 0) {
                int err = 0;
                if (copy > len)
                        copy = len;
                *csump = csum_and_copy_to_user(skb->data + offset, to, copy,
                                               *csump, &err);
                if (err)
                        goto fault;
                if ((len -= copy) == 0)
                        return 0;
                offset += copy;
                to += copy;
                pos = copy;
        }

        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
                int end;

                WARN_ON(start > offset + len);

                end = start + skb_shinfo(skb)->frags[i].size;
                if ((copy = end - offset) > 0) {
                        __wsum csum2;
                        int err = 0;
                        u8  *vaddr;
                        skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
                        struct page *page = frag->page;

                        if (copy > len)
                                copy = len;
                        vaddr = kmap(page);
                        csum2 = csum_and_copy_to_user(vaddr +
                                                      frag->page_offset +
                                                      offset - start,
                                                      to, copy, 0, &err);
                        kunmap(page);
                        if (err)
                                goto fault;
                        *csump = csum_block_add(*csump, csum2, pos);
                        if (!(len -= copy))
                                return 0;
                        offset += copy;
                        to += copy;
                        pos += copy;
                }
                start = end;
        }

        skb_walk_frags(skb, frag_iter) {
                int end;

                WARN_ON(start > offset + len);

                end = start + frag_iter->len;
                if ((copy = end - offset) > 0) {
                        __wsum csum2 = 0;
                        if (copy > len)
                                copy = len;
                        if (skb_copy_and_csum_datagram(frag_iter,
                                                       offset - start,
                                                       to, copy,
                                                       &csum2))
                                goto fault;
                        *csump = csum_block_add(*csump, csum2, pos);
                        if ((len -= copy) == 0)
                                return 0;
                        offset += copy;
                        to += copy;
                        pos += copy;
                }
                start = end;
        }
        if (!len)
                return 0;

fault:
        return -EFAULT;
}
__sum16 __skb_checksum_complete_head(struct sk_buff *skb, int len)
{
        __sum16 sum;

        sum = csum_fold(skb_checksum(skb, 0, len, skb->csum));
        if (likely(!sum)) {
                if (unlikely(skb->ip_summed == CHECKSUM_COMPLETE))
                        netdev_rx_csum_fault(skb->dev);
                skb->ip_summed = CHECKSUM_UNNECESSARY;
        }
        return sum;
}
EXPORT_SYMBOL(__skb_checksum_complete_head);

__sum16 __skb_checksum_complete(struct sk_buff *skb)
{
        return __skb_checksum_complete_head(skb, skb->len);
}
EXPORT_SYMBOL(__skb_checksum_complete);
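/*
 * Usage sketch (hedged): a receive path only needs the full fold when the
 * device has not already verified the packet, so callers typically gate
 * it on skb_csum_unnecessary():
 *
 *      if (!skb_csum_unnecessary(skb) && __skb_checksum_complete(skb))
 *              goto csum_error;
 */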
/**
 * skb_copy_and_csum_datagram_iovec - Copy and checksum skb to user iovec.
 * @skb: skbuff
 * @hlen: hardware length
 * @iov: io vector
 *
 * Caller _must_ check that skb will fit to this iovec.
 *
 * Returns: 0       - success.
 *          -EINVAL - checksum failure.
 *          -EFAULT - fault during copy. Beware, in this case iovec
 *                    can be modified!
 */
int skb_copy_and_csum_datagram_iovec(struct sk_buff *skb,
                                     int hlen, struct iovec *iov)
{
        __wsum csum;
        int chunk = skb->len - hlen;

        if (!chunk)
                return 0;

        /* Skip filled elements.
         * Pretty silly, look at memcpy_toiovec, though 8)
         */
        while (!iov->iov_len)
                iov++;

        if (iov->iov_len < chunk) {
                if (__skb_checksum_complete(skb))
                        goto csum_error;
                if (skb_copy_datagram_iovec(skb, hlen, iov, chunk))
                        goto fault;
        } else {
                csum = csum_partial(skb->data, hlen, skb->csum);
                if (skb_copy_and_csum_datagram(skb, hlen, iov->iov_base,
                                               chunk, &csum))
                        goto fault;
                if (csum_fold(csum))
                        goto csum_error;
                if (unlikely(skb->ip_summed == CHECKSUM_COMPLETE))
                        netdev_rx_csum_fault(skb->dev);
                iov->iov_len -= chunk;
                iov->iov_base += chunk;
        }
        return 0;
csum_error:
        return -EINVAL;
fault:
        return -EFAULT;
}
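/*
 * Usage sketch (illustrative recvmsg() fragment, names assumed): callers
 * choose between a plain copy and a copy-and-verify depending on whether
 * the checksum is already known good:
 *
 *      if (skb_csum_unnecessary(skb))
 *              err = skb_copy_datagram_iovec(skb, sizeof(struct udphdr),
 *                                            msg->msg_iov, copied);
 *      else
 *              err = skb_copy_and_csum_datagram_iovec(skb,
 *                                                     sizeof(struct udphdr),
 *                                                     msg->msg_iov);
 */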
/**
 * datagram_poll - generic datagram poll
 * @file: file struct
 * @sock: socket
 * @wait: poll table
 *
 * Datagram poll: Again totally generic. This also handles
 * sequenced packet sockets providing the socket receive queue
 * is only ever holding data ready to receive.
 *
 * Note: when you _don't_ use this routine for this protocol,
 * and you use a different write policy from sock_writeable()
 * then please supply your own write_space callback.
 */
unsigned int datagram_poll(struct file *file, struct socket *sock,
                           poll_table *wait)
{
        struct sock *sk = sock->sk;
        unsigned int mask;

        sock_poll_wait(file, sk->sk_sleep, wait);
        mask = 0;

        /* exceptional events? */
        if (sk->sk_err || !skb_queue_empty(&sk->sk_error_queue))
                mask |= POLLERR;
        if (sk->sk_shutdown & RCV_SHUTDOWN)
                mask |= POLLRDHUP;
        if (sk->sk_shutdown == SHUTDOWN_MASK)
                mask |= POLLHUP;

        /* readable? */
        if (!skb_queue_empty(&sk->sk_receive_queue) ||
            (sk->sk_shutdown & RCV_SHUTDOWN))
                mask |= POLLIN | POLLRDNORM;

        /* Connection-based need to check for termination and startup */
        if (connection_based(sk)) {
                if (sk->sk_state == TCP_CLOSE)
                        mask |= POLLHUP;
                /* connection hasn't started yet? */
                if (sk->sk_state == TCP_SYN_SENT)
                        return mask;
        }

        /* writable? */
        if (sock_writeable(sk))
                mask |= POLLOUT | POLLWRNORM | POLLWRBAND;
        else
                set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);

        return mask;
}
EXPORT_SYMBOL(datagram_poll);
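/*
 * Usage sketch (hypothetical ops table): datagram_poll() is meant to be
 * plugged directly into a protocol's proto_ops, for example:
 *
 *      static const struct proto_ops my_dgram_ops = {
 *              .family         = PF_INET,
 *              .poll           = datagram_poll,
 *              .recvmsg        = my_recvmsg,
 *      };
 */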
EXPORT_SYMBOL(skb_copy_and_csum_datagram_iovec);
EXPORT_SYMBOL(skb_copy_datagram_iovec);
EXPORT_SYMBOL(skb_free_datagram);
EXPORT_SYMBOL(skb_recv_datagram);