/*
 *	Generic datagram handling routines. These are generic for all
 *	protocols. Possibly a generic IP version on top of these would
 *	make sense. Not tonight however 8-).
 *	This is used because UDP, RAW, PACKET, DDP, IPX, AX.25 and
 *	NetROM layers all have identical poll code and mostly
 *	identical recvmsg() code. So we share it here. The poll was
 *	shared before, but buried in udp.c so I moved it.
 *
 *	Authors:	Alan Cox <alan@lxorguk.ukuu.org.uk>. (datagram_poll() from old
 *			udp.c code)
 *
 *	Fixes:
 *		Alan Cox	:	NULL return from skb_peek_copy()
 *					understood
 *		Alan Cox	:	Rewrote skb_read_datagram to avoid the
 *					skb_peek_copy stuff.
 *		Alan Cox	:	Added support for SOCK_SEQPACKET.
 *					IPX can no longer use the SO_TYPE hack,
 *					but AX.25 now works right, and SPX is
 *					feasible.
 *		Alan Cox	:	Fixed write poll of non-IP protocol
 *					crash.
 *		Florian La Roche:	Changed for my new skbuff handling.
 *		Darryl Miles	:	Fixed non-blocking SOCK_SEQPACKET.
 *		Linus Torvalds	:	BSD semantic fixes.
 *		Alan Cox	:	Datagram iovec handling
 *		Darryl Miles	:	Fixed non-blocking SOCK_STREAM.
 *		Alan Cox	:	POSIXisms
 *		Pete Wyckoff	:	Unconnected accept() fix.
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <asm/uaccess.h>
#include <asm/system.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/rtnetlink.h>
#include <linux/poll.h>
#include <linux/highmem.h>
#include <linux/spinlock.h>

#include <net/protocol.h>
#include <linux/skbuff.h>

#include <net/checksum.h>
#include <net/sock.h>
#include <net/tcp_states.h>

/*
 *	Is a socket 'connection oriented' ?
 */
static inline int connection_based(struct sock *sk)
{
	return sk->sk_type == SOCK_SEQPACKET || sk->sk_type == SOCK_STREAM;
}

static int receiver_wake_function(wait_queue_t *wait, unsigned mode, int sync,
				  void *key)
{
	unsigned long bits = (unsigned long)key;

	/*
	 * Avoid a wakeup if event not interesting for us
	 */
	if (bits && !(bits & (POLLIN | POLLERR)))
		return 0;
	return autoremove_wake_function(wait, mode, sync, key);
}

/*
 * Wait for a packet..
 */
static int wait_for_packet(struct sock *sk, int *err, long *timeo_p)
{
	int error;
	DEFINE_WAIT_FUNC(wait, receiver_wake_function);

	prepare_to_wait_exclusive(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE);

	/* Socket errors? */
	error = sock_error(sk);
	if (error)
		goto out_err;

	if (!skb_queue_empty(&sk->sk_receive_queue))
		goto out;

	/* Socket shut down? */
	if (sk->sk_shutdown & RCV_SHUTDOWN)
		goto out_noerr;

	/* Sequenced packets can come disconnected.
	 * If so we report the problem.
	 */
	error = -ENOTCONN;
	if (connection_based(sk) &&
	    !(sk->sk_state == TCP_ESTABLISHED || sk->sk_state == TCP_LISTEN))
		goto out_err;

	/* handle signals */
	if (signal_pending(current))
		goto interrupted;

	error = 0;
	*timeo_p = schedule_timeout(*timeo_p);
out:
	finish_wait(sk->sk_sleep, &wait);
	return error;
interrupted:
	error = sock_intr_errno(*timeo_p);
out_err:
	*err = error;
	goto out;
out_noerr:
	*err = 0;
	error = 1;
	goto out;
}

/**
 *	__skb_recv_datagram - Receive a datagram skbuff
 *	@sk: socket
 *	@flags: MSG_ flags
 *	@peeked: returns non-zero if this packet has been seen before
 *	@err: error code returned
 *
 *	Get a datagram skbuff, understands the peeking, nonblocking wakeups
 *	and possible races. This replaces identical code in packet, raw and
 *	udp, as well as IPX, AX.25 and AppleTalk. It also finally fixes
 *	the long standing peek and read race for datagram sockets. If you
 *	alter this routine remember it must be re-entrant.
 *
 *	This function will lock the socket if an skb is returned, so the caller
 *	needs to unlock the socket in that case (usually by calling
 *	skb_free_datagram).
 *
 *	* It does not lock the socket today; this function is
 *	* free of race conditions. This measure should/can improve
 *	* datagram socket latencies significantly at high loads,
 *	* when copying data to user space takes lots of time.
 *	* (BTW I've just killed the last cli() in IP/IPv6/core/netlink/packet
 *	*  8) Great win.)
 *	*					--ANK (980729)
 *
 *	The order of the tests when we find no data waiting is specified
 *	quite explicitly by POSIX 1003.1g; don't change them without having
 *	the standard around please.
 */
struct sk_buff *__skb_recv_datagram(struct sock *sk, unsigned flags,
				    int *peeked, int *err)
{
	struct sk_buff *skb;
	long timeo;
	/*
	 * Caller is allowed not to check sk->sk_err before skb_recv_datagram()
	 */
	int error = sock_error(sk);

	if (error)
		goto no_packet;

	timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);

	do {
		/* Again only user level code calls this function, so nothing
		 * interrupt level will suddenly eat the receive_queue.
		 *
		 * Look at current nfs client by the way...
		 * However, this function was correct in any case. 8)
		 */
		unsigned long cpu_flags;

		spin_lock_irqsave(&sk->sk_receive_queue.lock, cpu_flags);
		skb = skb_peek(&sk->sk_receive_queue);
		if (skb) {
			*peeked = skb->peeked;
			if (flags & MSG_PEEK) {
				skb->peeked = 1;
				atomic_inc(&skb->users);
			} else
				__skb_unlink(skb, &sk->sk_receive_queue);
		}
		spin_unlock_irqrestore(&sk->sk_receive_queue.lock, cpu_flags);

		if (skb)
			return skb;

		/* User doesn't want to wait */
		error = -EAGAIN;
		if (!timeo)
			goto no_packet;

	} while (!wait_for_packet(sk, err, &timeo));

	return NULL;

no_packet:
	*err = error;
	return NULL;
}
EXPORT_SYMBOL(__skb_recv_datagram);

struct sk_buff *skb_recv_datagram(struct sock *sk, unsigned flags,
				  int noblock, int *err)
{
	int peeked;

	return __skb_recv_datagram(sk, flags | (noblock ? MSG_DONTWAIT : 0),
				   &peeked, err);
}
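
/*
 * Editor's sketch (not part of the original file): the shape of a typical
 * protocol recvmsg() built on the two receive helpers above, in the style
 * of udp/raw. The function name and the error handling details are
 * illustrative assumptions only.
 */
static int example_dgram_recvmsg(struct kiocb *iocb, struct socket *sock,
				 struct msghdr *msg, size_t size, int flags)
{
	struct sock *sk = sock->sk;
	struct sk_buff *skb;
	int peeked, err, copied;

	skb = __skb_recv_datagram(sk, flags, &peeked, &err);
	if (!skb)
		return err;

	copied = skb->len;
	if (copied > size) {
		copied = size;			/* short read */
		msg->msg_flags |= MSG_TRUNC;
	}

	err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);

	/* Drops our reference whether the skb was dequeued or merely
	 * peeked, and reclaims receive memory accounting.
	 */
	skb_free_datagram(sk, skb);
	return err ? err : copied;
}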

void skb_free_datagram(struct sock *sk, struct sk_buff *skb)
{
	kfree_skb(skb);
	sk_mem_reclaim_partial(sk);
}

/**
 *	skb_kill_datagram - Free a datagram skbuff forcibly
 *	@sk: socket
 *	@skb: datagram skbuff
 *	@flags: MSG_ flags
 *
 *	This function frees a datagram skbuff that was received by
 *	skb_recv_datagram. The flags argument must match the one
 *	used for skb_recv_datagram.
 *
 *	If the MSG_PEEK flag is set, and the packet is still on the
 *	receive queue of the socket, it will be taken off the queue
 *	before it is freed.
 *
 *	This function currently only disables BH when acquiring the
 *	sk_receive_queue lock. Therefore it must not be used in a
 *	context where that lock is acquired in an IRQ context.
 *
 *	It returns 0 if the packet was removed by us.
 */
int skb_kill_datagram(struct sock *sk, struct sk_buff *skb, unsigned int flags)
{
	int err = 0;

	if (flags & MSG_PEEK) {
		err = -ENOENT;
		spin_lock_bh(&sk->sk_receive_queue.lock);
		if (skb == skb_peek(&sk->sk_receive_queue)) {
			__skb_unlink(skb, &sk->sk_receive_queue);
			atomic_dec(&skb->users);
			err = 0;
		}
		spin_unlock_bh(&sk->sk_receive_queue.lock);
	}

	kfree_skb(skb);
	sk_mem_reclaim_partial(sk);

	return err;
}
EXPORT_SYMBOL(skb_kill_datagram);
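
/*
 * Editor's sketch (not part of the original file): the intended caller
 * pattern for skb_kill_datagram(), modelled on udp_recvmsg(): a datagram is
 * peeked first and killed if its checksum turns out to be bad. The name and
 * the -EAGAIN retry convention are illustrative assumptions.
 */
static int example_peek_and_verify(struct sock *sk, unsigned int flags,
				   struct sk_buff **skbp)
{
	int peeked, err;
	struct sk_buff *skb;

	skb = __skb_recv_datagram(sk, flags | MSG_PEEK, &peeked, &err);
	if (!skb)
		return err;

	if (__skb_checksum_complete(skb)) {
		/* Bad checksum: take the peeked skb off the queue (if it
		 * is still at the head) and free it, then ask the caller
		 * to retry with the next datagram.
		 */
		skb_kill_datagram(sk, skb, flags | MSG_PEEK);
		return -EAGAIN;
	}

	*skbp = skb;
	return 0;
}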

/**
 *	skb_copy_datagram_iovec - Copy a datagram to an iovec.
 *	@skb: buffer to copy
 *	@offset: offset in the buffer to start copying from
 *	@to: io vector to copy to
 *	@len: amount of data to copy from buffer to iovec
 *
 *	Note: the iovec is modified during the copy.
 */
int skb_copy_datagram_iovec(const struct sk_buff *skb, int offset,
			    struct iovec *to, int len)
{
	int start = skb_headlen(skb);
	int i, copy = start - offset;

	/* Copy header. */
	if (copy > 0) {
		if (copy > len)
			copy = len;
		if (memcpy_toiovec(to, skb->data + offset, copy))
			goto fault;
		if ((len -= copy) == 0)
			return 0;
		offset += copy;
	}

	/* Copy paged appendix. Hmm... why does this look so complicated? */
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		int end;

		WARN_ON(start > offset + len);

		end = start + skb_shinfo(skb)->frags[i].size;
		if ((copy = end - offset) > 0) {
			int err;
			u8  *vaddr;
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
			struct page *page = frag->page;

			if (copy > len)
				copy = len;
			vaddr = kmap(page);
			err = memcpy_toiovec(to, vaddr + frag->page_offset +
					     offset - start, copy);
			kunmap(page);
			if (err)
				goto fault;
			if (!(len -= copy))
				return 0;
			offset += copy;
		}
		start = end;
	}

	if (skb_shinfo(skb)->frag_list) {
		struct sk_buff *list = skb_shinfo(skb)->frag_list;

		for (; list; list = list->next) {
			int end;

			WARN_ON(start > offset + len);

			end = start + list->len;
			if ((copy = end - offset) > 0) {
				if (copy > len)
					copy = len;
				if (skb_copy_datagram_iovec(list,
							    offset - start,
							    to, copy))
					goto fault;
				if ((len -= copy) == 0)
					return 0;
				offset += copy;
			}
			start = end;
		}
	}
	if (!len)
		return 0;

fault:
	return -EFAULT;
}

/**
 *	skb_copy_datagram_const_iovec - Copy a datagram to an iovec.
 *	@skb: buffer to copy
 *	@offset: offset in the buffer to start copying from
 *	@to: io vector to copy to
 *	@to_offset: offset in the io vector to start copying to
 *	@len: amount of data to copy from buffer to iovec
 *
 *	Returns 0 or -EFAULT.
 *	Note: the iovec is not modified during the copy.
 */
int skb_copy_datagram_const_iovec(const struct sk_buff *skb, int offset,
				  const struct iovec *to, int to_offset,
				  int len)
{
	int start = skb_headlen(skb);
	int i, copy = start - offset;

	/* Copy header. */
	if (copy > 0) {
		if (copy > len)
			copy = len;
		if (memcpy_toiovecend(to, skb->data + offset, to_offset, copy))
			goto fault;
		if ((len -= copy) == 0)
			return 0;
		offset += copy;
		to_offset += copy;
	}

	/* Copy paged appendix. Hmm... why does this look so complicated? */
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		int end;

		WARN_ON(start > offset + len);

		end = start + skb_shinfo(skb)->frags[i].size;
		if ((copy = end - offset) > 0) {
			int err;
			u8  *vaddr;
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
			struct page *page = frag->page;

			if (copy > len)
				copy = len;
			vaddr = kmap(page);
			err = memcpy_toiovecend(to, vaddr + frag->page_offset +
						offset - start, to_offset, copy);
			kunmap(page);
			if (err)
				goto fault;
			if (!(len -= copy))
				return 0;
			offset += copy;
			to_offset += copy;
		}
		start = end;
	}

	if (skb_shinfo(skb)->frag_list) {
		struct sk_buff *list = skb_shinfo(skb)->frag_list;

		for (; list; list = list->next) {
			int end;

			WARN_ON(start > offset + len);

			end = start + list->len;
			if ((copy = end - offset) > 0) {
				if (copy > len)
					copy = len;
				if (skb_copy_datagram_const_iovec(list,
								  offset - start,
								  to, to_offset,
								  copy))
					goto fault;
				if ((len -= copy) == 0)
					return 0;
				offset += copy;
				to_offset += copy;
			}
			start = end;
		}
	}
	if (!len)
		return 0;

fault:
	return -EFAULT;
}
EXPORT_SYMBOL(skb_copy_datagram_const_iovec);

/**
 *	skb_copy_datagram_from_iovec - Copy a datagram from an iovec.
 *	@skb: buffer to copy
 *	@offset: offset in the buffer to start copying to
 *	@from: io vector to copy from
 *	@from_offset: offset in the io vector to start copying from
 *	@len: amount of data to copy to buffer from iovec
 *
 *	Returns 0 or -EFAULT.
 *	Note: the iovec is not modified during the copy.
 */
int skb_copy_datagram_from_iovec(struct sk_buff *skb, int offset,
				 const struct iovec *from, int from_offset,
				 int len)
{
	int start = skb_headlen(skb);
	int i, copy = start - offset;

	/* Copy header. */
	if (copy > 0) {
		if (copy > len)
			copy = len;
		if (memcpy_fromiovecend(skb->data + offset, from, from_offset,
					copy))
			goto fault;
		if ((len -= copy) == 0)
			return 0;
		offset += copy;
		from_offset += copy;
	}

	/* Copy paged appendix. Hmm... why does this look so complicated? */
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		int end;

		WARN_ON(start > offset + len);

		end = start + skb_shinfo(skb)->frags[i].size;
		if ((copy = end - offset) > 0) {
			int err;
			u8  *vaddr;
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
			struct page *page = frag->page;

			if (copy > len)
				copy = len;
			vaddr = kmap(page);
			err = memcpy_fromiovecend(vaddr + frag->page_offset +
						  offset - start,
						  from, from_offset, copy);
			kunmap(page);
			if (err)
				goto fault;
			if (!(len -= copy))
				return 0;
			offset += copy;
			from_offset += copy;
		}
		start = end;
	}

	if (skb_shinfo(skb)->frag_list) {
		struct sk_buff *list = skb_shinfo(skb)->frag_list;

		for (; list; list = list->next) {
			int end;

			WARN_ON(start > offset + len);

			end = start + list->len;
			if ((copy = end - offset) > 0) {
				if (copy > len)
					copy = len;
				if (skb_copy_datagram_from_iovec(list,
								 offset - start,
								 from,
								 from_offset,
								 copy))
					goto fault;
				if ((len -= copy) == 0)
					return 0;
				offset += copy;
				from_offset += copy;
			}
			start = end;
		}
	}
	if (!len)
		return 0;

fault:
	return -EFAULT;
}
EXPORT_SYMBOL(skb_copy_datagram_from_iovec);
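
/*
 * Editor's sketch (not part of the original file): unlike
 * skb_copy_datagram_iovec(), the *_const_iovec / *_from_iovec variants above
 * never advance the iovec; the caller threads an explicit offset instead.
 * The sketch shows why that matters for a tun/vhost style user that consumes
 * a header and a payload from the same user iovec; names are illustrative.
 */
static int example_fill_from_user(struct sk_buff *skb,
				  const struct iovec *iov, int hdr_len)
{
	/* The first hdr_len bytes of the iovec were parsed elsewhere;
	 * copy the packet payload that follows them. Because the iovec
	 * is left untouched, it could be walked again on error.
	 */
	return skb_copy_datagram_from_iovec(skb, 0, iov, hdr_len, skb->len);
}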

static int skb_copy_and_csum_datagram(const struct sk_buff *skb, int offset,
				      u8 __user *to, int len,
				      __wsum *csump)
{
	int start = skb_headlen(skb);
	int pos = 0;
	int i, copy = start - offset;

	/* Copy header. */
	if (copy > 0) {
		int err = 0;
		if (copy > len)
			copy = len;
		*csump = csum_and_copy_to_user(skb->data + offset, to, copy,
					       *csump, &err);
		if (err)
			goto fault;
		if ((len -= copy) == 0)
			return 0;
		offset += copy;
		to += copy;
		pos = copy;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		int end;

		WARN_ON(start > offset + len);

		end = start + skb_shinfo(skb)->frags[i].size;
		if ((copy = end - offset) > 0) {
			__wsum csum2;
			int err = 0;
			u8  *vaddr;
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
			struct page *page = frag->page;

			if (copy > len)
				copy = len;
			vaddr = kmap(page);
			csum2 = csum_and_copy_to_user(vaddr +
							frag->page_offset +
							offset - start,
						      to, copy, 0, &err);
			kunmap(page);
			if (err)
				goto fault;
			*csump = csum_block_add(*csump, csum2, pos);
			if (!(len -= copy))
				return 0;
			offset += copy;
			to += copy;
			pos += copy;
		}
		start = end;
	}

	if (skb_shinfo(skb)->frag_list) {
		struct sk_buff *list = skb_shinfo(skb)->frag_list;

		for (; list; list = list->next) {
			int end;

			WARN_ON(start > offset + len);

			end = start + list->len;
			if ((copy = end - offset) > 0) {
				__wsum csum2 = 0;
				if (copy > len)
					copy = len;
				if (skb_copy_and_csum_datagram(list,
							       offset - start,
							       to, copy,
							       &csum2))
					goto fault;
				*csump = csum_block_add(*csump, csum2, pos);
				if ((len -= copy) == 0)
					return 0;
				offset += copy;
				to += copy;
				pos += copy;
			}
			start = end;
		}
	}
	if (!len)
		return 0;

fault:
	return -EFAULT;
}

__sum16 __skb_checksum_complete_head(struct sk_buff *skb, int len)
{
	__sum16 sum;

	sum = csum_fold(skb_checksum(skb, 0, len, skb->csum));
	if (likely(!sum)) {
		if (unlikely(skb->ip_summed == CHECKSUM_COMPLETE))
			netdev_rx_csum_fault(skb->dev);
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	}
	return sum;
}
EXPORT_SYMBOL(__skb_checksum_complete_head);

__sum16 __skb_checksum_complete(struct sk_buff *skb)
{
	return __skb_checksum_complete_head(skb, skb->len);
}
EXPORT_SYMBOL(__skb_checksum_complete);
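
/*
 * Editor's sketch (not part of the original file): how a receive path
 * typically uses the helpers above. Hardware-verified packets are accepted
 * as-is, anything else gets a full software checksum; skb_csum_unnecessary()
 * is the usual wrapper for the first test.
 */
static inline int example_csum_ok(struct sk_buff *skb)
{
	if (skb->ip_summed == CHECKSUM_UNNECESSARY)
		return 1;
	/* Returns the folded checksum, which is 0 when the packet is OK;
	 * it also latches CHECKSUM_UNNECESSARY for later callers.
	 */
	return __skb_checksum_complete(skb) == 0;
}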

/**
 *	skb_copy_and_csum_datagram_iovec - Copy and checksum skb to user iovec.
 *	@skb: skbuff
 *	@hlen: hardware length
 *	@iov: io vector
 *
 *	Caller _must_ check that skb will fit to this iovec.
 *
 *	Returns: 0       - success.
 *		 -EINVAL - checksum failure.
 *		 -EFAULT - fault during copy. Beware, in this case iovec
 *			   can be modified!
 */
int skb_copy_and_csum_datagram_iovec(struct sk_buff *skb,
				     int hlen, struct iovec *iov)
{
	__wsum csum;
	int chunk = skb->len - hlen;

	if (!chunk)
		return 0;

	/* Skip filled elements.
	 * Pretty silly, look at memcpy_toiovec, though 8)
	 */
	while (!iov->iov_len)
		iov++;

	if (iov->iov_len < chunk) {
		if (__skb_checksum_complete(skb))
			goto csum_error;
		if (skb_copy_datagram_iovec(skb, hlen, iov, chunk))
			goto fault;
	} else {
		csum = csum_partial(skb->data, hlen, skb->csum);
		if (skb_copy_and_csum_datagram(skb, hlen, iov->iov_base,
					       chunk, &csum))
			goto fault;
		if (csum_fold(csum))
			goto csum_error;
		if (unlikely(skb->ip_summed == CHECKSUM_COMPLETE))
			netdev_rx_csum_fault(skb->dev);
		iov->iov_len -= chunk;
		iov->iov_base += chunk;
	}
	return 0;
csum_error:
	return -EINVAL;
fault:
	return -EFAULT;
}
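
/*
 * Editor's sketch (not part of the original file): the usual split a
 * UDP-style receive makes between the plain and the checksumming copy,
 * assuming the caller already verified that the iovec is large enough.
 * The udphdr-sized header skip is an illustrative assumption.
 */
static int example_copy_payload(struct sk_buff *skb, struct msghdr *msg,
				int copied)
{
	if (skb_csum_unnecessary(skb))
		return skb_copy_datagram_iovec(skb, sizeof(struct udphdr),
					       msg->msg_iov, copied);
	/* Copies and verifies in one pass; -EINVAL signals a checksum
	 * failure, in which case the packet should be dropped (e.g. via
	 * skb_kill_datagram() when it was peeked).
	 */
	return skb_copy_and_csum_datagram_iovec(skb, sizeof(struct udphdr),
						msg->msg_iov);
}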

/**
 *	datagram_poll - generic datagram poll
 *	@file: file struct
 *	@sock: socket
 *	@wait: poll table
 *
 *	Datagram poll: Again totally generic. This also handles
 *	sequenced packet sockets provided the socket receive queue
 *	is only ever holding data ready to receive.
 *
 *	Note: when you _don't_ use this routine for this protocol,
 *	and you use a different write policy from sock_writeable()
 *	then please supply your own write_space callback.
 */
unsigned int datagram_poll(struct file *file, struct socket *sock,
			   poll_table *wait)
{
	struct sock *sk = sock->sk;
	unsigned int mask;

	poll_wait(file, sk->sk_sleep, wait);
	mask = 0;

	/* exceptional events? */
	if (sk->sk_err || !skb_queue_empty(&sk->sk_error_queue))
		mask |= POLLERR;
	if (sk->sk_shutdown & RCV_SHUTDOWN)
		mask |= POLLRDHUP;
	if (sk->sk_shutdown == SHUTDOWN_MASK)
		mask |= POLLHUP;

	/* readable? */
	if (!skb_queue_empty(&sk->sk_receive_queue) ||
	    (sk->sk_shutdown & RCV_SHUTDOWN))
		mask |= POLLIN | POLLRDNORM;

	/* Connection-based need to check for termination and startup */
	if (connection_based(sk)) {
		if (sk->sk_state == TCP_CLOSE)
			mask |= POLLHUP;
		/* connection hasn't started yet? */
		if (sk->sk_state == TCP_SYN_SENT)
			return mask;
	}

	/* writable? */
	if (sock_writeable(sk))
		mask |= POLLOUT | POLLWRNORM | POLLWRBAND;
	else
		set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);

	return mask;
}
EXPORT_SYMBOL(datagram_poll);
EXPORT_SYMBOL(skb_copy_and_csum_datagram_iovec);
EXPORT_SYMBOL(skb_copy_datagram_iovec);
EXPORT_SYMBOL(skb_free_datagram);
EXPORT_SYMBOL(skb_recv_datagram);
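
/*
 * Editor's sketch (not part of the original file): how datagram_poll() is
 * normally wired up. It is placed directly in a protocol's proto_ops, as
 * udp, packet and the other users mentioned at the top of this file do.
 * The field subset and names are illustrative.
 */
static const struct proto_ops example_dgram_ops = {
	.family		= PF_INET,
	.owner		= THIS_MODULE,
	.poll		= datagram_poll,	/* generic poll from this file */
	/* ...remaining ops as required by the protocol... */
};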