 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system. INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		PACKET - implements raw packet sockets.
 *
 * Authors:	Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Alan Cox, <gw4pts@gw4pts.ampr.org>
 *
 * Fixes:
 *		Alan Cox	:	verify_area() now used correctly
 *		Alan Cox	:	new skbuff lists, look ma no backlogs!
 *		Alan Cox	:	tidied skbuff lists.
 *		Alan Cox	:	Now uses generic datagram routines I
 *					added. Also fixed the peek/read crash
 *					from all old Linux datagram code.
 *		Alan Cox	:	Uses the improved datagram code.
 *		Alan Cox	:	Added NULLs for socket options.
 *		Alan Cox	:	Re-commented the code.
 *		Alan Cox	:	Use new kernel-side addressing.
 *		Rob Janssen	:	Correct MTU usage.
 *		Dave Platt	:	Counter leaks caused by incorrect
 *					interrupt locking and some slightly
 *					dubious gcc output. Can you read
 *					compiler: it said _VOLATILE_
 *		Richard Kooijman:	Timestamp fixes.
 *		Alan Cox	:	New buffers. Use sk->mac.raw.
 *		Alan Cox	:	sendmsg/recvmsg support.
 *		Alan Cox	:	Protocol setting support.
 *		Alexey Kuznetsov:	Untied from IPv4 stack.
 *		Cyrus Durgin	:	Fixed kerneld for kmod.
 *		Michal Ostrowski:	Module initialization cleanup.
 *		Ulises Alonso	:	Frame number limit removal and
 *					packet_set_ring memory leak.
 *		Eric Biederman	:	Allow for > 8 byte hardware addresses.
 *					The convention is that longer addresses
 *					will simply extend the hardware address
 *					byte arrays at the end of sockaddr_ll.
 *		Johann Baudy	:	Added TX RING.
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
#include <linux/types.h>
#include <linux/capability.h>
#include <linux/fcntl.h>
#include <linux/socket.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/if_packet.h>
#include <linux/wireless.h>
#include <linux/kernel.h>
#include <linux/kmod.h>
#include <net/net_namespace.h>
#include <net/protocol.h>
#include <linux/skbuff.h>
#include <linux/errno.h>
#include <linux/timer.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/ioctls.h>
#include <asm/cacheflush.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/poll.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/mutex.h>
#include <net/inet_common.h>
/*
   Assumptions:
   - if the device has no dev->hard_header routine, it adds and removes the
     ll header inside itself. In this case the ll header is invisible outside
     of the device, but higher levels still should reserve
     dev->hard_header_len. Some devices are clever enough to reallocate the
     skb when the header will not fit into the reserved space (tunnels);
     others are silly.
   - a packet socket receives packets with the ll header pulled,
     so SOCK_RAW should push it back.

On receive:
-----------

Incoming, dev->hard_header != NULL
   mac_header -> ll header

Outgoing, dev->hard_header != NULL
   mac_header -> ll header

Incoming, dev->hard_header == NULL
   mac_header -> UNKNOWN position. It is very likely that it points to the
                 ll header. PPP does this, which is wrong, because it
                 introduces asymmetry between the rx and tx paths.

Outgoing, dev->hard_header == NULL
   mac_header -> data. ll header is still not built!

If dev->hard_header == NULL we are unlikely to restore a sensible ll header.

On transmit:
------------

dev->hard_header != NULL
   mac_header -> ll header

dev->hard_header == NULL (ll header is added by device, we cannot control it)

   We should set nh.raw on output to the correct position,
   the packet classifier depends on it.
 */
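/*
 * Usage sketch (illustrative only, not part of the original file): a
 * minimal userspace view of the SOCK_RAW/SOCK_DGRAM distinction described
 * above. SOCK_RAW delivers frames with the link-level header in place;
 * SOCK_DGRAM strips it. Error handling and privileges (CAP_NET_RAW) are
 * assumed.
 *
 *	#include <sys/socket.h>
 *	#include <linux/if_ether.h>
 *	#include <arpa/inet.h>
 *
 *	int main(void)
 *	{
 *		char buf[2048];
 *		int raw   = socket(AF_PACKET, SOCK_RAW,   htons(ETH_P_ALL));
 *		int dgram = socket(AF_PACKET, SOCK_DGRAM, htons(ETH_P_ALL));
 *
 *		recv(raw, buf, sizeof(buf), 0);   // buf starts at the ll header
 *		recv(dgram, buf, sizeof(buf), 0); // buf starts at the network header
 *		return 0;
 *	}
 */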
/* Private packet socket structures. */

struct packet_mclist {
    struct packet_mclist	*next;
    int				ifindex;
    int				count;
    unsigned short		type;
    unsigned short		alen;
    unsigned char		addr[MAX_ADDR_LEN];
};

/* identical to struct packet_mreq except it has
 * a longer address field.
 */
struct packet_mreq_max {
    int			mr_ifindex;
    unsigned short	mr_type;
    unsigned short	mr_alen;
    unsigned char	mr_address[MAX_ADDR_LEN];
};

#ifdef CONFIG_PACKET_MMAP
static int packet_set_ring(struct sock *sk, struct tpacket_req *req,
                           int closing, int tx_ring);

struct packet_ring_buffer {
    char		**pg_vec;
    unsigned int	head;
    unsigned int	frames_per_block;
    unsigned int	frame_size;
    unsigned int	frame_max;

    unsigned int	pg_vec_order;
    unsigned int	pg_vec_pages;
    unsigned int	pg_vec_len;

    atomic_t		pending;
};

struct packet_sock;
static int tpacket_snd(struct packet_sock *po, struct msghdr *msg);
#endif

static void packet_flush_mclist(struct sock *sk);

struct packet_sock {
    /* struct sock has to be the first member of packet_sock */
    struct sock			sk;
    struct tpacket_stats	stats;
#ifdef CONFIG_PACKET_MMAP
    struct packet_ring_buffer	rx_ring;
    struct packet_ring_buffer	tx_ring;
    int				copy_thresh;
#endif
    struct packet_type		prot_hook;
    spinlock_t			bind_lock;
    struct mutex		pg_vec_lock;
    unsigned int		running:1,	/* prot_hook is attached */
				auxdata:1,
				origdev:1;
    int				ifindex;	/* bound device */
    __be16			num;
    struct packet_mclist	*mclist;
#ifdef CONFIG_PACKET_MMAP
    atomic_t			mapped;
    enum tpacket_versions	tp_version;
    unsigned int		tp_hdrlen;
    unsigned int		tp_reserve;
    unsigned int		tp_loss:1;
#endif
};

struct packet_skb_cb {
    unsigned int origlen;
    union {
        struct sockaddr_pkt pkt;
        struct sockaddr_ll ll;
    } sa;
};

#define PACKET_SKB_CB(__skb) ((struct packet_skb_cb *)((__skb)->cb))
#ifdef CONFIG_PACKET_MMAP

static void __packet_set_status(struct packet_sock *po, void *frame, int status)
{
    union {
        struct tpacket_hdr *h1;
        struct tpacket2_hdr *h2;
        void *raw;
    } h;

    h.raw = frame;
    switch (po->tp_version) {
    case TPACKET_V1:
        h.h1->tp_status = status;
        flush_dcache_page(virt_to_page(&h.h1->tp_status));
        break;
    case TPACKET_V2:
        h.h2->tp_status = status;
        flush_dcache_page(virt_to_page(&h.h2->tp_status));
        break;
    default:
        pr_err("TPACKET version not supported\n");
    }
}

static int __packet_get_status(struct packet_sock *po, void *frame)
{
    union {
        struct tpacket_hdr *h1;
        struct tpacket2_hdr *h2;
        void *raw;
    } h;

    h.raw = frame;
    switch (po->tp_version) {
    case TPACKET_V1:
        flush_dcache_page(virt_to_page(&h.h1->tp_status));
        return h.h1->tp_status;
    case TPACKET_V2:
        flush_dcache_page(virt_to_page(&h.h2->tp_status));
        return h.h2->tp_status;
    default:
        pr_err("TPACKET version not supported\n");
        return 0;
    }
}

static void *packet_lookup_frame(struct packet_sock *po,
                                 struct packet_ring_buffer *rb,
                                 unsigned int position,
                                 int status)
{
    unsigned int pg_vec_pos, frame_offset;
    union {
        struct tpacket_hdr *h1;
        struct tpacket2_hdr *h2;
        void *raw;
    } h;

    pg_vec_pos = position / rb->frames_per_block;
    frame_offset = position % rb->frames_per_block;

    h.raw = rb->pg_vec[pg_vec_pos] + (frame_offset * rb->frame_size);

    if (status != __packet_get_status(po, h.raw))
        return NULL;

    return h.raw;
}

static inline void *packet_current_frame(struct packet_sock *po,
                                         struct packet_ring_buffer *rb,
                                         int status)
{
    return packet_lookup_frame(po, rb, rb->head, status);
}

static inline void *packet_previous_frame(struct packet_sock *po,
                                          struct packet_ring_buffer *rb,
                                          int status)
{
    unsigned int previous = rb->head ? rb->head - 1 : rb->frame_max;
    return packet_lookup_frame(po, rb, previous, status);
}

static inline void packet_increment_head(struct packet_ring_buffer *buff)
{
    buff->head = buff->head != buff->frame_max ? buff->head + 1 : 0;
}

#endif
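/*
 * Worked example of the lookup arithmetic above (numbers are assumptions):
 * with tp_block_size = 4096, tp_frame_size = 2048 and tp_block_nr = 2 we
 * get frames_per_block = 2 and frame_max = 3. Frame number 3 then lives in
 * block 3 / 2 = 1, at byte offset (3 % 2) * 2048 = 2048 into that block,
 * i.e. h.raw = pg_vec[1] + 2048, and packet_increment_head() wraps
 * 3 back to 0.
 */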
static inline struct packet_sock *pkt_sk(struct sock *sk)
{
    return (struct packet_sock *)sk;
}

static void packet_sock_destruct(struct sock *sk)
{
    WARN_ON(atomic_read(&sk->sk_rmem_alloc));
    WARN_ON(atomic_read(&sk->sk_wmem_alloc));

    if (!sock_flag(sk, SOCK_DEAD)) {
        pr_err("Attempt to release alive packet socket: %p\n", sk);
        return;
    }

    sk_refcnt_debug_dec(sk);
}
static const struct proto_ops packet_ops;

static const struct proto_ops packet_ops_spkt;

static int packet_rcv_spkt(struct sk_buff *skb, struct net_device *dev,
                           struct packet_type *pt, struct net_device *orig_dev)
{
    struct sock *sk;
    struct sockaddr_pkt *spkt;

    /*
     * When we registered the protocol we saved the socket in the data
     * field for just this event.
     */

    sk = pt->af_packet_priv;

    /*
     * Yank back the headers [hope the device set this
     * right or kerboom...]
     *
     * Incoming packets have the ll header pulled; push it back.
     *
     * For outgoing ones skb->data == skb_mac_header(skb),
     * so this procedure is a noop.
     */

    if (skb->pkt_type == PACKET_LOOPBACK)

    if (dev_net(dev) != sock_net(sk))

    skb = skb_share_check(skb, GFP_ATOMIC);

    /* drop any routing info */

    /* drop conntrack reference */

    spkt = &PACKET_SKB_CB(skb)->sa.pkt;

    skb_push(skb, skb->data - skb_mac_header(skb));

    /*
     * The SOCK_PACKET socket receives _all_ frames.
     */

    spkt->spkt_family = dev->type;
    strlcpy(spkt->spkt_device, dev->name, sizeof(spkt->spkt_device));
    spkt->spkt_protocol = skb->protocol;

    /*
     * Charge the memory to the socket. This is done specifically
     * to prevent sockets using all the memory up.
     */

    if (sock_queue_rcv_skb(sk, skb) == 0)
/*
 * Output a raw packet to a device layer. This bypasses all the other
 * protocol layers and you must therefore supply it with a complete frame.
 */

static int packet_sendmsg_spkt(struct kiocb *iocb, struct socket *sock,
                               struct msghdr *msg, size_t len)
{
    struct sock *sk = sock->sk;
    struct sockaddr_pkt *saddr = (struct sockaddr_pkt *)msg->msg_name;
    struct net_device *dev;

    /*
     * Get and verify the address.
     */

    if (msg->msg_namelen < sizeof(struct sockaddr))

    if (msg->msg_namelen == sizeof(struct sockaddr_pkt))
        proto = saddr->spkt_protocol;
    else
        return -ENOTCONN;	/* SOCK_PACKET must be sent giving an address */

    /*
     * Find the device first to size check it
     */

    saddr->spkt_device[13] = 0;
    dev = dev_get_by_name(sock_net(sk), saddr->spkt_device);

    if (!(dev->flags & IFF_UP))

    /*
     * You may not queue a frame bigger than the mtu. This is the lowest level
     * raw protocol and you must do your own fragmentation at this level.
     */

    if (len > dev->mtu + dev->hard_header_len)

    skb = sock_wmalloc(sk, len + LL_RESERVED_SPACE(dev), 0, GFP_KERNEL);

    /*
     * If the write buffer is full, then tough. At this level the user
     * gets to deal with the problem - do your own algorithmic backoffs.
     * That's far more flexible.
     */

    /* FIXME: Save some space for broken drivers that write a
     * hard header at transmission time by themselves. PPP is the
     * notable one here. This should really be fixed at the driver level.
     */
    skb_reserve(skb, LL_RESERVED_SPACE(dev));
    skb_reset_network_header(skb);

    /* Try to align data part correctly */
    if (dev->header_ops) {
        skb->data -= dev->hard_header_len;
        skb->tail -= dev->hard_header_len;
        if (len < dev->hard_header_len)
            skb_reset_network_header(skb);
    }

    /* Returns -EFAULT on error */
    err = memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len);
    skb->protocol = proto;
    skb->priority = sk->sk_priority;
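/*
 * Usage sketch (illustrative only): sending a prebuilt frame through the
 * SOCK_PACKET path above. The caller supplies a complete frame and names
 * the device in a sockaddr_pkt; the device name and protocol here are
 * assumptions, and error handling is elided.
 *
 *	#include <string.h>
 *	#include <sys/socket.h>
 *	#include <linux/if_packet.h>
 *	#include <linux/if_ether.h>
 *	#include <arpa/inet.h>
 *
 *	static int spkt_send(const void *frame, size_t len)
 *	{
 *		int fd = socket(AF_PACKET, SOCK_PACKET, htons(ETH_P_ALL));
 *		struct sockaddr_pkt spkt;
 *
 *		memset(&spkt, 0, sizeof(spkt));
 *		spkt.spkt_family = AF_PACKET;
 *		strncpy((char *)spkt.spkt_device, "eth0",
 *			sizeof(spkt.spkt_device));
 *		spkt.spkt_protocol = htons(ETH_P_IP);
 *		// The frame must already contain the link-level header and
 *		// may not exceed dev->mtu + hard_header_len.
 *		return sendto(fd, frame, len, 0,
 *			      (struct sockaddr *)&spkt, sizeof(spkt));
 *	}
 */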
static inline unsigned int run_filter(struct sk_buff *skb, struct sock *sk,
                                      unsigned int res)
{
    struct sk_filter *filter;

    rcu_read_lock_bh();
    filter = rcu_dereference(sk->sk_filter);
    if (filter != NULL)
        res = sk_run_filter(skb, filter->insns, filter->len);
    rcu_read_unlock_bh();

    return res;
}

/*
 * If we've lost frames since the last time we queued one to the
 * sk_receive_queue, we need to record it here.
 * This must be called under the protection of the socket lock
 * to prevent racing with other softirqs and user space.
 */
static inline void record_packet_gap(struct sk_buff *skb,
                                     struct packet_sock *po)
{
    /*
     * We overload the mark field here, since we're about
     * to enqueue to a receive queue and nobody else will
     * use this field at this point.
     */
    skb->mark = po->stats.tp_gap;
    po->stats.tp_gap = 0;
}

static inline __u32 check_packet_gap(struct sk_buff *skb)
{
    return skb->mark;
}
/*
   This function implements lazy skb cloning in the hope that most of the
   packets are discarded by BPF.

   Note the tricky part: we DO mangle shared skbs! skb->data, skb->len
   and skb->cb are mangled. It works because (and until) packets
   falling here are owned by the current CPU. Output packets are cloned
   by dev_queue_xmit_nit(), input packets are processed by net_bh
   sequentially, so that if we return the skb to its original state on
   exit, we will not harm anyone.
 */
static int packet_rcv(struct sk_buff *skb, struct net_device *dev,
                      struct packet_type *pt, struct net_device *orig_dev)
{
    struct sock *sk;
    struct sockaddr_ll *sll;
    struct packet_sock *po;
    u8 *skb_head = skb->data;
    int skb_len = skb->len;
    unsigned int snaplen, res;

    if (skb->pkt_type == PACKET_LOOPBACK)

    sk = pt->af_packet_priv;

    if (dev_net(dev) != sock_net(sk))

    if (dev->header_ops) {
        /* The device has an explicit notion of ll header,
           exported to higher levels.

           Otherwise, the device hides the details of its frame
           structure, so the corresponding packet header is
           never delivered to the user.
         */
        if (sk->sk_type != SOCK_DGRAM)
            skb_push(skb, skb->data - skb_mac_header(skb));
        else if (skb->pkt_type == PACKET_OUTGOING) {
            /* Special case: outgoing packets have ll header at head */
            skb_pull(skb, skb_network_offset(skb));
        }
    }

    res = run_filter(skb, sk, snaplen);

    if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >=
        (unsigned)sk->sk_rcvbuf)

    if (skb_shared(skb)) {
        struct sk_buff *nskb = skb_clone(skb, GFP_ATOMIC);

    if (skb_head != skb->data) {
        skb->data = skb_head;

    BUILD_BUG_ON(sizeof(*PACKET_SKB_CB(skb)) + MAX_ADDR_LEN - 8 >
                 sizeof(skb->cb));

    sll = &PACKET_SKB_CB(skb)->sa.ll;
    sll->sll_family = AF_PACKET;
    sll->sll_hatype = dev->type;
    sll->sll_protocol = skb->protocol;
    sll->sll_pkttype = skb->pkt_type;
    if (unlikely(po->origdev))
        sll->sll_ifindex = orig_dev->ifindex;
    else
        sll->sll_ifindex = dev->ifindex;

    sll->sll_halen = dev_parse_header(skb, sll->sll_addr);

    PACKET_SKB_CB(skb)->origlen = skb->len;

    if (pskb_trim(skb, snaplen))

    skb_set_owner_r(skb, sk);

    /* drop conntrack reference */

    spin_lock(&sk->sk_receive_queue.lock);
    po->stats.tp_packets++;
    record_packet_gap(skb, po);
    __skb_queue_tail(&sk->sk_receive_queue, skb);
    spin_unlock(&sk->sk_receive_queue.lock);
    sk->sk_data_ready(sk, skb->len);

    spin_lock(&sk->sk_receive_queue.lock);
    po->stats.tp_drops++;
    spin_unlock(&sk->sk_receive_queue.lock);

    if (skb_head != skb->data && skb_shared(skb)) {
        skb->data = skb_head;
#ifdef CONFIG_PACKET_MMAP
static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev,
                       struct packet_type *pt, struct net_device *orig_dev)
{
    struct sock *sk;
    struct packet_sock *po;
    struct sockaddr_ll *sll;
    union {
        struct tpacket_hdr *h1;
        struct tpacket2_hdr *h2;
        void *raw;
    } h;
    u8 *skb_head = skb->data;
    int skb_len = skb->len;
    unsigned int snaplen, res;
    unsigned long status = TP_STATUS_LOSING|TP_STATUS_USER;
    unsigned short macoff, netoff, hdrlen;
    struct sk_buff *copy_skb = NULL;
    struct timeval tv;
    struct timespec ts;

    if (skb->pkt_type == PACKET_LOOPBACK)

    sk = pt->af_packet_priv;

    if (dev_net(dev) != sock_net(sk))

    if (dev->header_ops) {
        if (sk->sk_type != SOCK_DGRAM)
            skb_push(skb, skb->data - skb_mac_header(skb));
        else if (skb->pkt_type == PACKET_OUTGOING) {
            /* Special case: outgoing packets have ll header at head */
            skb_pull(skb, skb_network_offset(skb));
        }
    }

    if (skb->ip_summed == CHECKSUM_PARTIAL)
        status |= TP_STATUS_CSUMNOTREADY;

    res = run_filter(skb, sk, snaplen);

    if (sk->sk_type == SOCK_DGRAM) {
        macoff = netoff = TPACKET_ALIGN(po->tp_hdrlen) + 16 +
                          po->tp_reserve;
    } else {
        unsigned maclen = skb_network_offset(skb);
        netoff = TPACKET_ALIGN(po->tp_hdrlen +
                               (maclen < 16 ? 16 : maclen)) +
                 po->tp_reserve;
        macoff = netoff - maclen;
    }

    if (macoff + snaplen > po->rx_ring.frame_size) {
        if (po->copy_thresh &&
            atomic_read(&sk->sk_rmem_alloc) + skb->truesize <
            (unsigned)sk->sk_rcvbuf) {
            if (skb_shared(skb)) {
                copy_skb = skb_clone(skb, GFP_ATOMIC);
            } else {
                copy_skb = skb_get(skb);
                skb_head = skb->data;
            }
            if (copy_skb)
                skb_set_owner_r(copy_skb, sk);
        }
        snaplen = po->rx_ring.frame_size - macoff;
        if ((int)snaplen < 0)
            snaplen = 0;
    }

    spin_lock(&sk->sk_receive_queue.lock);
    h.raw = packet_current_frame(po, &po->rx_ring, TP_STATUS_KERNEL);

    packet_increment_head(&po->rx_ring);
    po->stats.tp_packets++;
    if (copy_skb) {
        status |= TP_STATUS_COPY;
        __skb_queue_tail(&sk->sk_receive_queue, copy_skb);
    }
    if (!po->stats.tp_drops)
        status &= ~TP_STATUS_LOSING;
    spin_unlock(&sk->sk_receive_queue.lock);

    skb_copy_bits(skb, 0, h.raw + macoff, snaplen);

    switch (po->tp_version) {
    case TPACKET_V1:
        h.h1->tp_len = skb->len;
        h.h1->tp_snaplen = snaplen;
        h.h1->tp_mac = macoff;
        h.h1->tp_net = netoff;
        if (skb->tstamp.tv64)
            tv = ktime_to_timeval(skb->tstamp);
        else
            do_gettimeofday(&tv);
        h.h1->tp_sec = tv.tv_sec;
        h.h1->tp_usec = tv.tv_usec;
        hdrlen = sizeof(*h.h1);
        break;
    case TPACKET_V2:
        h.h2->tp_len = skb->len;
        h.h2->tp_snaplen = snaplen;
        h.h2->tp_mac = macoff;
        h.h2->tp_net = netoff;
        if (skb->tstamp.tv64)
            ts = ktime_to_timespec(skb->tstamp);
        h.h2->tp_sec = ts.tv_sec;
        h.h2->tp_nsec = ts.tv_nsec;
        h.h2->tp_vlan_tci = skb->vlan_tci;
        hdrlen = sizeof(*h.h2);
        break;
    }

    sll = h.raw + TPACKET_ALIGN(hdrlen);
    sll->sll_halen = dev_parse_header(skb, sll->sll_addr);
    sll->sll_family = AF_PACKET;
    sll->sll_hatype = dev->type;
    sll->sll_protocol = skb->protocol;
    sll->sll_pkttype = skb->pkt_type;
    if (unlikely(po->origdev))
        sll->sll_ifindex = orig_dev->ifindex;
    else
        sll->sll_ifindex = dev->ifindex;

    __packet_set_status(po, h.raw, status);

    {
        struct page *p_start, *p_end;
        u8 *h_end = h.raw + macoff + snaplen - 1;

        p_start = virt_to_page(h.raw);
        p_end = virt_to_page(h_end);
        while (p_start <= p_end) {
            flush_dcache_page(p_start);
            p_start++;
        }
    }

    sk->sk_data_ready(sk, 0);

    if (skb_head != skb->data && skb_shared(skb)) {
        skb->data = skb_head;

    po->stats.tp_drops++;

    spin_unlock(&sk->sk_receive_queue.lock);

    sk->sk_data_ready(sk, 0);
static void tpacket_destruct_skb(struct sk_buff *skb)
{
    struct packet_sock *po = pkt_sk(skb->sk);
    void *ph;

    if (likely(po->tx_ring.pg_vec)) {
        ph = skb_shinfo(skb)->destructor_arg;
        BUG_ON(__packet_get_status(po, ph) != TP_STATUS_SENDING);
        BUG_ON(atomic_read(&po->tx_ring.pending) == 0);
        atomic_dec(&po->tx_ring.pending);
        __packet_set_status(po, ph, TP_STATUS_AVAILABLE);
    }
}

static int tpacket_fill_skb(struct packet_sock *po, struct sk_buff *skb,
                            void *frame, struct net_device *dev, int size_max,
                            __be16 proto, unsigned char *addr)
{
    union {
        struct tpacket_hdr *h1;
        struct tpacket2_hdr *h2;
        void *raw;
    } ph;
    int to_write, offset, len, tp_len, nr_frags, len_max;
    struct socket *sock = po->sk.sk_socket;
    struct page *page;
    void *data;
    int err;

    ph.raw = frame;

    skb->protocol = proto;
    skb->priority = po->sk.sk_priority;
    skb_shinfo(skb)->destructor_arg = ph.raw;

    switch (po->tp_version) {
    case TPACKET_V2:
        tp_len = ph.h2->tp_len;
        break;
    default:
        tp_len = ph.h1->tp_len;
        break;
    }
    if (unlikely(tp_len > size_max)) {
        pr_err("packet size is too long (%d > %d)\n", tp_len, size_max);
        return -EMSGSIZE;
    }

    skb_reserve(skb, LL_RESERVED_SPACE(dev));
    skb_reset_network_header(skb);

    data = ph.raw + po->tp_hdrlen - sizeof(struct sockaddr_ll);
    to_write = tp_len;

    if (sock->type == SOCK_DGRAM) {
        err = dev_hard_header(skb, dev, ntohs(proto), addr,
                              NULL, tp_len);
        if (unlikely(err < 0))
            return -EINVAL;
    } else if (dev->hard_header_len) {
        /* net device doesn't like empty head */
        if (unlikely(tp_len <= dev->hard_header_len)) {
            pr_err("packet size is too short (%d < %d)\n",
                   tp_len, dev->hard_header_len);
            return -EINVAL;
        }

        skb_push(skb, dev->hard_header_len);
        err = skb_store_bits(skb, 0, data,
                             dev->hard_header_len);
        if (unlikely(err))
            return err;

        data += dev->hard_header_len;
        to_write -= dev->hard_header_len;
    }

    page = virt_to_page(data);
    offset = offset_in_page(data);
    len_max = PAGE_SIZE - offset;
    len = ((to_write > len_max) ? len_max : to_write);

    skb->data_len = to_write;
    skb->len += to_write;
    skb->truesize += to_write;
    atomic_add(to_write, &po->sk.sk_wmem_alloc);

    while (likely(to_write)) {
        nr_frags = skb_shinfo(skb)->nr_frags;

        if (unlikely(nr_frags >= MAX_SKB_FRAGS)) {
            pr_err("packet exceeds the number of skb frags (%lu)\n",
                   MAX_SKB_FRAGS);
            return -EFAULT;
        }

        flush_dcache_page(page);

        skb_fill_page_desc(skb,
                           nr_frags,
                           page++, offset, len);
        to_write -= len;
        offset = 0;
        len_max = PAGE_SIZE;
        len = ((to_write > len_max) ? len_max : to_write);
    }

    return tp_len;
}
static int tpacket_snd(struct packet_sock *po, struct msghdr *msg)
{
    struct socket *sock;
    struct sk_buff *skb;
    struct net_device *dev;
    __be16 proto;
    int ifindex, err, reserve = 0;
    void *ph;
    struct sockaddr_ll *saddr = (struct sockaddr_ll *)msg->msg_name;
    int tp_len, size_max;
    unsigned char *addr;
    int status = TP_STATUS_AVAILABLE;

    sock = po->sk.sk_socket;

    mutex_lock(&po->pg_vec_lock);

    if (saddr == NULL) {
        ifindex = po->ifindex;
    } else {
        if (msg->msg_namelen < sizeof(struct sockaddr_ll))
        if (msg->msg_namelen < (saddr->sll_halen
                                + offsetof(struct sockaddr_ll,
                                           sll_addr)))
        ifindex = saddr->sll_ifindex;
        proto = saddr->sll_protocol;
        addr = saddr->sll_addr;
    }

    dev = dev_get_by_index(sock_net(&po->sk), ifindex);

    if (unlikely(dev == NULL))

    reserve = dev->hard_header_len;

    if (unlikely(!(dev->flags & IFF_UP)))

    size_max = po->tx_ring.frame_size
               - sizeof(struct skb_shared_info)
               - po->tp_hdrlen
               - LL_ALLOCATED_SPACE(dev)
               - sizeof(struct sockaddr_ll);

    if (size_max > dev->mtu + reserve)
        size_max = dev->mtu + reserve;

    do {
        ph = packet_current_frame(po, &po->tx_ring,
                                  TP_STATUS_SEND_REQUEST);

        if (unlikely(ph == NULL)) {
            schedule();
            continue;
        }

        status = TP_STATUS_SEND_REQUEST;
        skb = sock_alloc_send_skb(&po->sk,
                                  LL_ALLOCATED_SPACE(dev)
                                  + sizeof(struct sockaddr_ll),
                                  0, &err);

        if (unlikely(skb == NULL))

        tp_len = tpacket_fill_skb(po, skb, ph, dev, size_max, proto,
                                  addr);

        if (unlikely(tp_len < 0)) {
            if (po->tp_loss) {
                __packet_set_status(po, ph,
                                    TP_STATUS_AVAILABLE);
                packet_increment_head(&po->tx_ring);
            } else {
                status = TP_STATUS_WRONG_FORMAT;
            }
        }

        skb->destructor = tpacket_destruct_skb;
        __packet_set_status(po, ph, TP_STATUS_SENDING);
        atomic_inc(&po->tx_ring.pending);

        status = TP_STATUS_SEND_REQUEST;
        err = dev_queue_xmit(skb);
        if (unlikely(err > 0 && (err = net_xmit_errno(err)) != 0))

        packet_increment_head(&po->tx_ring);
    } while (likely((ph != NULL) || ((!(msg->msg_flags & MSG_DONTWAIT))
                                     && (atomic_read(&po->tx_ring.pending))))
            );

    skb->destructor = sock_wfree;
    atomic_dec(&po->tx_ring.pending);

    __packet_set_status(po, ph, status);

    mutex_unlock(&po->pg_vec_lock);
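/*
 * Usage sketch (illustrative only, error handling elided): driving the
 * TX ring path above from user space, assuming TPACKET_V1. The ring
 * geometry, frame and frame_len are assumptions.
 *
 *	struct tpacket_req req = {
 *		.tp_block_size = 4096,
 *		.tp_block_nr   = 64,
 *		.tp_frame_size = 2048,
 *		.tp_frame_nr   = 128,
 *	};
 *	setsockopt(fd, SOL_PACKET, PACKET_TX_RING, &req, sizeof(req));
 *	char *ring = mmap(NULL, req.tp_block_size * req.tp_block_nr,
 *			  PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
 *	struct tpacket_hdr *hdr = (struct tpacket_hdr *)ring;
 *	if (hdr->tp_status == TP_STATUS_AVAILABLE) {
 *		// Frame data lives after the aligned header; this mirrors
 *		// the "ph.raw + tp_hdrlen - sizeof(sockaddr_ll)" offset
 *		// computed by tpacket_fill_skb().
 *		char *data = ring + TPACKET_HDRLEN
 *				  - sizeof(struct sockaddr_ll);
 *		memcpy(data, frame, frame_len);
 *		hdr->tp_len = frame_len;
 *		hdr->tp_status = TP_STATUS_SEND_REQUEST;
 *		send(fd, NULL, 0, 0);	// kick tpacket_snd()
 *	}
 */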
static int packet_snd(struct socket *sock,
                      struct msghdr *msg, size_t len)
{
    struct sock *sk = sock->sk;
    struct sockaddr_ll *saddr = (struct sockaddr_ll *)msg->msg_name;
    struct sk_buff *skb;
    struct net_device *dev;
    unsigned char *addr;
    int ifindex, err, reserve = 0;

    /*
     * Get and verify the address.
     */

    if (saddr == NULL) {
        struct packet_sock *po = pkt_sk(sk);

        ifindex = po->ifindex;
    } else {
        if (msg->msg_namelen < sizeof(struct sockaddr_ll))
        if (msg->msg_namelen < (saddr->sll_halen + offsetof(struct sockaddr_ll, sll_addr)))
        ifindex = saddr->sll_ifindex;
        proto = saddr->sll_protocol;
        addr = saddr->sll_addr;
    }

    dev = dev_get_by_index(sock_net(sk), ifindex);

    if (sock->type == SOCK_RAW)
        reserve = dev->hard_header_len;

    if (!(dev->flags & IFF_UP))

    if (len > dev->mtu + reserve)

    skb = sock_alloc_send_skb(sk, len + LL_ALLOCATED_SPACE(dev),
                              msg->msg_flags & MSG_DONTWAIT, &err);

    skb_reserve(skb, LL_RESERVED_SPACE(dev));
    skb_reset_network_header(skb);

    if (sock->type == SOCK_DGRAM &&
        dev_hard_header(skb, dev, ntohs(proto), addr, NULL, len) < 0)

    /* Returns -EFAULT on error */
    err = memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len);

    skb->protocol = proto;
    skb->priority = sk->sk_priority;

    err = dev_queue_xmit(skb);
    if (err > 0 && (err = net_xmit_errno(err)) != 0)
static int packet_sendmsg(struct kiocb *iocb, struct socket *sock,
                          struct msghdr *msg, size_t len)
{
#ifdef CONFIG_PACKET_MMAP
    struct sock *sk = sock->sk;
    struct packet_sock *po = pkt_sk(sk);

    if (po->tx_ring.pg_vec)
        return tpacket_snd(po, msg);
#endif
    return packet_snd(sock, msg, len);
}
/*
 * Close a PACKET socket. This is fairly simple. We immediately go
 * to 'closed' state and remove our protocol entry in the device list.
 */

static int packet_release(struct socket *sock)
{
    struct sock *sk = sock->sk;
    struct packet_sock *po;
#ifdef CONFIG_PACKET_MMAP
    struct tpacket_req req;
#endif

    write_lock_bh(&net->packet.sklist_lock);
    sk_del_node_init(sk);
    sock_prot_inuse_add(net, sk->sk_prot, -1);
    write_unlock_bh(&net->packet.sklist_lock);

    /*
     * Unhook packet receive handler.
     */

    /*
     * Remove the protocol hook
     */
    dev_remove_pack(&po->prot_hook);

    packet_flush_mclist(sk);

#ifdef CONFIG_PACKET_MMAP
    memset(&req, 0, sizeof(req));

    if (po->rx_ring.pg_vec)
        packet_set_ring(sk, &req, 1, 0);

    if (po->tx_ring.pg_vec)
        packet_set_ring(sk, &req, 1, 1);
#endif

    /*
     * Now the socket is dead. No more input will appear.
     */

    skb_queue_purge(&sk->sk_receive_queue);
    sk_refcnt_debug_release(sk);

/*
 * Attach a packet hook.
 */

static int packet_do_bind(struct sock *sk, struct net_device *dev, __be16 protocol)
{
    struct packet_sock *po = pkt_sk(sk);

    /*
     * Detach an existing hook if present.
     */

    spin_lock(&po->bind_lock);
    if (po->running) {
        spin_unlock(&po->bind_lock);
        dev_remove_pack(&po->prot_hook);
        spin_lock(&po->bind_lock);
    }

    po->prot_hook.type = protocol;
    po->prot_hook.dev = dev;

    po->ifindex = dev ? dev->ifindex : 0;

    if (!dev || (dev->flags & IFF_UP)) {
        dev_add_pack(&po->prot_hook);
    } else {
        sk->sk_err = ENETDOWN;
        if (!sock_flag(sk, SOCK_DEAD))
            sk->sk_error_report(sk);
    }

    spin_unlock(&po->bind_lock);
/*
 * Bind a packet socket to a device
 */

static int packet_bind_spkt(struct socket *sock, struct sockaddr *uaddr,
                            int addr_len)
{
    struct sock *sk = sock->sk;
    char name[15];
    struct net_device *dev;

    if (addr_len != sizeof(struct sockaddr))
        return -EINVAL;
    strlcpy(name, uaddr->sa_data, sizeof(name));

    dev = dev_get_by_name(sock_net(sk), name);
    if (dev)
        err = packet_do_bind(sk, dev, pkt_sk(sk)->num);

static int packet_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
{
    struct sockaddr_ll *sll = (struct sockaddr_ll *)uaddr;
    struct sock *sk = sock->sk;
    struct net_device *dev = NULL;

    if (addr_len < sizeof(struct sockaddr_ll))
        return -EINVAL;
    if (sll->sll_family != AF_PACKET)
        return -EINVAL;

    if (sll->sll_ifindex) {
        dev = dev_get_by_index(sock_net(sk), sll->sll_ifindex);
    }
    err = packet_do_bind(sk, dev, sll->sll_protocol ? : pkt_sk(sk)->num);

static struct proto packet_proto = {
    .name	  = "PACKET",
    .owner	  = THIS_MODULE,
    .obj_size = sizeof(struct packet_sock),
};

/*
 * Create a packet of type SOCK_PACKET.
 */

static int packet_create(struct net *net, struct socket *sock, int protocol)
{
    struct sock *sk;
    struct packet_sock *po;
    __be16 proto = (__force __be16)protocol; /* weird, but documented */

    if (!capable(CAP_NET_RAW))
        return -EPERM;
    if (sock->type != SOCK_DGRAM && sock->type != SOCK_RAW &&
        sock->type != SOCK_PACKET)
        return -ESOCKTNOSUPPORT;

    sock->state = SS_UNCONNECTED;

    sk = sk_alloc(net, PF_PACKET, GFP_KERNEL, &packet_proto);

    sock->ops = &packet_ops;
    if (sock->type == SOCK_PACKET)
        sock->ops = &packet_ops_spkt;

    sock_init_data(sock, sk);

    sk->sk_family = PF_PACKET;

    sk->sk_destruct = packet_sock_destruct;
    sk_refcnt_debug_inc(sk);

    /*
     * Attach a protocol block
     */

    spin_lock_init(&po->bind_lock);
    mutex_init(&po->pg_vec_lock);
    po->prot_hook.func = packet_rcv;

    if (sock->type == SOCK_PACKET)
        po->prot_hook.func = packet_rcv_spkt;

    po->prot_hook.af_packet_priv = sk;

    po->prot_hook.type = proto;
    dev_add_pack(&po->prot_hook);

    write_lock_bh(&net->packet.sklist_lock);
    sk_add_node(sk, &net->packet.sklist);
    sock_prot_inuse_add(net, &packet_proto, 1);
    write_unlock_bh(&net->packet.sklist_lock);
    return 0;
/*
 * Pull a packet from our receive queue and hand it to the user.
 * If necessary we block.
 */

static int packet_recvmsg(struct kiocb *iocb, struct socket *sock,
                          struct msghdr *msg, size_t len, int flags)
{
    struct sock *sk = sock->sk;
    struct sk_buff *skb;
    struct sockaddr_ll *sll;

    if (flags & ~(MSG_PEEK|MSG_DONTWAIT|MSG_TRUNC|MSG_CMSG_COMPAT))

    /* What error should we return now? EUNATTACH? */
    if (pkt_sk(sk)->ifindex < 0)

    /*
     * Call the generic datagram receiver. This handles all sorts
     * of horrible races and re-entrancy so we can forget about it
     * in the protocol layers.
     *
     * Now it will return ENETDOWN if the device has just gone down,
     * but then it will block.
     */

    skb = skb_recv_datagram(sk, flags, flags & MSG_DONTWAIT, &err);

    /*
     * An error occurred so return it. Because skb_recv_datagram()
     * handles the blocking we don't see and worry about blocking
     * retries.
     */

    /*
     * If the address length field is there to be filled in,
     * we fill it in now.
     */

    sll = &PACKET_SKB_CB(skb)->sa.ll;
    if (sock->type == SOCK_PACKET)
        msg->msg_namelen = sizeof(struct sockaddr_pkt);
    else
        msg->msg_namelen = sll->sll_halen + offsetof(struct sockaddr_ll, sll_addr);

    /*
     * You lose any data beyond the buffer you gave. If it worries
     * a user program they can ask the device for its MTU anyway.
     */

    msg->msg_flags |= MSG_TRUNC;

    err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);

    sock_recv_timestamp(msg, sk, skb);

    memcpy(msg->msg_name, &PACKET_SKB_CB(skb)->sa,
           msg->msg_namelen);

    if (pkt_sk(sk)->auxdata) {
        struct tpacket_auxdata aux;

        aux.tp_status = TP_STATUS_USER;
        if (skb->ip_summed == CHECKSUM_PARTIAL)
            aux.tp_status |= TP_STATUS_CSUMNOTREADY;
        aux.tp_len = PACKET_SKB_CB(skb)->origlen;
        aux.tp_snaplen = skb->len;
        aux.tp_mac = 0;
        aux.tp_net = skb_network_offset(skb);
        aux.tp_vlan_tci = skb->vlan_tci;

        put_cmsg(msg, SOL_PACKET, PACKET_AUXDATA, sizeof(aux), &aux);
    }

    gap = check_packet_gap(skb);
    if (gap)
        put_cmsg(msg, SOL_PACKET, PACKET_GAPDATA, sizeof(__u32), &gap);

    /*
     * Free or return the buffer as appropriate. Again this
     * hides all the races and re-entrancy issues from us.
     */
    err = (flags & MSG_TRUNC) ? skb->len : copied;

    skb_free_datagram(sk, skb);
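/*
 * Usage sketch (illustrative only): consuming the PACKET_AUXDATA control
 * message emitted above. Assumes the option was enabled with setsockopt()
 * and that fd is a bound packet socket; buffer sizes are arbitrary.
 *
 *	struct tpacket_auxdata *aux = NULL;
 *	char buf[2048], cbuf[CMSG_SPACE(sizeof(*aux))];
 *	struct iovec iov = { .iov_base = buf, .iov_len = sizeof(buf) };
 *	struct msghdr mh = {
 *		.msg_iov = &iov, .msg_iovlen = 1,
 *		.msg_control = cbuf, .msg_controllen = sizeof(cbuf),
 *	};
 *	struct cmsghdr *cmsg;
 *
 *	recvmsg(fd, &mh, 0);
 *	for (cmsg = CMSG_FIRSTHDR(&mh); cmsg; cmsg = CMSG_NXTHDR(&mh, cmsg))
 *		if (cmsg->cmsg_level == SOL_PACKET &&
 *		    cmsg->cmsg_type == PACKET_AUXDATA)
 *			aux = (struct tpacket_auxdata *)CMSG_DATA(cmsg);
 *	// aux->tp_len: original frame length; aux->tp_snaplen: bytes kept.
 */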
static int packet_getname_spkt(struct socket *sock, struct sockaddr *uaddr,
                               int *uaddr_len, int peer)
{
    struct net_device *dev;
    struct sock *sk = sock->sk;

    uaddr->sa_family = AF_PACKET;
    dev = dev_get_by_index(sock_net(sk), pkt_sk(sk)->ifindex);
    if (dev) {
        strlcpy(uaddr->sa_data, dev->name, 15);
        dev_put(dev);
    } else
        memset(uaddr->sa_data, 0, 14);
    *uaddr_len = sizeof(*uaddr);

static int packet_getname(struct socket *sock, struct sockaddr *uaddr,
                          int *uaddr_len, int peer)
{
    struct net_device *dev;
    struct sock *sk = sock->sk;
    struct packet_sock *po = pkt_sk(sk);
    struct sockaddr_ll *sll = (struct sockaddr_ll *)uaddr;

    sll->sll_family = AF_PACKET;
    sll->sll_ifindex = po->ifindex;
    sll->sll_protocol = po->num;
    dev = dev_get_by_index(sock_net(sk), po->ifindex);
    if (dev) {
        sll->sll_hatype = dev->type;
        sll->sll_halen = dev->addr_len;
        memcpy(sll->sll_addr, dev->dev_addr, dev->addr_len);
        dev_put(dev);
    } else {
        sll->sll_hatype = 0;	/* Bad: we have no ARPHRD_UNSPEC */
    }
    *uaddr_len = offsetof(struct sockaddr_ll, sll_addr) + sll->sll_halen;

static int packet_dev_mc(struct net_device *dev, struct packet_mclist *i,
                         int what)
{
    switch (i->type) {
    case PACKET_MR_MULTICAST:
        if (what > 0)
            return dev_mc_add(dev, i->addr, i->alen, 0);
        else
            return dev_mc_delete(dev, i->addr, i->alen, 0);

    case PACKET_MR_PROMISC:
        return dev_set_promiscuity(dev, what);

    case PACKET_MR_ALLMULTI:
        return dev_set_allmulti(dev, what);

    case PACKET_MR_UNICAST:
        if (what > 0)
            return dev_unicast_add(dev, i->addr);
        else
            return dev_unicast_delete(dev, i->addr);
    }
static void packet_dev_mclist(struct net_device *dev, struct packet_mclist *i, int what)
{
    for ( ; i; i = i->next) {
        if (i->ifindex == dev->ifindex)
            packet_dev_mc(dev, i, what);
    }
}

static int packet_mc_add(struct sock *sk, struct packet_mreq_max *mreq)
{
    struct packet_sock *po = pkt_sk(sk);
    struct packet_mclist *ml, *i;
    struct net_device *dev;

    dev = __dev_get_by_index(sock_net(sk), mreq->mr_ifindex);

    if (mreq->mr_alen > dev->addr_len)

    i = kmalloc(sizeof(*i), GFP_KERNEL);

    for (ml = po->mclist; ml; ml = ml->next) {
        if (ml->ifindex == mreq->mr_ifindex &&
            ml->type == mreq->mr_type &&
            ml->alen == mreq->mr_alen &&
            memcmp(ml->addr, mreq->mr_address, ml->alen) == 0) {

            /* Free the new element ... */
        }
    }

    i->type = mreq->mr_type;
    i->ifindex = mreq->mr_ifindex;
    i->alen = mreq->mr_alen;
    memcpy(i->addr, mreq->mr_address, i->alen);

    i->next = po->mclist;
    err = packet_dev_mc(dev, i, 1);
    if (err)
        po->mclist = i->next;

static int packet_mc_drop(struct sock *sk, struct packet_mreq_max *mreq)
{
    struct packet_mclist *ml, **mlp;

    for (mlp = &pkt_sk(sk)->mclist; (ml = *mlp) != NULL; mlp = &ml->next) {
        if (ml->ifindex == mreq->mr_ifindex &&
            ml->type == mreq->mr_type &&
            ml->alen == mreq->mr_alen &&
            memcmp(ml->addr, mreq->mr_address, ml->alen) == 0) {
            if (--ml->count == 0) {
                struct net_device *dev;

                dev = dev_get_by_index(sock_net(sk), ml->ifindex);
                if (dev)
                    packet_dev_mc(dev, ml, -1);
            }
        }
    }
    return -EADDRNOTAVAIL;
}

static void packet_flush_mclist(struct sock *sk)
{
    struct packet_sock *po = pkt_sk(sk);
    struct packet_mclist *ml;

    while ((ml = po->mclist) != NULL) {
        struct net_device *dev;

        po->mclist = ml->next;
        dev = dev_get_by_index(sock_net(sk), ml->ifindex);
        if (dev)
            packet_dev_mc(dev, ml, -1);
    }
}
static int
packet_setsockopt(struct socket *sock, int level, int optname, char __user *optval, unsigned int optlen)
{
    struct sock *sk = sock->sk;
    struct packet_sock *po = pkt_sk(sk);

    if (level != SOL_PACKET)
        return -ENOPROTOOPT;

    switch (optname) {
    case PACKET_ADD_MEMBERSHIP:
    case PACKET_DROP_MEMBERSHIP:
    {
        struct packet_mreq_max mreq;
        int len = optlen;

        memset(&mreq, 0, sizeof(mreq));
        if (len < sizeof(struct packet_mreq))
        if (len > sizeof(mreq))
        if (copy_from_user(&mreq, optval, len))
        if (len < (mreq.mr_alen + offsetof(struct packet_mreq, mr_address)))
        if (optname == PACKET_ADD_MEMBERSHIP)
            ret = packet_mc_add(sk, &mreq);
        else
            ret = packet_mc_drop(sk, &mreq);
    }

#ifdef CONFIG_PACKET_MMAP
    case PACKET_RX_RING:
    case PACKET_TX_RING:
    {
        struct tpacket_req req;

        if (optlen < sizeof(req))
        if (copy_from_user(&req, optval, sizeof(req)))
        return packet_set_ring(sk, &req, 0, optname == PACKET_TX_RING);
    }
    case PACKET_COPY_THRESH:
    {
        if (optlen != sizeof(val))
        if (copy_from_user(&val, optval, sizeof(val)))

        pkt_sk(sk)->copy_thresh = val;
    }
    case PACKET_VERSION:
    {
        if (optlen != sizeof(val))
        if (po->rx_ring.pg_vec || po->tx_ring.pg_vec)
        if (copy_from_user(&val, optval, sizeof(val)))

        po->tp_version = val;
    }
    case PACKET_RESERVE:
    {
        if (optlen != sizeof(val))
        if (po->rx_ring.pg_vec || po->tx_ring.pg_vec)
        if (copy_from_user(&val, optval, sizeof(val)))
        po->tp_reserve = val;
    }
    case PACKET_LOSS:
    {
        if (optlen != sizeof(val))
        if (po->rx_ring.pg_vec || po->tx_ring.pg_vec)
        if (copy_from_user(&val, optval, sizeof(val)))
        po->tp_loss = !!val;
    }
#endif
    case PACKET_AUXDATA:
    {
        if (optlen < sizeof(val))
        if (copy_from_user(&val, optval, sizeof(val)))

        po->auxdata = !!val;
    }
    case PACKET_ORIGDEV:
    {
        if (optlen < sizeof(val))
        if (copy_from_user(&val, optval, sizeof(val)))

        po->origdev = !!val;
    }
    default:
        return -ENOPROTOOPT;
    }
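/*
 * Usage sketch (illustrative only): two common options handled above --
 * enabling auxdata delivery and putting an interface into promiscuous
 * mode (the ifindex value is an assumption).
 *
 *	int one = 1;
 *	setsockopt(fd, SOL_PACKET, PACKET_AUXDATA, &one, sizeof(one));
 *
 *	struct packet_mreq mreq = {
 *		.mr_ifindex = 2,
 *		.mr_type    = PACKET_MR_PROMISC,
 *	};
 *	setsockopt(fd, SOL_PACKET, PACKET_ADD_MEMBERSHIP,
 *		   &mreq, sizeof(mreq));
 */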
static int packet_getsockopt(struct socket *sock, int level, int optname,
                             char __user *optval, int __user *optlen)
{
    int len;
    int val;
    struct sock *sk = sock->sk;
    struct packet_sock *po = pkt_sk(sk);
    void *data;
    struct tpacket_stats st;

    if (level != SOL_PACKET)
        return -ENOPROTOOPT;

    if (get_user(len, optlen))

    switch (optname) {
    case PACKET_STATISTICS:
        if (len > sizeof(struct tpacket_stats))
            len = sizeof(struct tpacket_stats);
        spin_lock_bh(&sk->sk_receive_queue.lock);
        st = po->stats;
        memset(&po->stats, 0, sizeof(st));
        spin_unlock_bh(&sk->sk_receive_queue.lock);
        st.tp_packets += st.tp_drops;
        break;
    case PACKET_AUXDATA:
        if (len > sizeof(int))
            len = sizeof(int);
        break;
    case PACKET_ORIGDEV:
        if (len > sizeof(int))
            len = sizeof(int);
        break;
#ifdef CONFIG_PACKET_MMAP
    case PACKET_VERSION:
        if (len > sizeof(int))
            len = sizeof(int);
        val = po->tp_version;
        break;
    case PACKET_HDRLEN:
        if (len > sizeof(int))
            len = sizeof(int);
        if (copy_from_user(&val, optval, len))
            return -EFAULT;
        switch (val) {
        case TPACKET_V1:
            val = sizeof(struct tpacket_hdr);
            break;
        case TPACKET_V2:
            val = sizeof(struct tpacket2_hdr);
            break;
        default:
            return -EINVAL;
        }
        break;
    case PACKET_RESERVE:
        if (len > sizeof(unsigned int))
            len = sizeof(unsigned int);
        val = po->tp_reserve;
        break;
    case PACKET_LOSS:
        if (len > sizeof(unsigned int))
            len = sizeof(unsigned int);
        break;
#endif
    default:
        return -ENOPROTOOPT;
    }

    if (put_user(len, optlen))
        return -EFAULT;
    if (copy_to_user(optval, data, len))
        return -EFAULT;
    return 0;
}
static int packet_notifier(struct notifier_block *this, unsigned long msg, void *data)
{
    struct sock *sk;
    struct hlist_node *node;
    struct net_device *dev = data;
    struct net *net = dev_net(dev);

    read_lock(&net->packet.sklist_lock);
    sk_for_each(sk, node, &net->packet.sklist) {
        struct packet_sock *po = pkt_sk(sk);

        switch (msg) {
        case NETDEV_UNREGISTER:
            if (po->mclist)
                packet_dev_mclist(dev, po->mclist, -1);
            /* fallthrough */

        case NETDEV_DOWN:
            if (dev->ifindex == po->ifindex) {
                spin_lock(&po->bind_lock);
                __dev_remove_pack(&po->prot_hook);

                sk->sk_err = ENETDOWN;
                if (!sock_flag(sk, SOCK_DEAD))
                    sk->sk_error_report(sk);

                if (msg == NETDEV_UNREGISTER) {
                    po->prot_hook.dev = NULL;
                }
                spin_unlock(&po->bind_lock);
            }
            break;
        case NETDEV_UP:
            spin_lock(&po->bind_lock);
            if (dev->ifindex == po->ifindex && po->num &&
                !po->running) {
                dev_add_pack(&po->prot_hook);
            }
            spin_unlock(&po->bind_lock);
            break;
        }
    }
    read_unlock(&net->packet.sklist_lock);
static int packet_ioctl(struct socket *sock, unsigned int cmd,
                        unsigned long arg)
{
    struct sock *sk = sock->sk;

    switch (cmd) {
    case SIOCOUTQ:
    {
        int amount = sk_wmem_alloc_get(sk);

        return put_user(amount, (int __user *)arg);
    }
    case SIOCINQ:
    {
        struct sk_buff *skb;
        int amount = 0;

        spin_lock_bh(&sk->sk_receive_queue.lock);
        skb = skb_peek(&sk->sk_receive_queue);
        if (skb)
            amount = skb->len;
        spin_unlock_bh(&sk->sk_receive_queue.lock);
        return put_user(amount, (int __user *)arg);
    }
    case SIOCGSTAMP:
        return sock_get_timestamp(sk, (struct timeval __user *)arg);
    case SIOCGSTAMPNS:
        return sock_get_timestampns(sk, (struct timespec __user *)arg);

    case SIOCGIFBRDADDR:
    case SIOCSIFBRDADDR:
    case SIOCGIFNETMASK:
    case SIOCSIFNETMASK:
    case SIOCGIFDSTADDR:
    case SIOCSIFDSTADDR:
        if (!net_eq(sock_net(sk), &init_net))
            return -ENOIOCTLCMD;
        return inet_dgram_ops.ioctl(sock, cmd, arg);

    default:
        return -ENOIOCTLCMD;
    }
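/*
 * Usage sketch (illustrative only): the queue ioctls handled above.
 * SIOCINQ reports the size of the next queued frame, SIOCOUTQ the bytes
 * of write memory still charged to the socket.
 *
 *	int inq = 0, outq = 0;
 *	ioctl(fd, SIOCINQ, &inq);
 *	ioctl(fd, SIOCOUTQ, &outq);
 */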
#ifndef CONFIG_PACKET_MMAP
#define packet_mmap sock_no_mmap
#define packet_poll datagram_poll
#else

static unsigned int packet_poll(struct file *file, struct socket *sock,
                                poll_table *wait)
{
    struct sock *sk = sock->sk;
    struct packet_sock *po = pkt_sk(sk);
    unsigned int mask = datagram_poll(file, sock, wait);

    spin_lock_bh(&sk->sk_receive_queue.lock);
    if (po->rx_ring.pg_vec) {
        if (!packet_previous_frame(po, &po->rx_ring, TP_STATUS_KERNEL))
            mask |= POLLIN | POLLRDNORM;
    }
    spin_unlock_bh(&sk->sk_receive_queue.lock);
    spin_lock_bh(&sk->sk_write_queue.lock);
    if (po->tx_ring.pg_vec) {
        if (packet_current_frame(po, &po->tx_ring, TP_STATUS_AVAILABLE))
            mask |= POLLOUT | POLLWRNORM;
    }
    spin_unlock_bh(&sk->sk_write_queue.lock);
/* Dirty? Well, I still have not learned a better way to account
 * for user mmaps.
 */

static void packet_mm_open(struct vm_area_struct *vma)
{
    struct file *file = vma->vm_file;
    struct socket *sock = file->private_data;
    struct sock *sk = sock->sk;

    if (sk)
        atomic_inc(&pkt_sk(sk)->mapped);
}

static void packet_mm_close(struct vm_area_struct *vma)
{
    struct file *file = vma->vm_file;
    struct socket *sock = file->private_data;
    struct sock *sk = sock->sk;

    if (sk)
        atomic_dec(&pkt_sk(sk)->mapped);
}

static const struct vm_operations_struct packet_mmap_ops = {
    .open	= packet_mm_open,
    .close	= packet_mm_close,
};
static void free_pg_vec(char **pg_vec, unsigned int order, unsigned int len)
{
    int i;

    for (i = 0; i < len; i++) {
        if (likely(pg_vec[i]))
            free_pages((unsigned long) pg_vec[i], order);
    }
    kfree(pg_vec);
}

static inline char *alloc_one_pg_vec_page(unsigned long order)
{
    gfp_t gfp_flags = GFP_KERNEL | __GFP_COMP | __GFP_ZERO | __GFP_NOWARN;

    return (char *) __get_free_pages(gfp_flags, order);
}

static char **alloc_pg_vec(struct tpacket_req *req, int order)
{
    unsigned int block_nr = req->tp_block_nr;
    char **pg_vec;
    int i;

    pg_vec = kzalloc(block_nr * sizeof(char *), GFP_KERNEL);
    if (unlikely(!pg_vec))
        goto out;

    for (i = 0; i < block_nr; i++) {
        pg_vec[i] = alloc_one_pg_vec_page(order);
        if (unlikely(!pg_vec[i]))
            goto out_free_pgvec;
    }

out:
    return pg_vec;

out_free_pgvec:
    free_pg_vec(pg_vec, order, block_nr);
    pg_vec = NULL;
    goto out;
}
static int packet_set_ring(struct sock *sk, struct tpacket_req *req,
                           int closing, int tx_ring)
{
    char **pg_vec = NULL;
    struct packet_sock *po = pkt_sk(sk);
    int was_running, order = 0;
    struct packet_ring_buffer *rb;
    struct sk_buff_head *rb_queue;

    rb = tx_ring ? &po->tx_ring : &po->rx_ring;
    rb_queue = tx_ring ? &sk->sk_write_queue : &sk->sk_receive_queue;

    if (atomic_read(&po->mapped))

    if (atomic_read(&rb->pending))

    if (req->tp_block_nr) {
        /* Sanity tests and some calculations */

        if (unlikely(rb->pg_vec))

        switch (po->tp_version) {
        case TPACKET_V1:
            po->tp_hdrlen = TPACKET_HDRLEN;
            break;
        case TPACKET_V2:
            po->tp_hdrlen = TPACKET2_HDRLEN;
            break;
        }

        if (unlikely((int)req->tp_block_size <= 0))
        if (unlikely(req->tp_block_size & (PAGE_SIZE - 1)))
        if (unlikely(req->tp_frame_size < po->tp_hdrlen +
                     po->tp_reserve))
        if (unlikely(req->tp_frame_size & (TPACKET_ALIGNMENT - 1)))

        rb->frames_per_block = req->tp_block_size/req->tp_frame_size;
        if (unlikely(rb->frames_per_block <= 0))
        if (unlikely((rb->frames_per_block * req->tp_block_nr) !=
                     req->tp_frame_nr))

        order = get_order(req->tp_block_size);
        pg_vec = alloc_pg_vec(req, order);
        if (unlikely(!pg_vec))
    }

    if (unlikely(req->tp_frame_nr))

    /* Detach socket from network */
    spin_lock(&po->bind_lock);
    was_running = po->running;

    __dev_remove_pack(&po->prot_hook);

    spin_unlock(&po->bind_lock);

    mutex_lock(&po->pg_vec_lock);
    if (closing || atomic_read(&po->mapped) == 0) {
#define XC(a, b) ({ __typeof__ ((a)) __t; __t = (a); (a) = (b); __t; })
        spin_lock_bh(&rb_queue->lock);
        pg_vec = XC(rb->pg_vec, pg_vec);
        rb->frame_max = (req->tp_frame_nr - 1);
        rb->frame_size = req->tp_frame_size;
        spin_unlock_bh(&rb_queue->lock);

        order = XC(rb->pg_vec_order, order);
        req->tp_block_nr = XC(rb->pg_vec_len, req->tp_block_nr);

        rb->pg_vec_pages = req->tp_block_size/PAGE_SIZE;
        po->prot_hook.func = (po->rx_ring.pg_vec) ?
                             tpacket_rcv : packet_rcv;
        skb_queue_purge(rb_queue);
#undef XC
        if (atomic_read(&po->mapped))
            pr_err("packet_mmap: vma is busy: %d\n",
                   atomic_read(&po->mapped));
    }
    mutex_unlock(&po->pg_vec_lock);

    spin_lock(&po->bind_lock);
    if (was_running && !po->running) {
        dev_add_pack(&po->prot_hook);
    }
    spin_unlock(&po->bind_lock);

    if (pg_vec)
        free_pg_vec(pg_vec, order, req->tp_block_nr);
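/*
 * Usage sketch (illustrative only): configuring the RX ring that
 * packet_set_ring() validates above. The geometry below satisfies the
 * sanity tests (page-aligned block size, frame size a multiple of
 * TPACKET_ALIGNMENT, tp_frame_nr == frames_per_block * tp_block_nr) and
 * is otherwise an assumption.
 *
 *	struct tpacket_req req = {
 *		.tp_block_size = 4096,	// one page per block
 *		.tp_block_nr   = 32,
 *		.tp_frame_size = 2048,	// two frames per block
 *		.tp_frame_nr   = 64,
 *	};
 *	setsockopt(fd, SOL_PACKET, PACKET_RX_RING, &req, sizeof(req));
 */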
static int packet_mmap(struct file *file, struct socket *sock,
                       struct vm_area_struct *vma)
{
    struct sock *sk = sock->sk;
    struct packet_sock *po = pkt_sk(sk);
    unsigned long size, expected_size;
    struct packet_ring_buffer *rb;
    unsigned long start;
    int err = -EINVAL;
    int i;

    mutex_lock(&po->pg_vec_lock);

    expected_size = 0;
    for (rb = &po->rx_ring; rb <= &po->tx_ring; rb++) {
        if (rb->pg_vec) {
            expected_size += rb->pg_vec_len
                             * rb->pg_vec_pages
                             * PAGE_SIZE;
        }
    }

    if (expected_size == 0)
        goto out;

    size = vma->vm_end - vma->vm_start;
    if (size != expected_size)
        goto out;

    start = vma->vm_start;
    for (rb = &po->rx_ring; rb <= &po->tx_ring; rb++) {
        if (rb->pg_vec == NULL)
            continue;

        for (i = 0; i < rb->pg_vec_len; i++) {
            struct page *page = virt_to_page(rb->pg_vec[i]);
            int pg_num;

            for (pg_num = 0; pg_num < rb->pg_vec_pages;
                 pg_num++, page++) {
                err = vm_insert_page(vma, start, page);
                if (unlikely(err))
                    goto out;
                start += PAGE_SIZE;
            }
        }
    }

    atomic_inc(&po->mapped);
    vma->vm_ops = &packet_mmap_ops;
    err = 0;

out:
    mutex_unlock(&po->pg_vec_lock);
    return err;
}
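/*
 * Usage sketch (illustrative only): mapping the rings set up above and
 * polling the RX ring. Assumes TPACKET_V1 and the geometry from the
 * PACKET_RX_RING example; process() is a hypothetical consumer and
 * error handling is elided.
 *
 *	unsigned int frame_nr = 64, i = 0;
 *	char *ring = mmap(NULL, 32 * 4096, PROT_READ | PROT_WRITE,
 *			  MAP_SHARED, fd, 0);
 *	for (;;) {
 *		struct tpacket_hdr *hdr =
 *			(struct tpacket_hdr *)(ring + i * 2048);
 *		if (!(hdr->tp_status & TP_STATUS_USER)) {
 *			struct pollfd pfd = { .fd = fd, .events = POLLIN };
 *			poll(&pfd, 1, -1);
 *			continue;
 *		}
 *		// Frame payload starts tp_mac bytes into the slot.
 *		process((char *)hdr + hdr->tp_mac, hdr->tp_snaplen);
 *		hdr->tp_status = TP_STATUS_KERNEL;	// hand the slot back
 *		i = (i + 1) % frame_nr;
 *	}
 */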
static const struct proto_ops packet_ops_spkt = {
    .family =	PF_PACKET,
    .owner =	THIS_MODULE,
    .release =	packet_release,
    .bind =	packet_bind_spkt,
    .connect =	sock_no_connect,
    .socketpair =	sock_no_socketpair,
    .accept =	sock_no_accept,
    .getname =	packet_getname_spkt,
    .poll =	datagram_poll,
    .ioctl =	packet_ioctl,
    .listen =	sock_no_listen,
    .shutdown =	sock_no_shutdown,
    .setsockopt =	sock_no_setsockopt,
    .getsockopt =	sock_no_getsockopt,
    .sendmsg =	packet_sendmsg_spkt,
    .recvmsg =	packet_recvmsg,
    .mmap =	sock_no_mmap,
    .sendpage =	sock_no_sendpage,
};

static const struct proto_ops packet_ops = {
    .family =	PF_PACKET,
    .owner =	THIS_MODULE,
    .release =	packet_release,
    .bind =	packet_bind,
    .connect =	sock_no_connect,
    .socketpair =	sock_no_socketpair,
    .accept =	sock_no_accept,
    .getname =	packet_getname,
    .poll =	packet_poll,
    .ioctl =	packet_ioctl,
    .listen =	sock_no_listen,
    .shutdown =	sock_no_shutdown,
    .setsockopt =	packet_setsockopt,
    .getsockopt =	packet_getsockopt,
    .sendmsg =	packet_sendmsg,
    .recvmsg =	packet_recvmsg,
    .mmap =	packet_mmap,
    .sendpage =	sock_no_sendpage,
};

static struct net_proto_family packet_family_ops = {
    .family =	PF_PACKET,
    .create =	packet_create,
    .owner =	THIS_MODULE,
};

static struct notifier_block packet_netdev_notifier = {
    .notifier_call = packet_notifier,
};
#ifdef CONFIG_PROC_FS
static inline struct sock *packet_seq_idx(struct net *net, loff_t off)
{
    struct sock *s;
    struct hlist_node *node;

    sk_for_each(s, node, &net->packet.sklist) {
        if (!off--)
            return s;
    }
    return NULL;
}

static void *packet_seq_start(struct seq_file *seq, loff_t *pos)
    __acquires(seq_file_net(seq)->packet.sklist_lock)
{
    struct net *net = seq_file_net(seq);
    read_lock(&net->packet.sklist_lock);
    return *pos ? packet_seq_idx(net, *pos - 1) : SEQ_START_TOKEN;
}

static void *packet_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
    struct net *net = seq_file_net(seq);
    ++*pos;
    return (v == SEQ_START_TOKEN)
        ? sk_head(&net->packet.sklist)
        : sk_next((struct sock *)v);
}

static void packet_seq_stop(struct seq_file *seq, void *v)
    __releases(seq_file_net(seq)->packet.sklist_lock)
{
    struct net *net = seq_file_net(seq);
    read_unlock(&net->packet.sklist_lock);
}

static int packet_seq_show(struct seq_file *seq, void *v)
{
    if (v == SEQ_START_TOKEN)
        seq_puts(seq, "sk       RefCnt Type Proto  Iface R Rmem   User   Inode\n");
    else {
        struct sock *s = v;
        const struct packet_sock *po = pkt_sk(s);

        seq_printf(seq,
                   "%p %-6d %-4d %04x   %-5d %1d %-6u %-6u %-6lu\n",
                   s,
                   atomic_read(&s->sk_refcnt),
                   atomic_read(&s->sk_rmem_alloc),

    return 0;
}

static const struct seq_operations packet_seq_ops = {
    .start	= packet_seq_start,
    .next	= packet_seq_next,
    .stop	= packet_seq_stop,
    .show	= packet_seq_show,
};

static int packet_seq_open(struct inode *inode, struct file *file)
{
    return seq_open_net(inode, file, &packet_seq_ops,
                        sizeof(struct seq_net_private));
}

static const struct file_operations packet_seq_fops = {
    .owner		= THIS_MODULE,
    .open		= packet_seq_open,
    .read		= seq_read,
    .llseek		= seq_lseek,
    .release	= seq_release_net,
};

#endif
static int packet_net_init(struct net *net)
{
    rwlock_init(&net->packet.sklist_lock);
    INIT_HLIST_HEAD(&net->packet.sklist);

    if (!proc_net_fops_create(net, "packet", 0, &packet_seq_fops))
        return -ENOMEM;

    return 0;
}

static void packet_net_exit(struct net *net)
{
    proc_net_remove(net, "packet");
}

static struct pernet_operations packet_net_ops = {
    .init = packet_net_init,
    .exit = packet_net_exit,
};

static void __exit packet_exit(void)
{
    unregister_netdevice_notifier(&packet_netdev_notifier);
    unregister_pernet_subsys(&packet_net_ops);
    sock_unregister(PF_PACKET);
    proto_unregister(&packet_proto);
}

static int __init packet_init(void)
{
    int rc = proto_register(&packet_proto, 0);

    if (rc != 0)
        goto out;

    sock_register(&packet_family_ops);
    register_pernet_subsys(&packet_net_ops);
    register_netdevice_notifier(&packet_netdev_notifier);
out:
    return rc;
}

module_init(packet_init);
module_exit(packet_exit);
MODULE_LICENSE("GPL");
MODULE_ALIAS_NETPROTO(PF_PACKET);