#include <net/net_namespace.h>
#include <net/netns/generic.h>
#include <net/rtnetlink.h>
+#include <net/sock.h>
#include <asm/system.h>
#include <asm/uaccess.h>
atomic_t count;
struct tun_struct *tun;
struct net *net;
- wait_queue_head_t read_wait;
};
+struct tun_sock;
+
struct tun_struct {
struct tun_file *tfile;
unsigned int flags;
struct fasync_struct *fasync;
struct tap_filter txflt;
+ struct sock *sk;
+ struct socket socket;
#ifdef TUN_DEBUG
int debug;
#endif
};
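+/* A struct sock bundled with its owning tun device. The sock must
+ * remain the first member: sk_alloc() sizes the allocation from
+ * tun_proto.obj_size and returns it as a bare struct sock. */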
+struct tun_sock {
+ struct sock sk;
+ struct tun_struct *tun;
+};
+
+static inline struct tun_sock *tun_sk(struct sock *sk)
+{
+ return container_of(sk, struct tun_sock, sk);
+}
+
static int tun_attach(struct tun_struct *tun, struct file *file)
{
struct tun_file *tfile = file->private_data;
tfile->tun = tun;
tun->tfile = tfile;
dev_hold(tun->dev);
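+ /* The attached file pins the sock (and through it the netdev);
+ * the reference is dropped again in tun_chr_close(). */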
+ sock_hold(tun->sk);
atomic_inc(&tfile->count);
out:
static void __tun_detach(struct tun_struct *tun)
{
- struct tun_file *tfile = tun->tfile;
-
/* Detach from net device */
netif_tx_lock_bh(tun->dev);
- tfile->tun = NULL;
tun->tfile = NULL;
netif_tx_unlock_bh(tun->dev);
nexact = n;
- /* The rest is hashed */
+ /* The remaining addresses are hashed; all must be multicast,
+ * since any unicast address leaves the filter disabled. */
memset(filter->mask, 0, sizeof(filter->mask));
- for (; n < uf.count; n++)
+ for (; n < uf.count; n++) {
+ if (!is_multicast_ether_addr(addr[n].u)) {
+ err = 0; /* no filter */
+ goto done;
+ }
addr_hash_set(filter->mask, addr[n].u);
+ }
/* For ALLMULTI just set the mask to all ones.
* This overrides the mask populated above. */
/* Inform the methods they need to stop using the dev.
*/
if (tfile) {
- wake_up_all(&tfile->read_wait);
+ wake_up_all(&tun->socket.wait);
if (atomic_dec_and_test(&tfile->count))
__tun_detach(tun);
}
}
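+/* The netdev destructor only drops the sock's reference; the netdev
+ * itself is freed from tun_sock_destruct() once the last sock
+ * reference is gone. */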
+static void tun_free_netdev(struct net_device *dev)
+{
+ struct tun_struct *tun = netdev_priv(dev);
+
+ sock_put(tun->sk);
+}
+
/* Net device open. */
static int tun_net_open(struct net_device *dev)
{
/* Notify and wake up reader process */
if (tun->flags & TUN_FASYNC)
kill_fasync(&tun->fasync, SIGIO, POLL_IN);
- wake_up_interruptible(&tun->tfile->read_wait);
+ wake_up_interruptible(&tun->socket.wait);
return 0;
drop:
{
struct tun_file *tfile = file->private_data;
struct tun_struct *tun = __tun_get(tfile);
- unsigned int mask = POLLOUT | POLLWRNORM;
+ struct sock *sk;
+ unsigned int mask = 0;
if (!tun)
return POLLERR;
+
+ sk = tun->sk;
DBG(KERN_INFO "%s: tun_chr_poll\n", tun->dev->name);
- poll_wait(file, &tfile->read_wait, wait);
+ poll_wait(file, &tun->socket.wait, wait);
if (!skb_queue_empty(&tun->readq))
mask |= POLLIN | POLLRDNORM;
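+ /* Race-free writability test: if the sock looks full, set
+ * SOCK_ASYNC_NOSPACE and check again so a wakeup racing
+ * between the two tests is not lost. */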
+ if (sock_writeable(sk) ||
+ (!test_and_set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags) &&
+ sock_writeable(sk)))
+ mask |= POLLOUT | POLLWRNORM;
+
if (tun->dev->reg_state != NETREG_REGISTERED)
mask = POLLERR;
/* prepad is the amount to reserve at front. len is length after that.
* linear is a hint as to how much to copy (usually headers). */
-static struct sk_buff *tun_alloc_skb(size_t prepad, size_t len, size_t linear,
- gfp_t gfp)
+static inline struct sk_buff *tun_alloc_skb(struct tun_struct *tun,
+ size_t prepad, size_t len,
+ size_t linear, int noblock)
{
+ struct sock *sk = tun->sk;
struct sk_buff *skb;
- unsigned int i;
-
- skb = alloc_skb(prepad + len, gfp|__GFP_NOWARN);
- if (skb) {
- skb_reserve(skb, prepad);
- skb_put(skb, len);
- return skb;
- }
+ int err;
/* Under a page? Don't bother with paged skb. */
- if (prepad + len < PAGE_SIZE)
- return NULL;
+ if (prepad + len < PAGE_SIZE || !linear)
+ linear = len;
- /* Start with a normal skb, and add pages. */
- skb = alloc_skb(prepad + linear, gfp);
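+ /* sock_alloc_send_pskb() charges the skb against sk_sndbuf and
+ * sleeps for space, or fails with -EAGAIN when noblock is set. */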
+ skb = sock_alloc_send_pskb(sk, prepad + linear, len - linear, noblock,
+ &err);
if (!skb)
- return NULL;
+ return ERR_PTR(err);
skb_reserve(skb, prepad);
skb_put(skb, linear);
-
- len -= linear;
-
- for (i = 0; i < MAX_SKB_FRAGS; i++) {
- skb_frag_t *f = &skb_shinfo(skb)->frags[i];
-
- f->page = alloc_page(gfp|__GFP_ZERO);
- if (!f->page)
- break;
-
- f->page_offset = 0;
- f->size = PAGE_SIZE;
-
- skb->data_len += PAGE_SIZE;
- skb->len += PAGE_SIZE;
- skb->truesize += PAGE_SIZE;
- skb_shinfo(skb)->nr_frags++;
-
- if (len < PAGE_SIZE) {
- len = 0;
- break;
- }
- len -= PAGE_SIZE;
- }
-
- /* Too large, or alloc fail? */
- if (unlikely(len)) {
- kfree_skb(skb);
- skb = NULL;
- }
+ skb->data_len = len - linear;
+ skb->len += len - linear;
return skb;
}
/* Get packet from user space buffer */
-static __inline__ ssize_t tun_get_user(struct tun_struct *tun, struct iovec *iv, size_t count)
+static __inline__ ssize_t tun_get_user(struct tun_struct *tun,
+ const struct iovec *iv, size_t count,
+ int noblock)
{
struct tun_pi pi = { 0, cpu_to_be16(ETH_P_IP) };
struct sk_buff *skb;
size_t len = count, align = 0;
struct virtio_net_hdr gso = { 0 };
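+ /* Running offset of consumed headers within the iovec; using
+ * memcpy_fromiovecend() leaves iv itself const and unmodified. */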
+ int offset = 0;
if (!(tun->flags & TUN_NO_PI)) {
if ((len -= sizeof(pi)) > count)
return -EINVAL;
- if(memcpy_fromiovec((void *)&pi, iv, sizeof(pi)))
+ if (memcpy_fromiovecend((void *)&pi, iv, 0, sizeof(pi)))
return -EFAULT;
+ offset += sizeof(pi);
}
if (tun->flags & TUN_VNET_HDR) {
if ((len -= sizeof(gso)) > count)
return -EINVAL;
- if (memcpy_fromiovec((void *)&gso, iv, sizeof(gso)))
+ if (memcpy_fromiovecend((void *)&gso, iv, offset, sizeof(gso)))
return -EFAULT;
if (gso.hdr_len > len)
return -EINVAL;
+ offset += sizeof(gso);
}
if ((tun->flags & TUN_TYPE_MASK) == TUN_TAP_DEV) {
align = NET_IP_ALIGN;
- if (unlikely(len < ETH_HLEN))
+ if (unlikely(len < ETH_HLEN ||
+ (gso.hdr_len && gso.hdr_len < ETH_HLEN)))
return -EINVAL;
}
- if (!(skb = tun_alloc_skb(align, len, gso.hdr_len, GFP_KERNEL))) {
- tun->dev->stats.rx_dropped++;
- return -ENOMEM;
+ skb = tun_alloc_skb(tun, align, len, gso.hdr_len, noblock);
+ if (IS_ERR(skb)) {
+ if (PTR_ERR(skb) != -EAGAIN)
+ tun->dev->stats.rx_dropped++;
+ return PTR_ERR(skb);
}
- if (skb_copy_datagram_from_iovec(skb, 0, iv, len)) {
+ if (skb_copy_datagram_from_iovec(skb, 0, iv, offset, len)) {
tun->dev->stats.rx_dropped++;
kfree_skb(skb);
return -EFAULT;
static ssize_t tun_chr_aio_write(struct kiocb *iocb, const struct iovec *iv,
unsigned long count, loff_t pos)
{
- struct tun_struct *tun = tun_get(iocb->ki_filp);
+ struct file *file = iocb->ki_filp;
+ struct tun_struct *tun = tun_get(file);
ssize_t result;
if (!tun)
DBG(KERN_INFO "%s: tun_chr_write %ld\n", tun->dev->name, count);
- result = tun_get_user(tun, (struct iovec *) iv, iov_length(iv, count));
+ result = tun_get_user(tun, iv, iov_length(iv, count),
+ file->f_flags & O_NONBLOCK);
tun_put(tun);
return result;
/* Put packet to the user space buffer */
static __inline__ ssize_t tun_put_user(struct tun_struct *tun,
struct sk_buff *skb,
- struct iovec *iv, int len)
+ const struct iovec *iv, int len)
{
struct tun_pi pi = { 0, skb->protocol };
ssize_t total = 0;
pi.flags |= TUN_PKT_STRIP;
}
- if (memcpy_toiovec(iv, (void *) &pi, sizeof(pi)))
+ if (memcpy_toiovecend(iv, (void *) &pi, 0, sizeof(pi)))
return -EFAULT;
total += sizeof(pi);
}
gso.csum_offset = skb->csum_offset;
} /* else everything is zero */
- if (unlikely(memcpy_toiovec(iv, (void *)&gso, sizeof(gso))))
+ if (unlikely(memcpy_toiovecend(iv, (void *)&gso, total,
+ sizeof(gso))))
return -EFAULT;
total += sizeof(gso);
}
len = min_t(int, skb->len, len);
- skb_copy_datagram_iovec(skb, 0, iv, len);
+ skb_copy_datagram_const_iovec(skb, 0, iv, total, len);
total += len;
tun->dev->stats.tx_packets++;
goto out;
}
- add_wait_queue(&tfile->read_wait, &wait);
+ add_wait_queue(&tun->socket.wait, &wait);
while (len) {
current->state = TASK_INTERRUPTIBLE;
}
netif_wake_queue(tun->dev);
- ret = tun_put_user(tun, skb, (struct iovec *) iv, len);
+ ret = tun_put_user(tun, skb, iv, len);
kfree_skb(skb);
break;
}
current->state = TASK_RUNNING;
- remove_wait_queue(&tfile->read_wait, &wait);
+ remove_wait_queue(&tun->socket.wait, &wait);
out:
tun_put(tun);
tun->group = -1;
dev->ethtool_ops = &tun_ethtool_ops;
- dev->destructor = free_netdev;
+ dev->destructor = tun_free_netdev;
}
/* Trivial set of netlink ops to allow deleting tun or tap
.validate = tun_validate,
};
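+/* Write-space callback, invoked as queued skbs are freed: wake any
+ * blocked writers and raise SIGIO once the sock is writable again. */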
+static void tun_sock_write_space(struct sock *sk)
+{
+ struct tun_struct *tun;
+
+ if (!sock_writeable(sk))
+ return;
+
+ if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
+ wake_up_interruptible_sync(sk->sk_sleep);
+
+ if (!test_and_clear_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags))
+ return;
+
+ tun = container_of(sk, struct tun_sock, sk)->tun;
+ kill_fasync(&tun->fasync, SIGIO, POLL_OUT);
+}
+
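+/* Runs when the last sock reference is dropped; only then is it safe
+ * to free the netdev that carries the tun_struct. */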
+static void tun_sock_destruct(struct sock *sk)
+{
+ free_netdev(container_of(sk, struct tun_sock, sk)->tun->dev);
+}
+
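+/* obj_size makes sk_alloc() allocate a full tun_sock rather than a
+ * bare struct sock. */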
+static struct proto tun_proto = {
+ .name = "tun",
+ .owner = THIS_MODULE,
+ .obj_size = sizeof(struct tun_sock),
+};
static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
{
+ struct sock *sk;
struct tun_struct *tun;
struct net_device *dev;
int err;
dev = __dev_get_by_name(net, ifr->ifr_name);
if (dev) {
+ if (ifr->ifr_flags & IFF_TUN_EXCL)
+ return -EBUSY;
if ((ifr->ifr_flags & IFF_TUN) && dev->netdev_ops == &tun_netdev_ops)
tun = netdev_priv(dev);
else if ((ifr->ifr_flags & IFF_TAP) && dev->netdev_ops == &tap_netdev_ops)
tun->flags = flags;
tun->txflt.count = 0;
+ err = -ENOMEM;
+ sk = sk_alloc(net, AF_UNSPEC, GFP_KERNEL, &tun_proto);
+ if (!sk)
+ goto err_free_dev;
+
+ init_waitqueue_head(&tun->socket.wait);
+ sock_init_data(&tun->socket, sk);
+ sk->sk_write_space = tun_sock_write_space;
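+ /* Effectively unlimited by default; TUNSETSNDBUF below lets
+ * userspace bound how much data may be queued on the device. */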
+ sk->sk_sndbuf = INT_MAX;
+
+ tun->sk = sk;
+ container_of(sk, struct tun_sock, sk)->tun = tun;
+
tun_net_init(dev);
if (strchr(dev->name, '%')) {
err = dev_alloc_name(dev, dev->name);
if (err < 0)
- goto err_free_dev;
+ goto err_free_sk;
}
err = register_netdevice(tun->dev);
if (err < 0)
- goto err_free_dev;
+ goto err_free_sk;
+
+ sk->sk_destruct = tun_sock_destruct;
err = tun_attach(tun, file);
if (err < 0)
- goto err_free_dev;
+ goto failed;
}
DBG(KERN_INFO "%s: tun_set_iff\n", tun->dev->name);
strcpy(ifr->ifr_name, tun->dev->name);
return 0;
+ err_free_sk:
+ sock_put(sk);
err_free_dev:
free_netdev(dev);
failed:
struct tun_struct *tun;
void __user* argp = (void __user*)arg;
struct ifreq ifr;
+ int sndbuf;
int ret;
if (cmd == TUNSETIFF || _IOC_TYPE(cmd) == 0x89)
ret = dev_set_mac_address(tun->dev, &ifr.ifr_hwaddr);
rtnl_unlock();
break;
+
+ case TUNGETSNDBUF:
+ sndbuf = tun->sk->sk_sndbuf;
+ if (copy_to_user(argp, &sndbuf, sizeof(sndbuf)))
+ ret = -EFAULT;
+ break;
+
+ case TUNSETSNDBUF:
+ if (copy_from_user(&sndbuf, argp, sizeof(sndbuf))) {
+ ret = -EFAULT;
+ break;
+ }
+
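+ /* XXX: applied unchecked; a sndbuf <= 0 would keep sock_writeable()
+ * false forever and stall every writer on this device. */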
+ tun->sk->sk_sndbuf = sndbuf;
+ break;
+
default:
ret = -EINVAL;
break;
atomic_set(&tfile->count, 0);
tfile->tun = NULL;
tfile->net = get_net(current->nsproxy->net_ns);
- init_waitqueue_head(&tfile->read_wait);
file->private_data = tfile;
return 0;
}
rtnl_unlock();
}
+ tun = tfile->tun;
+ if (tun)
+ sock_put(tun->sk);
+
put_net(tfile->net);
kfree(tfile);