virtio-net: fix data corruption with OOM
drivers/net/virtio_net.c
/* A network driver using virtio.
 *
 * Copyright 2007 Rusty Russell <rusty@rustcorp.com.au> IBM Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
//#define DEBUG
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/module.h>
#include <linux/virtio.h>
#include <linux/virtio_ids.h>
#include <linux/virtio_net.h>
#include <linux/scatterlist.h>
#include <linux/if_vlan.h>

static int napi_weight = 128;
module_param(napi_weight, int, 0444);

static int csum = 1, gso = 1;
module_param(csum, bool, 0444);
module_param(gso, bool, 0444);
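
/* napi_weight bounds how many packets a single NAPI poll may consume;
 * csum and gso only gate which checksum/offload feature bits
 * virtnet_probe() below will accept from the host. */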

/* FIXME: MTU in config. */
#define MAX_PACKET_LEN (ETH_HLEN + VLAN_HLEN + ETH_DATA_LEN)
#define GOOD_COPY_LEN   128

#define VIRTNET_SEND_COMMAND_SG_MAX    2

struct virtnet_info
{
        struct virtio_device *vdev;
        struct virtqueue *rvq, *svq, *cvq;
        struct net_device *dev;
        struct napi_struct napi;
        unsigned int status;

        /* Number of input buffers, and max we've ever had. */
        unsigned int num, max;

        /* I like... big packets and I cannot lie! */
        bool big_packets;

        /* Host will merge rx buffers for big packets (shake it! shake it!) */
        bool mergeable_rx_bufs;

        /* Receive & send queues. */
        struct sk_buff_head recv;
        struct sk_buff_head send;

        /* Work struct for refilling if we run low on memory. */
        struct delayed_work refill;

        /* Chain pages by the private ptr. */
        struct page *pages;
};

struct skb_vnet_hdr {
        union {
                struct virtio_net_hdr hdr;
                struct virtio_net_hdr_mrg_rxbuf mhdr;
        };
        unsigned int num_sg;
};

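/* The per-skb virtio header lives in skb->cb, the 48-byte control
 * block private to the current layer.  Either union member fits:
 * virtio_net_hdr_mrg_rxbuf is just virtio_net_hdr plus a 16-bit
 * buffer count. */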
static inline struct skb_vnet_hdr *skb_vnet_hdr(struct sk_buff *skb)
{
        return (struct skb_vnet_hdr *)skb->cb;
}

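/* Receive pages are kept on a simple LIFO freelist chained through
 * page->private, so pages recovered from trimmed or merged skbs can be
 * reused without another trip to the page allocator; get_a_page()
 * falls back to alloc_page() only when the list is empty. */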
static void give_a_page(struct virtnet_info *vi, struct page *page)
{
        page->private = (unsigned long)vi->pages;
        vi->pages = page;
}

static void trim_pages(struct virtnet_info *vi, struct sk_buff *skb)
{
        unsigned int i;

        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
                give_a_page(vi, skb_shinfo(skb)->frags[i].page);
        skb_shinfo(skb)->nr_frags = 0;
        skb->data_len = 0;
}

static struct page *get_a_page(struct virtnet_info *vi, gfp_t gfp_mask)
{
        struct page *p = vi->pages;

        if (p)
                vi->pages = (struct page *)p->private;
        else
                p = alloc_page(gfp_mask);
        return p;
}

static void skb_xmit_done(struct virtqueue *svq)
{
        struct virtnet_info *vi = svq->vdev->priv;

        /* Suppress further interrupts. */
        svq->vq_ops->disable_cb(svq);

        /* We were probably waiting for more output buffers. */
        netif_wake_queue(vi->dev);
}

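/* Unlink and deliver one received skb.  With mergeable rx buffers a
 * packet may span several page-sized buffers: the header plus the
 * first chunk are copied into the linear area, and the remaining
 * num_buffers - 1 buffers are pulled off the virtqueue and grafted in
 * as page fragments.  Without them, the buffer arrives fully formed
 * and only needs trimming to the received length. */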
static void receive_skb(struct net_device *dev, struct sk_buff *skb,
                        unsigned len)
{
        struct virtnet_info *vi = netdev_priv(dev);
        struct skb_vnet_hdr *hdr = skb_vnet_hdr(skb);
        int err;
        int i;

        if (unlikely(len < sizeof(struct virtio_net_hdr) + ETH_HLEN)) {
                pr_debug("%s: short packet %i\n", dev->name, len);
                dev->stats.rx_length_errors++;
                goto drop;
        }

        if (vi->mergeable_rx_bufs) {
                unsigned int copy;
                char *p = page_address(skb_shinfo(skb)->frags[0].page);

                if (len > PAGE_SIZE)
                        len = PAGE_SIZE;
                len -= sizeof(struct virtio_net_hdr_mrg_rxbuf);

                memcpy(&hdr->mhdr, p, sizeof(hdr->mhdr));
                p += sizeof(hdr->mhdr);

                copy = len;
                if (copy > skb_tailroom(skb))
                        copy = skb_tailroom(skb);

                memcpy(skb_put(skb, copy), p, copy);

                len -= copy;

                if (!len) {
                        give_a_page(vi, skb_shinfo(skb)->frags[0].page);
                        skb_shinfo(skb)->nr_frags--;
                } else {
                        skb_shinfo(skb)->frags[0].page_offset +=
                                sizeof(hdr->mhdr) + copy;
                        skb_shinfo(skb)->frags[0].size = len;
                        skb->data_len += len;
                        skb->len += len;
                }

                while (--hdr->mhdr.num_buffers) {
                        struct sk_buff *nskb;

                        i = skb_shinfo(skb)->nr_frags;
                        if (i >= MAX_SKB_FRAGS) {
                                pr_debug("%s: packet too long %d\n", dev->name,
                                         len);
                                dev->stats.rx_length_errors++;
                                goto drop;
                        }

                        nskb = vi->rvq->vq_ops->get_buf(vi->rvq, &len);
                        if (!nskb) {
                                pr_debug("%s: rx error: %d buffers missing\n",
                                         dev->name, hdr->mhdr.num_buffers);
                                dev->stats.rx_length_errors++;
                                goto drop;
                        }

                        __skb_unlink(nskb, &vi->recv);
                        vi->num--;

                        skb_shinfo(skb)->frags[i] = skb_shinfo(nskb)->frags[0];
                        skb_shinfo(nskb)->nr_frags = 0;
                        kfree_skb(nskb);

                        if (len > PAGE_SIZE)
                                len = PAGE_SIZE;

                        skb_shinfo(skb)->frags[i].size = len;
                        skb_shinfo(skb)->nr_frags++;
                        skb->data_len += len;
                        skb->len += len;
                }
        } else {
                len -= sizeof(hdr->hdr);

                if (len <= MAX_PACKET_LEN)
                        trim_pages(vi, skb);

                err = pskb_trim(skb, len);
                if (err) {
                        pr_debug("%s: pskb_trim failed %i %d\n", dev->name,
                                 len, err);
                        dev->stats.rx_dropped++;
                        goto drop;
                }
        }

        skb->truesize += skb->data_len;
        dev->stats.rx_bytes += skb->len;
        dev->stats.rx_packets++;

        if (hdr->hdr.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) {
                pr_debug("Needs csum!\n");
                if (!skb_partial_csum_set(skb,
                                          hdr->hdr.csum_start,
                                          hdr->hdr.csum_offset))
                        goto frame_err;
        }

        skb->protocol = eth_type_trans(skb, dev);
        pr_debug("Receiving skb proto 0x%04x len %i type %i\n",
                 ntohs(skb->protocol), skb->len, skb->pkt_type);

        if (hdr->hdr.gso_type != VIRTIO_NET_HDR_GSO_NONE) {
                pr_debug("GSO!\n");
                switch (hdr->hdr.gso_type & ~VIRTIO_NET_HDR_GSO_ECN) {
                case VIRTIO_NET_HDR_GSO_TCPV4:
                        skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
                        break;
                case VIRTIO_NET_HDR_GSO_UDP:
                        skb_shinfo(skb)->gso_type = SKB_GSO_UDP;
                        break;
                case VIRTIO_NET_HDR_GSO_TCPV6:
                        skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
                        break;
                default:
                        if (net_ratelimit())
                                printk(KERN_WARNING "%s: bad gso type %u.\n",
                                       dev->name, hdr->hdr.gso_type);
                        goto frame_err;
                }

                if (hdr->hdr.gso_type & VIRTIO_NET_HDR_GSO_ECN)
                        skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_ECN;

                skb_shinfo(skb)->gso_size = hdr->hdr.gso_size;
                if (skb_shinfo(skb)->gso_size == 0) {
                        if (net_ratelimit())
                                printk(KERN_WARNING "%s: zero gso size.\n",
                                       dev->name);
                        goto frame_err;
                }

                /* Header must be checked, and gso_segs computed. */
                skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY;
                skb_shinfo(skb)->gso_segs = 0;
        }

        netif_receive_skb(skb);
        return;

frame_err:
        dev->stats.rx_frame_errors++;
drop:
        dev_kfree_skb(skb);
}

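/* Post receive buffers the pre-mergeable way: each buffer is a single
 * skb big enough for a MAX_PACKET_LEN packet, extended (when the host
 * may send us GSO packets) with up to MAX_SKB_FRAGS whole pages, all
 * described by one scatterlist headed by the virtio_net_hdr. */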
static bool try_fill_recv_maxbufs(struct virtnet_info *vi, gfp_t gfp)
{
        struct sk_buff *skb;
        struct scatterlist sg[2+MAX_SKB_FRAGS];
        int num, err, i;
        bool oom = false;

        sg_init_table(sg, 2+MAX_SKB_FRAGS);
        do {
                struct skb_vnet_hdr *hdr;

                skb = netdev_alloc_skb(vi->dev, MAX_PACKET_LEN + NET_IP_ALIGN);
                if (unlikely(!skb)) {
                        oom = true;
                        break;
                }

                skb_reserve(skb, NET_IP_ALIGN);
                skb_put(skb, MAX_PACKET_LEN);

                hdr = skb_vnet_hdr(skb);
                sg_set_buf(sg, &hdr->hdr, sizeof(hdr->hdr));

                if (vi->big_packets) {
                        for (i = 0; i < MAX_SKB_FRAGS; i++) {
                                skb_frag_t *f = &skb_shinfo(skb)->frags[i];
                                f->page = get_a_page(vi, gfp);
                                if (!f->page)
                                        break;

                                f->page_offset = 0;
                                f->size = PAGE_SIZE;

                                skb->data_len += PAGE_SIZE;
                                skb->len += PAGE_SIZE;

                                skb_shinfo(skb)->nr_frags++;
                        }
                }

                num = skb_to_sgvec(skb, sg+1, 0, skb->len) + 1;
                skb_queue_head(&vi->recv, skb);

                err = vi->rvq->vq_ops->add_buf(vi->rvq, sg, 0, num, skb);
                if (err < 0) {
                        skb_unlink(skb, &vi->recv);
                        trim_pages(vi, skb);
                        kfree_skb(skb);
                        break;
                }
                vi->num++;
        } while (err >= num);
        if (unlikely(vi->num > vi->max))
                vi->max = vi->num;
        vi->rvq->vq_ops->kick(vi->rvq);
        return !oom;
}

/* Returns false if we couldn't fill entirely (OOM). */
static bool try_fill_recv(struct virtnet_info *vi, gfp_t gfp)
{
        struct sk_buff *skb;
        struct scatterlist sg[1];
        int err;
        bool oom = false;

        if (!vi->mergeable_rx_bufs)
                return try_fill_recv_maxbufs(vi, gfp);

        do {
                skb_frag_t *f;

                skb = netdev_alloc_skb(vi->dev, GOOD_COPY_LEN + NET_IP_ALIGN);
                if (unlikely(!skb)) {
                        oom = true;
                        break;
                }

                skb_reserve(skb, NET_IP_ALIGN);

                f = &skb_shinfo(skb)->frags[0];
                f->page = get_a_page(vi, gfp);
                if (!f->page) {
                        oom = true;
                        kfree_skb(skb);
                        break;
                }

                f->page_offset = 0;
                f->size = PAGE_SIZE;

                skb_shinfo(skb)->nr_frags++;

                sg_init_one(sg, page_address(f->page), PAGE_SIZE);
                skb_queue_head(&vi->recv, skb);

                err = vi->rvq->vq_ops->add_buf(vi->rvq, sg, 0, 1, skb);
                if (err < 0) {
                        skb_unlink(skb, &vi->recv);
                        kfree_skb(skb);
                        break;
                }
                vi->num++;
        } while (err > 0);
        if (unlikely(vi->num > vi->max))
                vi->max = vi->num;
        vi->rvq->vq_ops->kick(vi->rvq);
        return !oom;
}

static void skb_recv_done(struct virtqueue *rvq)
{
        struct virtnet_info *vi = rvq->vdev->priv;
        /* Schedule NAPI; suppress further interrupts if successful. */
        if (napi_schedule_prep(&vi->napi)) {
                rvq->vq_ops->disable_cb(rvq);
                __napi_schedule(&vi->napi);
        }
}

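/* Process-context refill, scheduled when a GFP_ATOMIC refill fails.
 * NAPI is disabled around try_fill_recv() so the GFP_KERNEL
 * allocations cannot race with the poll loop, and the work re-arms
 * itself while the ring stays empty. */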
static void refill_work(struct work_struct *work)
{
        struct virtnet_info *vi;
        bool still_empty;

        vi = container_of(work, struct virtnet_info, refill.work);
        napi_disable(&vi->napi);
        try_fill_recv(vi, GFP_KERNEL);
        still_empty = (vi->num == 0);
        napi_enable(&vi->napi);

        /* In theory, this can happen: if we don't get any buffers in,
         * we will *never* try to fill again. */
        if (still_empty)
                schedule_delayed_work(&vi->refill, HZ/2);
}

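/* NAPI poll: drain up to @budget packets, top the rx ring back up once
 * it falls below half its high-water mark (deferring to refill_work if
 * the atomic allocation fails), and re-enable callbacks only when the
 * ring is empty, looping again if more buffers slipped in meanwhile. */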
static int virtnet_poll(struct napi_struct *napi, int budget)
{
        struct virtnet_info *vi = container_of(napi, struct virtnet_info, napi);
        struct sk_buff *skb = NULL;
        unsigned int len, received = 0;

again:
        while (received < budget &&
               (skb = vi->rvq->vq_ops->get_buf(vi->rvq, &len)) != NULL) {
                __skb_unlink(skb, &vi->recv);
                receive_skb(vi->dev, skb, len);
                vi->num--;
                received++;
        }

        if (vi->num < vi->max / 2) {
                if (!try_fill_recv(vi, GFP_ATOMIC))
                        schedule_delayed_work(&vi->refill, 0);
        }

        /* Out of packets? */
        if (received < budget) {
                napi_complete(napi);
                if (unlikely(!vi->rvq->vq_ops->enable_cb(vi->rvq))
                    && napi_schedule_prep(napi)) {
                        vi->rvq->vq_ops->disable_cb(vi->rvq);
                        __napi_schedule(napi);
                        goto again;
                }
        }

        return received;
}

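/* Reclaim completed tx skbs and return the number of descriptors
 * (scatterlist entries) they occupied; start_xmit() uses this count to
 * track how much ring capacity was freed. */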
static unsigned int free_old_xmit_skbs(struct virtnet_info *vi)
{
        struct sk_buff *skb;
        unsigned int len, tot_sgs = 0;

        while ((skb = vi->svq->vq_ops->get_buf(vi->svq, &len)) != NULL) {
                pr_debug("Sent skb %p\n", skb);
                __skb_unlink(skb, &vi->send);
                vi->dev->stats.tx_bytes += skb->len;
                vi->dev->stats.tx_packets++;
                tot_sgs += skb_vnet_hdr(skb)->num_sg;
                dev_kfree_skb_any(skb);
        }
        return tot_sgs;
}

static int xmit_skb(struct virtnet_info *vi, struct sk_buff *skb)
{
        struct scatterlist sg[2+MAX_SKB_FRAGS];
        struct skb_vnet_hdr *hdr = skb_vnet_hdr(skb);
        const unsigned char *dest = ((struct ethhdr *)skb->data)->h_dest;

        sg_init_table(sg, 2+MAX_SKB_FRAGS);

        pr_debug("%s: xmit %p %pM\n", vi->dev->name, skb, dest);

        if (skb->ip_summed == CHECKSUM_PARTIAL) {
                hdr->hdr.flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
                hdr->hdr.csum_start = skb->csum_start - skb_headroom(skb);
                hdr->hdr.csum_offset = skb->csum_offset;
        } else {
                hdr->hdr.flags = 0;
                hdr->hdr.csum_offset = hdr->hdr.csum_start = 0;
        }

        if (skb_is_gso(skb)) {
                hdr->hdr.hdr_len = skb_headlen(skb);
                hdr->hdr.gso_size = skb_shinfo(skb)->gso_size;
                if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
                        hdr->hdr.gso_type = VIRTIO_NET_HDR_GSO_TCPV4;
                else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
                        hdr->hdr.gso_type = VIRTIO_NET_HDR_GSO_TCPV6;
                else if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP)
                        hdr->hdr.gso_type = VIRTIO_NET_HDR_GSO_UDP;
                else
                        BUG();
                if (skb_shinfo(skb)->gso_type & SKB_GSO_TCP_ECN)
                        hdr->hdr.gso_type |= VIRTIO_NET_HDR_GSO_ECN;
        } else {
                hdr->hdr.gso_type = VIRTIO_NET_HDR_GSO_NONE;
                hdr->hdr.gso_size = hdr->hdr.hdr_len = 0;
        }

        hdr->mhdr.num_buffers = 0;

        /* Encode metadata header at front. */
        if (vi->mergeable_rx_bufs)
                sg_set_buf(sg, &hdr->mhdr, sizeof(hdr->mhdr));
        else
                sg_set_buf(sg, &hdr->hdr, sizeof(hdr->hdr));

        hdr->num_sg = skb_to_sgvec(skb, sg+1, 0, skb->len) + 1;
        return vi->svq->vq_ops->add_buf(vi->svq, sg, hdr->num_sg, 0, skb);
}

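/* In this tree, add_buf() returns the ring capacity still free on
 * success (negative on failure), so "capacity" below is descriptor
 * headroom.  The queue is stopped while fewer than 2+MAX_SKB_FRAGS
 * descriptors remain, i.e. whenever a worst-case packet might not
 * fit. */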
static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev)
{
        struct virtnet_info *vi = netdev_priv(dev);
        int capacity;

again:
        /* Free up any pending old buffers before queueing new ones. */
        free_old_xmit_skbs(vi);

        /* Try to transmit */
        capacity = xmit_skb(vi, skb);

        /* This can happen with OOM and indirect buffers. */
        if (unlikely(capacity < 0)) {
                netif_stop_queue(dev);
                dev_warn(&dev->dev, "Unexpected full queue\n");
                if (unlikely(!vi->svq->vq_ops->enable_cb(vi->svq))) {
                        vi->svq->vq_ops->disable_cb(vi->svq);
                        netif_start_queue(dev);
                        goto again;
                }
                return NETDEV_TX_BUSY;
        }
        vi->svq->vq_ops->kick(vi->svq);

        /*
         * Put new one in send queue.  You'd expect we'd need this before
         * xmit_skb calls add_buf(), since the callback can be triggered
         * immediately after that.  But since the callback just triggers
         * another call back here, normal network xmit locking prevents the
         * race.
         */
        __skb_queue_head(&vi->send, skb);

        /* Don't wait up for transmitted skbs to be freed. */
        skb_orphan(skb);
        nf_reset(skb);

        /* Apparently nice girls don't return TX_BUSY; stop the queue
         * before it gets out of hand.  Naturally, this wastes entries. */
        if (capacity < 2+MAX_SKB_FRAGS) {
                netif_stop_queue(dev);
                if (unlikely(!vi->svq->vq_ops->enable_cb(vi->svq))) {
                        /* More just got used; free them, then recheck. */
                        capacity += free_old_xmit_skbs(vi);
                        if (capacity >= 2+MAX_SKB_FRAGS) {
                                netif_start_queue(dev);
                                vi->svq->vq_ops->disable_cb(vi->svq);
                        }
                }
        }

        return NETDEV_TX_OK;
}

static int virtnet_set_mac_address(struct net_device *dev, void *p)
{
        struct virtnet_info *vi = netdev_priv(dev);
        struct virtio_device *vdev = vi->vdev;
        int ret;

        ret = eth_mac_addr(dev, p);
        if (ret)
                return ret;

        if (virtio_has_feature(vdev, VIRTIO_NET_F_MAC))
                vdev->config->set(vdev, offsetof(struct virtio_net_config, mac),
                                  dev->dev_addr, dev->addr_len);

        return 0;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void virtnet_netpoll(struct net_device *dev)
{
        struct virtnet_info *vi = netdev_priv(dev);

        napi_schedule(&vi->napi);
}
#endif

static int virtnet_open(struct net_device *dev)
{
        struct virtnet_info *vi = netdev_priv(dev);

        napi_enable(&vi->napi);

        /* If all buffers were filled by the other side before we enabled
         * NAPI, we won't get another interrupt, so process any outstanding
         * packets now.  virtnet_poll wants to re-enable the queue, so we
         * disable it here.  We synchronize against interrupts via
         * NAPI_STATE_SCHED. */
        if (napi_schedule_prep(&vi->napi)) {
                vi->rvq->vq_ops->disable_cb(vi->rvq);
                __napi_schedule(&vi->napi);
        }
        return 0;
}

/*
 * Send command via the control virtqueue and check status.  Commands
 * supported by the hypervisor, as indicated by feature bits, should
 * never fail unless improperly formatted.
 */
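/*
 * The request sits in a single scatterlist: the ctrl header first,
 * then the caller's out buffers, then the caller's in buffers, and
 * finally a one-byte status that the host writes back.
 */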
static bool virtnet_send_command(struct virtnet_info *vi, u8 class, u8 cmd,
                                 struct scatterlist *data, int out, int in)
{
        struct scatterlist *s, sg[VIRTNET_SEND_COMMAND_SG_MAX + 2];
        struct virtio_net_ctrl_hdr ctrl;
        virtio_net_ctrl_ack status = ~0;
        unsigned int tmp;
        int i;

        /* Caller should know better */
        BUG_ON(!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ) ||
                (out + in > VIRTNET_SEND_COMMAND_SG_MAX));

        out++; /* Add header */
        in++; /* Add return status */

        ctrl.class = class;
        ctrl.cmd = cmd;

        sg_init_table(sg, out + in);

        sg_set_buf(&sg[0], &ctrl, sizeof(ctrl));
        for_each_sg(data, s, out + in - 2, i)
                sg_set_buf(&sg[i + 1], sg_virt(s), s->length);
        sg_set_buf(&sg[out + in - 1], &status, sizeof(status));

        BUG_ON(vi->cvq->vq_ops->add_buf(vi->cvq, sg, out, in, vi) < 0);

        vi->cvq->vq_ops->kick(vi->cvq);

        /*
         * Spin for a response; the kick causes an ioport write, trapping
         * into the hypervisor, so the request should be handled immediately.
         */
        while (!vi->cvq->vq_ops->get_buf(vi->cvq, &tmp))
                cpu_relax();

        return status == VIRTIO_NET_OK;
}

static int virtnet_close(struct net_device *dev)
{
        struct virtnet_info *vi = netdev_priv(dev);

        napi_disable(&vi->napi);

        return 0;
}

static int virtnet_set_tx_csum(struct net_device *dev, u32 data)
{
        struct virtnet_info *vi = netdev_priv(dev);
        struct virtio_device *vdev = vi->vdev;

        if (data && !virtio_has_feature(vdev, VIRTIO_NET_F_CSUM))
                return -ENOSYS;

        return ethtool_op_set_tx_hw_csum(dev, data);
}

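/* Rx filtering is pushed to the host over the control vq: the
 * promiscuous and allmulti toggles first, then one
 * VIRTIO_NET_CTRL_MAC_TABLE_SET request carrying both address tables
 * back to back in a single allocation: the unicast list (count +
 * entries) followed by the multicast list. */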
static void virtnet_set_rx_mode(struct net_device *dev)
{
        struct virtnet_info *vi = netdev_priv(dev);
        struct scatterlist sg[2];
        u8 promisc, allmulti;
        struct virtio_net_ctrl_mac *mac_data;
        struct dev_addr_list *addr;
        struct netdev_hw_addr *ha;
        void *buf;
        int i;

        /* We can't dynamically set ndo_set_rx_mode, so return gracefully */
        if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_RX))
                return;

        promisc = ((dev->flags & IFF_PROMISC) != 0);
        allmulti = ((dev->flags & IFF_ALLMULTI) != 0);

        sg_init_one(sg, &promisc, sizeof(promisc));

        if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_RX,
                                  VIRTIO_NET_CTRL_RX_PROMISC,
                                  sg, 1, 0))
                dev_warn(&dev->dev, "Failed to %sable promisc mode.\n",
                         promisc ? "en" : "dis");

        sg_init_one(sg, &allmulti, sizeof(allmulti));

        if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_RX,
                                  VIRTIO_NET_CTRL_RX_ALLMULTI,
                                  sg, 1, 0))
                dev_warn(&dev->dev, "Failed to %sable allmulti mode.\n",
                         allmulti ? "en" : "dis");

        /* MAC filter - use one buffer for both lists */
        mac_data = buf = kzalloc(((dev->uc.count + dev->mc_count) * ETH_ALEN) +
                                 (2 * sizeof(mac_data->entries)), GFP_ATOMIC);
        if (!buf) {
                dev_warn(&dev->dev, "No memory for MAC address buffer\n");
                return;
        }

        sg_init_table(sg, 2);

        /* Store the unicast list and count in the front of the buffer */
        mac_data->entries = dev->uc.count;
        i = 0;
        list_for_each_entry(ha, &dev->uc.list, list)
                memcpy(&mac_data->macs[i++][0], ha->addr, ETH_ALEN);

        sg_set_buf(&sg[0], mac_data,
                   sizeof(mac_data->entries) + (dev->uc.count * ETH_ALEN));

        /* multicast list and count fill the end */
        mac_data = (void *)&mac_data->macs[dev->uc.count][0];

        mac_data->entries = dev->mc_count;
        addr = dev->mc_list;
        for (i = 0; i < dev->mc_count; i++, addr = addr->next)
                memcpy(&mac_data->macs[i][0], addr->da_addr, ETH_ALEN);

        sg_set_buf(&sg[1], mac_data,
                   sizeof(mac_data->entries) + (dev->mc_count * ETH_ALEN));

        if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MAC,
                                  VIRTIO_NET_CTRL_MAC_TABLE_SET,
                                  sg, 2, 0))
                dev_warn(&dev->dev, "Failed to set MAC filter table.\n");

        kfree(buf);
}

static void virtnet_vlan_rx_add_vid(struct net_device *dev, u16 vid)
{
        struct virtnet_info *vi = netdev_priv(dev);
        struct scatterlist sg;

        sg_init_one(&sg, &vid, sizeof(vid));

        if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_VLAN,
                                  VIRTIO_NET_CTRL_VLAN_ADD, &sg, 1, 0))
                dev_warn(&dev->dev, "Failed to add VLAN ID %d.\n", vid);
}

static void virtnet_vlan_rx_kill_vid(struct net_device *dev, u16 vid)
{
        struct virtnet_info *vi = netdev_priv(dev);
        struct scatterlist sg;

        sg_init_one(&sg, &vid, sizeof(vid));

        if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_VLAN,
                                  VIRTIO_NET_CTRL_VLAN_DEL, &sg, 1, 0))
                dev_warn(&dev->dev, "Failed to kill VLAN ID %d.\n", vid);
}

static const struct ethtool_ops virtnet_ethtool_ops = {
        .set_tx_csum = virtnet_set_tx_csum,
        .set_sg = ethtool_op_set_sg,
        .set_tso = ethtool_op_set_tso,
        .set_ufo = ethtool_op_set_ufo,
        .get_link = ethtool_op_get_link,
};

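/* 68 bytes is the RFC 791 minimum datagram size every IPv4 host must
 * accept; 65535 is the maximum IP packet length. */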
#define MIN_MTU 68
#define MAX_MTU 65535

static int virtnet_change_mtu(struct net_device *dev, int new_mtu)
{
        if (new_mtu < MIN_MTU || new_mtu > MAX_MTU)
                return -EINVAL;
        dev->mtu = new_mtu;
        return 0;
}

static const struct net_device_ops virtnet_netdev = {
        .ndo_open            = virtnet_open,
        .ndo_stop            = virtnet_close,
        .ndo_start_xmit      = start_xmit,
        .ndo_validate_addr   = eth_validate_addr,
        .ndo_set_mac_address = virtnet_set_mac_address,
        .ndo_set_rx_mode     = virtnet_set_rx_mode,
        .ndo_change_mtu      = virtnet_change_mtu,
        .ndo_vlan_rx_add_vid = virtnet_vlan_rx_add_vid,
        .ndo_vlan_rx_kill_vid = virtnet_vlan_rx_kill_vid,
#ifdef CONFIG_NET_POLL_CONTROLLER
        .ndo_poll_controller = virtnet_netpoll,
#endif
};

static void virtnet_update_status(struct virtnet_info *vi)
{
        u16 v;

        if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_STATUS))
                return;

        vi->vdev->config->get(vi->vdev,
                              offsetof(struct virtio_net_config, status),
                              &v, sizeof(v));

        /* Ignore unknown (future) status bits */
        v &= VIRTIO_NET_S_LINK_UP;

        if (vi->status == v)
                return;

        vi->status = v;

        if (vi->status & VIRTIO_NET_S_LINK_UP) {
                netif_carrier_on(vi->dev);
                netif_wake_queue(vi->dev);
        } else {
                netif_carrier_off(vi->dev);
                netif_stop_queue(vi->dev);
        }
}

static void virtnet_config_changed(struct virtio_device *vdev)
{
        struct virtnet_info *vi = vdev->priv;

        virtnet_update_status(vi);
}

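/* Probe sets up the netdev and negotiates feature bits first, then
 * finds the virtqueues, registers the device, and finally posts the
 * initial receive buffers; failures unwind in reverse through the
 * labels at the bottom. */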
static int virtnet_probe(struct virtio_device *vdev)
{
        int err;
        struct net_device *dev;
        struct virtnet_info *vi;
        struct virtqueue *vqs[3];
        vq_callback_t *callbacks[] = { skb_recv_done, skb_xmit_done, NULL};
        const char *names[] = { "input", "output", "control" };
        int nvqs;

        /* Allocate ourselves a network device with room for our info */
        dev = alloc_etherdev(sizeof(struct virtnet_info));
        if (!dev)
                return -ENOMEM;

        /* Set up network device as normal. */
        dev->netdev_ops = &virtnet_netdev;
        dev->features = NETIF_F_HIGHDMA;
        SET_ETHTOOL_OPS(dev, &virtnet_ethtool_ops);
        SET_NETDEV_DEV(dev, &vdev->dev);

        /* Do we support "hardware" checksums? */
        if (csum && virtio_has_feature(vdev, VIRTIO_NET_F_CSUM)) {
                /* This opens up the world of extra features. */
                dev->features |= NETIF_F_HW_CSUM|NETIF_F_SG|NETIF_F_FRAGLIST;
                if (gso && virtio_has_feature(vdev, VIRTIO_NET_F_GSO)) {
                        dev->features |= NETIF_F_TSO | NETIF_F_UFO
                                | NETIF_F_TSO_ECN | NETIF_F_TSO6;
                }
                /* Individual feature bits: what can host handle? */
                if (gso && virtio_has_feature(vdev, VIRTIO_NET_F_HOST_TSO4))
                        dev->features |= NETIF_F_TSO;
                if (gso && virtio_has_feature(vdev, VIRTIO_NET_F_HOST_TSO6))
                        dev->features |= NETIF_F_TSO6;
                if (gso && virtio_has_feature(vdev, VIRTIO_NET_F_HOST_ECN))
                        dev->features |= NETIF_F_TSO_ECN;
                if (gso && virtio_has_feature(vdev, VIRTIO_NET_F_HOST_UFO))
                        dev->features |= NETIF_F_UFO;
        }

        /* Configuration may specify what MAC to use.  Otherwise random. */
        if (virtio_has_feature(vdev, VIRTIO_NET_F_MAC)) {
                vdev->config->get(vdev,
                                  offsetof(struct virtio_net_config, mac),
                                  dev->dev_addr, dev->addr_len);
        } else
                random_ether_addr(dev->dev_addr);

        /* Set up our device-specific information */
        vi = netdev_priv(dev);
        netif_napi_add(dev, &vi->napi, virtnet_poll, napi_weight);
        vi->dev = dev;
        vi->vdev = vdev;
        vdev->priv = vi;
        vi->pages = NULL;
        INIT_DELAYED_WORK(&vi->refill, refill_work);

        /* If we can receive ANY GSO packets, we must allocate large ones. */
        if (virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO4)
            || virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO6)
            || virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_ECN))
                vi->big_packets = true;

        if (virtio_has_feature(vdev, VIRTIO_NET_F_MRG_RXBUF))
                vi->mergeable_rx_bufs = true;

        /* We expect two virtqueues, receive then send,
         * and optionally control. */
        nvqs = virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ) ? 3 : 2;

        err = vdev->config->find_vqs(vdev, nvqs, vqs, callbacks, names);
        if (err)
                goto free;

        vi->rvq = vqs[0];
        vi->svq = vqs[1];

        if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ)) {
                vi->cvq = vqs[2];

                if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VLAN))
                        dev->features |= NETIF_F_HW_VLAN_FILTER;
        }

        /* Initialize our empty receive and send queues. */
        skb_queue_head_init(&vi->recv);
        skb_queue_head_init(&vi->send);

        err = register_netdev(dev);
        if (err) {
                pr_debug("virtio_net: registering device failed\n");
                goto free_vqs;
        }

        /* Last of all, set up some receive buffers. */
        try_fill_recv(vi, GFP_KERNEL);

        /* If we didn't even get one input buffer, we're useless. */
        if (vi->num == 0) {
                err = -ENOMEM;
                goto unregister;
        }

        vi->status = VIRTIO_NET_S_LINK_UP;
        virtnet_update_status(vi);
        netif_carrier_on(dev);

        pr_debug("virtnet: registered device %s\n", dev->name);
        return 0;

unregister:
        unregister_netdev(dev);
        cancel_delayed_work_sync(&vi->refill);
free_vqs:
        vdev->config->del_vqs(vdev);
free:
        free_netdev(dev);
        return err;
}

static void __devexit virtnet_remove(struct virtio_device *vdev)
{
        struct virtnet_info *vi = vdev->priv;
        struct sk_buff *skb;

        /* Stop all the virtqueues. */
        vdev->config->reset(vdev);

        /* Free our skbs in send and recv queues, if any. */
        while ((skb = __skb_dequeue(&vi->recv)) != NULL) {
                kfree_skb(skb);
                vi->num--;
        }
        __skb_queue_purge(&vi->send);

        BUG_ON(vi->num != 0);

        unregister_netdev(vi->dev);
        cancel_delayed_work_sync(&vi->refill);

        vdev->config->del_vqs(vi->vdev);

        while (vi->pages)
                __free_pages(get_a_page(vi, GFP_KERNEL), 0);

        free_netdev(vi->dev);
}

static struct virtio_device_id id_table[] = {
        { VIRTIO_ID_NET, VIRTIO_DEV_ANY_ID },
        { 0 },
};

static unsigned int features[] = {
        VIRTIO_NET_F_CSUM, VIRTIO_NET_F_GUEST_CSUM,
        VIRTIO_NET_F_GSO, VIRTIO_NET_F_MAC,
        VIRTIO_NET_F_HOST_TSO4, VIRTIO_NET_F_HOST_UFO, VIRTIO_NET_F_HOST_TSO6,
        VIRTIO_NET_F_HOST_ECN, VIRTIO_NET_F_GUEST_TSO4, VIRTIO_NET_F_GUEST_TSO6,
        VIRTIO_NET_F_GUEST_ECN, VIRTIO_NET_F_GUEST_UFO,
        VIRTIO_NET_F_MRG_RXBUF, VIRTIO_NET_F_STATUS, VIRTIO_NET_F_CTRL_VQ,
        VIRTIO_NET_F_CTRL_RX, VIRTIO_NET_F_CTRL_VLAN,
};

static struct virtio_driver virtio_net = {
        .feature_table = features,
        .feature_table_size = ARRAY_SIZE(features),
        .driver.name =  KBUILD_MODNAME,
        .driver.owner = THIS_MODULE,
        .id_table =     id_table,
        .probe =        virtnet_probe,
        .remove =       __devexit_p(virtnet_remove),
        .config_changed = virtnet_config_changed,
};

static int __init init(void)
{
        return register_virtio_driver(&virtio_net);
}

static void __exit fini(void)
{
        unregister_virtio_driver(&virtio_net);
}
module_init(init);
module_exit(fini);

MODULE_DEVICE_TABLE(virtio, id_table);
MODULE_DESCRIPTION("Virtio network driver");
MODULE_LICENSE("GPL");