/* A network driver using virtio.
 *
 * Copyright 2007 Rusty Russell <rusty@rustcorp.com.au> IBM Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
//#define DEBUG
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/module.h>
#include <linux/virtio.h>
#include <linux/virtio_ids.h>
#include <linux/virtio_net.h>
#include <linux/scatterlist.h>
#include <linux/if_vlan.h>

static int napi_weight = 128;
module_param(napi_weight, int, 0444);

static int csum = 1, gso = 1;
module_param(csum, bool, 0444);
module_param(gso, bool, 0444);

/* FIXME: MTU in config. */
#define MAX_PACKET_LEN (ETH_HLEN + VLAN_HLEN + ETH_DATA_LEN)
#define GOOD_COPY_LEN   128

#define VIRTNET_SEND_COMMAND_SG_MAX    2

struct virtnet_info
{
        struct virtio_device *vdev;
        struct virtqueue *rvq, *svq, *cvq;
        struct net_device *dev;
        struct napi_struct napi;
        unsigned int status;

        /* Number of input buffers, and max we've ever had. */
        unsigned int num, max;

        /* I like... big packets and I cannot lie! */
        bool big_packets;

        /* Host will merge rx buffers for big packets (shake it! shake it!) */
        bool mergeable_rx_bufs;

        /* Receive & send queues. */
        struct sk_buff_head recv;
        struct sk_buff_head send;

        /* Work struct for refilling if we run low on memory. */
        struct delayed_work refill;

        /* Chain pages by the private ptr. */
        struct page *pages;
};

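/* Per-skb metadata, stored in the skb's control buffer (skb->cb), which is
 * ours while the skb sits on our send/receive lists.  The mergeable header
 * is a superset of the basic one, hence the union. */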
struct skb_vnet_hdr {
        union {
                struct virtio_net_hdr hdr;
                struct virtio_net_hdr_mrg_rxbuf mhdr;
        };
        unsigned int num_sg;
};

static inline struct skb_vnet_hdr *skb_vnet_hdr(struct sk_buff *skb)
{
        return (struct skb_vnet_hdr *)skb->cb;
}

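/* Private page cache, chained through page->private: trim_pages() returns
 * fragment pages here and get_a_page() reuses them before falling back to
 * the allocator. */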
static void give_a_page(struct virtnet_info *vi, struct page *page)
{
        page->private = (unsigned long)vi->pages;
        vi->pages = page;
}

static void trim_pages(struct virtnet_info *vi, struct sk_buff *skb)
{
        unsigned int i;

        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
                give_a_page(vi, skb_shinfo(skb)->frags[i].page);
        skb_shinfo(skb)->nr_frags = 0;
        skb->data_len = 0;
}

static struct page *get_a_page(struct virtnet_info *vi, gfp_t gfp_mask)
{
        struct page *p = vi->pages;

        if (p)
                vi->pages = (struct page *)p->private;
        else
                p = alloc_page(gfp_mask);
        return p;
}

static void skb_xmit_done(struct virtqueue *svq)
{
        struct virtnet_info *vi = svq->vdev->priv;

        /* Suppress further interrupts. */
        svq->vq_ops->disable_cb(svq);

        /* We were probably waiting for more output buffers. */
        netif_wake_queue(vi->dev);
}

static void receive_skb(struct net_device *dev, struct sk_buff *skb,
                        unsigned len)
{
        struct virtnet_info *vi = netdev_priv(dev);
        struct skb_vnet_hdr *hdr = skb_vnet_hdr(skb);
        int err;
        int i;

        if (unlikely(len < sizeof(struct virtio_net_hdr) + ETH_HLEN)) {
                pr_debug("%s: short packet %i\n", dev->name, len);
                dev->stats.rx_length_errors++;
                goto drop;
        }

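        /* Two receive layouts: with mergeable buffers, the header and the
         * start of the data share the first page and a packet may span
         * several buffers; otherwise the header arrived in its own sg entry
         * and the skb just needs trimming to the received length. */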
        if (vi->mergeable_rx_bufs) {
                unsigned int copy;
                char *p = page_address(skb_shinfo(skb)->frags[0].page);

                if (len > PAGE_SIZE)
                        len = PAGE_SIZE;
                len -= sizeof(struct virtio_net_hdr_mrg_rxbuf);

                memcpy(&hdr->mhdr, p, sizeof(hdr->mhdr));
                p += sizeof(hdr->mhdr);

                copy = len;
                if (copy > skb_tailroom(skb))
                        copy = skb_tailroom(skb);

                memcpy(skb_put(skb, copy), p, copy);

                len -= copy;

                if (!len) {
                        give_a_page(vi, skb_shinfo(skb)->frags[0].page);
                        skb_shinfo(skb)->nr_frags--;
                } else {
                        skb_shinfo(skb)->frags[0].page_offset +=
                                sizeof(hdr->mhdr) + copy;
                        skb_shinfo(skb)->frags[0].size = len;
                        skb->data_len += len;
                        skb->len += len;
                }

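                /* The host says how many buffers make up this packet in
                 * num_buffers; pull the rest off the receive queue and graft
                 * their pages onto this skb as fragments. */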
                while (--hdr->mhdr.num_buffers) {
                        struct sk_buff *nskb;

                        i = skb_shinfo(skb)->nr_frags;
                        if (i >= MAX_SKB_FRAGS) {
                                pr_debug("%s: packet too long %d\n", dev->name,
                                         len);
                                dev->stats.rx_length_errors++;
                                goto drop;
                        }

                        nskb = vi->rvq->vq_ops->get_buf(vi->rvq, &len);
                        if (!nskb) {
                                pr_debug("%s: rx error: %d buffers missing\n",
                                         dev->name, hdr->mhdr.num_buffers);
                                dev->stats.rx_length_errors++;
                                goto drop;
                        }

                        __skb_unlink(nskb, &vi->recv);
                        vi->num--;

                        skb_shinfo(skb)->frags[i] = skb_shinfo(nskb)->frags[0];
                        skb_shinfo(nskb)->nr_frags = 0;
                        kfree_skb(nskb);

                        if (len > PAGE_SIZE)
                                len = PAGE_SIZE;

                        skb_shinfo(skb)->frags[i].size = len;
                        skb_shinfo(skb)->nr_frags++;
                        skb->data_len += len;
                        skb->len += len;
                }
        } else {
                len -= sizeof(hdr->hdr);

                if (len <= MAX_PACKET_LEN)
                        trim_pages(vi, skb);

                err = pskb_trim(skb, len);
                if (err) {
                        pr_debug("%s: pskb_trim failed %i %d\n", dev->name,
                                 len, err);
                        dev->stats.rx_dropped++;
                        goto drop;
                }
        }

        skb->truesize += skb->data_len;
        dev->stats.rx_bytes += skb->len;
        dev->stats.rx_packets++;

        if (hdr->hdr.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) {
                pr_debug("Needs csum!\n");
                if (!skb_partial_csum_set(skb,
                                          hdr->hdr.csum_start,
                                          hdr->hdr.csum_offset))
                        goto frame_err;
        }

        skb->protocol = eth_type_trans(skb, dev);
        pr_debug("Receiving skb proto 0x%04x len %i type %i\n",
                 ntohs(skb->protocol), skb->len, skb->pkt_type);

        if (hdr->hdr.gso_type != VIRTIO_NET_HDR_GSO_NONE) {
                pr_debug("GSO!\n");
                switch (hdr->hdr.gso_type & ~VIRTIO_NET_HDR_GSO_ECN) {
                case VIRTIO_NET_HDR_GSO_TCPV4:
                        skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
                        break;
                case VIRTIO_NET_HDR_GSO_UDP:
                        skb_shinfo(skb)->gso_type = SKB_GSO_UDP;
                        break;
                case VIRTIO_NET_HDR_GSO_TCPV6:
                        skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
                        break;
                default:
                        if (net_ratelimit())
                                printk(KERN_WARNING "%s: bad gso type %u.\n",
                                       dev->name, hdr->hdr.gso_type);
                        goto frame_err;
                }

                if (hdr->hdr.gso_type & VIRTIO_NET_HDR_GSO_ECN)
                        skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_ECN;

                skb_shinfo(skb)->gso_size = hdr->hdr.gso_size;
                if (skb_shinfo(skb)->gso_size == 0) {
                        if (net_ratelimit())
                                printk(KERN_WARNING "%s: zero gso size.\n",
                                       dev->name);
                        goto frame_err;
                }

                /* Header must be checked, and gso_segs computed. */
                skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY;
                skb_shinfo(skb)->gso_segs = 0;
        }

        netif_receive_skb(skb);
        return;

frame_err:
        dev->stats.rx_frame_errors++;
drop:
        dev_kfree_skb(skb);
}

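/* Fill the receive queue with MAX_PACKET_LEN skbs, each with the header as a
 * separate sg entry, plus up to MAX_SKB_FRAGS whole pages when the host may
 * send us GSO packets.  Returns false if we stopped early for lack of
 * memory. */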
static bool try_fill_recv_maxbufs(struct virtnet_info *vi, gfp_t gfp)
{
        struct sk_buff *skb;
        struct scatterlist sg[2+MAX_SKB_FRAGS];
        int num, err, i;
        bool oom = false;

        sg_init_table(sg, 2+MAX_SKB_FRAGS);
        do {
                struct skb_vnet_hdr *hdr;

                skb = netdev_alloc_skb_ip_align(vi->dev, MAX_PACKET_LEN);
                if (unlikely(!skb)) {
                        oom = true;
                        break;
                }

                skb_put(skb, MAX_PACKET_LEN);

                hdr = skb_vnet_hdr(skb);
                sg_set_buf(sg, &hdr->hdr, sizeof(hdr->hdr));

                if (vi->big_packets) {
                        for (i = 0; i < MAX_SKB_FRAGS; i++) {
                                skb_frag_t *f = &skb_shinfo(skb)->frags[i];
                                f->page = get_a_page(vi, gfp);
                                if (!f->page)
                                        break;

                                f->page_offset = 0;
                                f->size = PAGE_SIZE;

                                skb->data_len += PAGE_SIZE;
                                skb->len += PAGE_SIZE;

                                skb_shinfo(skb)->nr_frags++;
                        }
                }

                num = skb_to_sgvec(skb, sg+1, 0, skb->len) + 1;
                skb_queue_head(&vi->recv, skb);

                err = vi->rvq->vq_ops->add_buf(vi->rvq, sg, 0, num, skb);
                if (err < 0) {
                        skb_unlink(skb, &vi->recv);
                        trim_pages(vi, skb);
                        kfree_skb(skb);
                        break;
                }
                vi->num++;
        } while (err >= num);
        if (unlikely(vi->num > vi->max))
                vi->max = vi->num;
        vi->rvq->vq_ops->kick(vi->rvq);
        return !oom;
}

/* Returns false if we couldn't fill entirely (OOM). */
static bool try_fill_recv(struct virtnet_info *vi, gfp_t gfp)
{
        struct sk_buff *skb;
        struct scatterlist sg[1];
        int err;
        bool oom = false;

        if (!vi->mergeable_rx_bufs)
                return try_fill_recv_maxbufs(vi, gfp);

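        /* Mergeable mode: post one page per descriptor; for a big packet the
         * host chains as many of them as it needs and reports the count in
         * the header's num_buffers field. */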
        do {
                skb_frag_t *f;

                skb = netdev_alloc_skb_ip_align(vi->dev, GOOD_COPY_LEN);
                if (unlikely(!skb)) {
                        oom = true;
                        break;
                }

                f = &skb_shinfo(skb)->frags[0];
                f->page = get_a_page(vi, gfp);
                if (!f->page) {
                        oom = true;
                        kfree_skb(skb);
                        break;
                }

                f->page_offset = 0;
                f->size = PAGE_SIZE;

                skb_shinfo(skb)->nr_frags++;

                sg_init_one(sg, page_address(f->page), PAGE_SIZE);
                skb_queue_head(&vi->recv, skb);

                err = vi->rvq->vq_ops->add_buf(vi->rvq, sg, 0, 1, skb);
                if (err < 0) {
                        skb_unlink(skb, &vi->recv);
                        kfree_skb(skb);
                        break;
                }
                vi->num++;
        } while (err > 0);
        if (unlikely(vi->num > vi->max))
                vi->max = vi->num;
        vi->rvq->vq_ops->kick(vi->rvq);
        return !oom;
}

static void skb_recv_done(struct virtqueue *rvq)
{
        struct virtnet_info *vi = rvq->vdev->priv;
        /* Schedule NAPI; suppress further interrupts if successful. */
        if (napi_schedule_prep(&vi->napi)) {
                rvq->vq_ops->disable_cb(rvq);
                __napi_schedule(&vi->napi);
        }
}

static void refill_work(struct work_struct *work)
{
        struct virtnet_info *vi;
        bool still_empty;

        vi = container_of(work, struct virtnet_info, refill.work);
        napi_disable(&vi->napi);
        try_fill_recv(vi, GFP_KERNEL);
        still_empty = (vi->num == 0);
        napi_enable(&vi->napi);

        /* In theory, this can happen: if we don't get any buffers in
         * we will *never* try to fill again. */
        if (still_empty)
                schedule_delayed_work(&vi->refill, HZ/2);
}

static int virtnet_poll(struct napi_struct *napi, int budget)
{
        struct virtnet_info *vi = container_of(napi, struct virtnet_info, napi);
        struct sk_buff *skb = NULL;
        unsigned int len, received = 0;

again:
        while (received < budget &&
               (skb = vi->rvq->vq_ops->get_buf(vi->rvq, &len)) != NULL) {
                __skb_unlink(skb, &vi->recv);
                receive_skb(vi->dev, skb, len);
                vi->num--;
                received++;
        }

        if (vi->num < vi->max / 2) {
                if (!try_fill_recv(vi, GFP_ATOMIC))
                        schedule_delayed_work(&vi->refill, 0);
        }

        /* Out of packets? */
        if (received < budget) {
                napi_complete(napi);
                if (unlikely(!vi->rvq->vq_ops->enable_cb(vi->rvq))
                    && napi_schedule_prep(napi)) {
                        vi->rvq->vq_ops->disable_cb(vi->rvq);
                        __napi_schedule(napi);
                        goto again;
                }
        }

        return received;
}

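/* Reclaim skbs the host has finished transmitting; returns the number of
 * scatter-gather entries freed, so the caller can estimate how much ring
 * space came back. */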
static unsigned int free_old_xmit_skbs(struct virtnet_info *vi)
{
        struct sk_buff *skb;
        unsigned int len, tot_sgs = 0;

        while ((skb = vi->svq->vq_ops->get_buf(vi->svq, &len)) != NULL) {
                pr_debug("Sent skb %p\n", skb);
                __skb_unlink(skb, &vi->send);
                vi->dev->stats.tx_bytes += skb->len;
                vi->dev->stats.tx_packets++;
                tot_sgs += skb_vnet_hdr(skb)->num_sg;
                dev_kfree_skb_any(skb);
        }
        return tot_sgs;
}

static int xmit_skb(struct virtnet_info *vi, struct sk_buff *skb)
{
        struct scatterlist sg[2+MAX_SKB_FRAGS];
        struct skb_vnet_hdr *hdr = skb_vnet_hdr(skb);
        const unsigned char *dest = ((struct ethhdr *)skb->data)->h_dest;

        sg_init_table(sg, 2+MAX_SKB_FRAGS);

        pr_debug("%s: xmit %p %pM\n", vi->dev->name, skb, dest);

        if (skb->ip_summed == CHECKSUM_PARTIAL) {
                hdr->hdr.flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
                hdr->hdr.csum_start = skb->csum_start - skb_headroom(skb);
                hdr->hdr.csum_offset = skb->csum_offset;
        } else {
                hdr->hdr.flags = 0;
                hdr->hdr.csum_offset = hdr->hdr.csum_start = 0;
        }

        if (skb_is_gso(skb)) {
                hdr->hdr.hdr_len = skb_headlen(skb);
                hdr->hdr.gso_size = skb_shinfo(skb)->gso_size;
                if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
                        hdr->hdr.gso_type = VIRTIO_NET_HDR_GSO_TCPV4;
                else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
                        hdr->hdr.gso_type = VIRTIO_NET_HDR_GSO_TCPV6;
                else if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP)
                        hdr->hdr.gso_type = VIRTIO_NET_HDR_GSO_UDP;
                else
                        BUG();
                if (skb_shinfo(skb)->gso_type & SKB_GSO_TCP_ECN)
                        hdr->hdr.gso_type |= VIRTIO_NET_HDR_GSO_ECN;
        } else {
                hdr->hdr.gso_type = VIRTIO_NET_HDR_GSO_NONE;
                hdr->hdr.gso_size = hdr->hdr.hdr_len = 0;
        }

        hdr->mhdr.num_buffers = 0;

        /* Encode metadata header at front. */
        if (vi->mergeable_rx_bufs)
                sg_set_buf(sg, &hdr->mhdr, sizeof(hdr->mhdr));
        else
                sg_set_buf(sg, &hdr->hdr, sizeof(hdr->hdr));

        hdr->num_sg = skb_to_sgvec(skb, sg+1, 0, skb->len) + 1;
        return vi->svq->vq_ops->add_buf(vi->svq, sg, hdr->num_sg, 0, skb);
}

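/* Transmit path: reclaim completed buffers, add the skb to the send ring and
 * kick the host, then stop the queue early if fewer than 2+MAX_SKB_FRAGS
 * entries remain, since the next packet might not fit. */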
static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev)
{
        struct virtnet_info *vi = netdev_priv(dev);
        int capacity;

again:
        /* Free up any pending old buffers before queueing new ones. */
        free_old_xmit_skbs(vi);

        /* Try to transmit */
        capacity = xmit_skb(vi, skb);

        /* This can happen with OOM and indirect buffers. */
        if (unlikely(capacity < 0)) {
                netif_stop_queue(dev);
                dev_warn(&dev->dev, "Unexpected full queue\n");
                if (unlikely(!vi->svq->vq_ops->enable_cb(vi->svq))) {
                        vi->svq->vq_ops->disable_cb(vi->svq);
                        netif_start_queue(dev);
                        goto again;
                }
                return NETDEV_TX_BUSY;
        }
        vi->svq->vq_ops->kick(vi->svq);

        /*
         * Put new one in send queue.  You'd expect we'd need this before
         * xmit_skb calls add_buf(), since the callback can be triggered
         * immediately after that.  But since the callback just triggers
         * another call back here, normal network xmit locking prevents the
         * race.
         */
        __skb_queue_head(&vi->send, skb);

        /* Don't wait up for transmitted skbs to be freed. */
        skb_orphan(skb);
        nf_reset(skb);

        /* Apparently nice girls don't return TX_BUSY; stop the queue
         * before it gets out of hand.  Naturally, this wastes entries. */
        if (capacity < 2+MAX_SKB_FRAGS) {
                netif_stop_queue(dev);
                if (unlikely(!vi->svq->vq_ops->enable_cb(vi->svq))) {
                        /* More just got used, free them then recheck. */
                        capacity += free_old_xmit_skbs(vi);
                        if (capacity >= 2+MAX_SKB_FRAGS) {
                                netif_start_queue(dev);
                                vi->svq->vq_ops->disable_cb(vi->svq);
                        }
                }
        }

        return NETDEV_TX_OK;
}

static int virtnet_set_mac_address(struct net_device *dev, void *p)
{
        struct virtnet_info *vi = netdev_priv(dev);
        struct virtio_device *vdev = vi->vdev;
        int ret;

        ret = eth_mac_addr(dev, p);
        if (ret)
                return ret;

        if (virtio_has_feature(vdev, VIRTIO_NET_F_MAC))
                vdev->config->set(vdev, offsetof(struct virtio_net_config, mac),
                                  dev->dev_addr, dev->addr_len);

        return 0;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void virtnet_netpoll(struct net_device *dev)
{
        struct virtnet_info *vi = netdev_priv(dev);

        napi_schedule(&vi->napi);
}
#endif

static int virtnet_open(struct net_device *dev)
{
        struct virtnet_info *vi = netdev_priv(dev);

        napi_enable(&vi->napi);

        /* If all buffers were filled by other side before we napi_enabled, we
         * won't get another interrupt, so process any outstanding packets
         * now.  virtnet_poll wants to re-enable the queue, so we disable here.
         * We synchronize against interrupts via NAPI_STATE_SCHED */
        if (napi_schedule_prep(&vi->napi)) {
                vi->rvq->vq_ops->disable_cb(vi->rvq);
                __napi_schedule(&vi->napi);
        }
        return 0;
}

/*
 * Send command via the control virtqueue and check status.  Commands
 * supported by the hypervisor, as indicated by feature bits, should
 * never fail unless improperly formatted.
 */
static bool virtnet_send_command(struct virtnet_info *vi, u8 class, u8 cmd,
                                 struct scatterlist *data, int out, int in)
{
        struct scatterlist *s, sg[VIRTNET_SEND_COMMAND_SG_MAX + 2];
        struct virtio_net_ctrl_hdr ctrl;
        virtio_net_ctrl_ack status = ~0;
        unsigned int tmp;
        int i;

        /* Caller should know better */
        BUG_ON(!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ) ||
                (out + in > VIRTNET_SEND_COMMAND_SG_MAX));

        out++; /* Add header */
        in++; /* Add return status */

        ctrl.class = class;
        ctrl.cmd = cmd;

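        /* Scatterlist layout: [ctrl header][caller's out sgs][caller's in
         * sgs][status byte].  The header counts as an extra out entry and
         * the status as an extra in entry, hence the increments above. */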
        sg_init_table(sg, out + in);

        sg_set_buf(&sg[0], &ctrl, sizeof(ctrl));
        for_each_sg(data, s, out + in - 2, i)
                sg_set_buf(&sg[i + 1], sg_virt(s), s->length);
        sg_set_buf(&sg[out + in - 1], &status, sizeof(status));

        BUG_ON(vi->cvq->vq_ops->add_buf(vi->cvq, sg, out, in, vi) < 0);

        vi->cvq->vq_ops->kick(vi->cvq);

        /*
         * Spin for a response, the kick causes an ioport write, trapping
         * into the hypervisor, so the request should be handled immediately.
         */
        while (!vi->cvq->vq_ops->get_buf(vi->cvq, &tmp))
                cpu_relax();

        return status == VIRTIO_NET_OK;
}

static int virtnet_close(struct net_device *dev)
{
        struct virtnet_info *vi = netdev_priv(dev);

        napi_disable(&vi->napi);

        return 0;
}

static int virtnet_set_tx_csum(struct net_device *dev, u32 data)
{
        struct virtnet_info *vi = netdev_priv(dev);
        struct virtio_device *vdev = vi->vdev;

        if (data && !virtio_has_feature(vdev, VIRTIO_NET_F_CSUM))
                return -ENOSYS;

        return ethtool_op_set_tx_hw_csum(dev, data);
}

static void virtnet_set_rx_mode(struct net_device *dev)
{
        struct virtnet_info *vi = netdev_priv(dev);
        struct scatterlist sg[2];
        u8 promisc, allmulti;
        struct virtio_net_ctrl_mac *mac_data;
        struct dev_addr_list *addr;
        struct netdev_hw_addr *ha;
        void *buf;
        int i;

        /* We can't dynamically set ndo_set_rx_mode, so return gracefully */
        if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_RX))
                return;

        promisc = ((dev->flags & IFF_PROMISC) != 0);
        allmulti = ((dev->flags & IFF_ALLMULTI) != 0);

        sg_init_one(sg, &promisc, sizeof(promisc));

        if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_RX,
                                  VIRTIO_NET_CTRL_RX_PROMISC,
                                  sg, 1, 0))
                dev_warn(&dev->dev, "Failed to %sable promisc mode.\n",
                         promisc ? "en" : "dis");

        sg_init_one(sg, &allmulti, sizeof(allmulti));

        if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_RX,
                                  VIRTIO_NET_CTRL_RX_ALLMULTI,
                                  sg, 1, 0))
                dev_warn(&dev->dev, "Failed to %sable allmulti mode.\n",
                         allmulti ? "en" : "dis");

        /* MAC filter - use one buffer for both lists */
        mac_data = buf = kzalloc(((dev->uc.count + dev->mc_count) * ETH_ALEN) +
                                 (2 * sizeof(mac_data->entries)), GFP_ATOMIC);
        if (!buf) {
                dev_warn(&dev->dev, "No memory for MAC address buffer\n");
                return;
        }

        sg_init_table(sg, 2);

        /* Store the unicast list and count in the front of the buffer */
        mac_data->entries = dev->uc.count;
        i = 0;
        list_for_each_entry(ha, &dev->uc.list, list)
                memcpy(&mac_data->macs[i++][0], ha->addr, ETH_ALEN);

        sg_set_buf(&sg[0], mac_data,
                   sizeof(mac_data->entries) + (dev->uc.count * ETH_ALEN));

        /* multicast list and count fill the end */
        mac_data = (void *)&mac_data->macs[dev->uc.count][0];

        mac_data->entries = dev->mc_count;
        addr = dev->mc_list;
        for (i = 0; i < dev->mc_count; i++, addr = addr->next)
                memcpy(&mac_data->macs[i][0], addr->da_addr, ETH_ALEN);

        sg_set_buf(&sg[1], mac_data,
                   sizeof(mac_data->entries) + (dev->mc_count * ETH_ALEN));

        if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MAC,
                                  VIRTIO_NET_CTRL_MAC_TABLE_SET,
                                  sg, 2, 0))
                dev_warn(&dev->dev, "Failed to set MAC filter table.\n");

        kfree(buf);
}

static void virtnet_vlan_rx_add_vid(struct net_device *dev, u16 vid)
{
        struct virtnet_info *vi = netdev_priv(dev);
        struct scatterlist sg;

        sg_init_one(&sg, &vid, sizeof(vid));

        if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_VLAN,
                                  VIRTIO_NET_CTRL_VLAN_ADD, &sg, 1, 0))
                dev_warn(&dev->dev, "Failed to add VLAN ID %d.\n", vid);
}

static void virtnet_vlan_rx_kill_vid(struct net_device *dev, u16 vid)
{
        struct virtnet_info *vi = netdev_priv(dev);
        struct scatterlist sg;

        sg_init_one(&sg, &vid, sizeof(vid));

        if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_VLAN,
                                  VIRTIO_NET_CTRL_VLAN_DEL, &sg, 1, 0))
                dev_warn(&dev->dev, "Failed to kill VLAN ID %d.\n", vid);
}

static const struct ethtool_ops virtnet_ethtool_ops = {
        .set_tx_csum = virtnet_set_tx_csum,
        .set_sg = ethtool_op_set_sg,
        .set_tso = ethtool_op_set_tso,
        .set_ufo = ethtool_op_set_ufo,
        .get_link = ethtool_op_get_link,
};

#define MIN_MTU 68
#define MAX_MTU 65535

static int virtnet_change_mtu(struct net_device *dev, int new_mtu)
{
        if (new_mtu < MIN_MTU || new_mtu > MAX_MTU)
                return -EINVAL;
        dev->mtu = new_mtu;
        return 0;
}

static const struct net_device_ops virtnet_netdev = {
        .ndo_open            = virtnet_open,
        .ndo_stop            = virtnet_close,
        .ndo_start_xmit      = start_xmit,
        .ndo_validate_addr   = eth_validate_addr,
        .ndo_set_mac_address = virtnet_set_mac_address,
        .ndo_set_rx_mode     = virtnet_set_rx_mode,
        .ndo_change_mtu      = virtnet_change_mtu,
        .ndo_vlan_rx_add_vid = virtnet_vlan_rx_add_vid,
        .ndo_vlan_rx_kill_vid = virtnet_vlan_rx_kill_vid,
#ifdef CONFIG_NET_POLL_CONTROLLER
        .ndo_poll_controller = virtnet_netpoll,
#endif
};

static void virtnet_update_status(struct virtnet_info *vi)
{
        u16 v;

        if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_STATUS))
                return;

        vi->vdev->config->get(vi->vdev,
                              offsetof(struct virtio_net_config, status),
                              &v, sizeof(v));

        /* Ignore unknown (future) status bits */
        v &= VIRTIO_NET_S_LINK_UP;

        if (vi->status == v)
                return;

        vi->status = v;

        if (vi->status & VIRTIO_NET_S_LINK_UP) {
                netif_carrier_on(vi->dev);
                netif_wake_queue(vi->dev);
        } else {
                netif_carrier_off(vi->dev);
                netif_stop_queue(vi->dev);
        }
}

static void virtnet_config_changed(struct virtio_device *vdev)
{
        struct virtnet_info *vi = vdev->priv;

        virtnet_update_status(vi);
}

static int virtnet_probe(struct virtio_device *vdev)
{
        int err;
        struct net_device *dev;
        struct virtnet_info *vi;
        struct virtqueue *vqs[3];
        vq_callback_t *callbacks[] = { skb_recv_done, skb_xmit_done, NULL};
        const char *names[] = { "input", "output", "control" };
        int nvqs;

        /* Allocate ourselves a network device with room for our info */
        dev = alloc_etherdev(sizeof(struct virtnet_info));
        if (!dev)
                return -ENOMEM;

        /* Set up network device as normal. */
        dev->netdev_ops = &virtnet_netdev;
        dev->features = NETIF_F_HIGHDMA;
        SET_ETHTOOL_OPS(dev, &virtnet_ethtool_ops);
        SET_NETDEV_DEV(dev, &vdev->dev);

        /* Do we support "hardware" checksums? */
        if (csum && virtio_has_feature(vdev, VIRTIO_NET_F_CSUM)) {
                /* This opens up the world of extra features. */
                dev->features |= NETIF_F_HW_CSUM|NETIF_F_SG|NETIF_F_FRAGLIST;
                if (gso && virtio_has_feature(vdev, VIRTIO_NET_F_GSO)) {
                        dev->features |= NETIF_F_TSO | NETIF_F_UFO
                                | NETIF_F_TSO_ECN | NETIF_F_TSO6;
                }
                /* Individual feature bits: what can host handle? */
                if (gso && virtio_has_feature(vdev, VIRTIO_NET_F_HOST_TSO4))
                        dev->features |= NETIF_F_TSO;
                if (gso && virtio_has_feature(vdev, VIRTIO_NET_F_HOST_TSO6))
                        dev->features |= NETIF_F_TSO6;
                if (gso && virtio_has_feature(vdev, VIRTIO_NET_F_HOST_ECN))
                        dev->features |= NETIF_F_TSO_ECN;
                if (gso && virtio_has_feature(vdev, VIRTIO_NET_F_HOST_UFO))
                        dev->features |= NETIF_F_UFO;
        }

        /* Configuration may specify what MAC to use.  Otherwise random. */
        if (virtio_has_feature(vdev, VIRTIO_NET_F_MAC)) {
                vdev->config->get(vdev,
                                  offsetof(struct virtio_net_config, mac),
                                  dev->dev_addr, dev->addr_len);
        } else
                random_ether_addr(dev->dev_addr);

        /* Set up our device-specific information */
        vi = netdev_priv(dev);
        netif_napi_add(dev, &vi->napi, virtnet_poll, napi_weight);
        vi->dev = dev;
        vi->vdev = vdev;
        vdev->priv = vi;
        vi->pages = NULL;
        INIT_DELAYED_WORK(&vi->refill, refill_work);

        /* If we can receive ANY GSO packets, we must allocate large ones. */
        if (virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO4)
            || virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO6)
            || virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_ECN))
                vi->big_packets = true;

        if (virtio_has_feature(vdev, VIRTIO_NET_F_MRG_RXBUF))
                vi->mergeable_rx_bufs = true;

        /* We expect two virtqueues, receive then send,
         * and optionally control. */
        nvqs = virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ) ? 3 : 2;

        err = vdev->config->find_vqs(vdev, nvqs, vqs, callbacks, names);
        if (err)
                goto free;

        vi->rvq = vqs[0];
        vi->svq = vqs[1];

        if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ)) {
                vi->cvq = vqs[2];

                if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VLAN))
                        dev->features |= NETIF_F_HW_VLAN_FILTER;
        }

        /* Initialize our empty receive and send queues. */
        skb_queue_head_init(&vi->recv);
        skb_queue_head_init(&vi->send);

        err = register_netdev(dev);
        if (err) {
                pr_debug("virtio_net: registering device failed\n");
                goto free_vqs;
        }

        /* Last of all, set up some receive buffers. */
        try_fill_recv(vi, GFP_KERNEL);

        /* If we didn't even get one input buffer, we're useless. */
        if (vi->num == 0) {
                err = -ENOMEM;
                goto unregister;
        }

        vi->status = VIRTIO_NET_S_LINK_UP;
        virtnet_update_status(vi);
        netif_carrier_on(dev);

        pr_debug("virtnet: registered device %s\n", dev->name);
        return 0;

unregister:
        unregister_netdev(dev);
        cancel_delayed_work_sync(&vi->refill);
free_vqs:
        vdev->config->del_vqs(vdev);
free:
        free_netdev(dev);
        return err;
}

static void __devexit virtnet_remove(struct virtio_device *vdev)
{
        struct virtnet_info *vi = vdev->priv;
        struct sk_buff *skb;

        /* Stop all the virtqueues. */
        vdev->config->reset(vdev);

        /* Free our skbs in send and recv queues, if any. */
        while ((skb = __skb_dequeue(&vi->recv)) != NULL) {
                kfree_skb(skb);
                vi->num--;
        }
        __skb_queue_purge(&vi->send);

        BUG_ON(vi->num != 0);

        unregister_netdev(vi->dev);
        cancel_delayed_work_sync(&vi->refill);

        vdev->config->del_vqs(vi->vdev);

        while (vi->pages)
                __free_pages(get_a_page(vi, GFP_KERNEL), 0);

        free_netdev(vi->dev);
}

static struct virtio_device_id id_table[] = {
        { VIRTIO_ID_NET, VIRTIO_DEV_ANY_ID },
        { 0 },
};

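/* Feature bits we understand; the virtio core intersects this table with
 * what the host offers before calling our probe routine. */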
static unsigned int features[] = {
        VIRTIO_NET_F_CSUM, VIRTIO_NET_F_GUEST_CSUM,
        VIRTIO_NET_F_GSO, VIRTIO_NET_F_MAC,
        VIRTIO_NET_F_HOST_TSO4, VIRTIO_NET_F_HOST_UFO, VIRTIO_NET_F_HOST_TSO6,
        VIRTIO_NET_F_HOST_ECN, VIRTIO_NET_F_GUEST_TSO4, VIRTIO_NET_F_GUEST_TSO6,
        VIRTIO_NET_F_GUEST_ECN, VIRTIO_NET_F_GUEST_UFO,
        VIRTIO_NET_F_MRG_RXBUF, VIRTIO_NET_F_STATUS, VIRTIO_NET_F_CTRL_VQ,
        VIRTIO_NET_F_CTRL_RX, VIRTIO_NET_F_CTRL_VLAN,
};

static struct virtio_driver virtio_net = {
        .feature_table = features,
        .feature_table_size = ARRAY_SIZE(features),
        .driver.name =  KBUILD_MODNAME,
        .driver.owner = THIS_MODULE,
        .id_table =     id_table,
        .probe =        virtnet_probe,
        .remove =       __devexit_p(virtnet_remove),
        .config_changed = virtnet_config_changed,
};

static int __init init(void)
{
        return register_virtio_driver(&virtio_net);
}

static void __exit fini(void)
{
        unregister_virtio_driver(&virtio_net);
}
module_init(init);
module_exit(fini);

MODULE_DEVICE_TABLE(virtio, id_table);
MODULE_DESCRIPTION("Virtio network driver");
MODULE_LICENSE("GPL");