/*
 * Linux driver for VMware's vmxnet3 ethernet NIC.
 *
 * Copyright (C) 2008-2009, VMware, Inc. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; version 2 of the License and no later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT. See the GNU General Public License for more
 * details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * The full GNU General Public License is included in this distribution in
 * the file called "COPYING".
 *
 * Maintained by: Shreyas Bhatewara <pv-drivers@vmware.com>
 *
 */

#include <net/ip6_checksum.h>

#include "vmxnet3_int.h"

char vmxnet3_driver_name[] = "vmxnet3";
#define VMXNET3_DRIVER_DESC "VMware vmxnet3 virtual NIC driver"

/*
 * PCI Device ID Table
 * Last entry must be all 0s
 */
static const struct pci_device_id vmxnet3_pciid_table[] = {
        {PCI_VDEVICE(VMWARE, PCI_DEVICE_ID_VMWARE_VMXNET3)},
        {0}
};

MODULE_DEVICE_TABLE(pci, vmxnet3_pciid_table);

static atomic_t devices_found;


/*
 *    Enable/Disable the given intr
 */
static void
vmxnet3_enable_intr(struct vmxnet3_adapter *adapter, unsigned intr_idx)
{
        VMXNET3_WRITE_BAR0_REG(adapter, VMXNET3_REG_IMR + intr_idx * 8, 0);
}


static void
vmxnet3_disable_intr(struct vmxnet3_adapter *adapter, unsigned intr_idx)
{
        VMXNET3_WRITE_BAR0_REG(adapter, VMXNET3_REG_IMR + intr_idx * 8, 1);
}


/*
 *    Enable/Disable all intrs used by the device
 */
static void
vmxnet3_enable_all_intrs(struct vmxnet3_adapter *adapter)
{
        int i;

        for (i = 0; i < adapter->intr.num_intrs; i++)
                vmxnet3_enable_intr(adapter, i);
}


static void
vmxnet3_disable_all_intrs(struct vmxnet3_adapter *adapter)
{
        int i;

        for (i = 0; i < adapter->intr.num_intrs; i++)
                vmxnet3_disable_intr(adapter, i);
}


static void
vmxnet3_ack_events(struct vmxnet3_adapter *adapter, u32 events)
{
        VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_ECR, events);
}


static bool
vmxnet3_tq_stopped(struct vmxnet3_tx_queue *tq, struct vmxnet3_adapter *adapter)
{
        return netif_queue_stopped(adapter->netdev);
}


static void
vmxnet3_tq_start(struct vmxnet3_tx_queue *tq, struct vmxnet3_adapter *adapter)
{
        tq->stopped = false;
        netif_start_queue(adapter->netdev);
}


static void
vmxnet3_tq_wake(struct vmxnet3_tx_queue *tq, struct vmxnet3_adapter *adapter)
{
        tq->stopped = false;
        netif_wake_queue(adapter->netdev);
}


static void
vmxnet3_tq_stop(struct vmxnet3_tx_queue *tq, struct vmxnet3_adapter *adapter)
{
        tq->stopped = true;
        tq->num_stop++;
        netif_stop_queue(adapter->netdev);
}


/*
 * Check the link state. This may start or stop the tx queue.
 */
static void
vmxnet3_check_link(struct vmxnet3_adapter *adapter)
{
        u32 ret;

        VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, VMXNET3_CMD_GET_LINK);
        ret = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD);
        adapter->link_speed = ret >> 16;
        if (ret & 1) { /* Link is up. */
                printk(KERN_INFO "%s: NIC Link is Up %d Mbps\n",
                       adapter->netdev->name, adapter->link_speed);
                if (!netif_carrier_ok(adapter->netdev))
                        netif_carrier_on(adapter->netdev);

                vmxnet3_tq_start(&adapter->tx_queue, adapter);
        } else {
                printk(KERN_INFO "%s: NIC Link is Down\n",
                       adapter->netdev->name);
                if (netif_carrier_ok(adapter->netdev))
                        netif_carrier_off(adapter->netdev);

                vmxnet3_tq_stop(&adapter->tx_queue, adapter);
        }
}

static void
vmxnet3_process_events(struct vmxnet3_adapter *adapter)
{
        u32 events = le32_to_cpu(adapter->shared->ecr);
        if (!events)
                return;

        vmxnet3_ack_events(adapter, events);

        /* Check if link state has changed */
        if (events & VMXNET3_ECR_LINK)
                vmxnet3_check_link(adapter);

        /* Check if there is an error on xmit/recv queues */
        if (events & (VMXNET3_ECR_TQERR | VMXNET3_ECR_RQERR)) {
                VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
                                       VMXNET3_CMD_GET_QUEUE_STATUS);

                if (adapter->tqd_start->status.stopped) {
                        printk(KERN_ERR "%s: tq error 0x%x\n",
                               adapter->netdev->name,
                               le32_to_cpu(adapter->tqd_start->status.error));
                }
                if (adapter->rqd_start->status.stopped) {
                        printk(KERN_ERR "%s: rq error 0x%x\n",
                               adapter->netdev->name,
                               adapter->rqd_start->status.error);
                }

                schedule_work(&adapter->work);
        }
}

#ifdef __BIG_ENDIAN_BITFIELD
/*
 * The device expects the bitfields in shared structures to be written in
 * little endian. When the CPU is big endian, the following routines are used
 * to correctly read from and write to the ABI.
 * The general technique used here is: double word bitfields are defined in
 * the opposite order for big endian architectures. Then, before the driver
 * reads them, the complete double word is translated using le32_to_cpu.
 * Similarly, after the driver writes into the bitfields, cpu_to_le32 is used
 * to translate the double words into the required format.
 * In order to avoid touching bits in the shared structure more than once,
 * temporary descriptors are used. These are passed as srcDesc to the
 * following functions.
 */
static void vmxnet3_RxDescToCPU(const struct Vmxnet3_RxDesc *srcDesc,
                                struct Vmxnet3_RxDesc *dstDesc)
{
        u32 *src = (u32 *)srcDesc + 2;
        u32 *dst = (u32 *)dstDesc + 2;
        dstDesc->addr = le64_to_cpu(srcDesc->addr);
        *dst = le32_to_cpu(*src);
        dstDesc->ext1 = le32_to_cpu(srcDesc->ext1);
}

static void vmxnet3_TxDescToLe(const struct Vmxnet3_TxDesc *srcDesc,
                               struct Vmxnet3_TxDesc *dstDesc)
{
        int i;
        u32 *src = (u32 *)(srcDesc + 1);
        u32 *dst = (u32 *)(dstDesc + 1);

        /* Working backwards so that the gen bit is set at the end. */
        for (i = 2; i > 0; i--) {
                src--;
                dst--;
                *dst = cpu_to_le32(*src);
        }
}


static void vmxnet3_RxCompToCPU(const struct Vmxnet3_RxCompDesc *srcDesc,
                                struct Vmxnet3_RxCompDesc *dstDesc)
{
        int i = 0;
        u32 *src = (u32 *)srcDesc;
        u32 *dst = (u32 *)dstDesc;
        for (i = 0; i < sizeof(struct Vmxnet3_RxCompDesc) / sizeof(u32); i++) {
                *dst = le32_to_cpu(*src);
                src++;
                dst++;
        }
}


/* Used to read bitfield values from double words. */
static u32 get_bitfield32(const __le32 *bitfield, u32 pos, u32 size)
{
        u32 temp = le32_to_cpu(*bitfield);
        u32 mask = ((1 << size) - 1) << pos;
        temp &= mask;
        temp >>= pos;
        return temp;
}
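
/*
 * Illustrative sketch (not part of the original driver): how
 * get_bitfield32() extracts a field from a little-endian double word.
 * With a hypothetical 1-bit "gen" field at bit position 14:
 *
 *     __le32 dw = cpu_to_le32(1 << 14);
 *     u32 gen = get_bitfield32(&dw, 14, 1);    => gen == 1
 *
 * The real position/size constants (e.g. VMXNET3_TXD_GEN_SHIFT and
 * VMXNET3_TXD_GEN_SIZE) come from the descriptor definitions used by the
 * accessor macros below.
 */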



#endif  /* __BIG_ENDIAN_BITFIELD */

#ifdef __BIG_ENDIAN_BITFIELD

#   define VMXNET3_TXDESC_GET_GEN(txdesc) get_bitfield32(((const __le32 *) \
                        txdesc) + VMXNET3_TXD_GEN_DWORD_SHIFT, \
                        VMXNET3_TXD_GEN_SHIFT, VMXNET3_TXD_GEN_SIZE)
#   define VMXNET3_TXDESC_GET_EOP(txdesc) get_bitfield32(((const __le32 *) \
                        txdesc) + VMXNET3_TXD_EOP_DWORD_SHIFT, \
                        VMXNET3_TXD_EOP_SHIFT, VMXNET3_TXD_EOP_SIZE)
#   define VMXNET3_TCD_GET_GEN(tcd) get_bitfield32(((const __le32 *)tcd) + \
                        VMXNET3_TCD_GEN_DWORD_SHIFT, VMXNET3_TCD_GEN_SHIFT, \
                        VMXNET3_TCD_GEN_SIZE)
#   define VMXNET3_TCD_GET_TXIDX(tcd) get_bitfield32((const __le32 *)tcd, \
                        VMXNET3_TCD_TXIDX_SHIFT, VMXNET3_TCD_TXIDX_SIZE)
#   define vmxnet3_getRxComp(dstrcd, rcd, tmp) do { \
                        (dstrcd) = (tmp); \
                        vmxnet3_RxCompToCPU((rcd), (tmp)); \
                } while (0)
#   define vmxnet3_getRxDesc(dstrxd, rxd, tmp) do { \
                        (dstrxd) = (tmp); \
                        vmxnet3_RxDescToCPU((rxd), (tmp)); \
                } while (0)

#else

#   define VMXNET3_TXDESC_GET_GEN(txdesc) ((txdesc)->gen)
#   define VMXNET3_TXDESC_GET_EOP(txdesc) ((txdesc)->eop)
#   define VMXNET3_TCD_GET_GEN(tcd) ((tcd)->gen)
#   define VMXNET3_TCD_GET_TXIDX(tcd) ((tcd)->txdIdx)
#   define vmxnet3_getRxComp(dstrcd, rcd, tmp) (dstrcd) = (rcd)
#   define vmxnet3_getRxDesc(dstrxd, rxd, tmp) (dstrxd) = (rxd)

#endif /* __BIG_ENDIAN_BITFIELD  */


static void
vmxnet3_unmap_tx_buf(struct vmxnet3_tx_buf_info *tbi,
                     struct pci_dev *pdev)
{
        if (tbi->map_type == VMXNET3_MAP_SINGLE)
                pci_unmap_single(pdev, tbi->dma_addr, tbi->len,
                                 PCI_DMA_TODEVICE);
        else if (tbi->map_type == VMXNET3_MAP_PAGE)
                pci_unmap_page(pdev, tbi->dma_addr, tbi->len,
                               PCI_DMA_TODEVICE);
        else
                BUG_ON(tbi->map_type != VMXNET3_MAP_NONE);

        tbi->map_type = VMXNET3_MAP_NONE; /* to help debugging */
}


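/*
 * Unmap and free all tx buffers of the pkt whose EOP desc sits at eop_idx,
 * advancing tx_ring.next2comp past them. Returns the number of tx ring
 * entries reclaimed.
 */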
static int
vmxnet3_unmap_pkt(u32 eop_idx, struct vmxnet3_tx_queue *tq,
                  struct pci_dev *pdev, struct vmxnet3_adapter *adapter)
{
        struct sk_buff *skb;
        int entries = 0;

        /* no out of order completion */
        BUG_ON(tq->buf_info[eop_idx].sop_idx != tq->tx_ring.next2comp);
        BUG_ON(VMXNET3_TXDESC_GET_EOP(&(tq->tx_ring.base[eop_idx].txd)) != 1);

        skb = tq->buf_info[eop_idx].skb;
        BUG_ON(skb == NULL);
        tq->buf_info[eop_idx].skb = NULL;

        VMXNET3_INC_RING_IDX_ONLY(eop_idx, tq->tx_ring.size);

        while (tq->tx_ring.next2comp != eop_idx) {
                vmxnet3_unmap_tx_buf(tq->buf_info + tq->tx_ring.next2comp,
                                     pdev);

                /* update next2comp w/o tx_lock. Since we are marking more,
                 * rather than fewer, tx ring entries avail, the worst case is
                 * that the tx routine incorrectly re-queues a pkt due to
                 * insufficient tx ring entries.
                 */
                vmxnet3_cmd_ring_adv_next2comp(&tq->tx_ring);
                entries++;
        }

        dev_kfree_skb_any(skb);
        return entries;
}


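/*
 * Walk the tx completion ring and reclaim descriptors for all completed
 * pkts; wake the queue if it was stopped and enough ring entries have been
 * freed while the carrier is on. Returns the number of pkts completed.
 */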
static int
vmxnet3_tq_tx_complete(struct vmxnet3_tx_queue *tq,
                        struct vmxnet3_adapter *adapter)
{
        int completed = 0;
        union Vmxnet3_GenericDesc *gdesc;

        gdesc = tq->comp_ring.base + tq->comp_ring.next2proc;
        while (VMXNET3_TCD_GET_GEN(&gdesc->tcd) == tq->comp_ring.gen) {
                completed += vmxnet3_unmap_pkt(VMXNET3_TCD_GET_TXIDX(
                                               &gdesc->tcd), tq, adapter->pdev,
                                               adapter);

                vmxnet3_comp_ring_adv_next2proc(&tq->comp_ring);
                gdesc = tq->comp_ring.base + tq->comp_ring.next2proc;
        }

        if (completed) {
                spin_lock(&tq->tx_lock);
                if (unlikely(vmxnet3_tq_stopped(tq, adapter) &&
                             vmxnet3_cmd_ring_desc_avail(&tq->tx_ring) >
                             VMXNET3_WAKE_QUEUE_THRESHOLD(tq) &&
                             netif_carrier_ok(adapter->netdev))) {
                        vmxnet3_tq_wake(tq, adapter);
                }
                spin_unlock(&tq->tx_lock);
        }
        return completed;
}


static void
vmxnet3_tq_cleanup(struct vmxnet3_tx_queue *tq,
                   struct vmxnet3_adapter *adapter)
{
        int i;

        while (tq->tx_ring.next2comp != tq->tx_ring.next2fill) {
                struct vmxnet3_tx_buf_info *tbi;
                union Vmxnet3_GenericDesc *gdesc;

                tbi = tq->buf_info + tq->tx_ring.next2comp;
                gdesc = tq->tx_ring.base + tq->tx_ring.next2comp;

                vmxnet3_unmap_tx_buf(tbi, adapter->pdev);
                if (tbi->skb) {
                        dev_kfree_skb_any(tbi->skb);
                        tbi->skb = NULL;
                }
                vmxnet3_cmd_ring_adv_next2comp(&tq->tx_ring);
        }

        /* sanity check, verify all buffers are indeed unmapped and freed */
        for (i = 0; i < tq->tx_ring.size; i++) {
                BUG_ON(tq->buf_info[i].skb != NULL ||
                       tq->buf_info[i].map_type != VMXNET3_MAP_NONE);
        }

        tq->tx_ring.gen = VMXNET3_INIT_GEN;
        tq->tx_ring.next2fill = tq->tx_ring.next2comp = 0;

        tq->comp_ring.gen = VMXNET3_INIT_GEN;
        tq->comp_ring.next2proc = 0;
}


void
vmxnet3_tq_destroy(struct vmxnet3_tx_queue *tq,
                   struct vmxnet3_adapter *adapter)
{
        if (tq->tx_ring.base) {
                pci_free_consistent(adapter->pdev, tq->tx_ring.size *
                                    sizeof(struct Vmxnet3_TxDesc),
                                    tq->tx_ring.base, tq->tx_ring.basePA);
                tq->tx_ring.base = NULL;
        }
        if (tq->data_ring.base) {
                pci_free_consistent(adapter->pdev, tq->data_ring.size *
                                    sizeof(struct Vmxnet3_TxDataDesc),
                                    tq->data_ring.base, tq->data_ring.basePA);
                tq->data_ring.base = NULL;
        }
        if (tq->comp_ring.base) {
                pci_free_consistent(adapter->pdev, tq->comp_ring.size *
                                    sizeof(struct Vmxnet3_TxCompDesc),
                                    tq->comp_ring.base, tq->comp_ring.basePA);
                tq->comp_ring.base = NULL;
        }
        kfree(tq->buf_info);
        tq->buf_info = NULL;
}


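/*
 * Reset a tx queue to its initial, empty state: zero the tx, data and comp
 * ring contents, reinitialize the gen bits and fill/completion indices, and
 * clear the per-buffer bookkeeping. Ring memory must already be allocated;
 * stats are not reset.
 */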
static void
vmxnet3_tq_init(struct vmxnet3_tx_queue *tq,
                struct vmxnet3_adapter *adapter)
{
        int i;

        /* reset the tx ring contents to 0 and reset the tx ring states */
        memset(tq->tx_ring.base, 0, tq->tx_ring.size *
               sizeof(struct Vmxnet3_TxDesc));
        tq->tx_ring.next2fill = tq->tx_ring.next2comp = 0;
        tq->tx_ring.gen = VMXNET3_INIT_GEN;

        memset(tq->data_ring.base, 0, tq->data_ring.size *
               sizeof(struct Vmxnet3_TxDataDesc));

        /* reset the tx comp ring contents to 0 and reset comp ring states */
        memset(tq->comp_ring.base, 0, tq->comp_ring.size *
               sizeof(struct Vmxnet3_TxCompDesc));
        tq->comp_ring.next2proc = 0;
        tq->comp_ring.gen = VMXNET3_INIT_GEN;

        /* reset the bookkeeping data */
        memset(tq->buf_info, 0, sizeof(tq->buf_info[0]) * tq->tx_ring.size);
        for (i = 0; i < tq->tx_ring.size; i++)
                tq->buf_info[i].map_type = VMXNET3_MAP_NONE;

        /* stats are not reset */
}


static int
vmxnet3_tq_create(struct vmxnet3_tx_queue *tq,
                  struct vmxnet3_adapter *adapter)
{
        BUG_ON(tq->tx_ring.base || tq->data_ring.base ||
               tq->comp_ring.base || tq->buf_info);

        tq->tx_ring.base = pci_alloc_consistent(adapter->pdev, tq->tx_ring.size
                           * sizeof(struct Vmxnet3_TxDesc),
                           &tq->tx_ring.basePA);
        if (!tq->tx_ring.base) {
                printk(KERN_ERR "%s: failed to allocate tx ring\n",
                       adapter->netdev->name);
                goto err;
        }

        tq->data_ring.base = pci_alloc_consistent(adapter->pdev,
                             tq->data_ring.size *
                             sizeof(struct Vmxnet3_TxDataDesc),
                             &tq->data_ring.basePA);
        if (!tq->data_ring.base) {
                printk(KERN_ERR "%s: failed to allocate data ring\n",
                       adapter->netdev->name);
                goto err;
        }

        tq->comp_ring.base = pci_alloc_consistent(adapter->pdev,
                             tq->comp_ring.size *
                             sizeof(struct Vmxnet3_TxCompDesc),
                             &tq->comp_ring.basePA);
        if (!tq->comp_ring.base) {
                printk(KERN_ERR "%s: failed to allocate tx comp ring\n",
                       adapter->netdev->name);
                goto err;
        }

        tq->buf_info = kcalloc(tq->tx_ring.size, sizeof(tq->buf_info[0]),
                               GFP_KERNEL);
        if (!tq->buf_info) {
                printk(KERN_ERR "%s: failed to allocate tx bufinfo\n",
                       adapter->netdev->name);
                goto err;
        }

        return 0;

err:
        vmxnet3_tq_destroy(tq, adapter);
        return -ENOMEM;
}


/*
 *    starting from ring->next2fill, allocate rx buffers for the given ring
 *    of the rx queue and update the rx desc. stop after @num_to_alloc buffers
 *    are allocated or allocation fails
 */

static int
vmxnet3_rq_alloc_rx_buf(struct vmxnet3_rx_queue *rq, u32 ring_idx,
                        int num_to_alloc, struct vmxnet3_adapter *adapter)
{
        int num_allocated = 0;
        struct vmxnet3_rx_buf_info *rbi_base = rq->buf_info[ring_idx];
        struct vmxnet3_cmd_ring *ring = &rq->rx_ring[ring_idx];
        u32 val;

        while (num_allocated < num_to_alloc) {
                struct vmxnet3_rx_buf_info *rbi;
                union Vmxnet3_GenericDesc *gd;

                rbi = rbi_base + ring->next2fill;
                gd = ring->base + ring->next2fill;

                if (rbi->buf_type == VMXNET3_RX_BUF_SKB) {
                        if (rbi->skb == NULL) {
                                rbi->skb = dev_alloc_skb(rbi->len +
                                                         NET_IP_ALIGN);
                                if (unlikely(rbi->skb == NULL)) {
                                        rq->stats.rx_buf_alloc_failure++;
                                        break;
                                }
                                rbi->skb->dev = adapter->netdev;

                                skb_reserve(rbi->skb, NET_IP_ALIGN);
                                rbi->dma_addr = pci_map_single(adapter->pdev,
                                                rbi->skb->data, rbi->len,
                                                PCI_DMA_FROMDEVICE);
                        } else {
                                /* rx buffer skipped by the device */
                        }
                        val = VMXNET3_RXD_BTYPE_HEAD << VMXNET3_RXD_BTYPE_SHIFT;
                } else {
                        BUG_ON(rbi->buf_type != VMXNET3_RX_BUF_PAGE ||
                               rbi->len  != PAGE_SIZE);

                        if (rbi->page == NULL) {
                                rbi->page = alloc_page(GFP_ATOMIC);
                                if (unlikely(rbi->page == NULL)) {
                                        rq->stats.rx_buf_alloc_failure++;
                                        break;
                                }
                                rbi->dma_addr = pci_map_page(adapter->pdev,
                                                rbi->page, 0, PAGE_SIZE,
                                                PCI_DMA_FROMDEVICE);
                        } else {
                                /* rx buffers skipped by the device */
                        }
                        val = VMXNET3_RXD_BTYPE_BODY << VMXNET3_RXD_BTYPE_SHIFT;
                }

                BUG_ON(rbi->dma_addr == 0);
                gd->rxd.addr = cpu_to_le64(rbi->dma_addr);
                gd->dword[2] = cpu_to_le32((ring->gen << VMXNET3_RXD_GEN_SHIFT)
                                           | val | rbi->len);

                num_allocated++;
                vmxnet3_cmd_ring_adv_next2fill(ring);
        }
        rq->uncommitted[ring_idx] += num_allocated;

        dev_dbg(&adapter->netdev->dev,
                "alloc_rx_buf: %d allocated, next2fill %u, next2comp "
                "%u, uncommitted %u\n", num_allocated, ring->next2fill,
                ring->next2comp, rq->uncommitted[ring_idx]);

        /* so that the device can distinguish a full ring and an empty ring */
        BUG_ON(num_allocated != 0 && ring->next2fill == ring->next2comp);

        return num_allocated;
}


static void
vmxnet3_append_frag(struct sk_buff *skb, struct Vmxnet3_RxCompDesc *rcd,
                    struct vmxnet3_rx_buf_info *rbi)
{
        struct skb_frag_struct *frag = skb_shinfo(skb)->frags +
                skb_shinfo(skb)->nr_frags;

        BUG_ON(skb_shinfo(skb)->nr_frags >= MAX_SKB_FRAGS);

        frag->page = rbi->page;
        frag->page_offset = 0;
        frag->size = rcd->len;
        skb->data_len += frag->size;
        skb_shinfo(skb)->nr_frags++;
}


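/*
 * Fill tx descriptors for a pkt: one desc pointing at the copied headers in
 * the data ring (if any), one or more for the remaining linear part, and
 * one per page frag. Sets ctx->sop_txd/ctx->eop_txd and records the skb in
 * the buf_info entry of the last descriptor.
 */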
static void
vmxnet3_map_pkt(struct sk_buff *skb, struct vmxnet3_tx_ctx *ctx,
                struct vmxnet3_tx_queue *tq, struct pci_dev *pdev,
                struct vmxnet3_adapter *adapter)
{
        u32 dw2, len;
        unsigned long buf_offset;
        int i;
        union Vmxnet3_GenericDesc *gdesc;
        struct vmxnet3_tx_buf_info *tbi = NULL;

        BUG_ON(ctx->copy_size > skb_headlen(skb));

        /* use the previous gen bit for the SOP desc */
        dw2 = (tq->tx_ring.gen ^ 0x1) << VMXNET3_TXD_GEN_SHIFT;

        ctx->sop_txd = tq->tx_ring.base + tq->tx_ring.next2fill;
        gdesc = ctx->sop_txd; /* both loops below can be skipped */

        /* no need to map the buffer if headers are copied */
        if (ctx->copy_size) {
                ctx->sop_txd->txd.addr = cpu_to_le64(tq->data_ring.basePA +
                                        tq->tx_ring.next2fill *
                                        sizeof(struct Vmxnet3_TxDataDesc));
                ctx->sop_txd->dword[2] = cpu_to_le32(dw2 | ctx->copy_size);
                ctx->sop_txd->dword[3] = 0;

                tbi = tq->buf_info + tq->tx_ring.next2fill;
                tbi->map_type = VMXNET3_MAP_NONE;

                dev_dbg(&adapter->netdev->dev,
                        "txd[%u]: 0x%Lx 0x%x 0x%x\n",
                        tq->tx_ring.next2fill,
                        le64_to_cpu(ctx->sop_txd->txd.addr),
                        ctx->sop_txd->dword[2], ctx->sop_txd->dword[3]);
                vmxnet3_cmd_ring_adv_next2fill(&tq->tx_ring);

                /* use the right gen for non-SOP desc */
                dw2 = tq->tx_ring.gen << VMXNET3_TXD_GEN_SHIFT;
        }

        /* linear part can use multiple tx desc if it's big */
        len = skb_headlen(skb) - ctx->copy_size;
        buf_offset = ctx->copy_size;
        while (len) {
                u32 buf_size;

                buf_size = len > VMXNET3_MAX_TX_BUF_SIZE ?
                           VMXNET3_MAX_TX_BUF_SIZE : len;

                tbi = tq->buf_info + tq->tx_ring.next2fill;
                tbi->map_type = VMXNET3_MAP_SINGLE;
                tbi->dma_addr = pci_map_single(adapter->pdev,
                                skb->data + buf_offset, buf_size,
                                PCI_DMA_TODEVICE);

                tbi->len = buf_size; /* this automatically converts 2^14 to 0 */

                gdesc = tq->tx_ring.base + tq->tx_ring.next2fill;
                BUG_ON(gdesc->txd.gen == tq->tx_ring.gen);

                gdesc->txd.addr = cpu_to_le64(tbi->dma_addr);
                gdesc->dword[2] = cpu_to_le32(dw2 | buf_size);
                gdesc->dword[3] = 0;

                dev_dbg(&adapter->netdev->dev,
                        "txd[%u]: 0x%Lx 0x%x 0x%x\n",
                        tq->tx_ring.next2fill, le64_to_cpu(gdesc->txd.addr),
                        le32_to_cpu(gdesc->dword[2]), gdesc->dword[3]);
                vmxnet3_cmd_ring_adv_next2fill(&tq->tx_ring);
                dw2 = tq->tx_ring.gen << VMXNET3_TXD_GEN_SHIFT;

                len -= buf_size;
                buf_offset += buf_size;
        }

        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
                struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];

                tbi = tq->buf_info + tq->tx_ring.next2fill;
                tbi->map_type = VMXNET3_MAP_PAGE;
                tbi->dma_addr = pci_map_page(adapter->pdev, frag->page,
                                             frag->page_offset, frag->size,
                                             PCI_DMA_TODEVICE);

                tbi->len = frag->size;

                gdesc = tq->tx_ring.base + tq->tx_ring.next2fill;
                BUG_ON(gdesc->txd.gen == tq->tx_ring.gen);

                gdesc->txd.addr = cpu_to_le64(tbi->dma_addr);
                gdesc->dword[2] = cpu_to_le32(dw2 | frag->size);
                gdesc->dword[3] = 0;

                dev_dbg(&adapter->netdev->dev,
                        "txd[%u]: 0x%llx 0x%x 0x%x\n",
                        tq->tx_ring.next2fill, le64_to_cpu(gdesc->txd.addr),
                        le32_to_cpu(gdesc->dword[2]), gdesc->dword[3]);
                vmxnet3_cmd_ring_adv_next2fill(&tq->tx_ring);
                dw2 = tq->tx_ring.gen << VMXNET3_TXD_GEN_SHIFT;
        }

        ctx->eop_txd = gdesc;

        /* set the last buf_info for the pkt */
        tbi->skb = skb;
        tbi->sop_idx = ctx->sop_txd - tq->tx_ring.base;
}


/*
 *    parse and copy relevant protocol headers:
 *      For a tso pkt, relevant headers are L2/3/4 including options
 *      For a pkt requesting csum offloading, they are L2/3 and may include L4
 *      if it's a TCP/UDP pkt
 *
 * Returns:
 *    -1:  error happens during parsing
 *     0:  protocol headers parsed, but too big to be copied
 *     1:  protocol headers parsed and copied
 *
 * Other effects:
 *    1. related *ctx fields are updated.
 *    2. ctx->copy_size is # of bytes copied
 *    3. the portion copied is guaranteed to be in the linear part
 *
 */
static int
vmxnet3_parse_and_copy_hdr(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
                           struct vmxnet3_tx_ctx *ctx,
                           struct vmxnet3_adapter *adapter)
{
        struct Vmxnet3_TxDataDesc *tdd;

        if (ctx->mss) {
                ctx->eth_ip_hdr_size = skb_transport_offset(skb);
                ctx->l4_hdr_size = ((struct tcphdr *)
                                   skb_transport_header(skb))->doff * 4;
                ctx->copy_size = ctx->eth_ip_hdr_size + ctx->l4_hdr_size;
        } else {
                unsigned int pull_size;

                if (skb->ip_summed == CHECKSUM_PARTIAL) {
                        ctx->eth_ip_hdr_size = skb_transport_offset(skb);

                        if (ctx->ipv4) {
                                struct iphdr *iph = (struct iphdr *)
                                                    skb_network_header(skb);
                                if (iph->protocol == IPPROTO_TCP) {
                                        pull_size = ctx->eth_ip_hdr_size +
                                                    sizeof(struct tcphdr);

                                        if (unlikely(!pskb_may_pull(skb,
                                                                pull_size))) {
                                                goto err;
                                        }
                                        ctx->l4_hdr_size = ((struct tcphdr *)
                                           skb_transport_header(skb))->doff * 4;
                                } else if (iph->protocol == IPPROTO_UDP) {
                                        ctx->l4_hdr_size =
                                                        sizeof(struct udphdr);
                                } else {
                                        ctx->l4_hdr_size = 0;
                                }
                        } else {
                                /* for simplicity, don't copy L4 headers */
                                ctx->l4_hdr_size = 0;
                        }
                        ctx->copy_size = ctx->eth_ip_hdr_size +
                                         ctx->l4_hdr_size;
                } else {
                        ctx->eth_ip_hdr_size = 0;
                        ctx->l4_hdr_size = 0;
                        /* copy as much as allowed */
                        ctx->copy_size = min((unsigned int)VMXNET3_HDR_COPY_SIZE
                                             , skb_headlen(skb));
                }

                /* make sure headers are accessible directly */
                if (unlikely(!pskb_may_pull(skb, ctx->copy_size)))
                        goto err;
        }

        if (unlikely(ctx->copy_size > VMXNET3_HDR_COPY_SIZE)) {
                tq->stats.oversized_hdr++;
                ctx->copy_size = 0;
                return 0;
        }

        tdd = tq->data_ring.base + tq->tx_ring.next2fill;

        memcpy(tdd->data, skb->data, ctx->copy_size);
        dev_dbg(&adapter->netdev->dev,
                "copy %u bytes to dataRing[%u]\n",
                ctx->copy_size, tq->tx_ring.next2fill);
        return 1;

err:
        return -1;
}


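/*
 * For TSO the device needs the TCP checksum field seeded with the
 * pseudo-header checksum: zero the IP header checksum (IPv4 only) and store
 * the pseudo-header csum, computed over a zero length, in tcph->check.
 */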
static void
vmxnet3_prepare_tso(struct sk_buff *skb,
                    struct vmxnet3_tx_ctx *ctx)
{
        struct tcphdr *tcph = (struct tcphdr *)skb_transport_header(skb);
        if (ctx->ipv4) {
                struct iphdr *iph = (struct iphdr *)skb_network_header(skb);
                iph->check = 0;
                tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr, 0,
                                                 IPPROTO_TCP, 0);
        } else {
                struct ipv6hdr *iph = (struct ipv6hdr *)skb_network_header(skb);
                tcph->check = ~csum_ipv6_magic(&iph->saddr, &iph->daddr, 0,
                                               IPPROTO_TCP, 0);
        }
}


/*
 * Transmits a pkt through a given tq
 * Returns:
 *    NETDEV_TX_OK:      descriptors are set up successfully
 *    NETDEV_TX_OK:      error occurred, the pkt is dropped
 *    NETDEV_TX_BUSY:    tx ring is full, queue is stopped
 *
 * Side-effects:
 *    1. tx ring may be changed
 *    2. tq stats may be updated accordingly
 *    3. shared->txNumDeferred may be updated
 */

static int
vmxnet3_tq_xmit(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
                struct vmxnet3_adapter *adapter, struct net_device *netdev)
{
        int ret;
        u32 count;
        unsigned long flags;
        struct vmxnet3_tx_ctx ctx;
        union Vmxnet3_GenericDesc *gdesc;
#ifdef __BIG_ENDIAN_BITFIELD
        /* Use temporary descriptor to avoid touching bits multiple times */
        union Vmxnet3_GenericDesc tempTxDesc;
#endif

        /* conservatively estimate # of descriptors to use */
        count = VMXNET3_TXD_NEEDED(skb_headlen(skb)) +
                skb_shinfo(skb)->nr_frags + 1;

        ctx.ipv4 = (skb->protocol == __constant_ntohs(ETH_P_IP));

        ctx.mss = skb_shinfo(skb)->gso_size;
        if (ctx.mss) {
                if (skb_header_cloned(skb)) {
                        if (unlikely(pskb_expand_head(skb, 0, 0,
                                                      GFP_ATOMIC) != 0)) {
                                tq->stats.drop_tso++;
                                goto drop_pkt;
                        }
                        tq->stats.copy_skb_header++;
                }
                vmxnet3_prepare_tso(skb, &ctx);
        } else {
                if (unlikely(count > VMXNET3_MAX_TXD_PER_PKT)) {

                        /* non-tso pkts must not use more than
                         * VMXNET3_MAX_TXD_PER_PKT entries
                         */
                        if (skb_linearize(skb) != 0) {
                                tq->stats.drop_too_many_frags++;
                                goto drop_pkt;
                        }
                        tq->stats.linearized++;

                        /* recalculate the # of descriptors to use */
                        count = VMXNET3_TXD_NEEDED(skb_headlen(skb)) + 1;
                }
        }

        ret = vmxnet3_parse_and_copy_hdr(skb, tq, &ctx, adapter);
        if (ret >= 0) {
                BUG_ON(ret <= 0 && ctx.copy_size != 0);
                /* hdrs parsed, check against other limits */
                if (ctx.mss) {
                        if (unlikely(ctx.eth_ip_hdr_size + ctx.l4_hdr_size >
                                     VMXNET3_MAX_TX_BUF_SIZE)) {
                                goto hdr_too_big;
                        }
                } else {
                        if (skb->ip_summed == CHECKSUM_PARTIAL) {
                                if (unlikely(ctx.eth_ip_hdr_size +
                                             skb->csum_offset >
                                             VMXNET3_MAX_CSUM_OFFSET)) {
                                        goto hdr_too_big;
                                }
                        }
                }
        } else {
                tq->stats.drop_hdr_inspect_err++;
                goto drop_pkt;
        }

        spin_lock_irqsave(&tq->tx_lock, flags);

        if (count > vmxnet3_cmd_ring_desc_avail(&tq->tx_ring)) {
                tq->stats.tx_ring_full++;
                dev_dbg(&adapter->netdev->dev,
                        "tx queue stopped on %s, next2comp %u"
                        " next2fill %u\n", adapter->netdev->name,
                        tq->tx_ring.next2comp, tq->tx_ring.next2fill);

                vmxnet3_tq_stop(tq, adapter);
                spin_unlock_irqrestore(&tq->tx_lock, flags);
                return NETDEV_TX_BUSY;
        }

        /* fill tx descs related to addr & len */
        vmxnet3_map_pkt(skb, &ctx, tq, adapter->pdev, adapter);

        /* setup the EOP desc */
        ctx.eop_txd->dword[3] = cpu_to_le32(VMXNET3_TXD_CQ | VMXNET3_TXD_EOP);

        /* setup the SOP desc */
#ifdef __BIG_ENDIAN_BITFIELD
        gdesc = &tempTxDesc;
        gdesc->dword[2] = ctx.sop_txd->dword[2];
        gdesc->dword[3] = ctx.sop_txd->dword[3];
#else
        gdesc = ctx.sop_txd;
#endif
        if (ctx.mss) {
                gdesc->txd.hlen = ctx.eth_ip_hdr_size + ctx.l4_hdr_size;
                gdesc->txd.om = VMXNET3_OM_TSO;
                gdesc->txd.msscof = ctx.mss;
                le32_add_cpu(&tq->shared->txNumDeferred, (skb->len -
                             gdesc->txd.hlen + ctx.mss - 1) / ctx.mss);
        } else {
                if (skb->ip_summed == CHECKSUM_PARTIAL) {
                        gdesc->txd.hlen = ctx.eth_ip_hdr_size;
                        gdesc->txd.om = VMXNET3_OM_CSUM;
                        gdesc->txd.msscof = ctx.eth_ip_hdr_size +
                                            skb->csum_offset;
                } else {
                        gdesc->txd.om = 0;
                        gdesc->txd.msscof = 0;
                }
                le32_add_cpu(&tq->shared->txNumDeferred, 1);
        }

        if (vlan_tx_tag_present(skb)) {
                gdesc->txd.ti = 1;
                gdesc->txd.tci = vlan_tx_tag_get(skb);
        }

        /* finally flips the GEN bit of the SOP desc. */
        gdesc->dword[2] = cpu_to_le32(le32_to_cpu(gdesc->dword[2]) ^
                                                  VMXNET3_TXD_GEN);
#ifdef __BIG_ENDIAN_BITFIELD
        /* Finished updating in bitfields of Tx Desc, so write them in original
         * place.
         */
        vmxnet3_TxDescToLe((struct Vmxnet3_TxDesc *)gdesc,
                           (struct Vmxnet3_TxDesc *)ctx.sop_txd);
        gdesc = ctx.sop_txd;
#endif
        dev_dbg(&adapter->netdev->dev,
                "txd[%u]: SOP 0x%Lx 0x%x 0x%x\n",
                (u32)((union Vmxnet3_GenericDesc *)ctx.sop_txd -
                tq->tx_ring.base), le64_to_cpu(gdesc->txd.addr),
                le32_to_cpu(gdesc->dword[2]), le32_to_cpu(gdesc->dword[3]));

        spin_unlock_irqrestore(&tq->tx_lock, flags);

        if (le32_to_cpu(tq->shared->txNumDeferred) >=
                                        le32_to_cpu(tq->shared->txThreshold)) {
                tq->shared->txNumDeferred = 0;
                VMXNET3_WRITE_BAR0_REG(adapter, VMXNET3_REG_TXPROD,
                                       tq->tx_ring.next2fill);
        }
        netdev->trans_start = jiffies;

        return NETDEV_TX_OK;

hdr_too_big:
        tq->stats.drop_oversized_hdr++;
drop_pkt:
        tq->stats.drop_total++;
        dev_kfree_skb(skb);
        return NETDEV_TX_OK;
}


static netdev_tx_t
vmxnet3_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
{
        struct vmxnet3_adapter *adapter = netdev_priv(netdev);

        return vmxnet3_tq_xmit(skb, &adapter->tx_queue, adapter, netdev);
}


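/*
 * Set skb->ip_summed from the checksum bits of the rx completion desc: mark
 * CHECKSUM_UNNECESSARY when the device validated both csums of a TCP/UDP
 * pkt, otherwise fall back to the partial csum or CHECKSUM_NONE.
 */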
static void
vmxnet3_rx_csum(struct vmxnet3_adapter *adapter,
                struct sk_buff *skb,
                union Vmxnet3_GenericDesc *gdesc)
{
        if (!gdesc->rcd.cnc && adapter->rxcsum) {
                /* typical case: TCP/UDP over IP and both csums are correct */
                if ((le32_to_cpu(gdesc->dword[3]) & VMXNET3_RCD_CSUM_OK) ==
                                                        VMXNET3_RCD_CSUM_OK) {
                        skb->ip_summed = CHECKSUM_UNNECESSARY;
                        BUG_ON(!(gdesc->rcd.tcp || gdesc->rcd.udp));
                        BUG_ON(!(gdesc->rcd.v4  || gdesc->rcd.v6));
                        BUG_ON(gdesc->rcd.frg);
                } else {
                        if (gdesc->rcd.csum) {
                                skb->csum = htons(gdesc->rcd.csum);
                                skb->ip_summed = CHECKSUM_PARTIAL;
                        } else {
                                skb->ip_summed = CHECKSUM_NONE;
                        }
                }
        } else {
                skb->ip_summed = CHECKSUM_NONE;
        }
}


static void
vmxnet3_rx_error(struct vmxnet3_rx_queue *rq, struct Vmxnet3_RxCompDesc *rcd,
                 struct vmxnet3_rx_ctx *ctx,  struct vmxnet3_adapter *adapter)
{
        rq->stats.drop_err++;
        if (!rcd->fcs)
                rq->stats.drop_fcs++;

        rq->stats.drop_total++;

        /*
         * We do not unmap and chain the rx buffer to the skb.
         * We basically pretend this buffer is not used and will be recycled
         * by vmxnet3_rq_alloc_rx_buf()
         */

        /*
         * ctx->skb may be NULL if this is the first and the only one
         * desc for the pkt
         */
        if (ctx->skb)
                dev_kfree_skb_irq(ctx->skb);

        ctx->skb = NULL;
}


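/*
 * Process up to @quota rx completion descriptors: unmap each completed
 * buffer, chain body buffers onto the SOP skb as frags, hand finished pkts
 * to the stack, and replenish rx buffers (updating the RXPROD register when
 * the device asks for it). Returns the number of descriptors processed.
 */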
static int
vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq,
                       struct vmxnet3_adapter *adapter, int quota)
{
        static u32 rxprod_reg[2] = {VMXNET3_REG_RXPROD, VMXNET3_REG_RXPROD2};
        u32 num_rxd = 0;
        struct Vmxnet3_RxCompDesc *rcd;
        struct vmxnet3_rx_ctx *ctx = &rq->rx_ctx;
#ifdef __BIG_ENDIAN_BITFIELD
        struct Vmxnet3_RxDesc rxCmdDesc;
        struct Vmxnet3_RxCompDesc rxComp;
#endif
        vmxnet3_getRxComp(rcd, &rq->comp_ring.base[rq->comp_ring.next2proc].rcd,
                          &rxComp);
        while (rcd->gen == rq->comp_ring.gen) {
                struct vmxnet3_rx_buf_info *rbi;
                struct sk_buff *skb;
                int num_to_alloc;
                struct Vmxnet3_RxDesc *rxd;
                u32 idx, ring_idx;

                if (num_rxd >= quota) {
                        /* we may stop even before we see the EOP desc of
                         * the current pkt
                         */
                        break;
                }
                num_rxd++;

                idx = rcd->rxdIdx;
                ring_idx = rcd->rqID == rq->qid ? 0 : 1;
                vmxnet3_getRxDesc(rxd, &rq->rx_ring[ring_idx].base[idx].rxd,
                                  &rxCmdDesc);
                rbi = rq->buf_info[ring_idx] + idx;

                BUG_ON(rxd->addr != rbi->dma_addr ||
                       rxd->len != rbi->len);

                if (unlikely(rcd->eop && rcd->err)) {
                        vmxnet3_rx_error(rq, rcd, ctx, adapter);
                        goto rcd_done;
                }

                if (rcd->sop) { /* first buf of the pkt */
                        BUG_ON(rxd->btype != VMXNET3_RXD_BTYPE_HEAD ||
                               rcd->rqID != rq->qid);

                        BUG_ON(rbi->buf_type != VMXNET3_RX_BUF_SKB);
                        BUG_ON(ctx->skb != NULL || rbi->skb == NULL);

                        if (unlikely(rcd->len == 0)) {
                                /* Pretend the rx buffer is skipped. */
                                BUG_ON(!(rcd->sop && rcd->eop));
                                dev_dbg(&adapter->netdev->dev,
                                        "rxRing[%u][%u] 0 length\n",
                                        ring_idx, idx);
                                goto rcd_done;
                        }

                        ctx->skb = rbi->skb;
                        rbi->skb = NULL;

                        pci_unmap_single(adapter->pdev, rbi->dma_addr, rbi->len,
                                         PCI_DMA_FROMDEVICE);

                        skb_put(ctx->skb, rcd->len);
                } else {
                        BUG_ON(ctx->skb == NULL);
                        /* non-SOP buffer must be type 1 in most cases */
                        if (rbi->buf_type == VMXNET3_RX_BUF_PAGE) {
                                BUG_ON(rxd->btype != VMXNET3_RXD_BTYPE_BODY);

                                if (rcd->len) {
                                        pci_unmap_page(adapter->pdev,
                                                       rbi->dma_addr, rbi->len,
                                                       PCI_DMA_FROMDEVICE);

                                        vmxnet3_append_frag(ctx->skb, rcd, rbi);
                                        rbi->page = NULL;
                                }
                        } else {
                                /*
                                 * The only time a non-SOP buffer is type 0 is
                                 * when it's EOP and error flag is raised, which
                                 * has already been handled.
                                 */
                                BUG_ON(true);
                        }
                }

                skb = ctx->skb;
                if (rcd->eop) {
                        skb->len += skb->data_len;
                        skb->truesize += skb->data_len;

                        vmxnet3_rx_csum(adapter, skb,
                                        (union Vmxnet3_GenericDesc *)rcd);
                        skb->protocol = eth_type_trans(skb, adapter->netdev);

                        if (unlikely(adapter->vlan_grp && rcd->ts)) {
                                vlan_hwaccel_receive_skb(skb,
                                                adapter->vlan_grp, rcd->tci);
                        } else {
                                netif_receive_skb(skb);
                        }

                        adapter->netdev->last_rx = jiffies;
                        ctx->skb = NULL;
                }

rcd_done:
                /* device may skip some rx descs */
                rq->rx_ring[ring_idx].next2comp = idx;
                VMXNET3_INC_RING_IDX_ONLY(rq->rx_ring[ring_idx].next2comp,
                                          rq->rx_ring[ring_idx].size);

                /* refill rx buffers frequently to avoid starving the h/w */
                num_to_alloc = vmxnet3_cmd_ring_desc_avail(rq->rx_ring +
                                                           ring_idx);
                if (unlikely(num_to_alloc > VMXNET3_RX_ALLOC_THRESHOLD(rq,
                                                        ring_idx, adapter))) {
                        vmxnet3_rq_alloc_rx_buf(rq, ring_idx, num_to_alloc,
                                                adapter);

                        /* if needed, update the register */
                        if (unlikely(rq->shared->updateRxProd)) {
                                VMXNET3_WRITE_BAR0_REG(adapter,
                                        rxprod_reg[ring_idx] + rq->qid * 8,
                                        rq->rx_ring[ring_idx].next2fill);
                                rq->uncommitted[ring_idx] = 0;
                        }
                }

                vmxnet3_comp_ring_adv_next2proc(&rq->comp_ring);
                vmxnet3_getRxComp(rcd,
                     &rq->comp_ring.base[rq->comp_ring.next2proc].rcd, &rxComp);
        }

        return num_rxd;
}


static void
vmxnet3_rq_cleanup(struct vmxnet3_rx_queue *rq,
                   struct vmxnet3_adapter *adapter)
{
        u32 i, ring_idx;
        struct Vmxnet3_RxDesc *rxd;

        for (ring_idx = 0; ring_idx < 2; ring_idx++) {
                for (i = 0; i < rq->rx_ring[ring_idx].size; i++) {
#ifdef __BIG_ENDIAN_BITFIELD
                        struct Vmxnet3_RxDesc rxDesc;
#endif
                        vmxnet3_getRxDesc(rxd,
                                &rq->rx_ring[ring_idx].base[i].rxd, &rxDesc);

                        if (rxd->btype == VMXNET3_RXD_BTYPE_HEAD &&
                                        rq->buf_info[ring_idx][i].skb) {
                                pci_unmap_single(adapter->pdev, rxd->addr,
                                                 rxd->len, PCI_DMA_FROMDEVICE);
                                dev_kfree_skb(rq->buf_info[ring_idx][i].skb);
                                rq->buf_info[ring_idx][i].skb = NULL;
                        } else if (rxd->btype == VMXNET3_RXD_BTYPE_BODY &&
                                        rq->buf_info[ring_idx][i].page) {
                                pci_unmap_page(adapter->pdev, rxd->addr,
                                               rxd->len, PCI_DMA_FROMDEVICE);
                                put_page(rq->buf_info[ring_idx][i].page);
                                rq->buf_info[ring_idx][i].page = NULL;
                        }
                }

                rq->rx_ring[ring_idx].gen = VMXNET3_INIT_GEN;
                rq->rx_ring[ring_idx].next2fill =
                                        rq->rx_ring[ring_idx].next2comp = 0;
                rq->uncommitted[ring_idx] = 0;
        }

        rq->comp_ring.gen = VMXNET3_INIT_GEN;
        rq->comp_ring.next2proc = 0;
}


void vmxnet3_rq_destroy(struct vmxnet3_rx_queue *rq,
                        struct vmxnet3_adapter *adapter)
{
        int i;
        int j;

        /* all rx buffers must have already been freed */
        for (i = 0; i < 2; i++) {
                if (rq->buf_info[i]) {
                        for (j = 0; j < rq->rx_ring[i].size; j++)
                                BUG_ON(rq->buf_info[i][j].page != NULL);
                }
        }


        kfree(rq->buf_info[0]);

        for (i = 0; i < 2; i++) {
                if (rq->rx_ring[i].base) {
                        pci_free_consistent(adapter->pdev, rq->rx_ring[i].size
                                            * sizeof(struct Vmxnet3_RxDesc),
                                            rq->rx_ring[i].base,
                                            rq->rx_ring[i].basePA);
                        rq->rx_ring[i].base = NULL;
                }
                rq->buf_info[i] = NULL;
        }

        if (rq->comp_ring.base) {
                pci_free_consistent(adapter->pdev, rq->comp_ring.size *
                                    sizeof(struct Vmxnet3_RxCompDesc),
                                    rq->comp_ring.base, rq->comp_ring.basePA);
                rq->comp_ring.base = NULL;
        }
}


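/*
 * Reset a rx queue: the type/len of each buf_info entry is reinitialized,
 * ring contents are zeroed, and fresh rx buffers are allocated for both
 * rings. Fails with -ENOMEM if not even one buffer could be allocated for
 * the first ring. Stats are not reset.
 */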
static int
vmxnet3_rq_init(struct vmxnet3_rx_queue *rq,
                struct vmxnet3_adapter  *adapter)
{
        int i;

        /* initialize buf_info */
        for (i = 0; i < rq->rx_ring[0].size; i++) {

                /* 1st buf for a pkt is skbuff */
                if (i % adapter->rx_buf_per_pkt == 0) {
                        rq->buf_info[0][i].buf_type = VMXNET3_RX_BUF_SKB;
                        rq->buf_info[0][i].len = adapter->skb_buf_size;
                } else { /* subsequent bufs for a pkt are frags */
                        rq->buf_info[0][i].buf_type = VMXNET3_RX_BUF_PAGE;
                        rq->buf_info[0][i].len = PAGE_SIZE;
                }
        }
        for (i = 0; i < rq->rx_ring[1].size; i++) {
                rq->buf_info[1][i].buf_type = VMXNET3_RX_BUF_PAGE;
                rq->buf_info[1][i].len = PAGE_SIZE;
        }

        /* reset internal state and allocate buffers for both rings */
        for (i = 0; i < 2; i++) {
                rq->rx_ring[i].next2fill = rq->rx_ring[i].next2comp = 0;
                rq->uncommitted[i] = 0;

                memset(rq->rx_ring[i].base, 0, rq->rx_ring[i].size *
                       sizeof(struct Vmxnet3_RxDesc));
                rq->rx_ring[i].gen = VMXNET3_INIT_GEN;
        }
        if (vmxnet3_rq_alloc_rx_buf(rq, 0, rq->rx_ring[0].size - 1,
                                    adapter) == 0) {
                /* the 1st ring must get at least 1 rx buffer */
                return -ENOMEM;
        }
        vmxnet3_rq_alloc_rx_buf(rq, 1, rq->rx_ring[1].size - 1, adapter);

        /* reset the comp ring */
        rq->comp_ring.next2proc = 0;
        memset(rq->comp_ring.base, 0, rq->comp_ring.size *
               sizeof(struct Vmxnet3_RxCompDesc));
        rq->comp_ring.gen = VMXNET3_INIT_GEN;

        /* reset rxctx */
        rq->rx_ctx.skb = NULL;

        /* stats are not reset */
        return 0;
}
1342
1343
1344 static int
1345 vmxnet3_rq_create(struct vmxnet3_rx_queue *rq, struct vmxnet3_adapter *adapter)
1346 {
1347         int i;
1348         size_t sz;
1349         struct vmxnet3_rx_buf_info *bi;
1350
1351         for (i = 0; i < 2; i++) {
1352
1353                 sz = rq->rx_ring[i].size * sizeof(struct Vmxnet3_RxDesc);
1354                 rq->rx_ring[i].base = pci_alloc_consistent(adapter->pdev, sz,
1355                                                         &rq->rx_ring[i].basePA);
1356                 if (!rq->rx_ring[i].base) {
1357                         printk(KERN_ERR "%s: failed to allocate rx ring %d\n",
1358                                adapter->netdev->name, i);
1359                         goto err;
1360                 }
1361         }
1362
1363         sz = rq->comp_ring.size * sizeof(struct Vmxnet3_RxCompDesc);
1364         rq->comp_ring.base = pci_alloc_consistent(adapter->pdev, sz,
1365                                                   &rq->comp_ring.basePA);
1366         if (!rq->comp_ring.base) {
1367                 printk(KERN_ERR "%s: failed to allocate rx comp ring\n",
1368                        adapter->netdev->name);
1369                 goto err;
1370         }
1371
1372         sz = sizeof(struct vmxnet3_rx_buf_info) * (rq->rx_ring[0].size +
1373                                                    rq->rx_ring[1].size);
1374         bi = kzalloc(sz, GFP_KERNEL);
1375         if (!bi) {
1376                 printk(KERN_ERR "%s: failed to allocate rx bufinfo\n",
1377                        adapter->netdev->name);
1378                 goto err;
1379         }
1381         rq->buf_info[0] = bi;
1382         rq->buf_info[1] = bi + rq->rx_ring[0].size;
1383
1384         return 0;
1385
1386 err:
1387         vmxnet3_rq_destroy(rq, adapter);
1388         return -ENOMEM;
1389 }
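
/*
 * Note on the allocation above: both rx rings share a single zeroed
 * buf_info array, split by pointer arithmetic:
 *
 *   bi: [ ring0: rx_ring[0].size entries ][ ring1: rx_ring[1].size entries ]
 *        ^ rq->buf_info[0]                 ^ rq->buf_info[1]
 *
 * so the destroy path can release the whole table through buf_info[0].
 */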
1390
1391
1392 static int
1393 vmxnet3_do_poll(struct vmxnet3_adapter *adapter, int budget)
1394 {
1395         if (unlikely(adapter->shared->ecr))
1396                 vmxnet3_process_events(adapter);
1397
1398         vmxnet3_tq_tx_complete(&adapter->tx_queue, adapter);
1399         return vmxnet3_rq_rx_complete(&adapter->rx_queue, adapter, budget);
1400 }
1401
1402
1403 static int
1404 vmxnet3_poll(struct napi_struct *napi, int budget)
1405 {
1406         struct vmxnet3_adapter *adapter = container_of(napi,
1407                                           struct vmxnet3_adapter, napi);
1408         int rxd_done;
1409
1410         rxd_done = vmxnet3_do_poll(adapter, budget);
1411
1412         if (rxd_done < budget) {
1413                 napi_complete(napi);
1414                 vmxnet3_enable_intr(adapter, 0);
1415         }
1416         return rxd_done;
1417 }
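
/*
 * vmxnet3_poll() follows the standard NAPI contract: consume at most
 * 'budget' rx descriptors and, if the queue was drained (rxd_done <
 * budget), complete NAPI and re-arm the interrupt. A minimal sketch of
 * the generic pattern, with process_rx() and enable_irq_hw() as
 * hypothetical placeholders:
 *
 *   static int my_poll(struct napi_struct *napi, int budget)
 *   {
 *           int done = process_rx(budget);
 *
 *           if (done < budget) {
 *                   napi_complete(napi);
 *                   enable_irq_hw();
 *           }
 *           return done;
 *   }
 */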
1418
1419
1420 /* Interrupt handler for vmxnet3  */
1421 static irqreturn_t
1422 vmxnet3_intr(int irq, void *dev_id)
1423 {
1424         struct net_device *dev = dev_id;
1425         struct vmxnet3_adapter *adapter = netdev_priv(dev);
1426
1427         if (unlikely(adapter->intr.type == VMXNET3_IT_INTX)) {
1428                 u32 icr = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_ICR);
1429                 if (unlikely(icr == 0))
1430                         /* not ours */
1431                         return IRQ_NONE;
1432         }
1433
1434
1435         /* disable intr if needed */
1436         if (adapter->intr.mask_mode == VMXNET3_IMM_ACTIVE)
1437                 vmxnet3_disable_intr(adapter, 0);
1438
1439         napi_schedule(&adapter->napi);
1440
1441         return IRQ_HANDLED;
1442 }
1443
1444 #ifdef CONFIG_NET_POLL_CONTROLLER
1445
1446
1447 /* netpoll callback. */
1448 static void
1449 vmxnet3_netpoll(struct net_device *netdev)
1450 {
1451         struct vmxnet3_adapter *adapter = netdev_priv(netdev);
1452         int irq;
1453
1454 #ifdef CONFIG_PCI_MSI
1455         if (adapter->intr.type == VMXNET3_IT_MSIX)
1456                 irq = adapter->intr.msix_entries[0].vector;
1457         else
1458 #endif
1459                 irq = adapter->pdev->irq;
1460
1461         disable_irq(irq);
1462         vmxnet3_intr(irq, netdev);
1463         enable_irq(irq);
1464 }
1465 #endif
1466
1467 static int
1468 vmxnet3_request_irqs(struct vmxnet3_adapter *adapter)
1469 {
1470         int err;
1471
1472 #ifdef CONFIG_PCI_MSI
1473         if (adapter->intr.type == VMXNET3_IT_MSIX) {
1474                 /* we only use 1 MSI-X vector */
1475                 err = request_irq(adapter->intr.msix_entries[0].vector,
1476                                   vmxnet3_intr, 0, adapter->netdev->name,
1477                                   adapter->netdev);
1478         } else if (adapter->intr.type == VMXNET3_IT_MSI) {
1479                 err = request_irq(adapter->pdev->irq, vmxnet3_intr, 0,
1480                                   adapter->netdev->name, adapter->netdev);
1481         } else
1482 #endif
1483         {
1484                 err = request_irq(adapter->pdev->irq, vmxnet3_intr,
1485                                   IRQF_SHARED, adapter->netdev->name,
1486                                   adapter->netdev);
1487         }
1488
1489         if (err)
1490                 printk(KERN_ERR "Failed to request irq %s (intr type:%d), "
1491                        "error:%d\n", adapter->netdev->name, adapter->intr.type, err);
1492
1493
1494         if (!err) {
1495                 int i;
1496                 /* init our intr settings */
1497                 for (i = 0; i < adapter->intr.num_intrs; i++)
1498                         adapter->intr.mod_levels[i] = UPT1_IML_ADAPTIVE;
1499
1500                 /* next setup intr index for all intr sources */
1501                 adapter->tx_queue.comp_ring.intr_idx = 0;
1502                 adapter->rx_queue.comp_ring.intr_idx = 0;
1503                 adapter->intr.event_intr_idx = 0;
1504
1505                 printk(KERN_INFO "%s: intr type %u, mode %u, %u vectors "
1506                        "allocated\n", adapter->netdev->name, adapter->intr.type,
1507                        adapter->intr.mask_mode, adapter->intr.num_intrs);
1508         }
1509
1510         return err;
1511 }
1512
1513
1514 static void
1515 vmxnet3_free_irqs(struct vmxnet3_adapter *adapter)
1516 {
1517         BUG_ON(adapter->intr.type == VMXNET3_IT_AUTO ||
1518                adapter->intr.num_intrs <= 0);
1519
1520         switch (adapter->intr.type) {
1521 #ifdef CONFIG_PCI_MSI
1522         case VMXNET3_IT_MSIX:
1523         {
1524                 int i;
1525
1526                 for (i = 0; i < adapter->intr.num_intrs; i++)
1527                         free_irq(adapter->intr.msix_entries[i].vector,
1528                                  adapter->netdev);
1529                 break;
1530         }
1531 #endif
1532         case VMXNET3_IT_MSI:
1533                 free_irq(adapter->pdev->irq, adapter->netdev);
1534                 break;
1535         case VMXNET3_IT_INTX:
1536                 free_irq(adapter->pdev->irq, adapter->netdev);
1537                 break;
1538         default:
1539                 BUG();
1540         }
1541 }
1542
1543
1544 inline void set_flag_le16(__le16 *data, u16 flag)
1545 {
1546         *data = cpu_to_le16(le16_to_cpu(*data) | flag);
1547 }
1548
1549 inline void set_flag_le64(__le64 *data, u64 flag)
1550 {
1551         *data = cpu_to_le64(le64_to_cpu(*data) | flag);
1552 }
1553
1554 inline void reset_flag_le64(__le64 *data, u64 flag)
1555 {
1556         *data = cpu_to_le64(le64_to_cpu(*data) & ~flag);
1557 }
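
/*
 * These helpers exist so read-modify-write on little-endian fields in
 * the shared memory area stays correct on big-endian hosts. A plain
 * "*data |= flag" would OR a CPU-order value into LE storage; e.g. on
 * a big-endian machine:
 *
 *   __le16 v = cpu_to_le16(0);
 *   v |= 0x0001;               // wrong: sets bit 8 of the LE value
 *   set_flag_le16(&v, 0x0001); // right: memory reads 01 00 either way
 */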
1558
1559
1560 static void
1561 vmxnet3_vlan_rx_register(struct net_device *netdev, struct vlan_group *grp)
1562 {
1563         struct vmxnet3_adapter *adapter = netdev_priv(netdev);
1564         struct Vmxnet3_DriverShared *shared = adapter->shared;
1565         u32 *vfTable = adapter->shared->devRead.rxFilterConf.vfTable;
1566
1567         if (grp) {
1568                 /* add vlan rx stripping. */
1569                 if (adapter->netdev->features & NETIF_F_HW_VLAN_RX) {
1570                         int i;
1571                         struct Vmxnet3_DSDevRead *devRead = &shared->devRead;
1572                         adapter->vlan_grp = grp;
1573
1574                         /* update FEATURES to device */
1575                         set_flag_le64(&devRead->misc.uptFeatures,
1576                                       UPT1_F_RXVLAN);
1577                         VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
1578                                                VMXNET3_CMD_UPDATE_FEATURE);
1579                         /*
1580                          *  Clear entire vfTable; then enable untagged pkts.
1581                          *  Note: setting one entry in vfTable to non-zero turns
1582                          *  on VLAN rx filtering.
1583                          */
1584                         for (i = 0; i < VMXNET3_VFT_SIZE; i++)
1585                                 vfTable[i] = 0;
1586
1587                         VMXNET3_SET_VFTABLE_ENTRY(vfTable, 0);
1588                         VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
1589                                                VMXNET3_CMD_UPDATE_VLAN_FILTERS);
1590                 } else {
1591                         printk(KERN_ERR "%s: vlan_rx_register when device has "
1592                                "no NETIF_F_HW_VLAN_RX\n", netdev->name);
1593                 }
1594         } else {
1595                 /* remove vlan rx stripping. */
1596                 struct Vmxnet3_DSDevRead *devRead = &shared->devRead;
1597                 adapter->vlan_grp = NULL;
1598
1599                 if (le64_to_cpu(devRead->misc.uptFeatures) & UPT1_F_RXVLAN) {
1600                         int i;
1601
1602                         for (i = 0; i < VMXNET3_VFT_SIZE; i++) {
1603                                 /* clear entire vfTable; this also disables
1604                                  * VLAN rx filtering
1605                                  */
1606                                 vfTable[i] = 0;
1607                         }
1608                         VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
1609                                                VMXNET3_CMD_UPDATE_VLAN_FILTERS);
1610
1611                         /* update FEATURES to device */
1612                         reset_flag_le64(&devRead->misc.uptFeatures,
1613                                         UPT1_F_RXVLAN);
1614                         VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
1615                                                VMXNET3_CMD_UPDATE_FEATURE);
1616                 }
1617         }
1618 }
1619
1620
1621 static void
1622 vmxnet3_restore_vlan(struct vmxnet3_adapter *adapter)
1623 {
1624         if (adapter->vlan_grp) {
1625                 u16 vid;
1626                 u32 *vfTable = adapter->shared->devRead.rxFilterConf.vfTable;
1627                 bool activeVlan = false;
1628
1629                 for (vid = 0; vid < VLAN_GROUP_ARRAY_LEN; vid++) {
1630                         if (vlan_group_get_device(adapter->vlan_grp, vid)) {
1631                                 VMXNET3_SET_VFTABLE_ENTRY(vfTable, vid);
1632                                 activeVlan = true;
1633                         }
1634                 }
1635                 if (activeVlan) {
1636                         /* continue to allow untagged pkts */
1637                         VMXNET3_SET_VFTABLE_ENTRY(vfTable, 0);
1638                 }
1639         }
1640 }
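
/*
 * vfTable is a VLAN-ID bitmap of VMXNET3_VFT_SIZE u32 words (one bit
 * per VLAN ID, 4096 bits total). Assuming the conventional encoding,
 * the SET/CLEAR macros used here reduce to:
 *
 *   vfTable[vid >> 5] |=  1 << (vid & 31);   // SET_VFTABLE_ENTRY
 *   vfTable[vid >> 5] &= ~(1 << (vid & 31)); // CLEAR_VFTABLE_ENTRY
 *
 * Entry 0 is re-set whenever any VLAN is active so that untagged
 * frames keep passing the filter.
 */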
1641
1642
1643 static void
1644 vmxnet3_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
1645 {
1646         struct vmxnet3_adapter *adapter = netdev_priv(netdev);
1647         u32 *vfTable = adapter->shared->devRead.rxFilterConf.vfTable;
1648
1649         VMXNET3_SET_VFTABLE_ENTRY(vfTable, vid);
1650         VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
1651                                VMXNET3_CMD_UPDATE_VLAN_FILTERS);
1652 }
1653
1654
1655 static void
1656 vmxnet3_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
1657 {
1658         struct vmxnet3_adapter *adapter = netdev_priv(netdev);
1659         u32 *vfTable = adapter->shared->devRead.rxFilterConf.vfTable;
1660
1661         VMXNET3_CLEAR_VFTABLE_ENTRY(vfTable, vid);
1662         VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
1663                                VMXNET3_CMD_UPDATE_VLAN_FILTERS);
1664 }
1665
1666
1667 static u8 *
1668 vmxnet3_copy_mc(struct net_device *netdev)
1669 {
1670         u8 *buf = NULL;
1671         u32 sz = netdev->mc_count * ETH_ALEN;
1672
1673         /* struct Vmxnet3_RxFilterConf.mfTableLen is u16. */
1674         if (sz <= 0xffff) {
1675                 /* We may be called with BH disabled */
1676                 buf = kmalloc(sz, GFP_ATOMIC);
1677                 if (buf) {
1678                         int i;
1679                         struct dev_mc_list *mc = netdev->mc_list;
1680
1681                         for (i = 0; i < netdev->mc_count; i++) {
1682                                 BUG_ON(!mc);
1683                                 memcpy(buf + i * ETH_ALEN, mc->dmi_addr,
1684                                        ETH_ALEN);
1685                                 mc = mc->next;
1686                         }
1687                 }
1688         }
1689         return buf;
1690 }
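
/*
 * The copy above flattens the kernel's mc_list into a contiguous
 * table of 6-byte addresses the device can DMA. Since mfTableLen is a
 * u16, the table tops out at 0xffff bytes, i.e. 0xffff / ETH_ALEN =
 * 10922 multicast addresses; larger lists make this return NULL and
 * vmxnet3_set_mc() falls back to ALL_MULTI.
 */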
1691
1692
1693 static void
1694 vmxnet3_set_mc(struct net_device *netdev)
1695 {
1696         struct vmxnet3_adapter *adapter = netdev_priv(netdev);
1697         struct Vmxnet3_RxFilterConf *rxConf =
1698                                         &adapter->shared->devRead.rxFilterConf;
1699         u8 *new_table = NULL;
1700         u32 new_mode = VMXNET3_RXM_UCAST;
1701
1702         if (netdev->flags & IFF_PROMISC)
1703                 new_mode |= VMXNET3_RXM_PROMISC;
1704
1705         if (netdev->flags & IFF_BROADCAST)
1706                 new_mode |= VMXNET3_RXM_BCAST;
1707
1708         if (netdev->flags & IFF_ALLMULTI)
1709                 new_mode |= VMXNET3_RXM_ALL_MULTI;
1710         else
1711                 if (netdev->mc_count > 0) {
1712                         new_table = vmxnet3_copy_mc(netdev);
1713                         if (new_table) {
1714                                 new_mode |= VMXNET3_RXM_MCAST;
1715                                 rxConf->mfTableLen = cpu_to_le16(
1716                                                 netdev->mc_count * ETH_ALEN);
1717                                 rxConf->mfTablePA = cpu_to_le64(virt_to_phys(
1718                                                     new_table));
1719                         } else {
1720                                 printk(KERN_INFO "%s: failed to copy mcast list"
1721                                        ", setting ALL_MULTI\n", netdev->name);
1722                                 new_mode |= VMXNET3_RXM_ALL_MULTI;
1723                         }
1724                 }
1725
1726
1727         if (!(new_mode & VMXNET3_RXM_MCAST)) {
1728                 rxConf->mfTableLen = 0;
1729                 rxConf->mfTablePA = 0;
1730         }
1731
1732         if (new_mode != le32_to_cpu(rxConf->rxMode)) {
1733                 rxConf->rxMode = cpu_to_le32(new_mode);
1734                 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
1735                                        VMXNET3_CMD_UPDATE_RX_MODE);
1736         }
1737
1738         VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
1739                                VMXNET3_CMD_UPDATE_MAC_FILTERS);
1740
1741         kfree(new_table);
1742 }
1743
1744
1745 /*
1746  *   Set up driver_shared based on settings in adapter.
1747  */
1748
1749 static void
1750 vmxnet3_setup_driver_shared(struct vmxnet3_adapter *adapter)
1751 {
1752         struct Vmxnet3_DriverShared *shared = adapter->shared;
1753         struct Vmxnet3_DSDevRead *devRead = &shared->devRead;
1754         struct Vmxnet3_TxQueueConf *tqc;
1755         struct Vmxnet3_RxQueueConf *rqc;
1756         int i;
1757
1758         memset(shared, 0, sizeof(*shared));
1759
1760         /* driver settings */
1761         shared->magic = cpu_to_le32(VMXNET3_REV1_MAGIC);
1762         devRead->misc.driverInfo.version = cpu_to_le32(
1763                                                 VMXNET3_DRIVER_VERSION_NUM);
1764         devRead->misc.driverInfo.gos.gosBits = (sizeof(void *) == 4 ?
1765                                 VMXNET3_GOS_BITS_32 : VMXNET3_GOS_BITS_64);
1766         devRead->misc.driverInfo.gos.gosType = VMXNET3_GOS_TYPE_LINUX;
1767         *((u32 *)&devRead->misc.driverInfo.gos) = cpu_to_le32(
1768                                 *((u32 *)&devRead->misc.driverInfo.gos));
1769         devRead->misc.driverInfo.vmxnet3RevSpt = cpu_to_le32(1);
1770         devRead->misc.driverInfo.uptVerSpt = cpu_to_le32(1);
1771
1772         devRead->misc.ddPA = cpu_to_le64(virt_to_phys(adapter));
1773         devRead->misc.ddLen = cpu_to_le32(sizeof(struct vmxnet3_adapter));
1774
1775         /* set up feature flags */
1776         if (adapter->rxcsum)
1777                 set_flag_le64(&devRead->misc.uptFeatures, UPT1_F_RXCSUM);
1778
1779         if (adapter->lro) {
1780                 set_flag_le64(&devRead->misc.uptFeatures, UPT1_F_LRO);
1781                 devRead->misc.maxNumRxSG = cpu_to_le16(1 + MAX_SKB_FRAGS);
1782         }
1783         if ((adapter->netdev->features & NETIF_F_HW_VLAN_RX) &&
1784             adapter->vlan_grp) {
1785                 set_flag_le64(&devRead->misc.uptFeatures, UPT1_F_RXVLAN);
1786         }
1787
1788         devRead->misc.mtu = cpu_to_le32(adapter->netdev->mtu);
1789         devRead->misc.queueDescPA = cpu_to_le64(adapter->queue_desc_pa);
1790         devRead->misc.queueDescLen = cpu_to_le32(
1791                                      sizeof(struct Vmxnet3_TxQueueDesc) +
1792                                      sizeof(struct Vmxnet3_RxQueueDesc));
1793
1794         /* tx queue settings */
1795         BUG_ON(adapter->tx_queue.tx_ring.base == NULL);
1796
1797         devRead->misc.numTxQueues = 1;
1798         tqc = &adapter->tqd_start->conf;
1799         tqc->txRingBasePA   = cpu_to_le64(adapter->tx_queue.tx_ring.basePA);
1800         tqc->dataRingBasePA = cpu_to_le64(adapter->tx_queue.data_ring.basePA);
1801         tqc->compRingBasePA = cpu_to_le64(adapter->tx_queue.comp_ring.basePA);
1802         tqc->ddPA           = cpu_to_le64(virt_to_phys(
1803                                                 adapter->tx_queue.buf_info));
1804         tqc->txRingSize     = cpu_to_le32(adapter->tx_queue.tx_ring.size);
1805         tqc->dataRingSize   = cpu_to_le32(adapter->tx_queue.data_ring.size);
1806         tqc->compRingSize   = cpu_to_le32(adapter->tx_queue.comp_ring.size);
1807         tqc->ddLen          = cpu_to_le32(sizeof(struct vmxnet3_tx_buf_info) *
1808                               tqc->txRingSize);
1809         tqc->intrIdx        = adapter->tx_queue.comp_ring.intr_idx;
1810
1811         /* rx queue settings */
1812         devRead->misc.numRxQueues = 1;
1813         rqc = &adapter->rqd_start->conf;
1814         rqc->rxRingBasePA[0] = cpu_to_le64(adapter->rx_queue.rx_ring[0].basePA);
1815         rqc->rxRingBasePA[1] = cpu_to_le64(adapter->rx_queue.rx_ring[1].basePA);
1816         rqc->compRingBasePA  = cpu_to_le64(adapter->rx_queue.comp_ring.basePA);
1817         rqc->ddPA            = cpu_to_le64(virt_to_phys(
1818                                                 adapter->rx_queue.buf_info));
1819         rqc->rxRingSize[0]   = cpu_to_le32(adapter->rx_queue.rx_ring[0].size);
1820         rqc->rxRingSize[1]   = cpu_to_le32(adapter->rx_queue.rx_ring[1].size);
1821         rqc->compRingSize    = cpu_to_le32(adapter->rx_queue.comp_ring.size);
1822         rqc->ddLen           = cpu_to_le32(sizeof(struct vmxnet3_rx_buf_info) *
1823                                (rqc->rxRingSize[0] + rqc->rxRingSize[1]));
1824         rqc->intrIdx         = adapter->rx_queue.comp_ring.intr_idx;
1825
1826         /* intr settings */
1827         devRead->intrConf.autoMask = adapter->intr.mask_mode ==
1828                                      VMXNET3_IMM_AUTO;
1829         devRead->intrConf.numIntrs = adapter->intr.num_intrs;
1830         for (i = 0; i < adapter->intr.num_intrs; i++)
1831                 devRead->intrConf.modLevels[i] = adapter->intr.mod_levels[i];
1832
1833         devRead->intrConf.eventIntrIdx = adapter->intr.event_intr_idx;
1834
1835         /* rx filter settings */
1836         devRead->rxFilterConf.rxMode = 0;
1837         vmxnet3_restore_vlan(adapter);
1838         /* the rest are already zeroed */
1839 }
1840
1841
1842 int
1843 vmxnet3_activate_dev(struct vmxnet3_adapter *adapter)
1844 {
1845         int err;
1846         u32 ret;
1847
1848         dev_dbg(&adapter->netdev->dev,
1849                 "%s: skb_buf_size %d, rx_buf_per_pkt %d, ring sizes"
1850                 " %u %u %u\n", adapter->netdev->name, adapter->skb_buf_size,
1851                 adapter->rx_buf_per_pkt, adapter->tx_queue.tx_ring.size,
1852                 adapter->rx_queue.rx_ring[0].size,
1853                 adapter->rx_queue.rx_ring[1].size);
1854
1855         vmxnet3_tq_init(&adapter->tx_queue, adapter);
1856         err = vmxnet3_rq_init(&adapter->rx_queue, adapter);
1857         if (err) {
1858                 printk(KERN_ERR "Failed to init rx queue for %s: error %d\n",
1859                        adapter->netdev->name, err);
1860                 goto rq_err;
1861         }
1862
1863         err = vmxnet3_request_irqs(adapter);
1864         if (err) {
1865                 printk(KERN_ERR "Failed to setup irq for %s: error %d\n",
1866                        adapter->netdev->name, err);
1867                 goto irq_err;
1868         }
1869
1870         vmxnet3_setup_driver_shared(adapter);
1871
1872         VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_DSAL, VMXNET3_GET_ADDR_LO(
1873                                adapter->shared_pa));
1874         VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_DSAH, VMXNET3_GET_ADDR_HI(
1875                                adapter->shared_pa));
1876         VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
1877                                VMXNET3_CMD_ACTIVATE_DEV);
1878         ret = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD);
1879
1880         if (ret != 0) {
1881                 printk(KERN_ERR "Failed to activate dev %s: error %u\n",
1882                        adapter->netdev->name, ret);
1883                 err = -EINVAL;
1884                 goto activate_err;
1885         }
1886         VMXNET3_WRITE_BAR0_REG(adapter, VMXNET3_REG_RXPROD,
1887                                adapter->rx_queue.rx_ring[0].next2fill);
1888         VMXNET3_WRITE_BAR0_REG(adapter, VMXNET3_REG_RXPROD2,
1889                                adapter->rx_queue.rx_ring[1].next2fill);
1890
1891         /* Apply the rx filter settings last. */
1892         vmxnet3_set_mc(adapter->netdev);
1893
1894         /*
1895          * Check link state when first activating device. It will start the
1896          * tx queue if the link is up.
1897          */
1898         vmxnet3_check_link(adapter);
1899
1900         napi_enable(&adapter->napi);
1901         vmxnet3_enable_all_intrs(adapter);
1902         clear_bit(VMXNET3_STATE_BIT_QUIESCED, &adapter->state);
1903         return 0;
1904
1905 activate_err:
1906         VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_DSAL, 0);
1907         VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_DSAH, 0);
1908         vmxnet3_free_irqs(adapter);
1909 irq_err:
1910 rq_err:
1911         /* free up buffers we allocated */
1912         vmxnet3_rq_cleanup(&adapter->rx_queue, adapter);
1913         return err;
1914 }
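
/*
 * Activation handshake summary: the driver hands the device the
 * physical address of driver_shared (DSAL/DSAH), then drives commands
 * through the BAR1 CMD register and reads the same register back for
 * the result (0 means success). The generic command pattern is simply:
 *
 *   VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, cmd);
 *   ret = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD);
 */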
1915
1916
1917 void
1918 vmxnet3_reset_dev(struct vmxnet3_adapter *adapter)
1919 {
1920         VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, VMXNET3_CMD_RESET_DEV);
1921 }
1922
1923
1924 int
1925 vmxnet3_quiesce_dev(struct vmxnet3_adapter *adapter)
1926 {
1927         if (test_and_set_bit(VMXNET3_STATE_BIT_QUIESCED, &adapter->state))
1928                 return 0;
1929
1930
1931         VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
1932                                VMXNET3_CMD_QUIESCE_DEV);
1933         vmxnet3_disable_all_intrs(adapter);
1934
1935         napi_disable(&adapter->napi);
1936         netif_tx_disable(adapter->netdev);
1937         adapter->link_speed = 0;
1938         netif_carrier_off(adapter->netdev);
1939
1940         vmxnet3_tq_cleanup(&adapter->tx_queue, adapter);
1941         vmxnet3_rq_cleanup(&adapter->rx_queue, adapter);
1942         vmxnet3_free_irqs(adapter);
1943         return 0;
1944 }
1945
1946
1947 static void
1948 vmxnet3_write_mac_addr(struct vmxnet3_adapter *adapter, u8 *mac)
1949 {
1950         u32 tmp;
1951
1952         tmp = *(u32 *)mac;
1953         VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_MACL, tmp);
1954
1955         tmp = (mac[5] << 8) | mac[4];
1956         VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_MACH, tmp);
1957 }
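
/*
 * The MAC is programmed as two registers: MACL takes bytes 0-3 in one
 * 32-bit write, MACH takes bytes 4-5 in its low half. Worked example
 * for 00:0c:29:aa:bb:cc on a little-endian host:
 *
 *   MACL = 0xaa290c00;   // mac[3] mac[2] mac[1] mac[0]
 *   MACH = 0x0000ccbb;   // mac[5] << 8 | mac[4]
 */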
1958
1959
1960 static int
1961 vmxnet3_set_mac_addr(struct net_device *netdev, void *p)
1962 {
1963         struct sockaddr *addr = p;
1964         struct vmxnet3_adapter *adapter = netdev_priv(netdev);
1965
1966         memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
1967         vmxnet3_write_mac_addr(adapter, addr->sa_data);
1968
1969         return 0;
1970 }
1971
1972
1973 /* ==================== initialization and cleanup routines ============ */
1974
1975 static int
1976 vmxnet3_alloc_pci_resources(struct vmxnet3_adapter *adapter, bool *dma64)
1977 {
1978         int err;
1979         unsigned long mmio_start, mmio_len;
1980         struct pci_dev *pdev = adapter->pdev;
1981
1982         err = pci_enable_device(pdev);
1983         if (err) {
1984                 printk(KERN_ERR "Failed to enable adapter %s: error %d\n",
1985                        pci_name(pdev), err);
1986                 return err;
1987         }
1988
1989         if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) == 0) {
1990                 if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)) != 0) {
1991                         printk(KERN_ERR "pci_set_consistent_dma_mask failed "
1992                                "for adapter %s\n", pci_name(pdev));
1993                         err = -EIO;
1994                         goto err_set_mask;
1995                 }
1996                 *dma64 = true;
1997         } else {
1998                 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) {
1999                         printk(KERN_ERR "pci_set_dma_mask failed for adapter "
2000                                "%s\n",  pci_name(pdev));
2001                         err = -EIO;
2002                         goto err_set_mask;
2003                 }
2004                 *dma64 = false;
2005         }
2006
2007         err = pci_request_selected_regions(pdev, (1 << 2) - 1,
2008                                            vmxnet3_driver_name);
2009         if (err) {
2010                 printk(KERN_ERR "Failed to request region for adapter %s: "
2011                        "error %d\n", pci_name(pdev), err);
2012                 goto err_set_mask;
2013         }
2014
2015         pci_set_master(pdev);
2016
2017         mmio_start = pci_resource_start(pdev, 0);
2018         mmio_len = pci_resource_len(pdev, 0);
2019         adapter->hw_addr0 = ioremap(mmio_start, mmio_len);
2020         if (!adapter->hw_addr0) {
2021                 printk(KERN_ERR "Failed to map bar0 for adapter %s\n",
2022                        pci_name(pdev));
2023                 err = -EIO;
2024                 goto err_ioremap;
2025         }
2026
2027         mmio_start = pci_resource_start(pdev, 1);
2028         mmio_len = pci_resource_len(pdev, 1);
2029         adapter->hw_addr1 = ioremap(mmio_start, mmio_len);
2030         if (!adapter->hw_addr1) {
2031                 printk(KERN_ERR "Failed to map bar1 for adapter %s\n",
2032                        pci_name(pdev));
2033                 err = -EIO;
2034                 goto err_bar1;
2035         }
2036         return 0;
2037
2038 err_bar1:
2039         iounmap(adapter->hw_addr0);
2040 err_ioremap:
2041         pci_release_selected_regions(pdev, (1 << 2) - 1);
2042 err_set_mask:
2043         pci_disable_device(pdev);
2044         return err;
2045 }
2046
2047
2048 static void
2049 vmxnet3_free_pci_resources(struct vmxnet3_adapter *adapter)
2050 {
2051         BUG_ON(!adapter->pdev);
2052
2053         iounmap(adapter->hw_addr0);
2054         iounmap(adapter->hw_addr1);
2055         pci_release_selected_regions(adapter->pdev, (1 << 2) - 1);
2056         pci_disable_device(adapter->pdev);
2057 }
2058
2059
2060 static void
2061 vmxnet3_adjust_rx_ring_size(struct vmxnet3_adapter *adapter)
2062 {
2063         size_t sz;
2064
2065         if (adapter->netdev->mtu <= VMXNET3_MAX_SKB_BUF_SIZE -
2066                                     VMXNET3_MAX_ETH_HDR_SIZE) {
2067                 adapter->skb_buf_size = adapter->netdev->mtu +
2068                                         VMXNET3_MAX_ETH_HDR_SIZE;
2069                 if (adapter->skb_buf_size < VMXNET3_MIN_T0_BUF_SIZE)
2070                         adapter->skb_buf_size = VMXNET3_MIN_T0_BUF_SIZE;
2071
2072                 adapter->rx_buf_per_pkt = 1;
2073         } else {
2074                 adapter->skb_buf_size = VMXNET3_MAX_SKB_BUF_SIZE;
2075                 sz = adapter->netdev->mtu - VMXNET3_MAX_SKB_BUF_SIZE +
2076                                             VMXNET3_MAX_ETH_HDR_SIZE;
2077                 adapter->rx_buf_per_pkt = 1 + (sz + PAGE_SIZE - 1) / PAGE_SIZE;
2078         }
2079
2080         /*
2081          * for simplicity, force the ring0 size to be a multiple of
2082          * rx_buf_per_pkt * VMXNET3_RING_SIZE_ALIGN
2083          */
2084         sz = adapter->rx_buf_per_pkt * VMXNET3_RING_SIZE_ALIGN;
2085         adapter->rx_queue.rx_ring[0].size = (adapter->rx_queue.rx_ring[0].size +
2086                                              sz - 1) / sz * sz;
2087         adapter->rx_queue.rx_ring[0].size = min_t(u32,
2088                                             adapter->rx_queue.rx_ring[0].size,
2089                                             VMXNET3_RX_RING_MAX_SIZE / sz * sz);
2090 }
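
/*
 * Worked example of the ring0 rounding above, assuming
 * VMXNET3_RING_SIZE_ALIGN == 32 and rx_buf_per_pkt == 3 (so sz == 96):
 * a requested size of 256 becomes (256 + 95) / 96 * 96 == 288, the
 * next multiple of 96; the min_t() then caps it at the largest
 * multiple of 96 that fits VMXNET3_RX_RING_MAX_SIZE.
 */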
2091
2092
2093 int
2094 vmxnet3_create_queues(struct vmxnet3_adapter *adapter, u32 tx_ring_size,
2095                       u32 rx_ring_size, u32 rx_ring2_size)
2096 {
2097         int err;
2098
2099         adapter->tx_queue.tx_ring.size   = tx_ring_size;
2100         adapter->tx_queue.data_ring.size = tx_ring_size;
2101         adapter->tx_queue.comp_ring.size = tx_ring_size;
2102         adapter->tx_queue.shared = &adapter->tqd_start->ctrl;
2103         adapter->tx_queue.stopped = true;
2104         err = vmxnet3_tq_create(&adapter->tx_queue, adapter);
2105         if (err)
2106                 return err;
2107
2108         adapter->rx_queue.rx_ring[0].size = rx_ring_size;
2109         adapter->rx_queue.rx_ring[1].size = rx_ring2_size;
2110         vmxnet3_adjust_rx_ring_size(adapter);
2111         adapter->rx_queue.comp_ring.size  = adapter->rx_queue.rx_ring[0].size +
2112                                             adapter->rx_queue.rx_ring[1].size;
2113         adapter->rx_queue.qid  = 0;
2114         adapter->rx_queue.qid2 = 1;
2115         adapter->rx_queue.shared = &adapter->rqd_start->ctrl;
2116         err = vmxnet3_rq_create(&adapter->rx_queue, adapter);
2117         if (err)
2118                 vmxnet3_tq_destroy(&adapter->tx_queue, adapter);
2119
2120         return err;
2121 }
2122
2123 static int
2124 vmxnet3_open(struct net_device *netdev)
2125 {
2126         struct vmxnet3_adapter *adapter;
2127         int err;
2128
2129         adapter = netdev_priv(netdev);
2130
2131         spin_lock_init(&adapter->tx_queue.tx_lock);
2132
2133         err = vmxnet3_create_queues(adapter, VMXNET3_DEF_TX_RING_SIZE,
2134                                     VMXNET3_DEF_RX_RING_SIZE,
2135                                     VMXNET3_DEF_RX_RING_SIZE);
2136         if (err)
2137                 goto queue_err;
2138
2139         err = vmxnet3_activate_dev(adapter);
2140         if (err)
2141                 goto activate_err;
2142
2143         return 0;
2144
2145 activate_err:
2146         vmxnet3_rq_destroy(&adapter->rx_queue, adapter);
2147         vmxnet3_tq_destroy(&adapter->tx_queue, adapter);
2148 queue_err:
2149         return err;
2150 }
2151
2152
2153 static int
2154 vmxnet3_close(struct net_device *netdev)
2155 {
2156         struct vmxnet3_adapter *adapter = netdev_priv(netdev);
2157
2158         /*
2159          * Reset_work may be in the middle of resetting the device, wait for its
2160          * completion.
2161          */
2162         while (test_and_set_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state))
2163                 msleep(1);
2164
2165         vmxnet3_quiesce_dev(adapter);
2166
2167         vmxnet3_rq_destroy(&adapter->rx_queue, adapter);
2168         vmxnet3_tq_destroy(&adapter->tx_queue, adapter);
2169
2170         clear_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state);
2171
2172
2173         return 0;
2174 }
2175
2176
2177 void
2178 vmxnet3_force_close(struct vmxnet3_adapter *adapter)
2179 {
2180         /*
2181          * we must clear VMXNET3_STATE_BIT_RESETTING, otherwise
2182          * vmxnet3_close() will deadlock.
2183          */
2184         BUG_ON(test_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state));
2185
2186         /* we need to enable NAPI, otherwise dev_close will deadlock */
2187         napi_enable(&adapter->napi);
2188         dev_close(adapter->netdev);
2189 }
2190
2191
2192 static int
2193 vmxnet3_change_mtu(struct net_device *netdev, int new_mtu)
2194 {
2195         struct vmxnet3_adapter *adapter = netdev_priv(netdev);
2196         int err = 0;
2197
2198         if (new_mtu < VMXNET3_MIN_MTU || new_mtu > VMXNET3_MAX_MTU)
2199                 return -EINVAL;
2200
2201         if (new_mtu > 1500 && !adapter->jumbo_frame)
2202                 return -EINVAL;
2203
2204         netdev->mtu = new_mtu;
2205
2206         /*
2207          * Reset_work may be in the middle of resetting the device, wait for its
2208          * completion.
2209          */
2210         while (test_and_set_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state))
2211                 msleep(1);
2212
2213         if (netif_running(netdev)) {
2214                 vmxnet3_quiesce_dev(adapter);
2215                 vmxnet3_reset_dev(adapter);
2216
2217                 /* we need to re-create the rx queue based on the new mtu */
2218                 vmxnet3_rq_destroy(&adapter->rx_queue, adapter);
2219                 vmxnet3_adjust_rx_ring_size(adapter);
2220                 adapter->rx_queue.comp_ring.size  =
2221                                         adapter->rx_queue.rx_ring[0].size +
2222                                         adapter->rx_queue.rx_ring[1].size;
2223                 err = vmxnet3_rq_create(&adapter->rx_queue, adapter);
2224                 if (err) {
2225                         printk(KERN_ERR "%s: failed to re-create rx queue,"
2226                                 " error %d. Closing it.\n", netdev->name, err);
2227                         goto out;
2228                 }
2229
2230                 err = vmxnet3_activate_dev(adapter);
2231                 if (err) {
2232                         printk(KERN_ERR "%s: failed to re-activate, error %d. "
2233                                 "Closing it\n", netdev->name, err);
2234                         goto out;
2235                 }
2236         }
2237
2238 out:
2239         clear_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state);
2240         if (err)
2241                 vmxnet3_force_close(adapter);
2242
2243         return err;
2244 }
2245
2246
2247 static void
2248 vmxnet3_declare_features(struct vmxnet3_adapter *adapter, bool dma64)
2249 {
2250         struct net_device *netdev = adapter->netdev;
2251
2252         netdev->features = NETIF_F_SG |
2253                 NETIF_F_HW_CSUM |
2254                 NETIF_F_HW_VLAN_TX |
2255                 NETIF_F_HW_VLAN_RX |
2256                 NETIF_F_HW_VLAN_FILTER |
2257                 NETIF_F_TSO |
2258                 NETIF_F_TSO6 |
2259                 NETIF_F_LRO;
2260
2261         printk(KERN_INFO "features: sg csum vlan jf tso tsoIPv6 lro");
2262
2263         adapter->rxcsum = true;
2264         adapter->jumbo_frame = true;
2265         adapter->lro = true;
2266
2267         if (dma64) {
2268                 netdev->features |= NETIF_F_HIGHDMA;
2269                 printk(" highDMA");
2270         }
2271
2272         netdev->vlan_features = netdev->features;
2273         printk("\n");
2274 }
2275
2276
2277 static void
2278 vmxnet3_read_mac_addr(struct vmxnet3_adapter *adapter, u8 *mac)
2279 {
2280         u32 tmp;
2281
2282         tmp = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_MACL);
2283         *(u32 *)mac = tmp;
2284
2285         tmp = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_MACH);
2286         mac[4] = tmp & 0xff;
2287         mac[5] = (tmp >> 8) & 0xff;
2288 }
2289
2290
2291 static void
2292 vmxnet3_alloc_intr_resources(struct vmxnet3_adapter *adapter)
2293 {
2294         u32 cfg;
2295
2296         /* intr settings */
2297         VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
2298                                VMXNET3_CMD_GET_CONF_INTR);
2299         cfg = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD);
2300         adapter->intr.type = cfg & 0x3;
2301         adapter->intr.mask_mode = (cfg >> 2) & 0x3;
2302
2303         if (adapter->intr.type == VMXNET3_IT_AUTO) {
2304                 int err;
2305
2306 #ifdef CONFIG_PCI_MSI
2307                 adapter->intr.msix_entries[0].entry = 0;
2308                 err = pci_enable_msix(adapter->pdev, adapter->intr.msix_entries,
2309                                       VMXNET3_LINUX_MAX_MSIX_VECT);
2310                 if (!err) {
2311                         adapter->intr.num_intrs = 1;
2312                         adapter->intr.type = VMXNET3_IT_MSIX;
2313                         return;
2314                 }
2315 #endif
2316
2317                 err = pci_enable_msi(adapter->pdev);
2318                 if (!err) {
2319                         adapter->intr.num_intrs = 1;
2320                         adapter->intr.type = VMXNET3_IT_MSI;
2321                         return;
2322                 }
2323         }
2324
2325         adapter->intr.type = VMXNET3_IT_INTX;
2326
2327         /* INT-X related setting */
2328         adapter->intr.num_intrs = 1;
2329 }
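
/*
 * The CONF_INTR word read above packs the device's suggestion into
 * the low nibble: bits [1:0] select the interrupt type and bits [3:2]
 * the mask mode. With VMXNET3_IT_AUTO the driver walks the ladder
 * MSI-X -> MSI -> INTx, e.g. (values illustrative):
 *
 *   cfg = 0x4;
 *   type      = cfg & 0x3;        // 0 == AUTO
 *   mask_mode = (cfg >> 2) & 0x3; // 1
 */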
2330
2331
2332 static void
2333 vmxnet3_free_intr_resources(struct vmxnet3_adapter *adapter)
2334 {
2335         if (adapter->intr.type == VMXNET3_IT_MSIX)
2336                 pci_disable_msix(adapter->pdev);
2337         else if (adapter->intr.type == VMXNET3_IT_MSI)
2338                 pci_disable_msi(adapter->pdev);
2339         else
2340                 BUG_ON(adapter->intr.type != VMXNET3_IT_INTX);
2341 }
2342
2343
2344 static void
2345 vmxnet3_tx_timeout(struct net_device *netdev)
2346 {
2347         struct vmxnet3_adapter *adapter = netdev_priv(netdev);
2348         adapter->tx_timeout_count++;
2349
2350         printk(KERN_ERR "%s: tx hang\n", adapter->netdev->name);
2351         schedule_work(&adapter->work);
2352 }
2353
2354
2355 static void
2356 vmxnet3_reset_work(struct work_struct *data)
2357 {
2358         struct vmxnet3_adapter *adapter;
2359
2360         adapter = container_of(data, struct vmxnet3_adapter, work);
2361
2362         /* if another thread is resetting the device, no need to proceed */
2363         if (test_and_set_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state))
2364                 return;
2365
2366         /* if the device is closed, we must leave it alone */
2367         if (netif_running(adapter->netdev)) {
2368                 printk(KERN_INFO "%s: resetting\n", adapter->netdev->name);
2369                 vmxnet3_quiesce_dev(adapter);
2370                 vmxnet3_reset_dev(adapter);
2371                 vmxnet3_activate_dev(adapter);
2372         } else {
2373                 printk(KERN_INFO "%s: already closed\n", adapter->netdev->name);
2374         }
2375
2376         clear_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state);
2377 }
2378
2379
2380 static int __devinit
2381 vmxnet3_probe_device(struct pci_dev *pdev,
2382                      const struct pci_device_id *id)
2383 {
2384         static const struct net_device_ops vmxnet3_netdev_ops = {
2385                 .ndo_open = vmxnet3_open,
2386                 .ndo_stop = vmxnet3_close,
2387                 .ndo_start_xmit = vmxnet3_xmit_frame,
2388                 .ndo_set_mac_address = vmxnet3_set_mac_addr,
2389                 .ndo_change_mtu = vmxnet3_change_mtu,
2390                 .ndo_get_stats = vmxnet3_get_stats,
2391                 .ndo_tx_timeout = vmxnet3_tx_timeout,
2392                 .ndo_set_multicast_list = vmxnet3_set_mc,
2393                 .ndo_vlan_rx_register = vmxnet3_vlan_rx_register,
2394                 .ndo_vlan_rx_add_vid = vmxnet3_vlan_rx_add_vid,
2395                 .ndo_vlan_rx_kill_vid = vmxnet3_vlan_rx_kill_vid,
2396 #ifdef CONFIG_NET_POLL_CONTROLLER
2397                 .ndo_poll_controller = vmxnet3_netpoll,
2398 #endif
2399         };
2400         int err;
2401         bool dma64 = false; /* silence an uninitialized-use warning */
2402         u32 ver;
2403         struct net_device *netdev;
2404         struct vmxnet3_adapter *adapter;
2405         u8 mac[ETH_ALEN];
2406
2407         netdev = alloc_etherdev(sizeof(struct vmxnet3_adapter));
2408         if (!netdev) {
2409                 printk(KERN_ERR "Failed to alloc ethernet device for adapter "
2410                         "%s\n", pci_name(pdev));
2411                 return -ENOMEM;
2412         }
2413
2414         pci_set_drvdata(pdev, netdev);
2415         adapter = netdev_priv(netdev);
2416         adapter->netdev = netdev;
2417         adapter->pdev = pdev;
2418
2419         adapter->shared = pci_alloc_consistent(adapter->pdev,
2420                           sizeof(struct Vmxnet3_DriverShared),
2421                           &adapter->shared_pa);
2422         if (!adapter->shared) {
2423                 printk(KERN_ERR "Failed to allocate memory for %s\n",
2424                         pci_name(pdev));
2425                 err = -ENOMEM;
2426                 goto err_alloc_shared;
2427         }
2428
2429         adapter->tqd_start = pci_alloc_consistent(adapter->pdev,
2430                              sizeof(struct Vmxnet3_TxQueueDesc) +
2431                              sizeof(struct Vmxnet3_RxQueueDesc),
2432                              &adapter->queue_desc_pa);
2433
2434         if (!adapter->tqd_start) {
2435                 printk(KERN_ERR "Failed to allocate memory for %s\n",
2436                         pci_name(pdev));
2437                 err = -ENOMEM;
2438                 goto err_alloc_queue_desc;
2439         }
2440         adapter->rqd_start = (struct Vmxnet3_RxQueueDesc *)(adapter->tqd_start
2441                                                             + 1);
2442
2443         adapter->pm_conf = kmalloc(sizeof(struct Vmxnet3_PMConf), GFP_KERNEL);
2444         if (adapter->pm_conf == NULL) {
2445                 printk(KERN_ERR "Failed to allocate memory for %s\n",
2446                         pci_name(pdev));
2447                 err = -ENOMEM;
2448                 goto err_alloc_pm;
2449         }
2450
2451         err = vmxnet3_alloc_pci_resources(adapter, &dma64);
2452         if (err < 0)
2453                 goto err_alloc_pci;
2454
2455         ver = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_VRRS);
2456         if (ver & 1) {
2457                 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_VRRS, 1);
2458         } else {
2459                 printk(KERN_ERR "Incompatible h/w version (0x%x) for adapter"
2460                        " %s\n", ver, pci_name(pdev));
2461                 err = -EBUSY;
2462                 goto err_ver;
2463         }
2464
2465         ver = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_UVRS);
2466         if (ver & 1) {
2467                 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_UVRS, 1);
2468         } else {
2469                 printk(KERN_ERR "Incompatible upt version (0x%x) for "
2470                        "adapter %s\n", ver, pci_name(pdev));
2471                 err = -EBUSY;
2472                 goto err_ver;
2473         }
2474
2475         vmxnet3_declare_features(adapter, dma64);
2476
2477         adapter->dev_number = atomic_read(&devices_found);
2478         vmxnet3_alloc_intr_resources(adapter);
2479
2480         vmxnet3_read_mac_addr(adapter, mac);
2481         memcpy(netdev->dev_addr,  mac, netdev->addr_len);
2482
2483         netdev->netdev_ops = &vmxnet3_netdev_ops;
2484         netdev->watchdog_timeo = 5 * HZ;
2485         vmxnet3_set_ethtool_ops(netdev);
2486
2487         INIT_WORK(&adapter->work, vmxnet3_reset_work);
2488
2489         netif_napi_add(netdev, &adapter->napi, vmxnet3_poll, 64);
2490         SET_NETDEV_DEV(netdev, &pdev->dev);
2491         err = register_netdev(netdev);
2492
2493         if (err) {
2494                 printk(KERN_ERR "Failed to register adapter %s\n",
2495                         pci_name(pdev));
2496                 goto err_register;
2497         }
2498
2499         set_bit(VMXNET3_STATE_BIT_QUIESCED, &adapter->state);
2500         atomic_inc(&devices_found);
2501         return 0;
2502
2503 err_register:
2504         vmxnet3_free_intr_resources(adapter);
2505 err_ver:
2506         vmxnet3_free_pci_resources(adapter);
2507 err_alloc_pci:
2508         kfree(adapter->pm_conf);
2509 err_alloc_pm:
2510         pci_free_consistent(adapter->pdev, sizeof(struct Vmxnet3_TxQueueDesc) +
2511                             sizeof(struct Vmxnet3_RxQueueDesc),
2512                             adapter->tqd_start, adapter->queue_desc_pa);
2513 err_alloc_queue_desc:
2514         pci_free_consistent(adapter->pdev, sizeof(struct Vmxnet3_DriverShared),
2515                             adapter->shared, adapter->shared_pa);
2516 err_alloc_shared:
2517         pci_set_drvdata(pdev, NULL);
2518         free_netdev(netdev);
2519         return err;
2520 }
2521
2522
2523 static void __devexit
2524 vmxnet3_remove_device(struct pci_dev *pdev)
2525 {
2526         struct net_device *netdev = pci_get_drvdata(pdev);
2527         struct vmxnet3_adapter *adapter = netdev_priv(netdev);
2528
2529         flush_scheduled_work();
2530
2531         unregister_netdev(netdev);
2532
2533         vmxnet3_free_intr_resources(adapter);
2534         vmxnet3_free_pci_resources(adapter);
2535         kfree(adapter->pm_conf);
2536         pci_free_consistent(adapter->pdev, sizeof(struct Vmxnet3_TxQueueDesc) +
2537                             sizeof(struct Vmxnet3_RxQueueDesc),
2538                             adapter->tqd_start, adapter->queue_desc_pa);
2539         pci_free_consistent(adapter->pdev, sizeof(struct Vmxnet3_DriverShared),
2540                             adapter->shared, adapter->shared_pa);
2541         free_netdev(netdev);
2542 }
2543
2544
2545 #ifdef CONFIG_PM
2546
2547 static int
2548 vmxnet3_suspend(struct device *device)
2549 {
2550         struct pci_dev *pdev = to_pci_dev(device);
2551         struct net_device *netdev = pci_get_drvdata(pdev);
2552         struct vmxnet3_adapter *adapter = netdev_priv(netdev);
2553         struct Vmxnet3_PMConf *pmConf;
2554         struct ethhdr *ehdr;
2555         struct arphdr *ahdr;
2556         u8 *arpreq;
2557         struct in_device *in_dev;
2558         struct in_ifaddr *ifa;
2559         int i = 0;
2560
2561         if (!netif_running(netdev))
2562                 return 0;
2563
2564         vmxnet3_disable_all_intrs(adapter);
2565         vmxnet3_free_irqs(adapter);
2566         vmxnet3_free_intr_resources(adapter);
2567
2568         netif_device_detach(netdev);
2569         netif_stop_queue(netdev);
2570
2571         /* Create wake-up filters. */
2572         pmConf = adapter->pm_conf;
2573         memset(pmConf, 0, sizeof(*pmConf));
2574
2575         if (adapter->wol & WAKE_UCAST) {
2576                 pmConf->filters[i].patternSize = ETH_ALEN;
2577                 pmConf->filters[i].maskSize = 1;
2578                 memcpy(pmConf->filters[i].pattern, netdev->dev_addr, ETH_ALEN);
2579                 pmConf->filters[i].mask[0] = 0x3F; /* LSB ETH_ALEN bits */
2580
2581                 set_flag_le16(&pmConf->wakeUpEvents, VMXNET3_PM_WAKEUP_FILTER);
2582                 i++;
2583         }
2584
2585         if (adapter->wol & WAKE_ARP) {
2586                 in_dev = in_dev_get(netdev);
2587                 if (!in_dev)
2588                         goto skip_arp;
2589
2590                 ifa = (struct in_ifaddr *)in_dev->ifa_list;
2591                 if (!ifa)
2592                         goto skip_arp;
2593
2594                 pmConf->filters[i].patternSize = ETH_HLEN + /* Ethernet header*/
2595                         sizeof(struct arphdr) +         /* ARP header */
2596                         2 * ETH_ALEN +          /* 2 Ethernet addresses*/
2597                         2 * sizeof(u32);        /* 2 IPv4 addresses */
2598                 pmConf->filters[i].maskSize =
2599                         (pmConf->filters[i].patternSize - 1) / 8 + 1;
2600
2601                 /* ETH_P_ARP in Ethernet header. */
2602                 ehdr = (struct ethhdr *)pmConf->filters[i].pattern;
2603                 ehdr->h_proto = htons(ETH_P_ARP);
2604
2605                 /* ARPOP_REQUEST in ARP header. */
2606                 ahdr = (struct arphdr *)&pmConf->filters[i].pattern[ETH_HLEN];
2607                 ahdr->ar_op = htons(ARPOP_REQUEST);
2608                 arpreq = (u8 *)(ahdr + 1);
2609
2610                 /* The Unicast IPv4 address in 'tip' field. */
2611                 arpreq += 2 * ETH_ALEN + sizeof(u32);
2612                 *(u32 *)arpreq = ifa->ifa_address;
2613
2614                 /* The mask for the relevant bits. */
2615                 pmConf->filters[i].mask[0] = 0x00;
2616                 pmConf->filters[i].mask[1] = 0x30; /* ETH_P_ARP */
2617                 pmConf->filters[i].mask[2] = 0x30; /* ARPOP_REQUEST */
2618                 pmConf->filters[i].mask[3] = 0x00;
2619                 pmConf->filters[i].mask[4] = 0xC0; /* IPv4 TIP */
2620                 pmConf->filters[i].mask[5] = 0x03; /* IPv4 TIP */
2621                 in_dev_put(in_dev);
2622
2623                 set_flag_le16(&pmConf->wakeUpEvents, VMXNET3_PM_WAKEUP_FILTER);
2624                 i++;
2625         }
2626
2627 skip_arp:
2628         if (adapter->wol & WAKE_MAGIC)
2629                 set_flag_le16(&pmConf->wakeUpEvents, VMXNET3_PM_WAKEUP_MAGIC);
2630
2631         pmConf->numFilters = i;
2632
2633         adapter->shared->devRead.pmConfDesc.confVer = cpu_to_le32(1);
2634         adapter->shared->devRead.pmConfDesc.confLen = cpu_to_le32(sizeof(
2635                                                                   *pmConf));
2636         adapter->shared->devRead.pmConfDesc.confPA = cpu_to_le64(virt_to_phys(
2637                                                                  pmConf));
2638
2639         VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
2640                                VMXNET3_CMD_UPDATE_PMCFG);
2641
2642         pci_save_state(pdev);
2643         pci_enable_wake(pdev, pci_choose_state(pdev, PMSG_SUSPEND),
2644                         adapter->wol);
2645         pci_disable_device(pdev);
2646         pci_set_power_state(pdev, pci_choose_state(pdev, PMSG_SUSPEND));
2647
2648         return 0;
2649 }
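
/*
 * Wake-up filter masks are byte-granular: bit n of mask[] covers byte
 * n of pattern[], with maskSize = ceil(patternSize / 8). For the ARP
 * filter built above, patternSize = 14 + 8 + 12 + 8 = 42, maskSize = 6,
 * and the set bits select exactly:
 *
 *   mask[1] = 0x30                  // bytes 12-13: ethertype (ETH_P_ARP)
 *   mask[2] = 0x30                  // bytes 20-21: ar_op (ARPOP_REQUEST)
 *   mask[4] = 0xC0, mask[5] = 0x03  // bytes 38-41: target IP
 *
 * Likewise the unicast filter's mask[0] = 0x3F covers pattern bytes
 * 0-5, the destination MAC.
 */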
2650
2651
2652 static int
2653 vmxnet3_resume(struct device *device)
2654 {
2655         int err;
2656         struct pci_dev *pdev = to_pci_dev(device);
2657         struct net_device *netdev = pci_get_drvdata(pdev);
2658         struct vmxnet3_adapter *adapter = netdev_priv(netdev);
2659         struct Vmxnet3_PMConf *pmConf;
2660
2661         if (!netif_running(netdev))
2662                 return 0;
2663
2664         /* Destroy wake-up filters. */
2665         pmConf = adapter->pm_conf;
2666         memset(pmConf, 0, sizeof(*pmConf));
2667
2668         adapter->shared->devRead.pmConfDesc.confVer = cpu_to_le32(1);
2669         adapter->shared->devRead.pmConfDesc.confLen = cpu_to_le32(sizeof(
2670                                                                   *pmConf));
2671         adapter->shared->devRead.pmConfDesc.confPA = cpu_to_le32(virt_to_phys(
2672                                                                  pmConf));
2673
2674         netif_device_attach(netdev);
2675         pci_set_power_state(pdev, PCI_D0);
2676         pci_restore_state(pdev);
2677         err = pci_enable_device_mem(pdev);
2678         if (err != 0)
2679                 return err;
2680
2681         pci_enable_wake(pdev, PCI_D0, 0);
2682
2683         VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
2684                                VMXNET3_CMD_UPDATE_PMCFG);
2685         vmxnet3_alloc_intr_resources(adapter);
2686         vmxnet3_request_irqs(adapter);
2687         vmxnet3_enable_all_intrs(adapter);
2688
2689         return 0;
2690 }
2691
2692 static const struct dev_pm_ops vmxnet3_pm_ops = {
2693         .suspend = vmxnet3_suspend,
2694         .resume = vmxnet3_resume,
2695 };
2696 #endif
2697
2698 static struct pci_driver vmxnet3_driver = {
2699         .name           = vmxnet3_driver_name,
2700         .id_table       = vmxnet3_pciid_table,
2701         .probe          = vmxnet3_probe_device,
2702         .remove         = __devexit_p(vmxnet3_remove_device),
2703 #ifdef CONFIG_PM
2704         .driver.pm      = &vmxnet3_pm_ops,
2705 #endif
2706 };
2707
2708
2709 static int __init
2710 vmxnet3_init_module(void)
2711 {
2712         printk(KERN_INFO "%s - version %s\n", VMXNET3_DRIVER_DESC,
2713                 VMXNET3_DRIVER_VERSION_REPORT);
2714         return pci_register_driver(&vmxnet3_driver);
2715 }
2716
2717 module_init(vmxnet3_init_module);
2718
2719
2720 static void
2721 vmxnet3_exit_module(void)
2722 {
2723         pci_unregister_driver(&vmxnet3_driver);
2724 }
2725
2726 module_exit(vmxnet3_exit_module);
2727
2728 MODULE_AUTHOR("VMware, Inc.");
2729 MODULE_DESCRIPTION(VMXNET3_DRIVER_DESC);
2730 MODULE_LICENSE("GPL v2");
2731 MODULE_VERSION(VMXNET3_DRIVER_VERSION_STRING);