/*
 * Linux driver for VMware's vmxnet3 ethernet NIC.
 *
 * Copyright (C) 2008-2009, VMware, Inc. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; version 2 of the License and no later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT. See the GNU General Public License for more
 * details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * The full GNU General Public License is included in this distribution in
 * the file called "COPYING".
 *
 * Maintained by: Shreyas Bhatewara <pv-drivers@vmware.com>
 */
#include "vmxnet3_int.h"

char vmxnet3_driver_name[] = "vmxnet3";
#define VMXNET3_DRIVER_DESC "VMware vmxnet3 virtual NIC driver"

/* Last entry must be all 0s */
static const struct pci_device_id vmxnet3_pciid_table[] = {
	{PCI_VDEVICE(VMWARE, PCI_DEVICE_ID_VMWARE_VMXNET3)},
	{0}
};

MODULE_DEVICE_TABLE(pci, vmxnet3_pciid_table);

static atomic_t devices_found;
/*
 * Enable/Disable the given intr
 */
static void
vmxnet3_enable_intr(struct vmxnet3_adapter *adapter, unsigned intr_idx)
{
	VMXNET3_WRITE_BAR0_REG(adapter, VMXNET3_REG_IMR + intr_idx * 8, 0);
}


static void
vmxnet3_disable_intr(struct vmxnet3_adapter *adapter, unsigned intr_idx)
{
	VMXNET3_WRITE_BAR0_REG(adapter, VMXNET3_REG_IMR + intr_idx * 8, 1);
}


/*
 * Enable/Disable all intrs used by the device
 */
static void
vmxnet3_enable_all_intrs(struct vmxnet3_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->intr.num_intrs; i++)
		vmxnet3_enable_intr(adapter, i);
}


static void
vmxnet3_disable_all_intrs(struct vmxnet3_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->intr.num_intrs; i++)
		vmxnet3_disable_intr(adapter, i);
}


static void
vmxnet3_ack_events(struct vmxnet3_adapter *adapter, u32 events)
{
	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_ECR, events);
}
static bool
vmxnet3_tq_stopped(struct vmxnet3_tx_queue *tq, struct vmxnet3_adapter *adapter)
{
	return netif_queue_stopped(adapter->netdev);
}


static void
vmxnet3_tq_start(struct vmxnet3_tx_queue *tq, struct vmxnet3_adapter *adapter)
{
	tq->stopped = false;
	netif_start_queue(adapter->netdev);
}


static void
vmxnet3_tq_wake(struct vmxnet3_tx_queue *tq, struct vmxnet3_adapter *adapter)
{
	tq->stopped = false;
	netif_wake_queue(adapter->netdev);
}


static void
vmxnet3_tq_stop(struct vmxnet3_tx_queue *tq, struct vmxnet3_adapter *adapter)
{
	tq->stopped = true;
	tq->num_stop++;
	netif_stop_queue(adapter->netdev);
}
/*
 * Check the link state. This may start or stop the tx queue.
 */
static void
vmxnet3_check_link(struct vmxnet3_adapter *adapter)
{
	u32 ret;

	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, VMXNET3_CMD_GET_LINK);
	ret = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD);
	adapter->link_speed = ret >> 16;
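	/*
	 * Judging from the two statements above, the GET_LINK result
	 * appears to encode the link state in bit 0 and the speed in Mbps
	 * in bits 31:16; e.g. a 10 Gbps link would read back as
	 * (10000 << 16) | 1, giving link_speed == 10000.
	 */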
	if (ret & 1) { /* Link is up. */
		printk(KERN_INFO "%s: NIC Link is Up %d Mbps\n",
		       adapter->netdev->name, adapter->link_speed);
		if (!netif_carrier_ok(adapter->netdev))
			netif_carrier_on(adapter->netdev);

		vmxnet3_tq_start(&adapter->tx_queue, adapter);
	} else {
		printk(KERN_INFO "%s: NIC Link is Down\n",
		       adapter->netdev->name);
		if (netif_carrier_ok(adapter->netdev))
			netif_carrier_off(adapter->netdev);

		vmxnet3_tq_stop(&adapter->tx_queue, adapter);
	}
}
static void
vmxnet3_process_events(struct vmxnet3_adapter *adapter)
{
	u32 events = adapter->shared->ecr;

	if (!events)
		return;

	vmxnet3_ack_events(adapter, events);

	/* Check if link state has changed */
	if (events & VMXNET3_ECR_LINK)
		vmxnet3_check_link(adapter);

	/* Check if there is an error on xmit/recv queues */
	if (events & (VMXNET3_ECR_TQERR | VMXNET3_ECR_RQERR)) {
		VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
				       VMXNET3_CMD_GET_QUEUE_STATUS);

		if (adapter->tqd_start->status.stopped) {
			printk(KERN_ERR "%s: tq error 0x%x\n",
			       adapter->netdev->name,
			       adapter->tqd_start->status.error);
		}
		if (adapter->rqd_start->status.stopped) {
			printk(KERN_ERR "%s: rq error 0x%x\n",
			       adapter->netdev->name,
			       adapter->rqd_start->status.error);
		}

		schedule_work(&adapter->work);
	}
}
static void
vmxnet3_unmap_tx_buf(struct vmxnet3_tx_buf_info *tbi,
		     struct pci_dev *pdev)
{
	if (tbi->map_type == VMXNET3_MAP_SINGLE)
		pci_unmap_single(pdev, tbi->dma_addr, tbi->len,
				 PCI_DMA_TODEVICE);
	else if (tbi->map_type == VMXNET3_MAP_PAGE)
		pci_unmap_page(pdev, tbi->dma_addr, tbi->len,
			       PCI_DMA_TODEVICE);
	else
		BUG_ON(tbi->map_type != VMXNET3_MAP_NONE);

	tbi->map_type = VMXNET3_MAP_NONE; /* to help debugging */
}
static int
vmxnet3_unmap_pkt(u32 eop_idx, struct vmxnet3_tx_queue *tq,
		  struct pci_dev *pdev, struct vmxnet3_adapter *adapter)
{
	struct sk_buff *skb;
	int entries = 0;

	/* no out of order completion */
	BUG_ON(tq->buf_info[eop_idx].sop_idx != tq->tx_ring.next2comp);
	BUG_ON(tq->tx_ring.base[eop_idx].txd.eop != 1);

	skb = tq->buf_info[eop_idx].skb;
	BUG_ON(skb == NULL);
	tq->buf_info[eop_idx].skb = NULL;

	VMXNET3_INC_RING_IDX_ONLY(eop_idx, tq->tx_ring.size);

	while (tq->tx_ring.next2comp != eop_idx) {
		vmxnet3_unmap_tx_buf(tq->buf_info + tq->tx_ring.next2comp,
				     pdev);

		/* update next2comp w/o tx_lock. Since we are marking more,
		 * instead of less, tx ring entries avail, the worst case is
		 * that the tx routine incorrectly re-queues a pkt due to
		 * insufficient tx ring entries.
		 */
		vmxnet3_cmd_ring_adv_next2comp(&tq->tx_ring);
		entries++;
	}

	dev_kfree_skb_any(skb);
	return entries;
}
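/*
 * Marking entries complete without tq->tx_lock is safe because the xmit
 * path only consumes the free-descriptor count. A sketch of the ring
 * arithmetic assumed here (the real helper lives in vmxnet3_int.h):
 *
 *	avail = (next2comp > next2fill ? 0 : size) +
 *		next2comp - next2fill - 1;
 *
 * One slot is always kept unused so that next2fill == next2comp can
 * unambiguously mean "empty" rather than "full".
 */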
static int
vmxnet3_tq_tx_complete(struct vmxnet3_tx_queue *tq,
		       struct vmxnet3_adapter *adapter)
{
	int completed = 0;
	union Vmxnet3_GenericDesc *gdesc;

	gdesc = tq->comp_ring.base + tq->comp_ring.next2proc;
	while (gdesc->tcd.gen == tq->comp_ring.gen) {
		completed += vmxnet3_unmap_pkt(gdesc->tcd.txdIdx, tq,
					       adapter->pdev, adapter);

		vmxnet3_comp_ring_adv_next2proc(&tq->comp_ring);
		gdesc = tq->comp_ring.base + tq->comp_ring.next2proc;
	}

	if (completed) {
		spin_lock(&tq->tx_lock);
		if (unlikely(vmxnet3_tq_stopped(tq, adapter) &&
			     vmxnet3_cmd_ring_desc_avail(&tq->tx_ring) >
			     VMXNET3_WAKE_QUEUE_THRESHOLD(tq) &&
			     netif_carrier_ok(adapter->netdev))) {
			vmxnet3_tq_wake(tq, adapter);
		}
		spin_unlock(&tq->tx_lock);
	}
	return completed;
}
static void
vmxnet3_tq_cleanup(struct vmxnet3_tx_queue *tq,
		   struct vmxnet3_adapter *adapter)
{
	int i;

	while (tq->tx_ring.next2comp != tq->tx_ring.next2fill) {
		struct vmxnet3_tx_buf_info *tbi;
		union Vmxnet3_GenericDesc *gdesc;

		tbi = tq->buf_info + tq->tx_ring.next2comp;
		gdesc = tq->tx_ring.base + tq->tx_ring.next2comp;

		vmxnet3_unmap_tx_buf(tbi, adapter->pdev);
		if (tbi->skb) {
			dev_kfree_skb_any(tbi->skb);
			tbi->skb = NULL;
		}
		vmxnet3_cmd_ring_adv_next2comp(&tq->tx_ring);
	}

	/* sanity check, verify all buffers are indeed unmapped and freed */
	for (i = 0; i < tq->tx_ring.size; i++) {
		BUG_ON(tq->buf_info[i].skb != NULL ||
		       tq->buf_info[i].map_type != VMXNET3_MAP_NONE);
	}

	tq->tx_ring.gen = VMXNET3_INIT_GEN;
	tq->tx_ring.next2fill = tq->tx_ring.next2comp = 0;

	tq->comp_ring.gen = VMXNET3_INIT_GEN;
	tq->comp_ring.next2proc = 0;
}
static void
vmxnet3_tq_destroy(struct vmxnet3_tx_queue *tq,
		   struct vmxnet3_adapter *adapter)
{
	if (tq->tx_ring.base) {
		pci_free_consistent(adapter->pdev, tq->tx_ring.size *
				    sizeof(struct Vmxnet3_TxDesc),
				    tq->tx_ring.base, tq->tx_ring.basePA);
		tq->tx_ring.base = NULL;
	}
	if (tq->data_ring.base) {
		pci_free_consistent(adapter->pdev, tq->data_ring.size *
				    sizeof(struct Vmxnet3_TxDataDesc),
				    tq->data_ring.base, tq->data_ring.basePA);
		tq->data_ring.base = NULL;
	}
	if (tq->comp_ring.base) {
		pci_free_consistent(adapter->pdev, tq->comp_ring.size *
				    sizeof(struct Vmxnet3_TxCompDesc),
				    tq->comp_ring.base, tq->comp_ring.basePA);
		tq->comp_ring.base = NULL;
	}
	kfree(tq->buf_info);
	tq->buf_info = NULL;
}
static void
vmxnet3_tq_init(struct vmxnet3_tx_queue *tq,
		struct vmxnet3_adapter *adapter)
{
	int i;

	/* reset the tx ring contents to 0 and reset the tx ring states */
	memset(tq->tx_ring.base, 0, tq->tx_ring.size *
	       sizeof(struct Vmxnet3_TxDesc));
	tq->tx_ring.next2fill = tq->tx_ring.next2comp = 0;
	tq->tx_ring.gen = VMXNET3_INIT_GEN;

	memset(tq->data_ring.base, 0, tq->data_ring.size *
	       sizeof(struct Vmxnet3_TxDataDesc));

	/* reset the tx comp ring contents to 0 and reset comp ring states */
	memset(tq->comp_ring.base, 0, tq->comp_ring.size *
	       sizeof(struct Vmxnet3_TxCompDesc));
	tq->comp_ring.next2proc = 0;
	tq->comp_ring.gen = VMXNET3_INIT_GEN;

	/* reset the bookkeeping data */
	memset(tq->buf_info, 0, sizeof(tq->buf_info[0]) * tq->tx_ring.size);
	for (i = 0; i < tq->tx_ring.size; i++)
		tq->buf_info[i].map_type = VMXNET3_MAP_NONE;

	/* stats are not reset */
}
static int
vmxnet3_tq_create(struct vmxnet3_tx_queue *tq,
		  struct vmxnet3_adapter *adapter)
{
	BUG_ON(tq->tx_ring.base || tq->data_ring.base ||
	       tq->comp_ring.base || tq->buf_info);

	tq->tx_ring.base = pci_alloc_consistent(adapter->pdev, tq->tx_ring.size
						* sizeof(struct Vmxnet3_TxDesc),
						&tq->tx_ring.basePA);
	if (!tq->tx_ring.base) {
		printk(KERN_ERR "%s: failed to allocate tx ring\n",
		       adapter->netdev->name);
		goto err;
	}

	tq->data_ring.base = pci_alloc_consistent(adapter->pdev,
						  tq->data_ring.size *
						  sizeof(struct Vmxnet3_TxDataDesc),
						  &tq->data_ring.basePA);
	if (!tq->data_ring.base) {
		printk(KERN_ERR "%s: failed to allocate data ring\n",
		       adapter->netdev->name);
		goto err;
	}

	tq->comp_ring.base = pci_alloc_consistent(adapter->pdev,
						  tq->comp_ring.size *
						  sizeof(struct Vmxnet3_TxCompDesc),
						  &tq->comp_ring.basePA);
	if (!tq->comp_ring.base) {
		printk(KERN_ERR "%s: failed to allocate tx comp ring\n",
		       adapter->netdev->name);
		goto err;
	}

	tq->buf_info = kcalloc(tq->tx_ring.size, sizeof(tq->buf_info[0]),
			       GFP_KERNEL);
	if (!tq->buf_info) {
		printk(KERN_ERR "%s: failed to allocate tx bufinfo\n",
		       adapter->netdev->name);
		goto err;
	}

	return 0;

err:
	vmxnet3_tq_destroy(tq, adapter);
	return -ENOMEM;
}
/*
 * starting from ring->next2fill, allocate rx buffers for the given ring
 * of the rx queue and update the rx desc. stop after @num_to_alloc buffers
 * are allocated or allocation fails
 */
static int
vmxnet3_rq_alloc_rx_buf(struct vmxnet3_rx_queue *rq, u32 ring_idx,
			int num_to_alloc, struct vmxnet3_adapter *adapter)
{
	int num_allocated = 0;
	struct vmxnet3_rx_buf_info *rbi_base = rq->buf_info[ring_idx];
	struct vmxnet3_cmd_ring *ring = &rq->rx_ring[ring_idx];
	u32 val;

	while (num_allocated < num_to_alloc) {
		struct vmxnet3_rx_buf_info *rbi;
		union Vmxnet3_GenericDesc *gd;

		rbi = rbi_base + ring->next2fill;
		gd = ring->base + ring->next2fill;

		if (rbi->buf_type == VMXNET3_RX_BUF_SKB) {
			if (rbi->skb == NULL) {
				rbi->skb = dev_alloc_skb(rbi->len +
							 NET_IP_ALIGN);
				if (unlikely(rbi->skb == NULL)) {
					rq->stats.rx_buf_alloc_failure++;
					break;
				}
				rbi->skb->dev = adapter->netdev;

				skb_reserve(rbi->skb, NET_IP_ALIGN);
				rbi->dma_addr = pci_map_single(adapter->pdev,
						rbi->skb->data, rbi->len,
						PCI_DMA_FROMDEVICE);
			} else {
				/* rx buffer skipped by the device */
			}
			val = VMXNET3_RXD_BTYPE_HEAD << VMXNET3_RXD_BTYPE_SHIFT;
		} else {
			BUG_ON(rbi->buf_type != VMXNET3_RX_BUF_PAGE ||
			       rbi->len != PAGE_SIZE);
			if (rbi->page == NULL) {
				rbi->page = alloc_page(GFP_ATOMIC);
				if (unlikely(rbi->page == NULL)) {
					rq->stats.rx_buf_alloc_failure++;
					break;
				}
				rbi->dma_addr = pci_map_page(adapter->pdev,
						rbi->page, 0, PAGE_SIZE,
						PCI_DMA_FROMDEVICE);
			} else {
				/* rx buffers skipped by the device */
			}
			val = VMXNET3_RXD_BTYPE_BODY << VMXNET3_RXD_BTYPE_SHIFT;
		}

		BUG_ON(rbi->dma_addr == 0);
		gd->rxd.addr = rbi->dma_addr;
		gd->dword[2] = (ring->gen << VMXNET3_RXD_GEN_SHIFT) | val |
			       rbi->len;

		num_allocated++;
		vmxnet3_cmd_ring_adv_next2fill(ring);
	}
	rq->uncommitted[ring_idx] += num_allocated;

	dprintk(KERN_ERR "alloc_rx_buf: %d allocated, next2fill %u, next2comp "
		"%u, uncommitted %u\n", num_allocated, ring->next2fill,
		ring->next2comp, rq->uncommitted[ring_idx]);

	/* so that the device can distinguish a full ring and an empty ring */
	BUG_ON(num_allocated != 0 && ring->next2fill == ring->next2comp);

	return num_allocated;
}
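/*
 * Why the BUG_ON above holds: callers hand the ring at most size - 1
 * buffers (see vmxnet3_rq_init()), so after a non-empty allocation
 * next2fill can never wrap all the way around to next2comp. For a
 * 256-entry ring, at most 255 descriptors are ever owned by the device
 * at once, keeping next2fill == next2comp reserved for "empty".
 */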
static void
vmxnet3_append_frag(struct sk_buff *skb, struct Vmxnet3_RxCompDesc *rcd,
		    struct vmxnet3_rx_buf_info *rbi)
{
	struct skb_frag_struct *frag = skb_shinfo(skb)->frags +
				       skb_shinfo(skb)->nr_frags;

	BUG_ON(skb_shinfo(skb)->nr_frags >= MAX_SKB_FRAGS);

	frag->page = rbi->page;
	frag->page_offset = 0;
	frag->size = rcd->len;
	skb->data_len += frag->size;
	skb_shinfo(skb)->nr_frags++;
}
static void
vmxnet3_map_pkt(struct sk_buff *skb, struct vmxnet3_tx_ctx *ctx,
		struct vmxnet3_tx_queue *tq, struct pci_dev *pdev,
		struct vmxnet3_adapter *adapter)
{
	u32 dw2, len;
	unsigned long buf_offset;
	int i;
	union Vmxnet3_GenericDesc *gdesc;
	struct vmxnet3_tx_buf_info *tbi = NULL;

	BUG_ON(ctx->copy_size > skb_headlen(skb));

	/* use the previous gen bit for the SOP desc */
	dw2 = (tq->tx_ring.gen ^ 0x1) << VMXNET3_TXD_GEN_SHIFT;
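	/*
	 * Writing the SOP descriptor with the *stale* gen bit keeps the
	 * device from seeing a partially filled packet: every later
	 * descriptor carries the current gen, and only the final
	 * "gdesc->dword[2] ^= VMXNET3_TXD_GEN" in vmxnet3_tq_xmit() makes
	 * the whole chain visible to the device at once.
	 */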
	ctx->sop_txd = tq->tx_ring.base + tq->tx_ring.next2fill;
	gdesc = ctx->sop_txd; /* both loops below can be skipped */

	/* no need to map the buffer if headers are copied */
	if (ctx->copy_size) {
		ctx->sop_txd->txd.addr = tq->data_ring.basePA +
					 tq->tx_ring.next2fill *
					 sizeof(struct Vmxnet3_TxDataDesc);
		ctx->sop_txd->dword[2] = dw2 | ctx->copy_size;
		ctx->sop_txd->dword[3] = 0;

		tbi = tq->buf_info + tq->tx_ring.next2fill;
		tbi->map_type = VMXNET3_MAP_NONE;

		dprintk(KERN_ERR "txd[%u]: 0x%Lx 0x%x 0x%x\n",
			tq->tx_ring.next2fill, ctx->sop_txd->txd.addr,
			ctx->sop_txd->dword[2], ctx->sop_txd->dword[3]);
		vmxnet3_cmd_ring_adv_next2fill(&tq->tx_ring);

		/* use the right gen for non-SOP desc */
		dw2 = tq->tx_ring.gen << VMXNET3_TXD_GEN_SHIFT;
	}

	/* linear part can use multiple tx desc if it's big */
	len = skb_headlen(skb) - ctx->copy_size;
	buf_offset = ctx->copy_size;
	while (len) {
		u32 buf_size;

		buf_size = len > VMXNET3_MAX_TX_BUF_SIZE ?
			   VMXNET3_MAX_TX_BUF_SIZE : len;

		tbi = tq->buf_info + tq->tx_ring.next2fill;
		tbi->map_type = VMXNET3_MAP_SINGLE;
		tbi->dma_addr = pci_map_single(adapter->pdev,
				skb->data + buf_offset, buf_size,
				PCI_DMA_TODEVICE);

		tbi->len = buf_size; /* this automatically converts 2^14 to 0 */

		gdesc = tq->tx_ring.base + tq->tx_ring.next2fill;
		BUG_ON(gdesc->txd.gen == tq->tx_ring.gen);

		gdesc->txd.addr = tbi->dma_addr;
		gdesc->dword[2] = dw2 | buf_size;
		gdesc->dword[3] = 0;

		dprintk(KERN_ERR "txd[%u]: 0x%Lx 0x%x 0x%x\n",
			tq->tx_ring.next2fill, gdesc->txd.addr,
			gdesc->dword[2], gdesc->dword[3]);
		vmxnet3_cmd_ring_adv_next2fill(&tq->tx_ring);
		dw2 = tq->tx_ring.gen << VMXNET3_TXD_GEN_SHIFT;

		len -= buf_size;
		buf_offset += buf_size;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];

		tbi = tq->buf_info + tq->tx_ring.next2fill;
		tbi->map_type = VMXNET3_MAP_PAGE;
		tbi->dma_addr = pci_map_page(adapter->pdev, frag->page,
					     frag->page_offset, frag->size,
					     PCI_DMA_TODEVICE);

		tbi->len = frag->size;

		gdesc = tq->tx_ring.base + tq->tx_ring.next2fill;
		BUG_ON(gdesc->txd.gen == tq->tx_ring.gen);

		gdesc->txd.addr = tbi->dma_addr;
		gdesc->dword[2] = dw2 | frag->size;
		gdesc->dword[3] = 0;

		dprintk(KERN_ERR "txd[%u]: 0x%llx %u %u\n",
			tq->tx_ring.next2fill, gdesc->txd.addr,
			gdesc->dword[2], gdesc->dword[3]);
		vmxnet3_cmd_ring_adv_next2fill(&tq->tx_ring);
		dw2 = tq->tx_ring.gen << VMXNET3_TXD_GEN_SHIFT;
	}

	ctx->eop_txd = gdesc;

	/* set the last buf_info for the pkt */
	tbi->skb = skb;
	tbi->sop_idx = ctx->sop_txd - tq->tx_ring.base;
}
/*
 * parse and copy relevant protocol headers:
 *      For a tso pkt, relevant headers are L2/3/4 including options
 *      For a pkt requesting csum offloading, they are L2/3 and may include L4
 *      if it's a TCP/UDP pkt
 *
 * Returns:
 *    -1:  error happens during parsing
 *     0:  protocol headers parsed, but too big to be copied
 *     1:  protocol headers parsed and copied
 *
 * Other effects:
 *    1. related *ctx fields are updated.
 *    2. ctx->copy_size is # of bytes copied
 *    3. the portion copied is guaranteed to be in the linear part
 *
 */
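/*
 * A worked example of the copy size (illustrative numbers): for a TSO
 * packet of Ethernet (14 bytes) + IPv4 without options (20 bytes) + TCP
 * with timestamps (32 bytes), copy_size = 14 + 20 + 32 = 66 bytes, which
 * fits in a single Vmxnet3_TxDataDesc slot of the data ring.
 */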
static int
vmxnet3_parse_and_copy_hdr(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
			   struct vmxnet3_tx_ctx *ctx,
			   struct vmxnet3_adapter *adapter)
{
	struct Vmxnet3_TxDataDesc *tdd;

	if (ctx->mss) {
		ctx->eth_ip_hdr_size = skb_transport_offset(skb);
		ctx->l4_hdr_size = ((struct tcphdr *)
				   skb_transport_header(skb))->doff * 4;
		ctx->copy_size = ctx->eth_ip_hdr_size + ctx->l4_hdr_size;
	} else {
		unsigned int pull_size;

		if (skb->ip_summed == CHECKSUM_PARTIAL) {
			ctx->eth_ip_hdr_size = skb_transport_offset(skb);

			if (ctx->ipv4) {
				struct iphdr *iph = (struct iphdr *)
						    skb_network_header(skb);
				if (iph->protocol == IPPROTO_TCP) {
					pull_size = ctx->eth_ip_hdr_size +
						    sizeof(struct tcphdr);

					if (unlikely(!pskb_may_pull(skb,
								pull_size)))
						goto err;

					ctx->l4_hdr_size = ((struct tcphdr *)
					   skb_transport_header(skb))->doff * 4;
				} else if (iph->protocol == IPPROTO_UDP) {
					ctx->l4_hdr_size =
							sizeof(struct udphdr);
				} else {
					ctx->l4_hdr_size = 0;
				}
			} else {
				/* for simplicity, don't copy L4 headers */
				ctx->l4_hdr_size = 0;
			}
			ctx->copy_size = ctx->eth_ip_hdr_size +
					 ctx->l4_hdr_size;
		} else {
			ctx->eth_ip_hdr_size = 0;
			ctx->l4_hdr_size = 0;
			/* copy as much as allowed */
			ctx->copy_size = min((unsigned int)VMXNET3_HDR_COPY_SIZE
					     , skb_headlen(skb));
		}

		/* make sure headers are accessible directly */
		if (unlikely(!pskb_may_pull(skb, ctx->copy_size)))
			goto err;
	}

	if (unlikely(ctx->copy_size > VMXNET3_HDR_COPY_SIZE)) {
		tq->stats.oversized_hdr++;
		ctx->copy_size = 0;
		return 0;
	}

	tdd = tq->data_ring.base + tq->tx_ring.next2fill;

	memcpy(tdd->data, skb->data, ctx->copy_size);
	dprintk(KERN_ERR "copy %u bytes to dataRing[%u]\n",
		ctx->copy_size, tq->tx_ring.next2fill);
	return 1;

err:
	return -1;
}
static void
vmxnet3_prepare_tso(struct sk_buff *skb,
		    struct vmxnet3_tx_ctx *ctx)
{
	struct tcphdr *tcph = (struct tcphdr *)skb_transport_header(skb);

	if (ctx->ipv4) {
		struct iphdr *iph = (struct iphdr *)skb_network_header(skb);
		iph->check = 0;
		tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr, 0,
						 IPPROTO_TCP, 0);
	} else {
		struct ipv6hdr *iph = (struct ipv6hdr *)skb_network_header(skb);
		tcph->check = ~csum_ipv6_magic(&iph->saddr, &iph->daddr, 0,
					       IPPROTO_TCP, 0);
	}
}
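/*
 * Rationale (the standard TSO convention): the device fills in the final
 * TCP checksum per segment, but expects the checksum field to be
 * pre-seeded with the pseudo-header sum computed *without* the length
 * field, which is exactly what the ~csum_tcpudp_magic(..., 0, ...) and
 * ~csum_ipv6_magic(..., 0, ...) calls above produce.
 */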
/*
 * Transmits a pkt thru a given tq
 *
 * Returns:
 *    NETDEV_TX_OK:      descriptors are setup successfully
 *    NETDEV_TX_OK:      error occurred, the pkt is dropped
 *    NETDEV_TX_BUSY:    tx ring is full, queue is stopped
 *
 * Side-effects:
 *    1. tx ring may be changed
 *    2. tq stats may be updated accordingly
 *    3. shared->txNumDeferred may be updated
 *
 */
static int
vmxnet3_tq_xmit(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
		struct vmxnet3_adapter *adapter, struct net_device *netdev)
{
	int ret;
	u32 count;
	unsigned long flags;
	struct vmxnet3_tx_ctx ctx;
	union Vmxnet3_GenericDesc *gdesc;

	/* conservatively estimate # of descriptors to use */
	count = VMXNET3_TXD_NEEDED(skb_headlen(skb)) +
		skb_shinfo(skb)->nr_frags + 1;
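	/*
	 * Example of the estimate (VMXNET3_TXD_NEEDED is assumed to be
	 * ceil(len / VMXNET3_MAX_TX_BUF_SIZE)): a 3000-byte linear part
	 * with 2 frags yields 1 + 2 + 1 = 4 descriptors, the "+ 1"
	 * covering the data-ring descriptor used when headers are copied.
	 */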
	ctx.ipv4 = (skb->protocol == __constant_ntohs(ETH_P_IP));

	ctx.mss = skb_shinfo(skb)->gso_size;
	if (ctx.mss) {
		if (skb_header_cloned(skb)) {
			if (unlikely(pskb_expand_head(skb, 0, 0,
						      GFP_ATOMIC) != 0)) {
				tq->stats.drop_tso++;
				goto drop_pkt;
			}
			tq->stats.copy_skb_header++;
		}
		vmxnet3_prepare_tso(skb, &ctx);
	} else {
		if (unlikely(count > VMXNET3_MAX_TXD_PER_PKT)) {

			/* non-tso pkts must not use more than
			 * VMXNET3_MAX_TXD_PER_PKT entries
			 */
			if (skb_linearize(skb) != 0) {
				tq->stats.drop_too_many_frags++;
				goto drop_pkt;
			}
			tq->stats.linearized++;

			/* recalculate the # of descriptors to use */
			count = VMXNET3_TXD_NEEDED(skb_headlen(skb)) + 1;
		}
	}

	ret = vmxnet3_parse_and_copy_hdr(skb, tq, &ctx, adapter);
	if (ret >= 0) {
		BUG_ON(ret <= 0 && ctx.copy_size != 0);
		/* hdrs parsed, check against other limits */
		if (ctx.mss) {
			if (unlikely(ctx.eth_ip_hdr_size + ctx.l4_hdr_size >
				     VMXNET3_MAX_TX_BUF_SIZE)) {
				goto hdr_too_big;
			}
		} else {
			if (skb->ip_summed == CHECKSUM_PARTIAL) {
				if (unlikely(ctx.eth_ip_hdr_size +
					     skb->csum_offset >
					     VMXNET3_MAX_CSUM_OFFSET)) {
					goto hdr_too_big;
				}
			}
		}
	} else {
		tq->stats.drop_hdr_inspect_err++;
		goto drop_pkt;
	}

	spin_lock_irqsave(&tq->tx_lock, flags);

	if (count > vmxnet3_cmd_ring_desc_avail(&tq->tx_ring)) {
		tq->stats.tx_ring_full++;
		dprintk(KERN_ERR "tx queue stopped on %s, next2comp %u"
			" next2fill %u\n", adapter->netdev->name,
			tq->tx_ring.next2comp, tq->tx_ring.next2fill);

		vmxnet3_tq_stop(tq, adapter);
		spin_unlock_irqrestore(&tq->tx_lock, flags);
		return NETDEV_TX_BUSY;
	}

	/* fill tx descs related to addr & len */
	vmxnet3_map_pkt(skb, &ctx, tq, adapter->pdev, adapter);

	/* setup the EOP desc */
	ctx.eop_txd->dword[3] = VMXNET3_TXD_CQ | VMXNET3_TXD_EOP;

	/* setup the SOP desc */
	gdesc = ctx.sop_txd;
	if (ctx.mss) {
		gdesc->txd.hlen = ctx.eth_ip_hdr_size + ctx.l4_hdr_size;
		gdesc->txd.om = VMXNET3_OM_TSO;
		gdesc->txd.msscof = ctx.mss;
		tq->shared->txNumDeferred += (skb->len - gdesc->txd.hlen +
					      ctx.mss - 1) / ctx.mss;
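		/*
		 * This is ceil((skb->len - hlen) / mss), i.e. the number of
		 * segments the device will emit; e.g. 64240 payload bytes at
		 * mss 1460 defer 44 segments' worth of doorbell writes.
		 */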
	} else {
		if (skb->ip_summed == CHECKSUM_PARTIAL) {
			gdesc->txd.hlen = ctx.eth_ip_hdr_size;
			gdesc->txd.om = VMXNET3_OM_CSUM;
			gdesc->txd.msscof = ctx.eth_ip_hdr_size +
					    skb->csum_offset;
		} else {
			gdesc->txd.om = 0;
			gdesc->txd.msscof = 0;
		}
		tq->shared->txNumDeferred++;
	}

	if (vlan_tx_tag_present(skb)) {
		gdesc->txd.ti = 1;
		gdesc->txd.tci = vlan_tx_tag_get(skb);
	}

	/* finally flips the GEN bit of the SOP desc */
	gdesc->dword[2] ^= VMXNET3_TXD_GEN;
	dprintk(KERN_ERR "txd[%u]: SOP 0x%Lx 0x%x 0x%x\n",
		(u32)((union Vmxnet3_GenericDesc *)ctx.sop_txd -
		tq->tx_ring.base), gdesc->txd.addr, gdesc->dword[2],
		gdesc->dword[3]);

	spin_unlock_irqrestore(&tq->tx_lock, flags);

	if (tq->shared->txNumDeferred >= tq->shared->txThreshold) {
		tq->shared->txNumDeferred = 0;
		VMXNET3_WRITE_BAR0_REG(adapter, VMXNET3_REG_TXPROD,
				       tq->tx_ring.next2fill);
	}
	netdev->trans_start = jiffies;

	return NETDEV_TX_OK;

hdr_too_big:
	tq->stats.drop_oversized_hdr++;
drop_pkt:
	tq->stats.drop_total++;
	dev_kfree_skb(skb);
	return NETDEV_TX_OK;
}
static netdev_tx_t
vmxnet3_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
{
	struct vmxnet3_adapter *adapter = netdev_priv(netdev);
	struct vmxnet3_tx_queue *tq = &adapter->tx_queue;

	return vmxnet3_tq_xmit(skb, tq, adapter, netdev);
}
static void
vmxnet3_rx_csum(struct vmxnet3_adapter *adapter,
		struct sk_buff *skb,
		union Vmxnet3_GenericDesc *gdesc)
{
	if (!gdesc->rcd.cnc && adapter->rxcsum) {
		/* typical case: TCP/UDP over IP and both csums are correct */
		if ((gdesc->dword[3] & VMXNET3_RCD_CSUM_OK) ==
							VMXNET3_RCD_CSUM_OK) {
			skb->ip_summed = CHECKSUM_UNNECESSARY;
			BUG_ON(!(gdesc->rcd.tcp || gdesc->rcd.udp));
			BUG_ON(!(gdesc->rcd.v4 || gdesc->rcd.v6));
			BUG_ON(gdesc->rcd.frg);
		} else {
			if (gdesc->rcd.csum) {
				skb->csum = htons(gdesc->rcd.csum);
				skb->ip_summed = CHECKSUM_PARTIAL;
			} else {
				skb->ip_summed = CHECKSUM_NONE;
			}
		}
	} else {
		skb->ip_summed = CHECKSUM_NONE;
	}
}
static void
vmxnet3_rx_error(struct vmxnet3_rx_queue *rq, struct Vmxnet3_RxCompDesc *rcd,
		 struct vmxnet3_rx_ctx *ctx, struct vmxnet3_adapter *adapter)
{
	rq->stats.drop_err++;
	if (!rcd->fcs)
		rq->stats.drop_fcs++;

	rq->stats.drop_total++;

	/*
	 * We do not unmap and chain the rx buffer to the skb.
	 * We basically pretend this buffer is not used and will be recycled
	 * by vmxnet3_rq_alloc_rx_buf()
	 */

	/*
	 * ctx->skb may be NULL if this is the first and the only one
	 * desc for the pkt
	 */
	if (ctx->skb)
		dev_kfree_skb_irq(ctx->skb);

	ctx->skb = NULL;
}
static int
vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq,
		       struct vmxnet3_adapter *adapter, int quota)
{
	static u32 rxprod_reg[2] = {VMXNET3_REG_RXPROD, VMXNET3_REG_RXPROD2};
	u32 num_rxd = 0;
	struct Vmxnet3_RxCompDesc *rcd;
	struct vmxnet3_rx_ctx *ctx = &rq->rx_ctx;

	rcd = &rq->comp_ring.base[rq->comp_ring.next2proc].rcd;
	while (rcd->gen == rq->comp_ring.gen) {
		struct vmxnet3_rx_buf_info *rbi;
		struct sk_buff *skb;
		int num_to_alloc;
		struct Vmxnet3_RxDesc *rxd;
		u32 idx, ring_idx;

		if (num_rxd >= quota) {
			/* we may stop even before we see the EOP desc of
			 * the current pkt
			 */
			break;
		}
		num_rxd++;

		idx = rcd->rxdIdx;
		ring_idx = rcd->rqID == rq->qid ? 0 : 1;

		rxd = &rq->rx_ring[ring_idx].base[idx].rxd;
		rbi = rq->buf_info[ring_idx] + idx;

		BUG_ON(rxd->addr != rbi->dma_addr || rxd->len != rbi->len);

		if (unlikely(rcd->eop && rcd->err)) {
			vmxnet3_rx_error(rq, rcd, ctx, adapter);
			goto rcd_done;
		}

		if (rcd->sop) { /* first buf of the pkt */
			BUG_ON(rxd->btype != VMXNET3_RXD_BTYPE_HEAD ||
			       rcd->rqID != rq->qid);

			BUG_ON(rbi->buf_type != VMXNET3_RX_BUF_SKB);
			BUG_ON(ctx->skb != NULL || rbi->skb == NULL);

			if (unlikely(rcd->len == 0)) {
				/* Pretend the rx buffer is skipped. */
				BUG_ON(!(rcd->sop && rcd->eop));
				dprintk(KERN_ERR "rxRing[%u][%u] 0 length\n",
					ring_idx, idx);
				goto rcd_done;
			}

			ctx->skb = rbi->skb;
			rbi->skb = NULL;

			pci_unmap_single(adapter->pdev, rbi->dma_addr, rbi->len,
					 PCI_DMA_FROMDEVICE);

			skb_put(ctx->skb, rcd->len);
		} else {
			BUG_ON(ctx->skb == NULL);
			/* non SOP buffer must be type 1 in most cases */
			if (rbi->buf_type == VMXNET3_RX_BUF_PAGE) {
				BUG_ON(rxd->btype != VMXNET3_RXD_BTYPE_BODY);

				if (rcd->len) {
					pci_unmap_page(adapter->pdev,
						       rbi->dma_addr, rbi->len,
						       PCI_DMA_FROMDEVICE);

					vmxnet3_append_frag(ctx->skb, rcd, rbi);
					rbi->page = NULL;
				}
			} else {
				/*
				 * The only time a non-SOP buffer is type 0 is
				 * when it's EOP and error flag is raised, which
				 * has already been handled.
				 */
				BUG_ON(true);
			}
		}

		skb = ctx->skb;
		if (rcd->eop) {
			skb->len += skb->data_len;
			skb->truesize += skb->data_len;

			vmxnet3_rx_csum(adapter, skb,
					(union Vmxnet3_GenericDesc *)rcd);
			skb->protocol = eth_type_trans(skb, adapter->netdev);

			if (unlikely(adapter->vlan_grp && rcd->ts)) {
				vlan_hwaccel_receive_skb(skb,
						adapter->vlan_grp, rcd->tci);
			} else {
				netif_receive_skb(skb);
			}

			adapter->netdev->last_rx = jiffies;
			ctx->skb = NULL;
		}

rcd_done:
		/* device may skip some rx descs */
		rq->rx_ring[ring_idx].next2comp = idx;
		VMXNET3_INC_RING_IDX_ONLY(rq->rx_ring[ring_idx].next2comp,
					  rq->rx_ring[ring_idx].size);

		/* refill rx buffers frequently to avoid starving the h/w */
		num_to_alloc = vmxnet3_cmd_ring_desc_avail(rq->rx_ring +
							   ring_idx);
		if (unlikely(num_to_alloc > VMXNET3_RX_ALLOC_THRESHOLD(rq,
							ring_idx, adapter))) {
			vmxnet3_rq_alloc_rx_buf(rq, ring_idx, num_to_alloc,
						adapter);

			/* if needed, update the register */
			if (unlikely(rq->shared->updateRxProd)) {
				VMXNET3_WRITE_BAR0_REG(adapter,
					rxprod_reg[ring_idx] + rq->qid * 8,
					rq->rx_ring[ring_idx].next2fill);
				rq->uncommitted[ring_idx] = 0;
			}
		}

		vmxnet3_comp_ring_adv_next2proc(&rq->comp_ring);
		rcd = &rq->comp_ring.base[rq->comp_ring.next2proc].rcd;
	}

	return num_rxd;
}
static void
vmxnet3_rq_cleanup(struct vmxnet3_rx_queue *rq,
		   struct vmxnet3_adapter *adapter)
{
	u32 i, ring_idx;
	struct Vmxnet3_RxDesc *rxd;

	for (ring_idx = 0; ring_idx < 2; ring_idx++) {
		for (i = 0; i < rq->rx_ring[ring_idx].size; i++) {
			rxd = &rq->rx_ring[ring_idx].base[i].rxd;

			if (rxd->btype == VMXNET3_RXD_BTYPE_HEAD &&
			    rq->buf_info[ring_idx][i].skb) {
				pci_unmap_single(adapter->pdev, rxd->addr,
						 rxd->len, PCI_DMA_FROMDEVICE);
				dev_kfree_skb(rq->buf_info[ring_idx][i].skb);
				rq->buf_info[ring_idx][i].skb = NULL;
			} else if (rxd->btype == VMXNET3_RXD_BTYPE_BODY &&
				   rq->buf_info[ring_idx][i].page) {
				pci_unmap_page(adapter->pdev, rxd->addr,
					       rxd->len, PCI_DMA_FROMDEVICE);
				put_page(rq->buf_info[ring_idx][i].page);
				rq->buf_info[ring_idx][i].page = NULL;
			}
		}

		rq->rx_ring[ring_idx].gen = VMXNET3_INIT_GEN;
		rq->rx_ring[ring_idx].next2fill =
					rq->rx_ring[ring_idx].next2comp = 0;
		rq->uncommitted[ring_idx] = 0;
	}

	rq->comp_ring.gen = VMXNET3_INIT_GEN;
	rq->comp_ring.next2proc = 0;
}
void vmxnet3_rq_destroy(struct vmxnet3_rx_queue *rq,
			struct vmxnet3_adapter *adapter)
{
	int i;
	int j;

	/* all rx buffers must have already been freed */
	for (i = 0; i < 2; i++) {
		if (rq->buf_info[i]) {
			for (j = 0; j < rq->rx_ring[i].size; j++)
				BUG_ON(rq->buf_info[i][j].page != NULL);
		}
	}


	kfree(rq->buf_info[0]);

	for (i = 0; i < 2; i++) {
		if (rq->rx_ring[i].base) {
			pci_free_consistent(adapter->pdev, rq->rx_ring[i].size
					    * sizeof(struct Vmxnet3_RxDesc),
					    rq->rx_ring[i].base,
					    rq->rx_ring[i].basePA);
			rq->rx_ring[i].base = NULL;
		}
		rq->buf_info[i] = NULL;
	}

	if (rq->comp_ring.base) {
		pci_free_consistent(adapter->pdev, rq->comp_ring.size *
				    sizeof(struct Vmxnet3_RxCompDesc),
				    rq->comp_ring.base, rq->comp_ring.basePA);
		rq->comp_ring.base = NULL;
	}
}
static int
vmxnet3_rq_init(struct vmxnet3_rx_queue *rq,
		struct vmxnet3_adapter *adapter)
{
	int i;

	/* initialize buf_info */
	for (i = 0; i < rq->rx_ring[0].size; i++) {

		/* 1st buf for a pkt is skbuff */
		if (i % adapter->rx_buf_per_pkt == 0) {
			rq->buf_info[0][i].buf_type = VMXNET3_RX_BUF_SKB;
			rq->buf_info[0][i].len = adapter->skb_buf_size;
		} else { /* subsequent bufs for a pkt are frags */
			rq->buf_info[0][i].buf_type = VMXNET3_RX_BUF_PAGE;
			rq->buf_info[0][i].len = PAGE_SIZE;
		}
	}
	for (i = 0; i < rq->rx_ring[1].size; i++) {
		rq->buf_info[1][i].buf_type = VMXNET3_RX_BUF_PAGE;
		rq->buf_info[1][i].len = PAGE_SIZE;
	}

	/* reset internal state and allocate buffers for both rings */
	for (i = 0; i < 2; i++) {
		rq->rx_ring[i].next2fill = rq->rx_ring[i].next2comp = 0;
		rq->uncommitted[i] = 0;

		memset(rq->rx_ring[i].base, 0, rq->rx_ring[i].size *
		       sizeof(struct Vmxnet3_RxDesc));
		rq->rx_ring[i].gen = VMXNET3_INIT_GEN;
	}
	if (vmxnet3_rq_alloc_rx_buf(rq, 0, rq->rx_ring[0].size - 1,
				    adapter) == 0) {
		/* at least has 1 rx buffer for the 1st ring */
		return -ENOMEM;
	}
	vmxnet3_rq_alloc_rx_buf(rq, 1, rq->rx_ring[1].size - 1, adapter);

	/* reset the comp ring */
	rq->comp_ring.next2proc = 0;
	memset(rq->comp_ring.base, 0, rq->comp_ring.size *
	       sizeof(struct Vmxnet3_RxCompDesc));
	rq->comp_ring.gen = VMXNET3_INIT_GEN;

	/* reset rxctx */
	rq->rx_ctx.skb = NULL;

	/* stats are not reset */
	return 0;
}
static int
vmxnet3_rq_create(struct vmxnet3_rx_queue *rq, struct vmxnet3_adapter *adapter)
{
	int i;
	size_t sz;
	struct vmxnet3_rx_buf_info *bi;

	for (i = 0; i < 2; i++) {

		sz = rq->rx_ring[i].size * sizeof(struct Vmxnet3_RxDesc);
		rq->rx_ring[i].base = pci_alloc_consistent(adapter->pdev, sz,
							&rq->rx_ring[i].basePA);
		if (!rq->rx_ring[i].base) {
			printk(KERN_ERR "%s: failed to allocate rx ring %d\n",
			       adapter->netdev->name, i);
			goto err;
		}
	}

	sz = rq->comp_ring.size * sizeof(struct Vmxnet3_RxCompDesc);
	rq->comp_ring.base = pci_alloc_consistent(adapter->pdev, sz,
						  &rq->comp_ring.basePA);
	if (!rq->comp_ring.base) {
		printk(KERN_ERR "%s: failed to allocate rx comp ring\n",
		       adapter->netdev->name);
		goto err;
	}

	sz = sizeof(struct vmxnet3_rx_buf_info) * (rq->rx_ring[0].size +
						   rq->rx_ring[1].size);
	bi = kmalloc(sz, GFP_KERNEL);
	if (!bi) {
		printk(KERN_ERR "%s: failed to allocate rx bufinfo\n",
		       adapter->netdev->name);
		goto err;
	}
	memset(bi, 0, sz);

	rq->buf_info[0] = bi;
	rq->buf_info[1] = bi + rq->rx_ring[0].size;

	return 0;

err:
	vmxnet3_rq_destroy(rq, adapter);
	return -ENOMEM;
}
static int
vmxnet3_do_poll(struct vmxnet3_adapter *adapter, int budget)
{
	if (unlikely(adapter->shared->ecr))
		vmxnet3_process_events(adapter);

	vmxnet3_tq_tx_complete(&adapter->tx_queue, adapter);
	return vmxnet3_rq_rx_complete(&adapter->rx_queue, adapter, budget);
}


static int
vmxnet3_poll(struct napi_struct *napi, int budget)
{
	struct vmxnet3_adapter *adapter = container_of(napi,
					  struct vmxnet3_adapter, napi);
	int rxd_done;

	rxd_done = vmxnet3_do_poll(adapter, budget);

	if (rxd_done < budget) {
		napi_complete(napi);
		vmxnet3_enable_intr(adapter, 0);
	}
	return rxd_done;
}
/* Interrupt handler for vmxnet3 */
static irqreturn_t
vmxnet3_intr(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct vmxnet3_adapter *adapter = netdev_priv(dev);

	if (unlikely(adapter->intr.type == VMXNET3_IT_INTX)) {
		u32 icr = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_ICR);
		if (unlikely(icr == 0))
			/* not ours */
			return IRQ_NONE;
	}

	/* disable intr if needed */
	if (adapter->intr.mask_mode == VMXNET3_IMM_ACTIVE)
		vmxnet3_disable_intr(adapter, 0);

	napi_schedule(&adapter->napi);

	return IRQ_HANDLED;
}
#ifdef CONFIG_NET_POLL_CONTROLLER


/* netpoll callback. */
static void
vmxnet3_netpoll(struct net_device *netdev)
{
	struct vmxnet3_adapter *adapter = netdev_priv(netdev);
	int irq;

	if (adapter->intr.type == VMXNET3_IT_MSIX)
		irq = adapter->intr.msix_entries[0].vector;
	else
		irq = adapter->pdev->irq;

	disable_irq(irq);
	vmxnet3_intr(irq, netdev);
	enable_irq(irq);
}
#endif
static int
vmxnet3_request_irqs(struct vmxnet3_adapter *adapter)
{
	int err;

	if (adapter->intr.type == VMXNET3_IT_MSIX) {
		/* we only use 1 MSI-X vector */
		err = request_irq(adapter->intr.msix_entries[0].vector,
				  vmxnet3_intr, 0, adapter->netdev->name,
				  adapter->netdev);
	} else if (adapter->intr.type == VMXNET3_IT_MSI) {
		err = request_irq(adapter->pdev->irq, vmxnet3_intr, 0,
				  adapter->netdev->name, adapter->netdev);
	} else {
		err = request_irq(adapter->pdev->irq, vmxnet3_intr,
				  IRQF_SHARED, adapter->netdev->name,
				  adapter->netdev);
	}

	if (err) {
		printk(KERN_ERR "Failed to request irq %s (intr type:%d), error"
		       ":%d\n", adapter->netdev->name, adapter->intr.type, err);
	} else {
		int i;

		/* init our intr settings */
		for (i = 0; i < adapter->intr.num_intrs; i++)
			adapter->intr.mod_levels[i] = UPT1_IML_ADAPTIVE;

		/* next setup intr index for all intr sources */
		adapter->tx_queue.comp_ring.intr_idx = 0;
		adapter->rx_queue.comp_ring.intr_idx = 0;
		adapter->intr.event_intr_idx = 0;

		printk(KERN_INFO "%s: intr type %u, mode %u, %u vectors "
		       "allocated\n", adapter->netdev->name, adapter->intr.type,
		       adapter->intr.mask_mode, adapter->intr.num_intrs);
	}

	return err;
}
static void
vmxnet3_free_irqs(struct vmxnet3_adapter *adapter)
{
	BUG_ON(adapter->intr.type == VMXNET3_IT_AUTO ||
	       adapter->intr.num_intrs <= 0);

	switch (adapter->intr.type) {
	case VMXNET3_IT_MSIX:
	{
		int i;

		for (i = 0; i < adapter->intr.num_intrs; i++)
			free_irq(adapter->intr.msix_entries[i].vector,
				 adapter->netdev);
		break;
	}
	case VMXNET3_IT_MSI:
		free_irq(adapter->pdev->irq, adapter->netdev);
		break;
	case VMXNET3_IT_INTX:
		free_irq(adapter->pdev->irq, adapter->netdev);
		break;
	default:
		BUG_ON(true);
	}
}
static void
vmxnet3_vlan_rx_register(struct net_device *netdev, struct vlan_group *grp)
{
	struct vmxnet3_adapter *adapter = netdev_priv(netdev);
	struct Vmxnet3_DriverShared *shared = adapter->shared;
	u32 *vfTable = adapter->shared->devRead.rxFilterConf.vfTable;

	if (grp) {
		/* add vlan rx stripping. */
		if (adapter->netdev->features & NETIF_F_HW_VLAN_RX) {
			int i;
			struct Vmxnet3_DSDevRead *devRead = &shared->devRead;
			adapter->vlan_grp = grp;

			/* update FEATURES to device */
			devRead->misc.uptFeatures |= UPT1_F_RXVLAN;
			VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
					       VMXNET3_CMD_UPDATE_FEATURE);
			/*
			 * Clear entire vfTable; then enable untagged pkts.
			 * Note: setting one entry in vfTable to non-zero turns
			 * on VLAN rx filtering.
			 */
			for (i = 0; i < VMXNET3_VFT_SIZE; i++)
				vfTable[i] = 0;

			VMXNET3_SET_VFTABLE_ENTRY(vfTable, 0);
			VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
					       VMXNET3_CMD_UPDATE_VLAN_FILTERS);
		} else {
			printk(KERN_ERR "%s: vlan_rx_register when device has "
			       "no NETIF_F_HW_VLAN_RX\n", netdev->name);
		}
	} else {
		/* remove vlan rx stripping. */
		struct Vmxnet3_DSDevRead *devRead = &shared->devRead;
		adapter->vlan_grp = NULL;

		if (devRead->misc.uptFeatures & UPT1_F_RXVLAN) {
			int i;

			for (i = 0; i < VMXNET3_VFT_SIZE; i++) {
				/* clear entire vfTable; this also disables
				 * VLAN rx filtering
				 */
				vfTable[i] = 0;
			}
			VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
					       VMXNET3_CMD_UPDATE_VLAN_FILTERS);

			/* update FEATURES to device */
			devRead->misc.uptFeatures &= ~UPT1_F_RXVLAN;
			VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
					       VMXNET3_CMD_UPDATE_FEATURE);
		}
	}
}
static void
vmxnet3_restore_vlan(struct vmxnet3_adapter *adapter)
{
	if (adapter->vlan_grp) {
		u16 vid;
		u32 *vfTable = adapter->shared->devRead.rxFilterConf.vfTable;
		bool activeVlan = false;

		for (vid = 0; vid < VLAN_GROUP_ARRAY_LEN; vid++) {
			if (vlan_group_get_device(adapter->vlan_grp, vid)) {
				VMXNET3_SET_VFTABLE_ENTRY(vfTable, vid);
				activeVlan = true;
			}
		}
		if (activeVlan) {
			/* continue to allow untagged pkts */
			VMXNET3_SET_VFTABLE_ENTRY(vfTable, 0);
		}
	}
}
static void
vmxnet3_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
{
	struct vmxnet3_adapter *adapter = netdev_priv(netdev);
	u32 *vfTable = adapter->shared->devRead.rxFilterConf.vfTable;

	VMXNET3_SET_VFTABLE_ENTRY(vfTable, vid);
	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
			       VMXNET3_CMD_UPDATE_VLAN_FILTERS);
}


static void
vmxnet3_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
{
	struct vmxnet3_adapter *adapter = netdev_priv(netdev);
	u32 *vfTable = adapter->shared->devRead.rxFilterConf.vfTable;

	VMXNET3_CLEAR_VFTABLE_ENTRY(vfTable, vid);
	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
			       VMXNET3_CMD_UPDATE_VLAN_FILTERS);
}
static u8 *
vmxnet3_copy_mc(struct net_device *netdev)
{
	u8 *buf = NULL;
	u32 sz = netdev->mc_count * ETH_ALEN;

	/* struct Vmxnet3_RxFilterConf.mfTableLen is u16. */
	if (sz <= 0xffff) {
		/* We may be called with BH disabled */
		buf = kmalloc(sz, GFP_ATOMIC);
		if (buf) {
			int i;
			struct dev_mc_list *mc = netdev->mc_list;

			for (i = 0; i < netdev->mc_count; i++) {
				BUG_ON(!mc);
				memcpy(buf + i * ETH_ALEN, mc->dmi_addr,
				       ETH_ALEN);
				mc = mc->next;
			}
		}
	}
	return buf;
}
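/*
 * The u16 mfTableLen caps the table at 0xffff bytes, i.e. at most
 * 0xffff / ETH_ALEN = 10922 multicast addresses; with more than that,
 * vmxnet3_copy_mc() returns NULL and vmxnet3_set_mc() below falls back
 * to ALL_MULTI.
 */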
static void
vmxnet3_set_mc(struct net_device *netdev)
{
	struct vmxnet3_adapter *adapter = netdev_priv(netdev);
	struct Vmxnet3_RxFilterConf *rxConf =
					&adapter->shared->devRead.rxFilterConf;
	u8 *new_table = NULL;
	u32 new_mode = VMXNET3_RXM_UCAST;

	if (netdev->flags & IFF_PROMISC)
		new_mode |= VMXNET3_RXM_PROMISC;

	if (netdev->flags & IFF_BROADCAST)
		new_mode |= VMXNET3_RXM_BCAST;

	if (netdev->flags & IFF_ALLMULTI)
		new_mode |= VMXNET3_RXM_ALL_MULTI;
	else
		if (netdev->mc_count > 0) {
			new_table = vmxnet3_copy_mc(netdev);
			if (new_table) {
				new_mode |= VMXNET3_RXM_MCAST;
				rxConf->mfTableLen = netdev->mc_count *
						     ETH_ALEN;
				rxConf->mfTablePA = virt_to_phys(new_table);
			} else {
				printk(KERN_INFO "%s: failed to copy mcast list"
				       ", setting ALL_MULTI\n", netdev->name);
				new_mode |= VMXNET3_RXM_ALL_MULTI;
			}
		}


	if (!(new_mode & VMXNET3_RXM_MCAST)) {
		rxConf->mfTableLen = 0;
		rxConf->mfTablePA = 0;
	}

	if (new_mode != rxConf->rxMode) {
		rxConf->rxMode = new_mode;
		VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
				       VMXNET3_CMD_UPDATE_RX_MODE);
	}

	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
			       VMXNET3_CMD_UPDATE_MAC_FILTERS);

	kfree(new_table);
}
/*
 *   Set up driver_shared based on settings in adapter.
 */

static void
vmxnet3_setup_driver_shared(struct vmxnet3_adapter *adapter)
{
	struct Vmxnet3_DriverShared *shared = adapter->shared;
	struct Vmxnet3_DSDevRead *devRead = &shared->devRead;
	struct Vmxnet3_TxQueueConf *tqc;
	struct Vmxnet3_RxQueueConf *rqc;
	int i;

	memset(shared, 0, sizeof(*shared));

	/* driver settings */
	shared->magic = VMXNET3_REV1_MAGIC;
	devRead->misc.driverInfo.version = VMXNET3_DRIVER_VERSION_NUM;
	devRead->misc.driverInfo.gos.gosBits = (sizeof(void *) == 4 ?
				VMXNET3_GOS_BITS_32 : VMXNET3_GOS_BITS_64);
	devRead->misc.driverInfo.gos.gosType = VMXNET3_GOS_TYPE_LINUX;
	devRead->misc.driverInfo.vmxnet3RevSpt = 1;
	devRead->misc.driverInfo.uptVerSpt = 1;

	devRead->misc.ddPA = virt_to_phys(adapter);
	devRead->misc.ddLen = sizeof(struct vmxnet3_adapter);

	/* set up feature flags */
	if (adapter->rxcsum)
		devRead->misc.uptFeatures |= UPT1_F_RXCSUM;

	if (adapter->lro) {
		devRead->misc.uptFeatures |= UPT1_F_LRO;
		devRead->misc.maxNumRxSG = 1 + MAX_SKB_FRAGS;
	}
	if ((adapter->netdev->features & NETIF_F_HW_VLAN_RX)
			&& adapter->vlan_grp) {
		devRead->misc.uptFeatures |= UPT1_F_RXVLAN;
	}

	devRead->misc.mtu = adapter->netdev->mtu;
	devRead->misc.queueDescPA = adapter->queue_desc_pa;
	devRead->misc.queueDescLen = sizeof(struct Vmxnet3_TxQueueDesc) +
				     sizeof(struct Vmxnet3_RxQueueDesc);

	/* tx queue settings */
	BUG_ON(adapter->tx_queue.tx_ring.base == NULL);

	devRead->misc.numTxQueues = 1;
	tqc = &adapter->tqd_start->conf;
	tqc->txRingBasePA = adapter->tx_queue.tx_ring.basePA;
	tqc->dataRingBasePA = adapter->tx_queue.data_ring.basePA;
	tqc->compRingBasePA = adapter->tx_queue.comp_ring.basePA;
	tqc->ddPA = virt_to_phys(adapter->tx_queue.buf_info);
	tqc->txRingSize = adapter->tx_queue.tx_ring.size;
	tqc->dataRingSize = adapter->tx_queue.data_ring.size;
	tqc->compRingSize = adapter->tx_queue.comp_ring.size;
	tqc->ddLen = sizeof(struct vmxnet3_tx_buf_info) *
		     tqc->txRingSize;
	tqc->intrIdx = adapter->tx_queue.comp_ring.intr_idx;

	/* rx queue settings */
	devRead->misc.numRxQueues = 1;
	rqc = &adapter->rqd_start->conf;
	rqc->rxRingBasePA[0] = adapter->rx_queue.rx_ring[0].basePA;
	rqc->rxRingBasePA[1] = adapter->rx_queue.rx_ring[1].basePA;
	rqc->compRingBasePA = adapter->rx_queue.comp_ring.basePA;
	rqc->ddPA = virt_to_phys(adapter->rx_queue.buf_info);
	rqc->rxRingSize[0] = adapter->rx_queue.rx_ring[0].size;
	rqc->rxRingSize[1] = adapter->rx_queue.rx_ring[1].size;
	rqc->compRingSize = adapter->rx_queue.comp_ring.size;
	rqc->ddLen = sizeof(struct vmxnet3_rx_buf_info) *
		     (rqc->rxRingSize[0] + rqc->rxRingSize[1]);
	rqc->intrIdx = adapter->rx_queue.comp_ring.intr_idx;

	/* intr settings */
	devRead->intrConf.autoMask = adapter->intr.mask_mode ==
				     VMXNET3_IMM_AUTO;
	devRead->intrConf.numIntrs = adapter->intr.num_intrs;
	for (i = 0; i < adapter->intr.num_intrs; i++)
		devRead->intrConf.modLevels[i] = adapter->intr.mod_levels[i];

	devRead->intrConf.eventIntrIdx = adapter->intr.event_intr_idx;

	/* rx filter settings */
	devRead->rxFilterConf.rxMode = 0;
	vmxnet3_restore_vlan(adapter);
	/* the rest are already zeroed */
}
int
vmxnet3_activate_dev(struct vmxnet3_adapter *adapter)
{
	int err;
	u32 ret;

	dprintk(KERN_ERR "%s: skb_buf_size %d, rx_buf_per_pkt %d, ring sizes"
		" %u %u %u\n", adapter->netdev->name, adapter->skb_buf_size,
		adapter->rx_buf_per_pkt, adapter->tx_queue.tx_ring.size,
		adapter->rx_queue.rx_ring[0].size,
		adapter->rx_queue.rx_ring[1].size);

	vmxnet3_tq_init(&adapter->tx_queue, adapter);
	err = vmxnet3_rq_init(&adapter->rx_queue, adapter);
	if (err) {
		printk(KERN_ERR "Failed to init rx queue for %s: error %d\n",
		       adapter->netdev->name, err);
		goto rq_err;
	}

	err = vmxnet3_request_irqs(adapter);
	if (err) {
		printk(KERN_ERR "Failed to setup irq for %s: error %d\n",
		       adapter->netdev->name, err);
		goto irq_err;
	}

	vmxnet3_setup_driver_shared(adapter);

	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_DSAL,
			       VMXNET3_GET_ADDR_LO(adapter->shared_pa));
	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_DSAH,
			       VMXNET3_GET_ADDR_HI(adapter->shared_pa));

	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
			       VMXNET3_CMD_ACTIVATE_DEV);
	ret = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD);

	if (ret != 0) {
		printk(KERN_ERR "Failed to activate dev %s: error %u\n",
		       adapter->netdev->name, ret);
		err = -EINVAL;
		goto activate_err;
	}
	VMXNET3_WRITE_BAR0_REG(adapter, VMXNET3_REG_RXPROD,
			       adapter->rx_queue.rx_ring[0].next2fill);
	VMXNET3_WRITE_BAR0_REG(adapter, VMXNET3_REG_RXPROD2,
			       adapter->rx_queue.rx_ring[1].next2fill);

	/* Apply the rx filter settings last. */
	vmxnet3_set_mc(adapter->netdev);

	/*
	 * Check link state when first activating device. It will start the
	 * tx queue if the link is up.
	 */
	vmxnet3_check_link(adapter);

	napi_enable(&adapter->napi);
	vmxnet3_enable_all_intrs(adapter);
	clear_bit(VMXNET3_STATE_BIT_QUIESCED, &adapter->state);
	return 0;

activate_err:
	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_DSAL, 0);
	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_DSAH, 0);
	vmxnet3_free_irqs(adapter);
irq_err:
rq_err:
	/* free up buffers we allocated */
	vmxnet3_rq_cleanup(&adapter->rx_queue, adapter);
	return err;
}
void
vmxnet3_reset_dev(struct vmxnet3_adapter *adapter)
{
	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, VMXNET3_CMD_RESET_DEV);
}


int
vmxnet3_quiesce_dev(struct vmxnet3_adapter *adapter)
{
	if (test_and_set_bit(VMXNET3_STATE_BIT_QUIESCED, &adapter->state))
		return 0;

	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
			       VMXNET3_CMD_QUIESCE_DEV);
	vmxnet3_disable_all_intrs(adapter);

	napi_disable(&adapter->napi);
	netif_tx_disable(adapter->netdev);
	adapter->link_speed = 0;
	netif_carrier_off(adapter->netdev);

	vmxnet3_tq_cleanup(&adapter->tx_queue, adapter);
	vmxnet3_rq_cleanup(&adapter->rx_queue, adapter);
	vmxnet3_free_irqs(adapter);
	return 0;
}
static void
vmxnet3_write_mac_addr(struct vmxnet3_adapter *adapter, u8 *mac)
{
	u32 tmp;

	tmp = *(u32 *)mac;
	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_MACL, tmp);

	tmp = (mac[5] << 8) | mac[4];
	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_MACH, tmp);
}


static int
vmxnet3_set_mac_addr(struct net_device *netdev, void *p)
{
	struct sockaddr *addr = p;
	struct vmxnet3_adapter *adapter = netdev_priv(netdev);

	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
	vmxnet3_write_mac_addr(adapter, addr->sa_data);

	return 0;
}
/* ==================== initialization and cleanup routines ============ */

static int
vmxnet3_alloc_pci_resources(struct vmxnet3_adapter *adapter, bool *dma64)
{
	int err;
	unsigned long mmio_start, mmio_len;
	struct pci_dev *pdev = adapter->pdev;

	err = pci_enable_device(pdev);
	if (err) {
		printk(KERN_ERR "Failed to enable adapter %s: error %d\n",
		       pci_name(pdev), err);
		return err;
	}

	if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) == 0) {
		if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)) != 0) {
			printk(KERN_ERR "pci_set_consistent_dma_mask failed "
			       "for adapter %s\n", pci_name(pdev));
			err = -EIO;
			goto err_set_mask;
		}
		*dma64 = true;
	} else {
		if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) {
			printk(KERN_ERR "pci_set_dma_mask failed for adapter "
			       "%s\n", pci_name(pdev));
			err = -EIO;
			goto err_set_mask;
		}
		*dma64 = false;
	}

	err = pci_request_selected_regions(pdev, (1 << 2) - 1,
					   vmxnet3_driver_name);
	if (err) {
		printk(KERN_ERR "Failed to request region for adapter %s: "
		       "error %d\n", pci_name(pdev), err);
		goto err_set_mask;
	}

	pci_set_master(pdev);

	mmio_start = pci_resource_start(pdev, 0);
	mmio_len = pci_resource_len(pdev, 0);
	adapter->hw_addr0 = ioremap(mmio_start, mmio_len);
	if (!adapter->hw_addr0) {
		printk(KERN_ERR "Failed to map bar0 for adapter %s\n",
		       pci_name(pdev));
		err = -EIO;
		goto err_ioremap;
	}

	mmio_start = pci_resource_start(pdev, 1);
	mmio_len = pci_resource_len(pdev, 1);
	adapter->hw_addr1 = ioremap(mmio_start, mmio_len);
	if (!adapter->hw_addr1) {
		printk(KERN_ERR "Failed to map bar1 for adapter %s\n",
		       pci_name(pdev));
		err = -EIO;
		goto err_bar1;
	}
	return 0;

err_bar1:
	iounmap(adapter->hw_addr0);
err_ioremap:
	pci_release_selected_regions(pdev, (1 << 2) - 1);
err_set_mask:
	pci_disable_device(pdev);
	return err;
}
static void
vmxnet3_free_pci_resources(struct vmxnet3_adapter *adapter)
{
	BUG_ON(!adapter->pdev);

	iounmap(adapter->hw_addr0);
	iounmap(adapter->hw_addr1);
	pci_release_selected_regions(adapter->pdev, (1 << 2) - 1);
	pci_disable_device(adapter->pdev);
}
static void
vmxnet3_adjust_rx_ring_size(struct vmxnet3_adapter *adapter)
{
	size_t sz;

	if (adapter->netdev->mtu <= VMXNET3_MAX_SKB_BUF_SIZE -
				    VMXNET3_MAX_ETH_HDR_SIZE) {
		adapter->skb_buf_size = adapter->netdev->mtu +
					VMXNET3_MAX_ETH_HDR_SIZE;
		if (adapter->skb_buf_size < VMXNET3_MIN_T0_BUF_SIZE)
			adapter->skb_buf_size = VMXNET3_MIN_T0_BUF_SIZE;

		adapter->rx_buf_per_pkt = 1;
	} else {
		adapter->skb_buf_size = VMXNET3_MAX_SKB_BUF_SIZE;
		sz = adapter->netdev->mtu - VMXNET3_MAX_SKB_BUF_SIZE +
					    VMXNET3_MAX_ETH_HDR_SIZE;
		adapter->rx_buf_per_pkt = 1 + (sz + PAGE_SIZE - 1) / PAGE_SIZE;
	}

	/*
	 * for simplicity, force the ring0 size to be a multiple of
	 * rx_buf_per_pkt * VMXNET3_RING_SIZE_ALIGN
	 */
	sz = adapter->rx_buf_per_pkt * VMXNET3_RING_SIZE_ALIGN;
	adapter->rx_queue.rx_ring[0].size = (adapter->rx_queue.rx_ring[0].size +
					     sz - 1) / sz * sz;
	adapter->rx_queue.rx_ring[0].size = min_t(u32,
					    adapter->rx_queue.rx_ring[0].size,
					    VMXNET3_RX_RING_MAX_SIZE / sz * sz);
}
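/*
 * Worked example (assuming 4 KB pages and a 9000-byte jumbo MTU):
 * skb_buf_size caps at VMXNET3_MAX_SKB_BUF_SIZE and the remainder spills
 * into page-sized body buffers, so rx_buf_per_pkt = 1 + ceil(spill / 4096).
 * Ring0's size is then rounded to a multiple of
 * rx_buf_per_pkt * VMXNET3_RING_SIZE_ALIGN so whole buffer groups fit.
 */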
static int
vmxnet3_create_queues(struct vmxnet3_adapter *adapter, u32 tx_ring_size,
		      u32 rx_ring_size, u32 rx_ring2_size)
{
	int err;

	adapter->tx_queue.tx_ring.size = tx_ring_size;
	adapter->tx_queue.data_ring.size = tx_ring_size;
	adapter->tx_queue.comp_ring.size = tx_ring_size;
	adapter->tx_queue.shared = &adapter->tqd_start->ctrl;
	adapter->tx_queue.stopped = true;
	err = vmxnet3_tq_create(&adapter->tx_queue, adapter);
	if (err)
		return err;

	adapter->rx_queue.rx_ring[0].size = rx_ring_size;
	adapter->rx_queue.rx_ring[1].size = rx_ring2_size;
	vmxnet3_adjust_rx_ring_size(adapter);
	adapter->rx_queue.comp_ring.size = adapter->rx_queue.rx_ring[0].size +
					   adapter->rx_queue.rx_ring[1].size;
	adapter->rx_queue.qid = 0;
	adapter->rx_queue.qid2 = 1;
	adapter->rx_queue.shared = &adapter->rqd_start->ctrl;
	err = vmxnet3_rq_create(&adapter->rx_queue, adapter);
	if (err)
		vmxnet3_tq_destroy(&adapter->tx_queue, adapter);

	return err;
}
static int
vmxnet3_open(struct net_device *netdev)
{
	struct vmxnet3_adapter *adapter;
	int err;

	adapter = netdev_priv(netdev);

	spin_lock_init(&adapter->tx_queue.tx_lock);

	err = vmxnet3_create_queues(adapter, VMXNET3_DEF_TX_RING_SIZE,
				    VMXNET3_DEF_RX_RING_SIZE,
				    VMXNET3_DEF_RX_RING_SIZE);
	if (err)
		goto queue_err;

	err = vmxnet3_activate_dev(adapter);
	if (err)
		goto activate_err;

	return 0;

activate_err:
	vmxnet3_rq_destroy(&adapter->rx_queue, adapter);
	vmxnet3_tq_destroy(&adapter->tx_queue, adapter);
queue_err:
	return err;
}
static int
vmxnet3_close(struct net_device *netdev)
{
	struct vmxnet3_adapter *adapter = netdev_priv(netdev);

	/*
	 * Reset_work may be in the middle of resetting the device, wait for its
	 * completion.
	 */
	while (test_and_set_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state))
		msleep(1);

	vmxnet3_quiesce_dev(adapter);

	vmxnet3_rq_destroy(&adapter->rx_queue, adapter);
	vmxnet3_tq_destroy(&adapter->tx_queue, adapter);

	clear_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state);

	return 0;
}
void
vmxnet3_force_close(struct vmxnet3_adapter *adapter)
{
	/*
	 * we must clear VMXNET3_STATE_BIT_RESETTING, otherwise
	 * vmxnet3_close() will deadlock.
	 */
	BUG_ON(test_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state));

	/* we need to enable NAPI, otherwise dev_close will deadlock */
	napi_enable(&adapter->napi);
	dev_close(adapter->netdev);
}
static int
vmxnet3_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct vmxnet3_adapter *adapter = netdev_priv(netdev);
	int err = 0;

	if (new_mtu < VMXNET3_MIN_MTU || new_mtu > VMXNET3_MAX_MTU)
		return -EINVAL;

	if (new_mtu > 1500 && !adapter->jumbo_frame)
		return -EINVAL;

	netdev->mtu = new_mtu;

	/*
	 * Reset_work may be in the middle of resetting the device, wait for its
	 * completion.
	 */
	while (test_and_set_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state))
		msleep(1);

	if (netif_running(netdev)) {
		vmxnet3_quiesce_dev(adapter);
		vmxnet3_reset_dev(adapter);

		/* we need to re-create the rx queue based on the new mtu */
		vmxnet3_rq_destroy(&adapter->rx_queue, adapter);
		vmxnet3_adjust_rx_ring_size(adapter);
		adapter->rx_queue.comp_ring.size =
					adapter->rx_queue.rx_ring[0].size +
					adapter->rx_queue.rx_ring[1].size;
		err = vmxnet3_rq_create(&adapter->rx_queue, adapter);
		if (err) {
			printk(KERN_ERR "%s: failed to re-create rx queue,"
			       " error %d. Closing it.\n", netdev->name, err);
			goto out;
		}

		err = vmxnet3_activate_dev(adapter);
		if (err) {
			printk(KERN_ERR "%s: failed to re-activate, error %d. "
			       "Closing it\n", netdev->name, err);
			goto out;
		}
	}

out:
	clear_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state);
	if (err)
		vmxnet3_force_close(adapter);

	return err;
}
static void
vmxnet3_declare_features(struct vmxnet3_adapter *adapter, bool dma64)
{
	struct net_device *netdev = adapter->netdev;

	netdev->features = NETIF_F_SG |
		NETIF_F_HW_CSUM |
		NETIF_F_HW_VLAN_TX |
		NETIF_F_HW_VLAN_RX |
		NETIF_F_HW_VLAN_FILTER |
		NETIF_F_TSO |
		NETIF_F_TSO6 |
		NETIF_F_LRO;

	printk(KERN_INFO "features: sg csum vlan jf tso tsoIPv6 lro");

	adapter->rxcsum = true;
	adapter->jumbo_frame = true;
	adapter->lro = true;

	if (dma64) {
		netdev->features |= NETIF_F_HIGHDMA;
		printk(" highDMA");
	}

	netdev->vlan_features = netdev->features;
	printk("\n");
}
static void
vmxnet3_read_mac_addr(struct vmxnet3_adapter *adapter, u8 *mac)
{
	u32 tmp;

	tmp = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_MACL);
	*(u32 *)mac = tmp;

	tmp = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_MACH);
	mac[4] = tmp & 0xff;
	mac[5] = (tmp >> 8) & 0xff;
}
static void
vmxnet3_alloc_intr_resources(struct vmxnet3_adapter *adapter)
{
	u32 cfg;

	/* intr settings */
	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
			       VMXNET3_CMD_GET_CONF_INTR);
	cfg = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD);
	adapter->intr.type = cfg & 0x3;
	adapter->intr.mask_mode = (cfg >> 2) & 0x3;

	if (adapter->intr.type == VMXNET3_IT_AUTO) {
		int err;

		adapter->intr.msix_entries[0].entry = 0;
		err = pci_enable_msix(adapter->pdev, adapter->intr.msix_entries,
				      VMXNET3_LINUX_MAX_MSIX_VECT);
		if (!err) {
			adapter->intr.num_intrs = 1;
			adapter->intr.type = VMXNET3_IT_MSIX;
			return;
		}

		err = pci_enable_msi(adapter->pdev);
		if (!err) {
			adapter->intr.num_intrs = 1;
			adapter->intr.type = VMXNET3_IT_MSI;
			return;
		}
	}

	adapter->intr.type = VMXNET3_IT_INTX;

	/* INT-X related setting */
	adapter->intr.num_intrs = 1;
}
static void
vmxnet3_free_intr_resources(struct vmxnet3_adapter *adapter)
{
	if (adapter->intr.type == VMXNET3_IT_MSIX)
		pci_disable_msix(adapter->pdev);
	else if (adapter->intr.type == VMXNET3_IT_MSI)
		pci_disable_msi(adapter->pdev);
	else
		BUG_ON(adapter->intr.type != VMXNET3_IT_INTX);
}
static void
vmxnet3_tx_timeout(struct net_device *netdev)
{
	struct vmxnet3_adapter *adapter = netdev_priv(netdev);
	adapter->tx_timeout_count++;

	printk(KERN_ERR "%s: tx hang\n", adapter->netdev->name);
	schedule_work(&adapter->work);
}
static void
vmxnet3_reset_work(struct work_struct *data)
{
	struct vmxnet3_adapter *adapter;

	adapter = container_of(data, struct vmxnet3_adapter, work);

	/* if another thread is resetting the device, no need to proceed */
	if (test_and_set_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state))
		return;

	/* if the device is closed, we must leave it alone */
	if (netif_running(adapter->netdev)) {
		printk(KERN_INFO "%s: resetting\n", adapter->netdev->name);
		vmxnet3_quiesce_dev(adapter);
		vmxnet3_reset_dev(adapter);
		vmxnet3_activate_dev(adapter);
	} else {
		printk(KERN_INFO "%s: already closed\n", adapter->netdev->name);
	}

	clear_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state);
}
static int __devinit
vmxnet3_probe_device(struct pci_dev *pdev,
		     const struct pci_device_id *id)
{
	static const struct net_device_ops vmxnet3_netdev_ops = {
		.ndo_open = vmxnet3_open,
		.ndo_stop = vmxnet3_close,
		.ndo_start_xmit = vmxnet3_xmit_frame,
		.ndo_set_mac_address = vmxnet3_set_mac_addr,
		.ndo_change_mtu = vmxnet3_change_mtu,
		.ndo_get_stats = vmxnet3_get_stats,
		.ndo_tx_timeout = vmxnet3_tx_timeout,
		.ndo_set_multicast_list = vmxnet3_set_mc,
		.ndo_vlan_rx_register = vmxnet3_vlan_rx_register,
		.ndo_vlan_rx_add_vid = vmxnet3_vlan_rx_add_vid,
		.ndo_vlan_rx_kill_vid = vmxnet3_vlan_rx_kill_vid,
#ifdef CONFIG_NET_POLL_CONTROLLER
		.ndo_poll_controller = vmxnet3_netpoll,
#endif
	};
	int err;
	bool dma64 = false; /* stupid gcc */
	u32 ver;
	struct net_device *netdev;
	struct vmxnet3_adapter *adapter;
	u8 mac[ETH_ALEN];

	netdev = alloc_etherdev(sizeof(struct vmxnet3_adapter));
	if (!netdev) {
		printk(KERN_ERR "Failed to alloc ethernet device for adapter "
		       "%s\n", pci_name(pdev));
		return -ENOMEM;
	}

	pci_set_drvdata(pdev, netdev);
	adapter = netdev_priv(netdev);
	adapter->netdev = netdev;
	adapter->pdev = pdev;

	adapter->shared = pci_alloc_consistent(adapter->pdev,
			  sizeof(struct Vmxnet3_DriverShared),
			  &adapter->shared_pa);
	if (!adapter->shared) {
		printk(KERN_ERR "Failed to allocate memory for %s\n",
		       pci_name(pdev));
		err = -ENOMEM;
		goto err_alloc_shared;
	}

	adapter->tqd_start = pci_alloc_consistent(adapter->pdev,
			     sizeof(struct Vmxnet3_TxQueueDesc) +
			     sizeof(struct Vmxnet3_RxQueueDesc),
			     &adapter->queue_desc_pa);

	if (!adapter->tqd_start) {
		printk(KERN_ERR "Failed to allocate memory for %s\n",
		       pci_name(pdev));
		err = -ENOMEM;
		goto err_alloc_queue_desc;
	}
	adapter->rqd_start = (struct Vmxnet3_RxQueueDesc *)(adapter->tqd_start
							    + 1);

	adapter->pm_conf = kmalloc(sizeof(struct Vmxnet3_PMConf), GFP_KERNEL);
	if (adapter->pm_conf == NULL) {
		printk(KERN_ERR "Failed to allocate memory for %s\n",
		       pci_name(pdev));
		err = -ENOMEM;
		goto err_alloc_pm;
	}

	err = vmxnet3_alloc_pci_resources(adapter, &dma64);
	if (err < 0)
		goto err_alloc_pci;

	ver = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_VRRS);
	if (ver & 1) {
		VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_VRRS, 1);
	} else {
		printk(KERN_ERR "Incompatible h/w version (0x%x) for adapter"
		       " %s\n", ver, pci_name(pdev));
		err = -EBUSY;
		goto err_ver;
	}

	ver = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_UVRS);
	if (ver & 1) {
		VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_UVRS, 1);
	} else {
		printk(KERN_ERR "Incompatible upt version (0x%x) for "
		       "adapter %s\n", ver, pci_name(pdev));
		err = -EBUSY;
		goto err_ver;
	}

	vmxnet3_declare_features(adapter, dma64);

	adapter->dev_number = atomic_read(&devices_found);
	vmxnet3_alloc_intr_resources(adapter);

	vmxnet3_read_mac_addr(adapter, mac);
	memcpy(netdev->dev_addr, mac, netdev->addr_len);

	netdev->netdev_ops = &vmxnet3_netdev_ops;
	netdev->watchdog_timeo = 5 * HZ;
	vmxnet3_set_ethtool_ops(netdev);

	INIT_WORK(&adapter->work, vmxnet3_reset_work);

	netif_napi_add(netdev, &adapter->napi, vmxnet3_poll, 64);
	SET_NETDEV_DEV(netdev, &pdev->dev);
	err = register_netdev(netdev);

	if (err) {
		printk(KERN_ERR "Failed to register adapter %s\n",
		       pci_name(pdev));
		goto err_register;
	}

	set_bit(VMXNET3_STATE_BIT_QUIESCED, &adapter->state);
	atomic_inc(&devices_found);
	return 0;

err_register:
	vmxnet3_free_intr_resources(adapter);
err_ver:
	vmxnet3_free_pci_resources(adapter);
err_alloc_pci:
	kfree(adapter->pm_conf);
err_alloc_pm:
	pci_free_consistent(adapter->pdev, sizeof(struct Vmxnet3_TxQueueDesc) +
			    sizeof(struct Vmxnet3_RxQueueDesc),
			    adapter->tqd_start, adapter->queue_desc_pa);
err_alloc_queue_desc:
	pci_free_consistent(adapter->pdev, sizeof(struct Vmxnet3_DriverShared),
			    adapter->shared, adapter->shared_pa);
err_alloc_shared:
	pci_set_drvdata(pdev, NULL);
	free_netdev(netdev);
	return err;
}
static void __devexit
vmxnet3_remove_device(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct vmxnet3_adapter *adapter = netdev_priv(netdev);

	flush_scheduled_work();

	unregister_netdev(netdev);

	vmxnet3_free_intr_resources(adapter);
	vmxnet3_free_pci_resources(adapter);
	kfree(adapter->pm_conf);
	pci_free_consistent(adapter->pdev, sizeof(struct Vmxnet3_TxQueueDesc) +
			    sizeof(struct Vmxnet3_RxQueueDesc),
			    adapter->tqd_start, adapter->queue_desc_pa);
	pci_free_consistent(adapter->pdev, sizeof(struct Vmxnet3_DriverShared),
			    adapter->shared, adapter->shared_pa);
	free_netdev(netdev);
}
#ifdef CONFIG_PM

static int
vmxnet3_suspend(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct vmxnet3_adapter *adapter = netdev_priv(netdev);
	struct Vmxnet3_PMConf *pmConf;
	struct ethhdr *ehdr;
	struct arphdr *ahdr;
	u8 *arpreq;
	struct in_device *in_dev;
	struct in_ifaddr *ifa;
	int i = 0;

	if (!netif_running(netdev))
		return 0;

	vmxnet3_disable_all_intrs(adapter);
	vmxnet3_free_irqs(adapter);
	vmxnet3_free_intr_resources(adapter);

	netif_device_detach(netdev);
	netif_stop_queue(netdev);

	/* Create wake-up filters. */
	pmConf = adapter->pm_conf;
	memset(pmConf, 0, sizeof(*pmConf));

	if (adapter->wol & WAKE_UCAST) {
		pmConf->filters[i].patternSize = ETH_ALEN;
		pmConf->filters[i].maskSize = 1;
		memcpy(pmConf->filters[i].pattern, netdev->dev_addr, ETH_ALEN);
		pmConf->filters[i].mask[0] = 0x3F; /* LSB ETH_ALEN bits */

		pmConf->wakeUpEvents |= VMXNET3_PM_WAKEUP_FILTER;
		i++;
	}

	if (adapter->wol & WAKE_ARP) {
		in_dev = in_dev_get(netdev);
		if (!in_dev)
			goto skip_arp;

		ifa = (struct in_ifaddr *)in_dev->ifa_list;
		if (!ifa)
			goto skip_arp;

		pmConf->filters[i].patternSize = ETH_HLEN + /* Ethernet header*/
			sizeof(struct arphdr) +		/* ARP header */
			2 * ETH_ALEN +		/* 2 Ethernet addresses*/
			2 * sizeof(u32);	/*2 IPv4 addresses */
		pmConf->filters[i].maskSize =
			(pmConf->filters[i].patternSize - 1) / 8 + 1;

		/* ETH_P_ARP in Ethernet header. */
		ehdr = (struct ethhdr *)pmConf->filters[i].pattern;
		ehdr->h_proto = htons(ETH_P_ARP);

		/* ARPOP_REQUEST in ARP header. */
		ahdr = (struct arphdr *)&pmConf->filters[i].pattern[ETH_HLEN];
		ahdr->ar_op = htons(ARPOP_REQUEST);
		arpreq = (u8 *)(ahdr + 1);

		/* The Unicast IPv4 address in 'tip' field. */
		arpreq += 2 * ETH_ALEN + sizeof(u32);
		*(u32 *)arpreq = ifa->ifa_address;

		/* The mask for the relevant bits. */
		pmConf->filters[i].mask[0] = 0x00;
		pmConf->filters[i].mask[1] = 0x30; /* ETH_P_ARP */
		pmConf->filters[i].mask[2] = 0x30; /* ARPOP_REQUEST */
		pmConf->filters[i].mask[3] = 0x00;
		pmConf->filters[i].mask[4] = 0xC0; /* IPv4 TIP */
		pmConf->filters[i].mask[5] = 0x03; /* IPv4 TIP */
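		/*
		 * Decoding the mask (each mask bit appears to cover one
		 * pattern byte): mask[1] = 0x30 selects pattern bytes 12-13,
		 * the EtherType; mask[2] = 0x30 selects bytes 20-21, the ARP
		 * opcode; mask[4]/mask[5] = 0xC0/0x03 select bytes 38-41,
		 * the ARP target IP.
		 */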
		in_dev_put(in_dev);

		pmConf->wakeUpEvents |= VMXNET3_PM_WAKEUP_FILTER;
		i++;
	}

skip_arp:
	if (adapter->wol & WAKE_MAGIC)
		pmConf->wakeUpEvents |= VMXNET3_PM_WAKEUP_MAGIC;

	pmConf->numFilters = i;

	adapter->shared->devRead.pmConfDesc.confVer = 1;
	adapter->shared->devRead.pmConfDesc.confLen = sizeof(*pmConf);
	adapter->shared->devRead.pmConfDesc.confPA = virt_to_phys(pmConf);

	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
			       VMXNET3_CMD_UPDATE_PMCFG);

	pci_save_state(pdev);
	pci_enable_wake(pdev, pci_choose_state(pdev, PMSG_SUSPEND),
			adapter->wol);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, pci_choose_state(pdev, PMSG_SUSPEND));

	return 0;
}
static int
vmxnet3_resume(struct device *device)
{
	int err;
	struct pci_dev *pdev = to_pci_dev(device);
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct vmxnet3_adapter *adapter = netdev_priv(netdev);
	struct Vmxnet3_PMConf *pmConf;

	if (!netif_running(netdev))
		return 0;

	/* Destroy wake-up filters. */
	pmConf = adapter->pm_conf;
	memset(pmConf, 0, sizeof(*pmConf));

	adapter->shared->devRead.pmConfDesc.confVer = 1;
	adapter->shared->devRead.pmConfDesc.confLen = sizeof(*pmConf);
	adapter->shared->devRead.pmConfDesc.confPA = virt_to_phys(pmConf);

	netif_device_attach(netdev);
	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);
	err = pci_enable_device_mem(pdev);
	if (err != 0)
		return err;

	pci_enable_wake(pdev, PCI_D0, 0);

	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
			       VMXNET3_CMD_UPDATE_PMCFG);
	vmxnet3_alloc_intr_resources(adapter);
	vmxnet3_request_irqs(adapter);
	vmxnet3_enable_all_intrs(adapter);

	return 0;
}
static struct dev_pm_ops vmxnet3_pm_ops = {
	.suspend = vmxnet3_suspend,
	.resume = vmxnet3_resume,
};
#endif

static struct pci_driver vmxnet3_driver = {
	.name = vmxnet3_driver_name,
	.id_table = vmxnet3_pciid_table,
	.probe = vmxnet3_probe_device,
	.remove = __devexit_p(vmxnet3_remove_device),
#ifdef CONFIG_PM
	.driver.pm = &vmxnet3_pm_ops,
#endif
};
static int __init
vmxnet3_init_module(void)
{
	printk(KERN_INFO "%s - version %s\n", VMXNET3_DRIVER_DESC,
	       VMXNET3_DRIVER_VERSION_REPORT);
	return pci_register_driver(&vmxnet3_driver);
}

module_init(vmxnet3_init_module);


static void
vmxnet3_exit_module(void)
{
	pci_unregister_driver(&vmxnet3_driver);
}

module_exit(vmxnet3_exit_module);

MODULE_AUTHOR("VMware, Inc.");
MODULE_DESCRIPTION(VMXNET3_DRIVER_DESC);
MODULE_LICENSE("GPL v2");
MODULE_VERSION(VMXNET3_DRIVER_VERSION_STRING);